Home | History | Annotate | Line # | Download | only in pci
if_iwm.c revision 1.23
      1 /*	$NetBSD: if_iwm.c,v 1.23 2015/03/03 09:45:58 nonaka Exp $	*/
      2 /*	OpenBSD: if_iwm.c,v 1.18 2015/02/11 01:12:42 brad Exp	*/
      3 
      4 /*
      5  * Copyright (c) 2014 genua mbh <info (at) genua.de>
      6  * Copyright (c) 2014 Fixup Software Ltd.
      7  *
      8  * Permission to use, copy, modify, and distribute this software for any
      9  * purpose with or without fee is hereby granted, provided that the above
     10  * copyright notice and this permission notice appear in all copies.
     11  *
     12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
     13  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
     14  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
     15  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
     16  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
     17  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
     18  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
     19  */
     20 
     21 /*-
     22  * Based on BSD-licensed source modules in the Linux iwlwifi driver,
     23  * which were used as the reference documentation for this implementation.
     24  *
     25  * Driver version we are currently based off of is
     26  * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
     27  *
     28  ***********************************************************************
     29  *
     30  * This file is provided under a dual BSD/GPLv2 license.  When using or
     31  * redistributing this file, you may do so under either license.
     32  *
     33  * GPL LICENSE SUMMARY
     34  *
     35  * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
     36  *
     37  * This program is free software; you can redistribute it and/or modify
     38  * it under the terms of version 2 of the GNU General Public License as
     39  * published by the Free Software Foundation.
     40  *
     41  * This program is distributed in the hope that it will be useful, but
     42  * WITHOUT ANY WARRANTY; without even the implied warranty of
     43  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
     44  * General Public License for more details.
     45  *
     46  * You should have received a copy of the GNU General Public License
     47  * along with this program; if not, write to the Free Software
     48  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
     49  * USA
     50  *
     51  * The full GNU General Public License is included in this distribution
     52  * in the file called COPYING.
     53  *
     54  * Contact Information:
     55  *  Intel Linux Wireless <ilw (at) linux.intel.com>
     56  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
     57  *
     58  *
     59  * BSD LICENSE
     60  *
     61  * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
     62  * All rights reserved.
     63  *
     64  * Redistribution and use in source and binary forms, with or without
     65  * modification, are permitted provided that the following conditions
     66  * are met:
     67  *
     68  *  * Redistributions of source code must retain the above copyright
     69  *    notice, this list of conditions and the following disclaimer.
     70  *  * Redistributions in binary form must reproduce the above copyright
     71  *    notice, this list of conditions and the following disclaimer in
     72  *    the documentation and/or other materials provided with the
     73  *    distribution.
     74  *  * Neither the name Intel Corporation nor the names of its
     75  *    contributors may be used to endorse or promote products derived
     76  *    from this software without specific prior written permission.
     77  *
     78  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
     79  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
     80  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
     81  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
     82  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
     83  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
     84  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     85  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     86  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     87  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
     88  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     89  */
     90 
     91 /*-
     92  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini (at) free.fr>
     93  *
     94  * Permission to use, copy, modify, and distribute this software for any
     95  * purpose with or without fee is hereby granted, provided that the above
     96  * copyright notice and this permission notice appear in all copies.
     97  *
     98  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
     99  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    100  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    101  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    102  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    103  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    104  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
    105  */
    106 
    107 #include <sys/cdefs.h>
    108 __KERNEL_RCSID(0, "$NetBSD: if_iwm.c,v 1.23 2015/03/03 09:45:58 nonaka Exp $");
    109 
    110 #include <sys/param.h>
    111 #include <sys/conf.h>
    112 #include <sys/kernel.h>
    113 #include <sys/kmem.h>
    114 #include <sys/mbuf.h>
    115 #include <sys/mutex.h>
    116 #include <sys/proc.h>
    117 #include <sys/socket.h>
    118 #include <sys/sockio.h>
    119 #include <sys/systm.h>
    120 
    121 #include <sys/cpu.h>
    122 #include <sys/bus.h>
    123 #include <sys/workqueue.h>
    124 #include <machine/endian.h>
    125 #include <machine/intr.h>
    126 
    127 #include <dev/pci/pcireg.h>
    128 #include <dev/pci/pcivar.h>
    129 #include <dev/pci/pcidevs.h>
    130 #include <dev/firmload.h>
    131 
    132 #include <net/bpf.h>
    133 #include <net/if.h>
    134 #include <net/if_arp.h>
    135 #include <net/if_dl.h>
    136 #include <net/if_media.h>
    137 #include <net/if_types.h>
    138 #include <net/if_ether.h>
    139 
    140 #include <netinet/in.h>
    141 #include <netinet/in_systm.h>
    142 #include <netinet/ip.h>
    143 
    144 #include <net80211/ieee80211_var.h>
    145 #include <net80211/ieee80211_amrr.h>
    146 #include <net80211/ieee80211_radiotap.h>
    147 
    148 #define DEVNAME(_s)	device_xname((_s)->sc_dev)
    149 #define IC2IFP(_ic_)	((_ic_)->ic_ifp)
    150 
    151 #define le16_to_cpup(_a_) (le16toh(*(const uint16_t *)(_a_)))
    152 #define le32_to_cpup(_a_) (le32toh(*(const uint32_t *)(_a_)))
    153 
    154 #ifdef IWM_DEBUG
    155 #define DPRINTF(x)	do { if (iwm_debug > 0) printf x; } while (0)
    156 #define DPRINTFN(n, x)	do { if (iwm_debug >= (n)) printf x; } while (0)
    157 int iwm_debug = 1;
    158 #else
    159 #define DPRINTF(x)	do { ; } while (0)
    160 #define DPRINTFN(n, x)	do { ; } while (0)
    161 #endif
    162 
    163 #include <dev/pci/if_iwmreg.h>
    164 #include <dev/pci/if_iwmvar.h>
    165 
    166 static const uint8_t iwm_nvm_channels[] = {
    167 	/* 2.4 GHz */
    168 	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
    169 	/* 5 GHz */
    170 	36, 40, 44 , 48, 52, 56, 60, 64,
    171 	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
    172 	149, 153, 157, 161, 165
    173 };
    174 #define IWM_NUM_2GHZ_CHANNELS	14
    175 
    176 /* It looks like 11a TX is broken, unfortunately. */
    177 #define IWM_NO_5GHZ		1
    178 
    179 static const struct iwm_rate {
    180 	uint8_t rate;
    181 	uint8_t plcp;
    182 } iwm_rates[] = {
    183 	{   2,	IWM_RATE_1M_PLCP  },
    184 	{   4,	IWM_RATE_2M_PLCP  },
    185 	{  11,	IWM_RATE_5M_PLCP  },
    186 	{  22,	IWM_RATE_11M_PLCP },
    187 	{  12,	IWM_RATE_6M_PLCP  },
    188 	{  18,	IWM_RATE_9M_PLCP  },
    189 	{  24,	IWM_RATE_12M_PLCP },
    190 	{  36,	IWM_RATE_18M_PLCP },
    191 	{  48,	IWM_RATE_24M_PLCP },
    192 	{  72,	IWM_RATE_36M_PLCP },
    193 	{  96,	IWM_RATE_48M_PLCP },
    194 	{ 108,	IWM_RATE_54M_PLCP },
    195 };
    196 #define IWM_RIDX_CCK	0
    197 #define IWM_RIDX_OFDM	4
    198 #define IWM_RIDX_MAX	(__arraycount(iwm_rates)-1)
    199 #define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
    200 #define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
    201 
    202 struct iwm_newstate_state {
    203 	struct work ns_wk;
    204 	enum ieee80211_state ns_nstate;
    205 	int ns_arg;
    206 	int ns_generation;
    207 };
    208 
    209 static int	iwm_store_cscheme(struct iwm_softc *, uint8_t *, size_t);
    210 static int	iwm_firmware_store_section(struct iwm_softc *,
    211 		    enum iwm_ucode_type, uint8_t *, size_t);
    212 static int	iwm_set_default_calib(struct iwm_softc *, const void *);
    213 static int	iwm_read_firmware(struct iwm_softc *);
    214 static uint32_t iwm_read_prph(struct iwm_softc *, uint32_t);
    215 static void	iwm_write_prph(struct iwm_softc *, uint32_t, uint32_t);
    216 #ifdef IWM_DEBUG
    217 static int	iwm_read_mem(struct iwm_softc *, uint32_t, void *, int);
    218 #endif
    219 static int	iwm_write_mem(struct iwm_softc *, uint32_t, const void *, int);
    220 static int	iwm_write_mem32(struct iwm_softc *, uint32_t, uint32_t);
    221 static int	iwm_poll_bit(struct iwm_softc *, int, uint32_t, uint32_t, int);
    222 static int	iwm_nic_lock(struct iwm_softc *);
    223 static void	iwm_nic_unlock(struct iwm_softc *);
    224 static void	iwm_set_bits_mask_prph(struct iwm_softc *, uint32_t, uint32_t,
    225 		    uint32_t);
    226 static void	iwm_set_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
    227 static void	iwm_clear_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
    228 static int	iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *,
    229 		    bus_size_t, bus_size_t);
    230 static void	iwm_dma_contig_free(struct iwm_dma_info *);
    231 static int	iwm_alloc_fwmem(struct iwm_softc *);
    232 static void	iwm_free_fwmem(struct iwm_softc *);
    233 static int	iwm_alloc_sched(struct iwm_softc *);
    234 static void	iwm_free_sched(struct iwm_softc *);
    235 static int	iwm_alloc_kw(struct iwm_softc *);
    236 static void	iwm_free_kw(struct iwm_softc *);
    237 static int	iwm_alloc_ict(struct iwm_softc *);
    238 static void	iwm_free_ict(struct iwm_softc *);
    239 static int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
    240 static void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
    241 static void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
    242 static int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
    243 		    int);
    244 static void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
    245 static void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
    246 static void	iwm_enable_rfkill_int(struct iwm_softc *);
    247 static int	iwm_check_rfkill(struct iwm_softc *);
    248 static void	iwm_enable_interrupts(struct iwm_softc *);
    249 static void	iwm_restore_interrupts(struct iwm_softc *);
    250 static void	iwm_disable_interrupts(struct iwm_softc *);
    251 static void	iwm_ict_reset(struct iwm_softc *);
    252 static int	iwm_set_hw_ready(struct iwm_softc *);
    253 static int	iwm_prepare_card_hw(struct iwm_softc *);
    254 static void	iwm_apm_config(struct iwm_softc *);
    255 static int	iwm_apm_init(struct iwm_softc *);
    256 static void	iwm_apm_stop(struct iwm_softc *);
    257 static int	iwm_allow_mcast(struct iwm_softc *);
    258 static int	iwm_start_hw(struct iwm_softc *);
    259 static void	iwm_stop_device(struct iwm_softc *);
    260 static void	iwm_set_pwr(struct iwm_softc *);
    261 static void	iwm_mvm_nic_config(struct iwm_softc *);
    262 static int	iwm_nic_rx_init(struct iwm_softc *);
    263 static int	iwm_nic_tx_init(struct iwm_softc *);
    264 static int	iwm_nic_init(struct iwm_softc *);
    265 static void	iwm_enable_txq(struct iwm_softc *, int, int);
    266 static int	iwm_post_alive(struct iwm_softc *);
    267 static int	iwm_is_valid_channel(uint16_t);
    268 static uint8_t	iwm_ch_id_to_ch_index(uint16_t);
    269 static uint16_t iwm_channel_id_to_papd(uint16_t);
    270 static uint16_t iwm_channel_id_to_txp(struct iwm_softc *, uint16_t);
    271 static int	iwm_phy_db_get_section_data(struct iwm_softc *, uint32_t,
    272 		    uint8_t **, uint16_t *, uint16_t);
    273 static int	iwm_send_phy_db_cmd(struct iwm_softc *, uint16_t, uint16_t,
    274 		    void *);
static int	iwm_send_phy_db_data(struct iwm_softc *);
    277 static void	iwm_mvm_te_v2_to_v1(const struct iwm_time_event_cmd_v2 *,
    278 		    struct iwm_time_event_cmd_v1 *);
    279 static int	iwm_mvm_send_time_event_cmd(struct iwm_softc *,
    280 		    const struct iwm_time_event_cmd_v2 *);
    281 static int	iwm_mvm_time_event_send_add(struct iwm_softc *,
    282 		    struct iwm_node *, void *, struct iwm_time_event_cmd_v2 *);
    283 static void	iwm_mvm_protect_session(struct iwm_softc *, struct iwm_node *,
    284 		    uint32_t, uint32_t, uint32_t);
    285 static int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
    286 		    uint16_t, uint8_t *, uint16_t *);
    287 static int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
    288 		    uint16_t *);
    289 static void	iwm_init_channel_map(struct iwm_softc *,
    290 		    const uint16_t * const);
    291 static int	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
    292 		    const uint16_t *, const uint16_t *, uint8_t, uint8_t);
    293 static int	iwm_nvm_init(struct iwm_softc *);
    294 static int	iwm_firmware_load_chunk(struct iwm_softc *, uint32_t,
    295 		    const uint8_t *, uint32_t);
    296 static int	iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
    297 static int	iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
    298 static int	iwm_fw_alive(struct iwm_softc *, uint32_t);
    299 static int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
    300 static int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
    301 static int	iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
    302 		    enum iwm_ucode_type);
    303 static int	iwm_run_init_mvm_ucode(struct iwm_softc *, int);
    304 static int	iwm_rx_addbuf(struct iwm_softc *, int, int);
    305 static int	iwm_mvm_calc_rssi(struct iwm_softc *, struct iwm_rx_phy_info *);
    306 static int	iwm_mvm_get_signal_strength(struct iwm_softc *,
    307 		    struct iwm_rx_phy_info *);
    308 static void	iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
    309 		    struct iwm_rx_packet *, struct iwm_rx_data *);
    310 static int	iwm_get_noise(const struct iwm_mvm_statistics_rx_non_phy *);
    311 static void	iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct iwm_rx_packet *,
    312 		    struct iwm_rx_data *);
    313 static void	iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
    314 		    struct iwm_rx_packet *, struct iwm_node *);
    315 static void	iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
    316 		    struct iwm_rx_data *);
    317 static int	iwm_mvm_binding_cmd(struct iwm_softc *, struct iwm_node *,
    318 		    uint32_t);
    319 static int	iwm_mvm_binding_update(struct iwm_softc *, struct iwm_node *,
    320 		    int);
    321 static int	iwm_mvm_binding_add_vif(struct iwm_softc *, struct iwm_node *);
    322 static void	iwm_mvm_phy_ctxt_cmd_hdr(struct iwm_softc *,
    323 		    struct iwm_mvm_phy_ctxt *, struct iwm_phy_context_cmd *,
    324 		    uint32_t, uint32_t);
    325 static void	iwm_mvm_phy_ctxt_cmd_data(struct iwm_softc *,
    326 		    struct iwm_phy_context_cmd *, struct ieee80211_channel *,
    327 		    uint8_t, uint8_t);
    328 static int	iwm_mvm_phy_ctxt_apply(struct iwm_softc *,
    329 		    struct iwm_mvm_phy_ctxt *, uint8_t, uint8_t, uint32_t,
    330 		    uint32_t);
    331 static int	iwm_mvm_phy_ctxt_add(struct iwm_softc *,
    332 		    struct iwm_mvm_phy_ctxt *, struct ieee80211_channel *,
    333 		    uint8_t, uint8_t);
    334 static int	iwm_mvm_phy_ctxt_changed(struct iwm_softc *,
    335 		    struct iwm_mvm_phy_ctxt *, struct ieee80211_channel *,
    336 		    uint8_t, uint8_t);
    337 static int	iwm_send_cmd(struct iwm_softc *, struct iwm_host_cmd *);
    338 static int	iwm_mvm_send_cmd_pdu(struct iwm_softc *, uint8_t, uint32_t,
    339 		    uint16_t, const void *);
    340 static int	iwm_mvm_send_cmd_status(struct iwm_softc *,
    341 		    struct iwm_host_cmd *, uint32_t *);
    342 static int	iwm_mvm_send_cmd_pdu_status(struct iwm_softc *, uint8_t,
    343 		    uint16_t, const void *, uint32_t *);
    344 static void	iwm_free_resp(struct iwm_softc *, struct iwm_host_cmd *);
    345 static void	iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
    346 #if 0
    347 static void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
    348 		    uint16_t);
    349 #endif
    350 static const struct iwm_rate *iwm_tx_fill_cmd(struct iwm_softc *,
    351 		    struct iwm_node *, struct ieee80211_frame *,
    352 		    struct iwm_tx_cmd *);
    353 static int	iwm_tx(struct iwm_softc *, struct mbuf *,
    354 		    struct ieee80211_node *, int);
    355 static int	iwm_mvm_beacon_filter_send_cmd(struct iwm_softc *,
    356 		    struct iwm_beacon_filter_cmd *);
    357 static void	iwm_mvm_beacon_filter_set_cqm_params(struct iwm_softc *,
    358 		    struct iwm_node *, struct iwm_beacon_filter_cmd *);
    359 static int	iwm_mvm_update_beacon_abort(struct iwm_softc *,
    360 		    struct iwm_node *, int);
    361 static void	iwm_mvm_power_log(struct iwm_softc *,
    362 		    struct iwm_mac_power_cmd *);
    363 static void	iwm_mvm_power_build_cmd(struct iwm_softc *, struct iwm_node *,
    364 		    struct iwm_mac_power_cmd *);
    365 static int	iwm_mvm_power_mac_update_mode(struct iwm_softc *,
    366 		    struct iwm_node *);
    367 static int	iwm_mvm_power_update_device(struct iwm_softc *);
    368 static int	iwm_mvm_enable_beacon_filter(struct iwm_softc *,
    369 		    struct iwm_node *);
    370 static int	iwm_mvm_disable_beacon_filter(struct iwm_softc *,
    371 		    struct iwm_node *);
    372 static void	iwm_mvm_add_sta_cmd_v6_to_v5(struct iwm_mvm_add_sta_cmd_v6 *,
    373 		    struct iwm_mvm_add_sta_cmd_v5 *);
    374 static int	iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *,
    375 		    struct iwm_mvm_add_sta_cmd_v6 *, int *);
    376 static int	iwm_mvm_sta_send_to_fw(struct iwm_softc *, struct iwm_node *,
    377 		    int);
    378 static int	iwm_mvm_add_sta(struct iwm_softc *, struct iwm_node *);
    379 static int	iwm_mvm_update_sta(struct iwm_softc *, struct iwm_node *);
    380 static int	iwm_mvm_add_int_sta_common(struct iwm_softc *,
    381 		    struct iwm_int_sta *, const uint8_t *, uint16_t, uint16_t);
    382 static int	iwm_mvm_add_aux_sta(struct iwm_softc *);
    383 static uint16_t iwm_mvm_scan_rx_chain(struct iwm_softc *);
    384 static uint32_t iwm_mvm_scan_max_out_time(struct iwm_softc *, uint32_t, int);
    385 static uint32_t iwm_mvm_scan_suspend_time(struct iwm_softc *, int);
    386 static uint32_t iwm_mvm_scan_rxon_flags(struct iwm_softc *, int);
    387 static uint32_t iwm_mvm_scan_rate_n_flags(struct iwm_softc *, int, int);
    388 static uint16_t iwm_mvm_get_active_dwell(struct iwm_softc *, int, int);
    389 static uint16_t iwm_mvm_get_passive_dwell(struct iwm_softc *, int);
    390 static int	iwm_mvm_scan_fill_channels(struct iwm_softc *,
    391 		    struct iwm_scan_cmd *, int, int, int);
    392 static uint16_t iwm_mvm_fill_probe_req(struct iwm_softc *,
    393 		    struct ieee80211_frame *, const uint8_t *, int,
    394 		    const uint8_t *, int, const uint8_t *, int, int);
    395 static int	iwm_mvm_scan_request(struct iwm_softc *, int, int, uint8_t *,
    396 		    int);
    397 static void	iwm_mvm_ack_rates(struct iwm_softc *, struct iwm_node *, int *,
    398 		    int *);
    399 static void	iwm_mvm_mac_ctxt_cmd_common(struct iwm_softc *,
    400 		    struct iwm_node *, struct iwm_mac_ctx_cmd *, uint32_t);
    401 static int	iwm_mvm_mac_ctxt_send_cmd(struct iwm_softc *,
    402 		    struct iwm_mac_ctx_cmd *);
    403 static void	iwm_mvm_mac_ctxt_cmd_fill_sta(struct iwm_softc *,
    404 		    struct iwm_node *, struct iwm_mac_data_sta *, int);
    405 static int	iwm_mvm_mac_ctxt_cmd_station(struct iwm_softc *,
    406 		    struct iwm_node *, uint32_t);
    407 static int	iwm_mvm_mac_ctx_send(struct iwm_softc *, struct iwm_node *,
    408 		    uint32_t);
    409 static int	iwm_mvm_mac_ctxt_add(struct iwm_softc *, struct iwm_node *);
    410 static int	iwm_mvm_mac_ctxt_changed(struct iwm_softc *, struct iwm_node *);
    411 static int	iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_node *);
    412 static int	iwm_auth(struct iwm_softc *);
    413 static int	iwm_assoc(struct iwm_softc *);
    414 static int	iwm_release(struct iwm_softc *, struct iwm_node *);
    415 static void	iwm_calib_timeout(void *);
    416 static void	iwm_setrates(struct iwm_node *);
    417 static int	iwm_media_change(struct ifnet *);
    418 static void	iwm_newstate_cb(struct work *, void *);
    419 static int	iwm_newstate(struct ieee80211com *, enum ieee80211_state, int);
    420 static void	iwm_endscan_cb(struct work *, void *);
    421 static int	iwm_init_hw(struct iwm_softc *);
    422 static int	iwm_init(struct ifnet *);
    423 static void	iwm_start(struct ifnet *);
    424 static void	iwm_stop(struct ifnet *, int);
    425 static void	iwm_watchdog(struct ifnet *);
    426 static int	iwm_ioctl(struct ifnet *, u_long, void *);
    427 #ifdef IWM_DEBUG
    428 static const char *iwm_desc_lookup(uint32_t);
    429 static void	iwm_nic_error(struct iwm_softc *);
    430 #endif
    431 static void	iwm_notif_intr(struct iwm_softc *);
    432 static int	iwm_intr(void *);
    433 static int	iwm_preinit(struct iwm_softc *);
    434 static void	iwm_attach_hook(device_t);
    435 static void	iwm_attach(device_t, device_t, void *);
    436 #if 0
    437 static void	iwm_init_task(void *);
    438 static int	iwm_activate(device_t, enum devact);
    439 static void	iwm_wakeup(struct iwm_softc *);
    440 #endif
    441 static void	iwm_radiotap_attach(struct iwm_softc *);
    442 
    443 static int
    444 iwm_firmload(struct iwm_softc *sc)
    445 {
    446 	struct iwm_fw_info *fw = &sc->sc_fw;
    447 	firmware_handle_t fwh;
    448 	int error;
    449 
    450 	/* Open firmware image. */
    451 	if ((error = firmware_open("if_iwm", sc->sc_fwname, &fwh)) != 0) {
    452 		aprint_error_dev(sc->sc_dev,
    453 		    "could not get firmware handle %s\n", sc->sc_fwname);
    454 		return error;
    455 	}
    456 
    457 	fw->fw_rawsize = firmware_get_size(fwh);
    458 	/*
    459 	 * Well, this is how the Linux driver checks it ....
    460 	 */
    461 	if (fw->fw_rawsize < sizeof(uint32_t)) {
    462 		aprint_error_dev(sc->sc_dev,
    463 		    "firmware too short: %zd bytes\n", fw->fw_rawsize);
    464 		error = EINVAL;
    465 		goto out;
    466 	}
    467 
    468 	/* some sanity */
    469 	if (fw->fw_rawsize > IWM_FWMAXSIZE) {
    470 		aprint_error_dev(sc->sc_dev,
    471 		    "firmware size is ridiculous: %zd bytes\n",
    472 		fw->fw_rawsize);
    473 		error = EINVAL;
    474 		goto out;
    475 	}
    476 
    477 	/* Read the firmware. */
    478 	fw->fw_rawdata = kmem_alloc(fw->fw_rawsize, KM_SLEEP);
    479 	if (fw->fw_rawdata == NULL) {
    480 		aprint_error_dev(sc->sc_dev,
    481 		    "not enough memory to stock firmware %s\n", sc->sc_fwname);
    482 		error = ENOMEM;
    483 		goto out;
    484 	}
    485 	error = firmware_read(fwh, 0, fw->fw_rawdata, fw->fw_rawsize);
    486 	if (error) {
    487 		aprint_error_dev(sc->sc_dev,
    488 		    "could not read firmware %s\n", sc->sc_fwname);
    489 		goto out;
    490 	}
    491 
    492  out:
    493 	/* caller will release memory, if necessary */
    494 
    495 	firmware_close(fwh);
    496 	return error;
    497 }
    498 
    499 /*
    500  * just maintaining status quo.
    501  */
    502 static void
    503 iwm_fix_channel(struct ieee80211com *ic, struct mbuf *m)
    504 {
    505 	struct ieee80211_frame *wh;
    506 	uint8_t subtype;
    507 	uint8_t *frm, *efrm;
    508 
    509 	wh = mtod(m, struct ieee80211_frame *);
    510 
    511 	if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_MGT)
    512 		return;
    513 
    514 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
    515 
    516 	if (subtype != IEEE80211_FC0_SUBTYPE_BEACON &&
    517 	    subtype != IEEE80211_FC0_SUBTYPE_PROBE_RESP)
    518 		return;
    519 
    520 	frm = (uint8_t *)(wh + 1);
    521 	efrm = mtod(m, uint8_t *) + m->m_len;
    522 
    523 	frm += 12;      /* skip tstamp, bintval and capinfo fields */
    524 	while (frm < efrm) {
    525 		if (*frm == IEEE80211_ELEMID_DSPARMS) {
    526 #if IEEE80211_CHAN_MAX < 255
    527 			if (frm[2] <= IEEE80211_CHAN_MAX)
    528 #endif
    529 				ic->ic_curchan = &ic->ic_channels[frm[2]];
    530 		}
    531 		frm += frm[1] + 2;
    532 	}
    533 }
    534 
    535 /*
    536  * Firmware parser.
    537  */
    538 
    539 static int
    540 iwm_store_cscheme(struct iwm_softc *sc, uint8_t *data, size_t dlen)
    541 {
    542 	struct iwm_fw_cscheme_list *l = (void *)data;
    543 
    544 	if (dlen < sizeof(*l) ||
    545 	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
    546 		return EINVAL;
    547 
    548 	/* we don't actually store anything for now, always use s/w crypto */
    549 
    550 	return 0;
    551 }
    552 
    553 static int
    554 iwm_firmware_store_section(struct iwm_softc *sc,
    555 	enum iwm_ucode_type type, uint8_t *data, size_t dlen)
    556 {
    557 	struct iwm_fw_sects *fws;
    558 	struct iwm_fw_onesect *fwone;
    559 
    560 	if (type >= IWM_UCODE_TYPE_MAX)
    561 		return EINVAL;
    562 	if (dlen < sizeof(uint32_t))
    563 		return EINVAL;
    564 
    565 	fws = &sc->sc_fw.fw_sects[type];
    566 	if (fws->fw_count >= IWM_UCODE_SECT_MAX)
    567 		return EINVAL;
    568 
    569 	fwone = &fws->fw_sect[fws->fw_count];
    570 
    571 	/* first 32bit are device load offset */
    572 	memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));
    573 
    574 	/* rest is data */
    575 	fwone->fws_data = data + sizeof(uint32_t);
    576 	fwone->fws_len = dlen - sizeof(uint32_t);
    577 
    578 	/* for freeing the buffer during driver unload */
    579 	fwone->fws_alloc = data;
    580 	fwone->fws_allocsize = dlen;
    581 
    582 	fws->fw_count++;
    583 	fws->fw_totlen += fwone->fws_len;
    584 
    585 	return 0;
    586 }
    587 
/* iwlwifi: iwl-drv.c */
/*
 * Wire layout of the IWM_UCODE_TLV_DEF_CALIB firmware TLV: which ucode
 * image the default calibration applies to, followed by its calibration
 * trigger words.  Consumed by iwm_set_default_calib().
 */
struct iwm_tlv_calib_data {
	uint32_t ucode_type;	/* enum iwm_ucode_type, little-endian on the wire */
	struct iwm_tlv_calib_ctrl calib;	/* flow/event trigger words */
} __packed;
    593 
    594 static int
    595 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
    596 {
    597 	const struct iwm_tlv_calib_data *def_calib = data;
    598 	uint32_t ucode_type = le32toh(def_calib->ucode_type);
    599 
    600 	if (ucode_type >= IWM_UCODE_TYPE_MAX) {
    601 		DPRINTF(("%s: Wrong ucode_type %u for default "
    602 		    "calibration.\n", DEVNAME(sc), ucode_type));
    603 		return EINVAL;
    604 	}
    605 
    606 	sc->sc_default_calib[ucode_type].flow_trigger =
    607 	    def_calib->calib.flow_trigger;
    608 	sc->sc_default_calib[ucode_type].event_trigger =
    609 	    def_calib->calib.event_trigger;
    610 
    611 	return 0;
    612 }
    613 
    614 static int
    615 iwm_read_firmware(struct iwm_softc *sc)
    616 {
    617 	struct iwm_fw_info *fw = &sc->sc_fw;
    618 	struct iwm_tlv_ucode_header *uhdr;
    619 	struct iwm_ucode_tlv tlv;
    620 	enum iwm_ucode_tlv_type tlv_type;
    621 	uint8_t *data;
    622 	int error, status;
    623 	size_t len;
    624 
    625 	if (fw->fw_status == IWM_FW_STATUS_NONE) {
    626 		fw->fw_status = IWM_FW_STATUS_INPROGRESS;
    627 	} else {
    628 		while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
    629 			tsleep(&sc->sc_fw, 0, "iwmfwp", 0);
    630 	}
    631 	status = fw->fw_status;
    632 
    633 	if (status == IWM_FW_STATUS_DONE)
    634 		return 0;
    635 
    636 	/*
    637 	 * Load firmware into driver memory.
    638 	 * fw_rawdata and fw_rawsize will be set.
    639 	 */
    640 	error = iwm_firmload(sc);
    641 	if (error != 0) {
    642 		aprint_error_dev(sc->sc_dev,
    643 		    "could not read firmware %s (error %d)\n",
    644 		    sc->sc_fwname, error);
    645 		goto out;
    646 	}
    647 
    648 	/*
    649 	 * Parse firmware contents
    650 	 */
    651 
    652 	uhdr = (void *)fw->fw_rawdata;
    653 	if (*(uint32_t *)fw->fw_rawdata != 0
    654 	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
    655 		aprint_error_dev(sc->sc_dev, "invalid firmware %s\n",
    656 		    sc->sc_fwname);
    657 		error = EINVAL;
    658 		goto out;
    659 	}
    660 
    661 	sc->sc_fwver = le32toh(uhdr->ver);
    662 	data = uhdr->data;
    663 	len = fw->fw_rawsize - sizeof(*uhdr);
    664 
    665 	while (len >= sizeof(tlv)) {
    666 		size_t tlv_len;
    667 		void *tlv_data;
    668 
    669 		memcpy(&tlv, data, sizeof(tlv));
    670 		tlv_len = le32toh(tlv.length);
    671 		tlv_type = le32toh(tlv.type);
    672 
    673 		len -= sizeof(tlv);
    674 		data += sizeof(tlv);
    675 		tlv_data = data;
    676 
    677 		if (len < tlv_len) {
    678 			aprint_error_dev(sc->sc_dev,
    679 			    "firmware too short: %zu bytes\n", len);
    680 			error = EINVAL;
    681 			goto parse_out;
    682 		}
    683 
    684 		switch ((int)tlv_type) {
    685 		case IWM_UCODE_TLV_PROBE_MAX_LEN:
    686 			if (tlv_len < sizeof(uint32_t)) {
    687 				error = EINVAL;
    688 				goto parse_out;
    689 			}
    690 			sc->sc_capa_max_probe_len
    691 			    = le32toh(*(uint32_t *)tlv_data);
    692 			/* limit it to something sensible */
    693 			if (sc->sc_capa_max_probe_len > (1<<16)) {
    694 				DPRINTF(("%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
    695 				    "ridiculous\n", DEVNAME(sc)));
    696 				error = EINVAL;
    697 				goto parse_out;
    698 			}
    699 			break;
    700 		case IWM_UCODE_TLV_PAN:
    701 			if (tlv_len) {
    702 				error = EINVAL;
    703 				goto parse_out;
    704 			}
    705 			sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
    706 			break;
    707 		case IWM_UCODE_TLV_FLAGS:
    708 			if (tlv_len < sizeof(uint32_t)) {
    709 				error = EINVAL;
    710 				goto parse_out;
    711 			}
    712 			/*
    713 			 * Apparently there can be many flags, but Linux driver
    714 			 * parses only the first one, and so do we.
    715 			 *
    716 			 * XXX: why does this override IWM_UCODE_TLV_PAN?
    717 			 * Intentional or a bug?  Observations from
    718 			 * current firmware file:
    719 			 *  1) TLV_PAN is parsed first
    720 			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
    721 			 * ==> this resets TLV_PAN to itself... hnnnk
    722 			 */
    723 			sc->sc_capaflags = le32toh(*(uint32_t *)tlv_data);
    724 			break;
    725 		case IWM_UCODE_TLV_CSCHEME:
    726 			if ((error = iwm_store_cscheme(sc,
    727 			    tlv_data, tlv_len)) != 0)
    728 				goto parse_out;
    729 			break;
    730 		case IWM_UCODE_TLV_NUM_OF_CPU:
    731 			if (tlv_len != sizeof(uint32_t)) {
    732 				error = EINVAL;
    733 				goto parse_out;
    734 			}
    735 			if (le32toh(*(uint32_t*)tlv_data) != 1) {
    736 				DPRINTF(("%s: driver supports "
    737 				    "only TLV_NUM_OF_CPU == 1", DEVNAME(sc)));
    738 				error = EINVAL;
    739 				goto parse_out;
    740 			}
    741 			break;
    742 		case IWM_UCODE_TLV_SEC_RT:
    743 			if ((error = iwm_firmware_store_section(sc,
    744 			    IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len)) != 0)
    745 				goto parse_out;
    746 			break;
    747 		case IWM_UCODE_TLV_SEC_INIT:
    748 			if ((error = iwm_firmware_store_section(sc,
    749 			    IWM_UCODE_TYPE_INIT, tlv_data, tlv_len)) != 0)
    750 				goto parse_out;
    751 			break;
    752 		case IWM_UCODE_TLV_SEC_WOWLAN:
    753 			if ((error = iwm_firmware_store_section(sc,
    754 			    IWM_UCODE_TYPE_WOW, tlv_data, tlv_len)) != 0)
    755 				goto parse_out;
    756 			break;
    757 		case IWM_UCODE_TLV_DEF_CALIB:
    758 			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
    759 				error = EINVAL;
    760 				goto parse_out;
    761 			}
    762 			if ((error = iwm_set_default_calib(sc, tlv_data)) != 0)
    763 				goto parse_out;
    764 			break;
    765 		case IWM_UCODE_TLV_PHY_SKU:
    766 			if (tlv_len != sizeof(uint32_t)) {
    767 				error = EINVAL;
    768 				goto parse_out;
    769 			}
    770 			sc->sc_fw_phy_config = le32toh(*(uint32_t *)tlv_data);
    771 			break;
    772 
    773 		case IWM_UCODE_TLV_API_CHANGES_SET:
    774 		case IWM_UCODE_TLV_ENABLED_CAPABILITIES:
    775 			/* ignore, not used by current driver */
    776 			break;
    777 
    778 		default:
    779 			DPRINTF(("%s: unknown firmware section %d, abort\n",
    780 			    DEVNAME(sc), tlv_type));
    781 			error = EINVAL;
    782 			goto parse_out;
    783 		}
    784 
    785 		len -= roundup(tlv_len, 4);
    786 		data += roundup(tlv_len, 4);
    787 	}
    788 
    789 	KASSERT(error == 0);
    790 
    791  parse_out:
    792 	if (error) {
    793 		aprint_error_dev(sc->sc_dev,
    794 		    "firmware parse error, section type %d\n", tlv_type);
    795 	}
    796 
    797 	if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
    798 		aprint_error_dev(sc->sc_dev,
    799 		    "device uses unsupported power ops\n");
    800 		error = ENOTSUP;
    801 	}
    802 
    803  out:
    804 	if (error)
    805 		fw->fw_status = IWM_FW_STATUS_NONE;
    806 	else
    807 		fw->fw_status = IWM_FW_STATUS_DONE;
    808 	wakeup(&sc->sc_fw);
    809 
    810 	if (error) {
    811 		kmem_free(fw->fw_rawdata, fw->fw_rawsize);
    812 		fw->fw_rawdata = NULL;
    813 	}
    814 	return error;
    815 }
    816 
    817 /*
    818  * basic device access
    819  */
    820 
    821 static uint32_t
    822 iwm_read_prph(struct iwm_softc *sc, uint32_t addr)
    823 {
    824 	IWM_WRITE(sc,
    825 	    IWM_HBUS_TARG_PRPH_RADDR, ((addr & 0x000fffff) | (3 << 24)));
    826 	IWM_BARRIER_READ_WRITE(sc);
    827 	return IWM_READ(sc, IWM_HBUS_TARG_PRPH_RDAT);
    828 }
    829 
    830 static void
    831 iwm_write_prph(struct iwm_softc *sc, uint32_t addr, uint32_t val)
    832 {
    833 	IWM_WRITE(sc,
    834 	    IWM_HBUS_TARG_PRPH_WADDR, ((addr & 0x000fffff) | (3 << 24)));
    835 	IWM_BARRIER_WRITE(sc);
    836 	IWM_WRITE(sc, IWM_HBUS_TARG_PRPH_WDAT, val);
    837 }
    838 
#ifdef IWM_DEBUG
/* iwlwifi: pcie/trans.c */
/*
 * Read 'dwords' 32-bit words of device memory starting at 'addr' into
 * 'buf'.  Returns EBUSY if the NIC lock cannot be taken.
 */
static int
iwm_read_mem(struct iwm_softc *sc, uint32_t addr, void *buf, int dwords)
{
	uint32_t *words = buf;
	int i;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* RADDR auto-increments after each data read. */
	IWM_WRITE(sc, IWM_HBUS_TARG_MEM_RADDR, addr);
	for (i = 0; i < dwords; i++)
		words[i] = IWM_READ(sc, IWM_HBUS_TARG_MEM_RDAT);
	iwm_nic_unlock(sc);

	return 0;
}
#endif
    858 
    859 /* iwlwifi: pcie/trans.c */
    860 static int
    861 iwm_write_mem(struct iwm_softc *sc, uint32_t addr, const void *buf, int dwords)
    862 {
    863 	int offs;
    864 	const uint32_t *vals = buf;
    865 
    866 	if (iwm_nic_lock(sc)) {
    867 		IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WADDR, addr);
    868 		/* WADDR auto-increments */
    869 		for (offs = 0; offs < dwords; offs++) {
    870 			uint32_t val = vals ? vals[offs] : 0;
    871 			IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WDAT, val);
    872 		}
    873 		iwm_nic_unlock(sc);
    874 	} else {
    875 		DPRINTF(("%s: write_mem failed\n", DEVNAME(sc)));
    876 		return EBUSY;
    877 	}
    878 	return 0;
    879 }
    880 
/*
 * Write a single 32-bit word of device memory at 'addr'.
 */
static int
iwm_write_mem32(struct iwm_softc *sc, uint32_t addr, uint32_t val)
{
	return iwm_write_mem(sc, addr, &val, 1);
}
    886 
/*
 * Poll register 'reg' every 10us until the bits selected by 'mask'
 * equal 'bits' (also masked), or until 'timo' microseconds elapse.
 * Returns 1 on match, 0 on timeout.
 */
static int
iwm_poll_bit(struct iwm_softc *sc, int reg,
	uint32_t bits, uint32_t mask, int timo)
{
	for (;;) {
		uint32_t cur = IWM_READ(sc, reg) & mask;

		if (cur == (bits & mask))
			return 1;
		if (timo < 10)
			return 0;
		DELAY(10);
		timo -= 10;
	}
}
    902 
    903 static int
    904 iwm_nic_lock(struct iwm_softc *sc)
    905 {
    906 	int rv = 0;
    907 
    908 	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
    909 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
    910 
    911 	if (iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
    912 	    IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
    913 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
    914 	     | IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP, 15000)) {
    915 	    	rv = 1;
    916 	} else {
    917 		/* jolt */
    918 		IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_FORCE_NMI);
    919 	}
    920 
    921 	return rv;
    922 }
    923 
/*
 * Release the MAC access request taken by iwm_nic_lock().
 */
static void
iwm_nic_unlock(struct iwm_softc *sc)
{
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}
    930 
/*
 * Read-modify-write a periphery register: keep the bits selected by
 * 'mask', then OR in 'bits'.  Done under the NIC lock; if the lock
 * cannot be taken the update is silently dropped (XXX: no error path,
 * as in the original).
 */
static void
iwm_set_bits_mask_prph(struct iwm_softc *sc,
	uint32_t reg, uint32_t bits, uint32_t mask)
{
	uint32_t newval;

	if (!iwm_nic_lock(sc))
		return;
	newval = (iwm_read_prph(sc, reg) & mask) | bits;
	iwm_write_prph(sc, reg, newval);
	iwm_nic_unlock(sc);
}
    945 
/*
 * Set 'bits' in periphery register 'reg', preserving all other bits.
 */
static void
iwm_set_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
{
	iwm_set_bits_mask_prph(sc, reg, bits, ~0);
}
    951 
/*
 * Clear 'bits' in periphery register 'reg': mask them out, set nothing.
 */
static void
iwm_clear_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
{
	iwm_set_bits_mask_prph(sc, reg, 0, ~bits);
}
    957 
    958 /*
    959  * DMA resource routines
    960  */
    961 
/*
 * Allocate a physically contiguous DMA buffer of 'size' bytes aligned
 * to 'alignment', map it into kernel VA, and load it into a DMA map.
 * On success dma->vaddr and dma->paddr are valid and the buffer is
 * zeroed; on failure all partially acquired resources are released
 * through iwm_dma_contig_free().
 */
static int
iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
    bus_size_t size, bus_size_t alignment)
{
	int nsegs, error;
	void *va;

	dma->tag = tag;
	dma->size = size;

	/* One map with a single segment covering the whole buffer. */
	error = bus_dmamap_create(tag, size, 1, size, 0, BUS_DMA_NOWAIT,
	    &dma->map);
	if (error != 0)
		goto fail;

	error = bus_dmamem_alloc(tag, size, alignment, 0, &dma->seg, 1, &nsegs,
	    BUS_DMA_NOWAIT);
	if (error != 0)
		goto fail;

	error = bus_dmamem_map(tag, &dma->seg, 1, size, &va,
	    BUS_DMA_NOWAIT);
	if (error != 0)
		goto fail;
	dma->vaddr = va;

	error = bus_dmamap_load(tag, dma->map, dma->vaddr, size, NULL,
	    BUS_DMA_NOWAIT);
	if (error != 0)
		goto fail;

	/* Hand the device a zeroed buffer. */
	memset(dma->vaddr, 0, size);
	bus_dmamap_sync(tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE);
	dma->paddr = dma->map->dm_segs[0].ds_addr;

	return 0;

fail:	iwm_dma_contig_free(dma);
	return error;
}
   1002 
/*
 * Tear down a buffer set up by iwm_dma_contig_alloc().  Safe to call
 * on a partially initialized iwm_dma_info.
 *
 * NOTE(review): if bus_dmamem_alloc() succeeded but bus_dmamem_map()
 * failed in iwm_dma_contig_alloc(), dma->vaddr is NULL here and the
 * segment in dma->seg is never bus_dmamem_free()d -- possible leak on
 * that (rare) error path; confirm against bus_dma(9).
 */
static void
iwm_dma_contig_free(struct iwm_dma_info *dma)
{
	if (dma->map != NULL) {
		if (dma->vaddr != NULL) {
			bus_dmamap_sync(dma->tag, dma->map, 0, dma->size,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(dma->tag, dma->map);
			bus_dmamem_unmap(dma->tag, dma->vaddr, dma->size);
			bus_dmamem_free(dma->tag, &dma->seg, 1);
			dma->vaddr = NULL;
		}
		bus_dmamap_destroy(dma->tag, dma->map);
		dma->map = NULL;
	}
}
   1019 
/* fwmem is used to load firmware onto the card */
/*
 * Allocate the firmware staging buffer (sc_fwdmasegsz bytes).
 */
static int
iwm_alloc_fwmem(struct iwm_softc *sc)
{
	/* Must be aligned on a 16-byte boundary. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
	    sc->sc_fwdmasegsz, 16);
}
   1028 
/*
 * Release the firmware staging buffer.
 */
static void
iwm_free_fwmem(struct iwm_softc *sc)
{
	iwm_dma_contig_free(&sc->fw_dma);
}
   1034 
   1035 /* tx scheduler rings.  not used? */
   1036 static int
   1037 iwm_alloc_sched(struct iwm_softc *sc)
   1038 {
   1039 	int rv;
   1040 
   1041 	/* TX scheduler rings must be aligned on a 1KB boundary. */
   1042 	rv = iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
   1043 	    __arraycount(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
   1044 	return rv;
   1045 }
   1046 
/*
 * Release the TX scheduler byte-count tables.
 */
static void
iwm_free_sched(struct iwm_softc *sc)
{
	iwm_dma_contig_free(&sc->sched_dma);
}
   1052 
/* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
/*
 * Allocate the 4KB "keep warm" page (4KB aligned).
 */
static int
iwm_alloc_kw(struct iwm_softc *sc)
{
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
}
   1059 
/*
 * Release the "keep warm" page.
 */
static void
iwm_free_kw(struct iwm_softc *sc)
{
	iwm_dma_contig_free(&sc->kw_dma);
}
   1065 
/* interrupt cause table */
/*
 * Allocate the ICT (interrupt cause table), aligned so its physical
 * address can be shifted by IWM_ICT_PADDR_SHIFT when programmed.
 */
static int
iwm_alloc_ict(struct iwm_softc *sc)
{
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
	    IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
}
   1073 
/*
 * Release the interrupt cause table.
 */
static void
iwm_free_ict(struct iwm_softc *sc)
{
	iwm_dma_contig_free(&sc->ict_dma);
}
   1079 
/*
 * Allocate the RX ring: a 256-byte-aligned descriptor array (one
 * 32-bit word per slot), a 16-byte-aligned status area, and a DMA
 * map plus receive buffer for every slot.  Any failure unwinds
 * everything already allocated via iwm_free_rx_ring().
 */
static int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	bus_size_t size;
	int i, error;

	ring->cur = 0;

	/* Allocate RX descriptors (256-byte aligned). */
	size = IWM_RX_RING_COUNT * sizeof(uint32_t);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate RX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/* Allocate RX status area (16-byte aligned). */
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
	    sizeof(*ring->stat), 16);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate RX status DMA memory\n");
		goto fail;
	}
	ring->stat = ring->stat_dma.vaddr;

	/*
	 * Allocate and map RX buffers.
	 */
	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];

		memset(data, 0, sizeof(*data));
		error = bus_dmamap_create(sc->sc_dmat, IWM_RBUF_SIZE, 1,
		    IWM_RBUF_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &data->map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create RX buf DMA map\n");
			goto fail;
		}

		/* Attach an mbuf-backed receive buffer to this slot. */
		if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
			goto fail;
		}
	}
	return 0;

fail:	iwm_free_rx_ring(sc, ring);
	return error;
}
   1133 
/*
 * Stop the RX DMA channel, wait (up to ~10ms) for it to report idle,
 * and rewind the ring's software index.  Buffers stay allocated.
 */
static void
iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	int ntries;

	if (iwm_nic_lock(sc)) {
		IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
		for (ntries = 0; ntries < 1000; ntries++) {
			if (IWM_READ(sc, IWM_FH_MEM_RSSR_RX_STATUS_REG) &
			    IWM_FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE)
				break;
			DELAY(10);
		}
		iwm_nic_unlock(sc);
	}
	ring->cur = 0;
}
   1151 
/*
 * Release all RX ring resources: descriptor and status DMA buffers,
 * per-slot mbufs, and per-slot DMA maps.  Safe to call on a partially
 * initialized ring (the iwm_alloc_rx_ring() failure path does).
 */
static void
iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	int i;

	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->stat_dma);

	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
		}
		if (data->map != NULL)
			bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}
   1173 
/*
 * Allocate a TX ring: the 256-byte-aligned TFD descriptor array and,
 * for rings up to IWM_MVM_CMD_QUEUE only, the per-slot command buffers
 * and DMA maps.  Any failure unwinds through iwm_free_tx_ring().
 */
static int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	int i, error;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate TX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/*
	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
	 * to allocate commands space for other rings.
	 */
	if (qid > IWM_MVM_CMD_QUEUE)
		return 0;

	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate TX cmd DMA memory\n");
		goto fail;
	}
	ring->cmd = ring->cmd_dma.vaddr;

	/* Precompute each slot's command and scratch device addresses. */
	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		data->cmd_paddr = paddr;
		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
		    + offsetof(struct iwm_tx_cmd, scratch);
		paddr += sizeof(struct iwm_device_cmd);

		error = bus_dmamap_create(sc->sc_dmat, IWM_RBUF_SIZE,
		    IWM_NUM_OF_TBS, IWM_RBUF_SIZE, 0, BUS_DMA_NOWAIT,
		    &data->map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create TX buf DMA map\n");
			goto fail;
		}
	}
	KASSERT(paddr == ring->cmd_dma.paddr + size);
	return 0;

fail:	iwm_free_tx_ring(sc, ring);
	return error;
}
   1235 
/*
 * Drop all queued frames from a TX ring, zero its descriptors, clear
 * the queue-full bit and rewind the indices.  DMA maps and command
 * buffers stay allocated.
 */
static void
iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	/* Clear TX descriptors. */
	memset(ring->desc, 0, ring->desc_dma.size);
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0,
	    ring->desc_dma.size, BUS_DMASYNC_PREWRITE);
	sc->qfullmsk &= ~(1 << ring->qid);
	ring->queued = 0;
	ring->cur = 0;
}
   1260 
/*
 * Release all TX ring resources: descriptor and command DMA buffers,
 * any still-queued mbufs, and per-slot DMA maps.  Safe to call on a
 * partially initialized ring (the iwm_alloc_tx_ring() failure path
 * does).
 */
static void
iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->cmd_dma);

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
		}
		if (data->map != NULL)
			bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}
   1282 
   1283 /*
   1284  * High-level hardware frobbing routines
   1285  */
   1286 
/*
 * Mask all interrupt sources except RF-kill, so only switch-state
 * changes are delivered.  Also records the mask in sc_intmask for
 * iwm_restore_interrupts().
 */
static void
iwm_enable_rfkill_int(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INT_BIT_RF_KILL;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
   1293 
   1294 static int
   1295 iwm_check_rfkill(struct iwm_softc *sc)
   1296 {
   1297 	uint32_t v;
   1298 	int s;
   1299 	int rv;
   1300 
   1301 	s = splnet();
   1302 
   1303 	/*
   1304 	 * "documentation" is not really helpful here:
   1305 	 *  27:	HW_RF_KILL_SW
   1306 	 *	Indicates state of (platform's) hardware RF-Kill switch
   1307 	 *
   1308 	 * But apparently when it's off, it's on ...
   1309 	 */
   1310 	v = IWM_READ(sc, IWM_CSR_GP_CNTRL);
   1311 	rv = (v & IWM_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) == 0;
   1312 	if (rv) {
   1313 		sc->sc_flags |= IWM_FLAG_RFKILL;
   1314 	} else {
   1315 		sc->sc_flags &= ~IWM_FLAG_RFKILL;
   1316 	}
   1317 
   1318 	splx(s);
   1319 	return rv;
   1320 }
   1321 
/*
 * Enable the default interrupt set and remember it in sc_intmask for
 * iwm_restore_interrupts().
 */
static void
iwm_enable_interrupts(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
   1328 
/*
 * Re-apply the last interrupt mask recorded in sc_intmask.
 */
static void
iwm_restore_interrupts(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
   1334 
/*
 * Mask all interrupt sources and acknowledge anything pending, so no
 * stale cause bits fire when interrupts are later re-enabled.
 */
static void
iwm_disable_interrupts(struct iwm_softc *sc)
{
	int s = splnet();

	/* disable interrupts */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	/* acknowledge all interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);

	splx(s);
}
   1349 
/*
 * Reset and re-arm the ICT (interrupt cause table): zero the table,
 * hand its physical address to the device, switch the driver into ICT
 * mode, and re-enable interrupts.
 */
static void
iwm_ict_reset(struct iwm_softc *sc)
{
	iwm_disable_interrupts(sc);

	/* Reset ICT table. */
	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT table (4KB aligned). */
	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
	    IWM_CSR_DRAM_INT_TBL_ENABLE
	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWM_FLAG_USE_ICT;

	/* Re-enable interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);
}
   1372 
#define IWM_HW_READY_TIMEOUT 50
/*
 * Assert NIC_READY and poll (up to 50us, per iwm_poll_bit()'s 10us
 * granularity) until the bit reads back set.  Returns non-zero when
 * the device reports ready.
 */
static int
iwm_set_hw_ready(struct iwm_softc *sc)
{
	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	return iwm_poll_bit(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
	    IWM_HW_READY_TIMEOUT);
}
#undef IWM_HW_READY_TIMEOUT
   1386 
   1387 static int
   1388 iwm_prepare_card_hw(struct iwm_softc *sc)
   1389 {
   1390 	int rv = 0;
   1391 	int t = 0;
   1392 
   1393 	if (iwm_set_hw_ready(sc))
   1394 		goto out;
   1395 
   1396 	/* If HW is not ready, prepare the conditions to check again */
   1397 	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
   1398 	    IWM_CSR_HW_IF_CONFIG_REG_PREPARE);
   1399 
   1400 	do {
   1401 		if (iwm_set_hw_ready(sc))
   1402 			goto out;
   1403 		DELAY(200);
   1404 		t += 200;
   1405 	} while (t < 150000);
   1406 
   1407 	rv = ETIMEDOUT;
   1408 
   1409  out:
   1410 	return rv;
   1411 }
   1412 
   1413 static void
   1414 iwm_apm_config(struct iwm_softc *sc)
   1415 {
   1416 	pcireg_t reg;
   1417 
   1418 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
   1419 	    sc->sc_cap_off + PCIE_LCSR);
   1420 	if (reg & PCIE_LCSR_ASPM_L1) {
   1421 		/* Um the Linux driver prints "Disabling L0S for this one ... */
   1422 		IWM_SETBITS(sc, IWM_CSR_GIO_REG,
   1423 		    IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
   1424 	} else {
   1425 		/* ... and "Enabling" here */
   1426 		IWM_CLRBITS(sc, IWM_CSR_GIO_REG,
   1427 		    IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
   1428 	}
   1429 }
   1430 
   1431 /*
   1432  * Start up NIC's basic functionality after it has been reset
   1433  * (e.g. after platform boot, or shutdown via iwm_pcie_apm_stop())
   1434  * NOTE:  This does not load uCode nor start the embedded processor
   1435  */
   1436 static int
   1437 iwm_apm_init(struct iwm_softc *sc)
   1438 {
   1439 	int error = 0;
   1440 
   1441 	DPRINTF(("iwm apm start\n"));
   1442 
   1443 	/* Disable L0S exit timer (platform NMI Work/Around) */
   1444 	IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
   1445 	    IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
   1446 
   1447 	/*
   1448 	 * Disable L0s without affecting L1;
   1449 	 *  don't wait for ICH L0s (ICH bug W/A)
   1450 	 */
   1451 	IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
   1452 	    IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
   1453 
   1454 	/* Set FH wait threshold to maximum (HW error during stress W/A) */
   1455 	IWM_SETBITS(sc, IWM_CSR_DBG_HPET_MEM_REG, IWM_CSR_DBG_HPET_MEM_REG_VAL);
   1456 
   1457 	/*
   1458 	 * Enable HAP INTA (interrupt from management bus) to
   1459 	 * wake device's PCI Express link L1a -> L0s
   1460 	 */
   1461 	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
   1462 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
   1463 
   1464 	iwm_apm_config(sc);
   1465 
   1466 #if 0 /* not for 7k */
   1467 	/* Configure analog phase-lock-loop before activating to D0A */
   1468 	if (trans->cfg->base_params->pll_cfg_val)
   1469 		IWM_SETBITS(trans, IWM_CSR_ANA_PLL_CFG,
   1470 		    trans->cfg->base_params->pll_cfg_val);
   1471 #endif
   1472 
   1473 	/*
   1474 	 * Set "initialization complete" bit to move adapter from
   1475 	 * D0U* --> D0A* (powered-up active) state.
   1476 	 */
   1477 	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL, IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
   1478 
   1479 	/*
   1480 	 * Wait for clock stabilization; once stabilized, access to
   1481 	 * device-internal resources is supported, e.g. iwm_write_prph()
   1482 	 * and accesses to uCode SRAM.
   1483 	 */
   1484 	if (!iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
   1485 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
   1486 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
   1487 		aprint_error_dev(sc->sc_dev,
   1488 		    "timeout waiting for clock stabilization\n");
   1489 		goto out;
   1490 	}
   1491 
   1492 	if (sc->host_interrupt_operation_mode) {
   1493 		/*
   1494 		 * This is a bit of an abuse - This is needed for 7260 / 3160
   1495 		 * only check host_interrupt_operation_mode even if this is
   1496 		 * not related to host_interrupt_operation_mode.
   1497 		 *
   1498 		 * Enable the oscillator to count wake up time for L1 exit. This
   1499 		 * consumes slightly more power (100uA) - but allows to be sure
   1500 		 * that we wake up from L1 on time.
   1501 		 *
   1502 		 * This looks weird: read twice the same register, discard the
   1503 		 * value, set a bit, and yet again, read that same register
   1504 		 * just to discard the value. But that's the way the hardware
   1505 		 * seems to like it.
   1506 		 */
   1507 		iwm_read_prph(sc, IWM_OSC_CLK);
   1508 		iwm_read_prph(sc, IWM_OSC_CLK);
   1509 		iwm_set_bits_prph(sc, IWM_OSC_CLK, IWM_OSC_CLK_FORCE_CONTROL);
   1510 		iwm_read_prph(sc, IWM_OSC_CLK);
   1511 		iwm_read_prph(sc, IWM_OSC_CLK);
   1512 	}
   1513 
   1514 	/*
   1515 	 * Enable DMA clock and wait for it to stabilize.
   1516 	 *
   1517 	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
   1518 	 * do not disable clocks.  This preserves any hardware bits already
   1519 	 * set by default in "CLK_CTRL_REG" after reset.
   1520 	 */
   1521 	iwm_write_prph(sc, IWM_APMG_CLK_EN_REG, IWM_APMG_CLK_VAL_DMA_CLK_RQT);
   1522 	//kpause("iwmapm", 0, mstohz(20), NULL);
   1523 	DELAY(20);
   1524 
   1525 	/* Disable L1-Active */
   1526 	iwm_set_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
   1527 	    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
   1528 
   1529 	/* Clear the interrupt in APMG if the NIC is in RFKILL */
   1530 	iwm_write_prph(sc, IWM_APMG_RTC_INT_STT_REG,
   1531 	    IWM_APMG_RTC_INT_STT_RFKILL);
   1532 
   1533  out:
   1534 	if (error)
   1535 		aprint_error_dev(sc->sc_dev, "apm init error %d\n", error);
   1536 	return error;
   1537 }
   1538 
/* iwlwifi/pcie/trans.c */
/*
 * Halt the device's busmaster DMA and wait (up to ~100us) for the
 * master-disabled acknowledgement.  Logs an error on timeout but
 * cannot report failure (void return).
 */
static void
iwm_apm_stop(struct iwm_softc *sc)
{
	/* stop device's busmaster DMA activity */
	IWM_SETBITS(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_STOP_MASTER);

	if (!iwm_poll_bit(sc, IWM_CSR_RESET,
	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED,
	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100))
		aprint_error_dev(sc->sc_dev, "timeout waiting for master\n");
	DPRINTF(("iwm apm stop\n"));
}
   1552 
/* iwlwifi pcie/trans.c */
/*
 * Bring the hardware up from cold: wait for the card to be ready,
 * perform a full software reset, run APM init, then arm the RF-kill
 * interrupt and sample the current switch state.  Returns 0 on
 * success or the first failing step's error.
 */
static int
iwm_start_hw(struct iwm_softc *sc)
{
	int error;

	if ((error = iwm_prepare_card_hw(sc)) != 0)
		return error;

	/* Reset the entire device */
	IWM_WRITE(sc, IWM_CSR_RESET,
	    IWM_CSR_RESET_REG_FLAG_SW_RESET |
	    IWM_CSR_RESET_REG_FLAG_NEVO_RESET);
	DELAY(10);

	if ((error = iwm_apm_init(sc)) != 0)
		return error;

	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);

	return 0;
}
   1576 
   1577 /* iwlwifi pcie/trans.c */
   1578 
/*
 * Bring the device to a full stop: mask interrupts, halt the TX
 * scheduler and all FH DMA channels, reset the RX/TX rings, power
 * down the busmaster DMA clocks and the APM, then reset the on-board
 * processor.  The register write ordering is deliberate -- do not
 * rearrange.
 */
static void
iwm_stop_device(struct iwm_softc *sc)
{
	int chnl, ntries;
	int qid;

	/* tell the device to stop sending interrupts */
	iwm_disable_interrupts(sc);

	/* device going down, Stop using ICT table */
	sc->sc_flags &= ~IWM_FLAG_USE_ICT;

	/* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */

	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Stop all DMA channels. */
	if (iwm_nic_lock(sc)) {
		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
			IWM_WRITE(sc,
			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
			/* Wait up to ~4ms for the channel to go idle. */
			for (ntries = 0; ntries < 200; ntries++) {
				uint32_t r;

				r = IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG);
				if (r & IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(
				    chnl))
					break;
				DELAY(20);
			}
		}
		iwm_nic_unlock(sc);
	}

	/* Stop RX ring. */
	iwm_reset_rx_ring(sc, &sc->rxq);

	/* Reset all TX rings. */
	for (qid = 0; qid < __arraycount(sc->txq); qid++)
		iwm_reset_tx_ring(sc, &sc->txq[qid]);

	/*
	 * Power-down device's busmaster DMA clocks
	 */
	iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG, IWM_APMG_CLK_VAL_DMA_CLK_RQT);
	DELAY(5);

	/* Make sure (redundant) we've released our request to stay awake */
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwm_apm_stop(sc);

	/* Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * Clean again the interrupt here
	 */
	iwm_disable_interrupts(sc);
	/* stop and reset the on-board processor */
	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_NEVO_RESET);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);
}
   1647 
/* iwlwifi pcie/trans.c (always main power) */
/*
 * Select VMAIN as the power source in the APMG power-control register.
 */
static void
iwm_set_pwr(struct iwm_softc *sc)
{
	iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
	    IWM_APMG_PS_CTRL_VAL_PWR_SRC_VMAIN, ~IWM_APMG_PS_CTRL_MSK_PWR_SRC);
}
   1655 
   1656 /* iwlwifi: mvm/ops.c */
   1657 static void
   1658 iwm_mvm_nic_config(struct iwm_softc *sc)
   1659 {
   1660 	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
   1661 	uint32_t reg_val = 0;
   1662 
   1663 	radio_cfg_type = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
   1664 	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
   1665 	radio_cfg_step = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
   1666 	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
   1667 	radio_cfg_dash = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
   1668 	    IWM_FW_PHY_CFG_RADIO_DASH_POS;
   1669 
   1670 	/* SKU control */
   1671 	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
   1672 	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
   1673 	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
   1674 	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
   1675 
   1676 	/* radio configuration */
   1677 	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
   1678 	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
   1679 	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
   1680 
   1681 	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);
   1682 
   1683 	DPRINTF(("Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
   1684 	    radio_cfg_step, radio_cfg_dash));
   1685 
   1686 	/*
   1687 	 * W/A : NIC is stuck in a reset state after Early PCIe power off
   1688 	 * (PCIe power is lost before PERST# is asserted), causing ME FW
   1689 	 * to lose ownership and not being able to obtain it back.
   1690 	 */
   1691 	iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
   1692 	    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
   1693 	    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
   1694 }
   1695 
/*
 * Program the RX DMA engine: stop channel 0, point it at the
 * descriptor ring and status area, enable it with 4KB buffers, and
 * configure interrupt coalescing.  Takes and releases the NIC lock;
 * returns EBUSY if the lock cannot be acquired.
 */
static int
iwm_nic_rx_init(struct iwm_softc *sc)
{
	if (!iwm_nic_lock(sc))
		return EBUSY;

	/*
	 * Initialize RX ring.  This is from the iwn driver.
	 */
	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));

	/* stop DMA */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Set physical address of RX ring (256-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);

	/* Set physical address of RX status (16-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);

	/* Enable RX. */
	/*
	 * Note: Linux driver also sets this:
	 *  (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
	 *
	 * It causes weird behavior.  YMMV.
	 */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL		|
	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY		|  /* HW bug */
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL	|
	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K		|
	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);

	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (sc->host_interrupt_operation_mode)
		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);

	/*
	 * Thus sayeth el jefe (iwlwifi) via a comment:
	 *
	 * This value should initially be 0 (before preparing any
	 * RBs), should be 8 after preparing the first 8 RBs (for example)
	 */
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);

	iwm_nic_unlock(sc);

	return 0;
}
   1754 
/*
 * iwm_nic_tx_init: program the TX rings into the device.
 *
 * Deactivates the TX scheduler, sets the "keep warm" page address and
 * the per-queue descriptor ring base addresses.  Returns EBUSY when
 * the NIC lock cannot be taken, 0 on success.
 */
static int
iwm_nic_tx_init(struct iwm_softc *sc)
{
	int qid;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Deactivate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Set physical address of "keep warm" page (16-byte aligned). */
	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);

	/* Initialize TX rings. */
	for (qid = 0; qid < __arraycount(sc->txq); qid++) {
		struct iwm_tx_ring *txq = &sc->txq[qid];

		/* Set physical address of TX ring (256-byte aligned). */
		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
		    txq->desc_dma.paddr >> 8);
		DPRINTF(("loading ring %d descriptors (%p) at %"PRIxMAX"\n",
		    qid, txq->desc, (uintmax_t)(txq->desc_dma.paddr >> 8)));
	}
	iwm_nic_unlock(sc);

	return 0;
}
   1783 
/*
 * iwm_nic_init: bring the NIC to an operational state: APM (power and
 * clock) setup, power configuration, RF configuration, then RX and TX
 * ring initialization.  Returns 0 on success or the error from the
 * RX/TX init step.
 */
static int
iwm_nic_init(struct iwm_softc *sc)
{
	int error;

	iwm_apm_init(sc);
	iwm_set_pwr(sc);

	iwm_mvm_nic_config(sc);

	if ((error = iwm_nic_rx_init(sc)) != 0)
		return error;

	/*
	 * Ditto for TX, from iwn
	 */
	if ((error = iwm_nic_tx_init(sc)) != 0)
		return error;

	DPRINTF(("shadow registers enabled\n"));
	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);

	return 0;
}
   1808 
#if 0
/*
 * Disabled: WME access-category to firmware TX FIFO mapping.
 * Presumably kept for future per-AC queue support -- not referenced
 * anywhere in the driver yet.
 */
enum iwm_mvm_tx_fifo {
	IWM_MVM_TX_FIFO_BK = 0,
	IWM_MVM_TX_FIFO_BE,
	IWM_MVM_TX_FIFO_VI,
	IWM_MVM_TX_FIFO_VO,
	IWM_MVM_TX_FIFO_MCAST = 5,
};

/* Indexed by WME access category. */
static const uint8_t iwm_mvm_ac_to_tx_fifo[] = {
	IWM_MVM_TX_FIFO_VO,
	IWM_MVM_TX_FIFO_VI,
	IWM_MVM_TX_FIFO_BE,
	IWM_MVM_TX_FIFO_BK,
};
#endif
   1825 
/*
 * iwm_enable_txq: activate TX queue 'qid' and bind it to firmware
 * FIFO 'fifo' via the scheduler (SCD) registers.
 *
 * The queue is first deactivated, its read/write pointers are reset,
 * its scheduler context (window size / frame limit) is written to
 * SRAM, and finally the queue is marked active.  Silently does nothing
 * when the NIC lock cannot be taken (XXX should return EBUSY).
 */
static void
iwm_enable_txq(struct iwm_softc *sc, int qid, int fifo)
{
	if (!iwm_nic_lock(sc)) {
		DPRINTF(("%s: cannot enable txq %d\n", DEVNAME(sc), qid));
		return; /* XXX return EBUSY */
	}

	/* unactivate before configuration */
	iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
	    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
	    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));

	/* Chain-building is enabled for all queues but the command queue. */
	if (qid != IWM_MVM_CMD_QUEUE) {
		iwm_set_bits_prph(sc, IWM_SCD_QUEUECHAIN_SEL, (1 << qid));
	}

	/* No aggregation on this queue. */
	iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));

	/* Reset write and read pointers. */
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
	iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);

	/* Clear this queue's scheduler context in SRAM. */
	iwm_write_mem32(sc, sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
	/* Set scheduler window size and frame limit. */
	iwm_write_mem32(sc,
	    sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
	    sizeof(uint32_t),
	    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
	    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
	    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
	    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

	/* Mark the queue active and attach it to the requested FIFO. */
	iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
	    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
	    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
	    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
	    IWM_SCD_QUEUE_STTS_REG_MSK);

	iwm_nic_unlock(sc);

	DPRINTF(("enabled txq %d FIFO %d\n", qid, fifo));
}
   1868 
/*
 * iwm_post_alive: finish device setup after the firmware has reported
 * "alive": verify the scheduler SRAM base address, reset the ICT
 * interrupt table, clear the scheduler context in SRAM, enable the
 * command queue and the FH TX DMA channels, and re-enable L1-Active.
 * Returns 0 on success, EBUSY/EINVAL or a memory-write error otherwise.
 */
static int
iwm_post_alive(struct iwm_softc *sc)
{
	int nwords;
	int error, chnl;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Firmware must agree with us on where scheduler SRAM lives. */
	if (sc->sched_base != iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR)) {
		DPRINTF(("%s: sched addr mismatch\n", DEVNAME(sc)));
		error = EINVAL;
		goto out;
	}

	iwm_ict_reset(sc);

	/* Clear TX scheduler state in SRAM. */
	nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND)
	    / sizeof(uint32_t);
	error = iwm_write_mem(sc,
	    sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
	    NULL, nwords);
	if (error)
		goto out;

	/* Set physical address of TX scheduler rings (1KB aligned). */
	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);

	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);

	/* enable command channel */
	iwm_enable_txq(sc, IWM_MVM_CMD_QUEUE, 7);

	/* Activate the TX scheduler (0 deactivates, see iwm_nic_tx_init). */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);

	/* Enable DMA channels. */
	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
	}

	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Enable L1-Active */
	iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
	    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

 out:
 	iwm_nic_unlock(sc);
	return error;
}
   1924 
   1925 /*
   1926  * PHY db
   1927  * iwlwifi/iwl-phy-db.c
   1928  */
   1929 
   1930 /*
   1931  * BEGIN iwl-phy-db.c
   1932  */
   1933 
/* PHY database sections delivered by the calibration firmware. */
enum iwm_phy_db_section_type {
	IWM_PHY_DB_CFG = 1,
	IWM_PHY_DB_CALIB_NCH,
	IWM_PHY_DB_UNUSED,
	IWM_PHY_DB_CALIB_CHG_PAPD,
	IWM_PHY_DB_CALIB_CHG_TXP,
	IWM_PHY_DB_MAX
};

#define IWM_PHY_DB_CMD 0x6c /* TEMP API - The actual is 0x8c */

/*
 * phy db - configure operational ucode
 */
struct iwm_phy_db_cmd {
	uint16_t type;
	uint16_t length;
	uint8_t data[];		/* flexible array: 'length' bytes follow */
} __packed;

/* for parsing of tx power channel group data that comes from the firmware */
struct iwm_phy_db_chg_txp {
	uint32_t space;
	uint16_t max_channel_idx;
} __packed;

/*
 * phy db - Receive phy db chunk after calibrations
 */
struct iwm_calib_res_notif_phy_db {
	uint16_t type;
	uint16_t length;
	uint8_t data[];		/* flexible array: 'length' bytes follow */
} __packed;
   1968 
   1969 /*
   1970  * get phy db section: returns a pointer to a phy db section specified by
   1971  * type and channel group id.
   1972  */
   1973 static struct iwm_phy_db_entry *
   1974 iwm_phy_db_get_section(struct iwm_softc *sc,
   1975 	enum iwm_phy_db_section_type type, uint16_t chg_id)
   1976 {
   1977 	struct iwm_phy_db *phy_db = &sc->sc_phy_db;
   1978 
   1979 	if (type >= IWM_PHY_DB_MAX)
   1980 		return NULL;
   1981 
   1982 	switch (type) {
   1983 	case IWM_PHY_DB_CFG:
   1984 		return &phy_db->cfg;
   1985 	case IWM_PHY_DB_CALIB_NCH:
   1986 		return &phy_db->calib_nch;
   1987 	case IWM_PHY_DB_CALIB_CHG_PAPD:
   1988 		if (chg_id >= IWM_NUM_PAPD_CH_GROUPS)
   1989 			return NULL;
   1990 		return &phy_db->calib_ch_group_papd[chg_id];
   1991 	case IWM_PHY_DB_CALIB_CHG_TXP:
   1992 		if (chg_id >= IWM_NUM_TXP_CH_GROUPS)
   1993 			return NULL;
   1994 		return &phy_db->calib_ch_group_txp[chg_id];
   1995 	default:
   1996 		return NULL;
   1997 	}
   1998 	return NULL;
   1999 }
   2000 
/*
 * iwm_phy_db_set_section: store a PHY db chunk received from the
 * calibration firmware into the matching in-memory section.
 *
 * 'size' is the number of payload bytes following the notification
 * header.  Any previously stored copy of the section is released
 * first.  Returns EINVAL for unknown sections, ENOMEM when the copy
 * cannot be allocated, 0 on success.
 */
static int
iwm_phy_db_set_section(struct iwm_softc *sc,
    struct iwm_calib_res_notif_phy_db *phy_db_notif, uint16_t size)
{
	enum iwm_phy_db_section_type type = le16toh(phy_db_notif->type);
	struct iwm_phy_db_entry *entry;
	uint16_t chg_id = 0;

	/*
	 * Per-channel-group sections carry their group id in the first
	 * 16-bit word of the payload.
	 */
	if (type == IWM_PHY_DB_CALIB_CHG_PAPD ||
	    type == IWM_PHY_DB_CALIB_CHG_TXP)
		chg_id = le16toh(*(uint16_t *)phy_db_notif->data);

	entry = iwm_phy_db_get_section(sc, type, chg_id);
	if (!entry)
		return EINVAL;

	/* Replace any previous contents of this section. */
	if (entry->data)
		kmem_intr_free(entry->data, entry->size);
	/* kmem_intr_* / KM_NOSLEEP: allocation must not sleep here. */
	entry->data = kmem_intr_alloc(size, KM_NOSLEEP);
	if (!entry->data) {
		entry->size = 0;
		return ENOMEM;
	}
	memcpy(entry->data, phy_db_notif->data, size);
	entry->size = size;

	DPRINTFN(10, ("%s(%d): [PHYDB]SET: Type %d, Size: %d, data: %p\n",
	    __func__, __LINE__, type, size, entry->data));

	return 0;
}
   2032 
/*
 * Whether ch_id is a channel number the PHY db knows about: the 2GHz
 * channels up to 14 plus the standard 5GHz channel numbers.
 */
static int
iwm_is_valid_channel(uint16_t ch_id)
{
	/* 2GHz band (note: 0 is accepted, matching the original check). */
	if (ch_id <= 14)
		return 1;
	/* 5GHz: 36-64 and 100-140 on multiples of four. */
	if (36 <= ch_id && ch_id <= 64)
		return (ch_id & 3) == 0;
	if (100 <= ch_id && ch_id <= 140)
		return (ch_id & 3) == 0;
	/* 5GHz: 145-165, numbers congruent to 1 mod 4 (149, 153, ...). */
	if (145 <= ch_id && ch_id <= 165)
		return (ch_id & 3) == 1;
	return 0;
}
   2043 
/*
 * Map a channel number to its index in the driver's channel tables;
 * 0xff for invalid channels.
 */
static uint8_t
iwm_ch_id_to_ch_index(uint16_t ch_id)
{
	if (!iwm_is_valid_channel(ch_id))
		return 0xff;

	/* Walk the bands from the top down. */
	if (ch_id >= 145)
		return (ch_id - 13) / 4;	/* 145-165 -> 33-38 */
	if (ch_id >= 100)
		return (ch_id - 12) / 4;	/* 100-140 -> 22-32 */
	if (ch_id >= 36)
		return (ch_id + 20) / 4;	/* 36-64   -> 14-21 */
	return ch_id - 1;			/* 1-14    -> 0-13  */
}
   2058 
   2059 
/*
 * Map a channel to its PAPD calibration channel group: group 0 covers
 * 2GHz, groups 1-3 the 5GHz sub-bands; 0xff for invalid channels.
 */
static uint16_t
iwm_channel_id_to_papd(uint16_t ch_id)
{
	if (!iwm_is_valid_channel(ch_id))
		return 0xff;

	/* Walk the bands from the top down. */
	if (ch_id >= 141)
		return 3;	/* 145-165 */
	if (ch_id >= 65)
		return 2;	/* 100-140 */
	if (ch_id >= 15)
		return 1;	/* 36-64 */
	if (ch_id >= 1)
		return 0;	/* 1-14 */
	return 3;		/* ch_id 0: falls through here, as originally */
}
   2074 
   2075 static uint16_t
   2076 iwm_channel_id_to_txp(struct iwm_softc *sc, uint16_t ch_id)
   2077 {
   2078 	struct iwm_phy_db *phy_db = &sc->sc_phy_db;
   2079 	struct iwm_phy_db_chg_txp *txp_chg;
   2080 	int i;
   2081 	uint8_t ch_index = iwm_ch_id_to_ch_index(ch_id);
   2082 
   2083 	if (ch_index == 0xff)
   2084 		return 0xff;
   2085 
   2086 	for (i = 0; i < IWM_NUM_TXP_CH_GROUPS; i++) {
   2087 		txp_chg = (void *)phy_db->calib_ch_group_txp[i].data;
   2088 		if (!txp_chg)
   2089 			return 0xff;
   2090 		/*
   2091 		 * Looking for the first channel group that its max channel is
   2092 		 * higher then wanted channel.
   2093 		 */
   2094 		if (le16toh(txp_chg->max_channel_idx) >= ch_index)
   2095 			return i;
   2096 	}
   2097 	return 0xff;
   2098 }
   2099 
   2100 static int
   2101 iwm_phy_db_get_section_data(struct iwm_softc *sc,
   2102 	uint32_t type, uint8_t **data, uint16_t *size, uint16_t ch_id)
   2103 {
   2104 	struct iwm_phy_db_entry *entry;
   2105 	uint16_t ch_group_id = 0;
   2106 
   2107 	/* find wanted channel group */
   2108 	if (type == IWM_PHY_DB_CALIB_CHG_PAPD)
   2109 		ch_group_id = iwm_channel_id_to_papd(ch_id);
   2110 	else if (type == IWM_PHY_DB_CALIB_CHG_TXP)
   2111 		ch_group_id = iwm_channel_id_to_txp(sc, ch_id);
   2112 
   2113 	entry = iwm_phy_db_get_section(sc, type, ch_group_id);
   2114 	if (!entry)
   2115 		return EINVAL;
   2116 
   2117 	*data = entry->data;
   2118 	*size = entry->size;
   2119 
   2120 	DPRINTFN(10, ("%s(%d): [PHYDB] GET: Type %d , Size: %d\n",
   2121 		       __func__, __LINE__, type, *size));
   2122 
   2123 	return 0;
   2124 }
   2125 
   2126 static int
   2127 iwm_send_phy_db_cmd(struct iwm_softc *sc, uint16_t type,
   2128 	uint16_t length, void *data)
   2129 {
   2130 	struct iwm_phy_db_cmd phy_db_cmd;
   2131 	struct iwm_host_cmd cmd = {
   2132 		.id = IWM_PHY_DB_CMD,
   2133 		.flags = IWM_CMD_SYNC,
   2134 	};
   2135 
   2136 	DPRINTFN(10, ("Sending PHY-DB hcmd of type %d, of length %d\n",
   2137 	    type, length));
   2138 
   2139 	/* Set phy db cmd variables */
   2140 	phy_db_cmd.type = le16toh(type);
   2141 	phy_db_cmd.length = le16toh(length);
   2142 
   2143 	/* Set hcmd variables */
   2144 	cmd.data[0] = &phy_db_cmd;
   2145 	cmd.len[0] = sizeof(struct iwm_phy_db_cmd);
   2146 	cmd.data[1] = data;
   2147 	cmd.len[1] = length;
   2148 	cmd.dataflags[1] = IWM_HCMD_DFL_NOCOPY;
   2149 
   2150 	return iwm_send_cmd(sc, &cmd);
   2151 }
   2152 
   2153 static int
   2154 iwm_phy_db_send_all_channel_groups(struct iwm_softc *sc,
   2155 	enum iwm_phy_db_section_type type, uint8_t max_ch_groups)
   2156 {
   2157 	uint16_t i;
   2158 	int err;
   2159 	struct iwm_phy_db_entry *entry;
   2160 
   2161 	/* Send all the channel-specific groups to operational fw */
   2162 	for (i = 0; i < max_ch_groups; i++) {
   2163 		entry = iwm_phy_db_get_section(sc, type, i);
   2164 		if (!entry)
   2165 			return EINVAL;
   2166 
   2167 		if (!entry->size)
   2168 			continue;
   2169 
   2170 		/* Send the requested PHY DB section */
   2171 		err = iwm_send_phy_db_cmd(sc, type, entry->size, entry->data);
   2172 		if (err) {
   2173 			DPRINTF(("%s: Can't SEND phy_db section %d (%d), "
   2174 			    "err %d\n", DEVNAME(sc), type, i, err));
   2175 			return err;
   2176 		}
   2177 
   2178 		DPRINTFN(10, ("Sent PHY_DB HCMD, type = %d num = %d\n", type, i));
   2179 	}
   2180 
   2181 	return 0;
   2182 }
   2183 
/*
 * iwm_send_phy_db_data: after the operational (runtime) firmware is
 * loaded, replay the complete PHY database to it: the CFG section, the
 * non-channel-specific calibration section, then the PAPD and TX-power
 * channel groups.  Returns the first error encountered, 0 on success.
 */
static int
iwm_send_phy_db_data(struct iwm_softc *sc)
{
	uint8_t *data = NULL;
	uint16_t size = 0;
	int err;

	DPRINTF(("Sending phy db data and configuration to runtime image\n"));

	/* Send PHY DB CFG section */
	err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CFG, &data, &size, 0);
	if (err) {
		DPRINTF(("%s: Cannot get Phy DB cfg section, %d\n",
		    DEVNAME(sc), err));
		return err;
	}

	err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CFG, size, data);
	if (err) {
		DPRINTF(("%s: Cannot send HCMD of Phy DB cfg section, %d\n",
		    DEVNAME(sc), err));
		return err;
	}

	/* Send non-channel-specific calibration section. */
	err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CALIB_NCH,
	    &data, &size, 0);
	if (err) {
		DPRINTF(("%s: Cannot get Phy DB non specific channel section, "
		    "%d\n", DEVNAME(sc), err));
		return err;
	}

	err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CALIB_NCH, size, data);
	if (err) {
		DPRINTF(("%s: Cannot send HCMD of Phy DB non specific channel "
		    "sect, %d\n", DEVNAME(sc), err));
		return err;
	}

	/* Send all the PAPD channel specific data */
	err = iwm_phy_db_send_all_channel_groups(sc,
	    IWM_PHY_DB_CALIB_CHG_PAPD, IWM_NUM_PAPD_CH_GROUPS);
	if (err) {
		DPRINTF(("%s: Cannot send channel specific PAPD groups, %d\n",
		    DEVNAME(sc), err));
		return err;
	}

	/* Send all the TXP channel specific data */
	err = iwm_phy_db_send_all_channel_groups(sc,
	    IWM_PHY_DB_CALIB_CHG_TXP, IWM_NUM_TXP_CH_GROUPS);
	if (err) {
		DPRINTF(("%s: Cannot send channel specific TX power groups, "
		    "%d\n", DEVNAME(sc), err));
		return err;
	}

	DPRINTF(("Finished sending phy db non channel data\n"));
	return 0;
}
   2244 
   2245 /*
   2246  * END iwl-phy-db.c
   2247  */
   2248 
   2249 /*
   2250  * BEGIN iwlwifi/mvm/time-event.c
   2251  */
   2252 
   2253 /*
   2254  * For the high priority TE use a time event type that has similar priority to
   2255  * the FW's action scan priority.
   2256  */
#define IWM_MVM_ROC_TE_TYPE_NORMAL IWM_TE_P2P_DEVICE_DISCOVERABLE
#define IWM_MVM_ROC_TE_TYPE_MGMT_TX IWM_TE_P2P_CLIENT_ASSOC

/* Dependency-policy bits; used to convert from time event API v2 to v1 */
#define IWM_TE_V2_DEP_POLICY_MSK (IWM_TE_V2_DEP_OTHER | IWM_TE_V2_DEP_TSF |\
			     IWM_TE_V2_EVENT_SOCIOPATHIC)
   2263 static inline uint16_t
   2264 iwm_te_v2_get_notify(uint16_t policy)
   2265 {
   2266 	return le16toh(policy) & IWM_TE_V2_NOTIF_MSK;
   2267 }
   2268 
   2269 static inline uint16_t
   2270 iwm_te_v2_get_dep_policy(uint16_t policy)
   2271 {
   2272 	return (le16toh(policy) & IWM_TE_V2_DEP_POLICY_MSK) >>
   2273 		IWM_TE_V2_PLACEMENT_POS;
   2274 }
   2275 
   2276 static inline uint16_t
   2277 iwm_te_v2_get_absence(uint16_t policy)
   2278 {
   2279 	return (le16toh(policy) & IWM_TE_V2_ABSENCE) >> IWM_TE_V2_ABSENCE_POS;
   2280 }
   2281 
/*
 * iwm_mvm_te_v2_to_v1: down-convert a v2 time event command to the v1
 * layout for firmware that lacks the v2 API.  Most fields map 1:1; the
 * packed v2 policy word is split into the separate v1 dep_policy /
 * is_present / notify fields.
 */
static void
iwm_mvm_te_v2_to_v1(const struct iwm_time_event_cmd_v2 *cmd_v2,
	struct iwm_time_event_cmd_v1 *cmd_v1)
{
	cmd_v1->id_and_color = cmd_v2->id_and_color;
	cmd_v1->action = cmd_v2->action;
	cmd_v1->id = cmd_v2->id;
	cmd_v1->apply_time = cmd_v2->apply_time;
	cmd_v1->max_delay = cmd_v2->max_delay;
	cmd_v1->depends_on = cmd_v2->depends_on;
	cmd_v1->interval = cmd_v2->interval;
	cmd_v1->duration = cmd_v2->duration;
	/* v1 and v2 encode "repeat endlessly" with different values. */
	if (cmd_v2->repeat == IWM_TE_V2_REPEAT_ENDLESS)
		cmd_v1->repeat = htole32(IWM_TE_V1_REPEAT_ENDLESS);
	else
		cmd_v1->repeat = htole32(cmd_v2->repeat);
	cmd_v1->max_frags = htole32(cmd_v2->max_frags);
	cmd_v1->interval_reciprocal = 0; /* unused */

	cmd_v1->dep_policy = htole32(iwm_te_v2_get_dep_policy(cmd_v2->policy));
	cmd_v1->is_present = htole32(!iwm_te_v2_get_absence(cmd_v2->policy));
	cmd_v1->notify = htole32(iwm_te_v2_get_notify(cmd_v2->policy));
}
   2305 
   2306 static int
   2307 iwm_mvm_send_time_event_cmd(struct iwm_softc *sc,
   2308 	const struct iwm_time_event_cmd_v2 *cmd)
   2309 {
   2310 	struct iwm_time_event_cmd_v1 cmd_v1;
   2311 
   2312 	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_TIME_EVENT_API_V2)
   2313 		return iwm_mvm_send_cmd_pdu(sc, IWM_TIME_EVENT_CMD,
   2314 		    IWM_CMD_SYNC, sizeof(*cmd), cmd);
   2315 
   2316 	iwm_mvm_te_v2_to_v1(cmd, &cmd_v1);
   2317 	return iwm_mvm_send_cmd_pdu(sc, IWM_TIME_EVENT_CMD, IWM_CMD_SYNC,
   2318 	    sizeof(cmd_v1), &cmd_v1);
   2319 }
   2320 
   2321 static int
   2322 iwm_mvm_time_event_send_add(struct iwm_softc *sc, struct iwm_node *in,
   2323 	void *te_data, struct iwm_time_event_cmd_v2 *te_cmd)
   2324 {
   2325 	int ret;
   2326 
   2327 	DPRINTF(("Add new TE, duration %d TU\n", le32toh(te_cmd->duration)));
   2328 
   2329 	ret = iwm_mvm_send_time_event_cmd(sc, te_cmd);
   2330 	if (ret) {
   2331 		DPRINTF(("%s: Couldn't send IWM_TIME_EVENT_CMD: %d\n",
   2332 		    DEVNAME(sc), ret));
   2333 	}
   2334 
   2335 	return ret;
   2336 }
   2337 
/*
 * iwm_mvm_protect_session: ask the firmware to keep the device on the
 * operating channel for 'duration' TU, e.g. while association with the
 * AP completes.  'min_duration' is currently unused.
 */
static void
iwm_mvm_protect_session(struct iwm_softc *sc, struct iwm_node *in,
	uint32_t duration, uint32_t min_duration, uint32_t max_delay)
{
	struct iwm_time_event_cmd_v2 time_cmd;

	memset(&time_cmd, 0, sizeof(time_cmd));

	time_cmd.action = htole32(IWM_FW_CTXT_ACTION_ADD);
	time_cmd.id_and_color =
	    htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
	time_cmd.id = htole32(IWM_TE_BSS_STA_AGGRESSIVE_ASSOC);

	/* Start at the device's current timestamp. */
	time_cmd.apply_time = htole32(iwm_read_prph(sc,
	    IWM_DEVICE_SYSTEM_TIME_REG));

	time_cmd.max_frags = IWM_TE_V2_FRAG_NONE;
	time_cmd.max_delay = htole32(max_delay);
	/* TODO: why do we need to interval = bi if it is not periodic? */
	time_cmd.interval = htole32(1);
	time_cmd.duration = htole32(duration);
	time_cmd.repeat = 1;
	/* Notify the host when the event starts and when it ends. */
	time_cmd.policy
	    = htole32(IWM_TE_V2_NOTIF_HOST_EVENT_START |
	        IWM_TE_V2_NOTIF_HOST_EVENT_END);

	iwm_mvm_time_event_send_add(sc, in, /*te_data*/NULL, &time_cmd);
}
   2366 
   2367 /*
   2368  * END iwlwifi/mvm/time-event.c
   2369  */
   2370 
   2371 /*
   2372  * NVM read access and content parsing.  We do not support
   2373  * external NVM or writing NVM.
   2374  * iwlwifi/mvm/nvm.c
   2375  */
   2376 
   2377 /* list of NVM sections we are allowed/need to read */
static const int nvm_to_read[] = {
	IWM_NVM_SECTION_TYPE_HW,
	IWM_NVM_SECTION_TYPE_SW,
	IWM_NVM_SECTION_TYPE_CALIBRATION,
	IWM_NVM_SECTION_TYPE_PRODUCTION,
};

/* Default NVM size to read (per NVM_ACCESS chunk) */
#define IWM_NVM_DEFAULT_CHUNK_SIZE (2*1024)
/* Upper bound on a single NVM section's size. */
#define IWM_MAX_NVM_SECTION_SIZE 7000

/* NVM access command opcodes */
#define IWM_NVM_WRITE_OPCODE 1
#define IWM_NVM_READ_OPCODE 0
   2391 
   2392 static int
   2393 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
   2394 	uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
   2395 {
   2396 	offset = 0;
   2397 	struct iwm_nvm_access_cmd nvm_access_cmd = {
   2398 		.offset = htole16(offset),
   2399 		.length = htole16(length),
   2400 		.type = htole16(section),
   2401 		.op_code = IWM_NVM_READ_OPCODE,
   2402 	};
   2403 	struct iwm_nvm_access_resp *nvm_resp;
   2404 	struct iwm_rx_packet *pkt;
   2405 	struct iwm_host_cmd cmd = {
   2406 		.id = IWM_NVM_ACCESS_CMD,
   2407 		.flags = IWM_CMD_SYNC | IWM_CMD_WANT_SKB |
   2408 		    IWM_CMD_SEND_IN_RFKILL,
   2409 		.data = { &nvm_access_cmd, },
   2410 	};
   2411 	int ret, bytes_read, offset_read;
   2412 	uint8_t *resp_data;
   2413 
   2414 	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
   2415 
   2416 	ret = iwm_send_cmd(sc, &cmd);
   2417 	if (ret)
   2418 		return ret;
   2419 
   2420 	pkt = cmd.resp_pkt;
   2421 	if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
   2422 		DPRINTF(("%s: Bad return from IWM_NVM_ACCES_COMMAND (0x%08X)\n",
   2423 		    DEVNAME(sc), pkt->hdr.flags));
   2424 		ret = EIO;
   2425 		goto exit;
   2426 	}
   2427 
   2428 	/* Extract NVM response */
   2429 	nvm_resp = (void *)pkt->data;
   2430 
   2431 	ret = le16toh(nvm_resp->status);
   2432 	bytes_read = le16toh(nvm_resp->length);
   2433 	offset_read = le16toh(nvm_resp->offset);
   2434 	resp_data = nvm_resp->data;
   2435 	if (ret) {
   2436 		DPRINTF(("%s: NVM access command failed with status %d\n",
   2437 		    DEVNAME(sc), ret));
   2438 		ret = EINVAL;
   2439 		goto exit;
   2440 	}
   2441 
   2442 	if (offset_read != offset) {
   2443 		DPRINTF(("%s: NVM ACCESS response with invalid offset %d\n",
   2444 		    DEVNAME(sc), offset_read));
   2445 		ret = EINVAL;
   2446 		goto exit;
   2447 	}
   2448 
   2449 	memcpy(data + offset, resp_data, bytes_read);
   2450 	*len = bytes_read;
   2451 
   2452  exit:
   2453 	iwm_free_resp(sc, &cmd);
   2454 	return ret;
   2455 }
   2456 
   2457 /*
   2458  * Reads an NVM section completely.
 * NICs prior to the 7000 family don't have a real NVM, but just read
   2460  * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
   2461  * by uCode, we need to manually check in this case that we don't
   2462  * overflow and try to read more than the EEPROM size.
   2463  * For 7000 family NICs, we supply the maximal size we can read, and
   2464  * the uCode fills the response with as much data as we can,
   2465  * without overflowing, so no check is needed.
   2466  */
static int
iwm_nvm_read_section(struct iwm_softc *sc,
	uint16_t section, uint8_t *data, uint16_t *len)
{
	uint16_t length, seglen;
	int error;

	/* Set nvm section read length */
	length = seglen = IWM_NVM_DEFAULT_CHUNK_SIZE;
	*len = 0;

	/*
	 * Read the NVM until exhausted (reading less than requested).
	 * NOTE(review): there is no bound check against the caller's
	 * buffer here; 'data' must be large enough for the whole section
	 * (see IWM_MAX_NVM_SECTION_SIZE) -- confirm at callers.
	 */
	while (seglen == length) {
		error = iwm_nvm_read_chunk(sc,
		    section, *len, length, data, &seglen);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "Cannot read NVM from section %d offset %d, "
			    "length %d\n", section, *len, length);
			return error;
		}
		/* Account for the chunk we just received. */
		*len += seglen;
	}

	DPRINTFN(4, ("NVM section %d read completed\n", section));
	return 0;
}
   2494 
   2495 /*
   2496  * BEGIN IWM_NVM_PARSE
   2497  */
   2498 
   2499 /* iwlwifi/iwl-nvm-parse.c */
   2500 
   2501 /* NVM offsets (in words) definitions */
enum wkp_nvm_offsets {
	/* NVM HW-Section offset (in words) definitions */
	IWM_HW_ADDR = 0x15,

/* NVM SW-Section offset (in words) definitions */
	IWM_NVM_SW_SECTION = 0x1C0,
	IWM_NVM_VERSION = 0,
	IWM_RADIO_CFG = 1,
	IWM_SKU = 2,
	IWM_N_HW_ADDRS = 3,
	IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,

/* NVM calibration section offset (in words) definitions */
	IWM_NVM_CALIB_SECTION = 0x2B8,
	IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
};

/* SKU Capabilities (actual values from NVM definition) */
enum nvm_sku_bits {
	IWM_NVM_SKU_CAP_BAND_24GHZ	= (1 << 0),
	IWM_NVM_SKU_CAP_BAND_52GHZ	= (1 << 1),
	IWM_NVM_SKU_CAP_11N_ENABLE	= (1 << 2),
	IWM_NVM_SKU_CAP_11AC_ENABLE	= (1 << 3),
};

/* radio config bits (actual values from NVM definition) */
#define IWM_NVM_RF_CFG_DASH_MSK(x)   (x & 0x3)         /* bits 0-1   */
#define IWM_NVM_RF_CFG_STEP_MSK(x)   ((x >> 2)  & 0x3) /* bits 2-3   */
#define IWM_NVM_RF_CFG_TYPE_MSK(x)   ((x >> 4)  & 0x3) /* bits 4-5   */
#define IWM_NVM_RF_CFG_PNUM_MSK(x)   ((x >> 6)  & 0x3) /* bits 6-7   */
#define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8)  & 0xF) /* bits 8-11  */
#define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */

/* XXX no IWM_ prefix, unlike the rest of this file's constants */
#define DEFAULT_MAX_TX_POWER 16

/**
 * enum iwm_nvm_channel_flags - channel flags in NVM
 * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
 * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
 * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
 * @IWM_NVM_CHANNEL_RADAR: radar detection required
 * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
 * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
 * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
 * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
 * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
 */
enum iwm_nvm_channel_flags {
	IWM_NVM_CHANNEL_VALID = (1 << 0),
	IWM_NVM_CHANNEL_IBSS = (1 << 1),
	IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
	IWM_NVM_CHANNEL_RADAR = (1 << 4),
	IWM_NVM_CHANNEL_DFS = (1 << 7),
	IWM_NVM_CHANNEL_WIDE = (1 << 8),
	IWM_NVM_CHANNEL_40MHZ = (1 << 9),
	IWM_NVM_CHANNEL_80MHZ = (1 << 10),
	IWM_NVM_CHANNEL_160MHZ = (1 << 11),
};
   2560 
/*
 * iwm_init_channel_map: populate ic_channels from the NVM channel flag
 * words.  2GHz entries are marked CCK/OFDM/dynamic, 5GHz entries 11a;
 * channels without the ACTIVE flag are restricted to passive scanning,
 * and 5GHz channels are dropped entirely when the SKU disables that
 * band.
 */
static void
iwm_init_channel_map(struct iwm_softc *sc, const uint16_t * const nvm_ch_flags)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_nvm_data *data = &sc->sc_nvm;
	int ch_idx;
	struct ieee80211_channel *channel;
	uint16_t ch_flags;
	int is_5ghz;
	int flags, hw_value;

	for (ch_idx = 0; ch_idx < __arraycount(iwm_nvm_channels); ch_idx++) {
		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);

		/* Invalidate 5GHz channels when the SKU disables the band. */
		if (ch_idx >= IWM_NUM_2GHZ_CHANNELS &&
		    !data->sku_cap_band_52GHz_enable)
			ch_flags &= ~IWM_NVM_CHANNEL_VALID;

		if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
			DPRINTF(("Ch. %d Flags %x [%sGHz] - No traffic\n",
			    iwm_nvm_channels[ch_idx],
			    ch_flags,
			    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
			    "5.2" : "2.4"));
			continue;
		}

		hw_value = iwm_nvm_channels[ch_idx];
		channel = &ic->ic_channels[hw_value];

		is_5ghz = ch_idx >= IWM_NUM_2GHZ_CHANNELS;
		if (!is_5ghz) {
			flags = IEEE80211_CHAN_2GHZ;
			channel->ic_flags
			    = IEEE80211_CHAN_CCK
			    | IEEE80211_CHAN_OFDM
			    | IEEE80211_CHAN_DYN
			    | IEEE80211_CHAN_2GHZ;
		} else {
			flags = IEEE80211_CHAN_5GHZ;
			channel->ic_flags =
			    IEEE80211_CHAN_A;
		}
		channel->ic_freq = ieee80211_ieee2mhz(hw_value, flags);

		/* Channels the NVM does not mark active are scan-passive. */
		if (!(ch_flags & IWM_NVM_CHANNEL_ACTIVE))
			channel->ic_flags |= IEEE80211_CHAN_PASSIVE;
	}
}
   2610 
/*
 * iwm_parse_nvm_data: extract radio configuration, SKU capabilities,
 * MAC address, crystal calibration and the channel map from the raw
 * NVM section words into sc->sc_nvm.  Returns EINVAL when the antenna
 * masks are empty, 0 otherwise.  (tx_chains/rx_chains are accepted but
 * not used here.)
 */
static int
iwm_parse_nvm_data(struct iwm_softc *sc,
	const uint16_t *nvm_hw, const uint16_t *nvm_sw,
	const uint16_t *nvm_calib, uint8_t tx_chains, uint8_t rx_chains)
{
	struct iwm_nvm_data *data = &sc->sc_nvm;
	uint8_t hw_addr[ETHER_ADDR_LEN];
	uint16_t radio_cfg, sku;

	data->nvm_version = le16_to_cpup(nvm_sw + IWM_NVM_VERSION);

	/* Unpack the radio configuration word. */
	radio_cfg = le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
	data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
	data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
	data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
	data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
	data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK(radio_cfg);
	data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK(radio_cfg);

	sku = le16_to_cpup(nvm_sw + IWM_SKU);
	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
#ifndef IWM_NO_5GHZ
	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
#else
	data->sku_cap_band_52GHz_enable = 0;
#endif
	/* 11n support is forced off here regardless of the SKU bit. */
	data->sku_cap_11n_enable = 0;

	if (!data->valid_tx_ant || !data->valid_rx_ant) {
		DPRINTF(("%s: invalid antennas (0x%x, 0x%x)\n", DEVNAME(sc),
		    data->valid_tx_ant, data->valid_rx_ant));
		return EINVAL;
	}

	data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);

	/*
	 * NOTE(review): copied without le16toh(), unlike the other NVM
	 * words above -- looks wrong on big-endian hosts; confirm what
	 * byte order the firmware expects for xtal_calib.
	 */
	data->xtal_calib[0] = *(nvm_calib + IWM_XTAL_CALIB);
	data->xtal_calib[1] = *(nvm_calib + IWM_XTAL_CALIB + 1);

	/* The byte order is little endian 16 bit, meaning 214365 */
	memcpy(hw_addr, nvm_hw + IWM_HW_ADDR, ETHER_ADDR_LEN);
	data->hw_addr[0] = hw_addr[1];
	data->hw_addr[1] = hw_addr[0];
	data->hw_addr[2] = hw_addr[3];
	data->hw_addr[3] = hw_addr[2];
	data->hw_addr[4] = hw_addr[5];
	data->hw_addr[5] = hw_addr[4];

	iwm_init_channel_map(sc, &nvm_sw[IWM_NVM_CHANNELS]);
	data->calib_version = 255;   /* TODO:
					this value will prevent some checks from
					failing, we need to check if this
					field is still needed, and if it does,
					where is it in the NVM */

	return 0;
}
   2668 
   2669 /*
   2670  * END NVM PARSE
   2671  */
   2672 
/* An in-memory copy of one NVM section read from the device. */
struct iwm_nvm_section {
	uint16_t length;
	const uint8_t *data;
};

/* Valid TX/RX antenna masks taken from the firmware PHY configuration. */
#define IWM_FW_VALID_TX_ANT(sc) \
    ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_TX_CHAIN) \
    >> IWM_FW_PHY_CFG_TX_CHAIN_POS)
#define IWM_FW_VALID_RX_ANT(sc) \
    ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RX_CHAIN) \
    >> IWM_FW_PHY_CFG_RX_CHAIN_POS)
   2684 
   2685 static int
   2686 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
   2687 {
   2688 	const uint16_t *hw, *sw, *calib;
   2689 
   2690 	/* Checking for required sections */
   2691 	if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
   2692 	    !sections[IWM_NVM_SECTION_TYPE_HW].data) {
   2693 		DPRINTF(("%s: Can't parse empty NVM sections\n", DEVNAME(sc)));
   2694 		return ENOENT;
   2695 	}
   2696 
   2697 	hw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_HW].data;
   2698 	sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
   2699 	calib = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
   2700 	return iwm_parse_nvm_data(sc, hw, sw, calib,
   2701 	    IWM_FW_VALID_TX_ANT(sc), IWM_FW_VALID_RX_ANT(sc));
   2702 }
   2703 
   2704 static int
   2705 iwm_nvm_init(struct iwm_softc *sc)
   2706 {
   2707 	struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
   2708 	int i, section, error;
   2709 	uint16_t len;
   2710 	uint8_t *nvm_buffer, *temp;
   2711 
   2712 	/* Read From FW NVM */
   2713 	DPRINTF(("Read NVM\n"));
   2714 
   2715 	/* TODO: find correct NVM max size for a section */
   2716 	nvm_buffer = kmem_alloc(IWM_OTP_LOW_IMAGE_SIZE, KM_SLEEP);
   2717 	for (i = 0; i < __arraycount(nvm_to_read); i++) {
   2718 		section = nvm_to_read[i];
   2719 		KASSERT(section <= __arraycount(nvm_sections));
   2720 
   2721 		error = iwm_nvm_read_section(sc, section, nvm_buffer, &len);
   2722 		if (error)
   2723 			break;
   2724 
   2725 		temp = kmem_alloc(len, KM_SLEEP);
   2726 		memcpy(temp, nvm_buffer, len);
   2727 		nvm_sections[section].data = temp;
   2728 		nvm_sections[section].length = len;
   2729 	}
   2730 	kmem_free(nvm_buffer, IWM_OTP_LOW_IMAGE_SIZE);
   2731 	if (error)
   2732 		return error;
   2733 
   2734 	return iwm_parse_nvm_sections(sc, nvm_sections);
   2735 }
   2736 
   2737 /*
   2738  * Firmware loading gunk.  This is kind of a weird hybrid between the
   2739  * iwn driver and the Linux iwlwifi driver.
   2740  */
   2741 
   2742 static int
   2743 iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
   2744 	const uint8_t *section, uint32_t byte_cnt)
   2745 {
   2746 	struct iwm_dma_info *dma = &sc->fw_dma;
   2747 	int error;
   2748 
   2749 	/* Copy firmware section into pre-allocated DMA-safe memory. */
   2750 	memcpy(dma->vaddr, section, byte_cnt);
   2751 	bus_dmamap_sync(sc->sc_dmat,
   2752 	    dma->map, 0, byte_cnt, BUS_DMASYNC_PREWRITE);
   2753 
   2754 	if (!iwm_nic_lock(sc))
   2755 		return EBUSY;
   2756 
   2757 	sc->sc_fw_chunk_done = 0;
   2758 
   2759 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
   2760 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
   2761 	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
   2762 	    dst_addr);
   2763 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
   2764 	    dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
   2765 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
   2766 	    (iwm_get_dma_hi_addr(dma->paddr)
   2767 	      << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
   2768 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
   2769 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
   2770 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
   2771 	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
   2772 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
   2773 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
   2774 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
   2775 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
   2776 
   2777 	iwm_nic_unlock(sc);
   2778 
   2779 	/* wait 1s for this segment to load */
   2780 	while (!sc->sc_fw_chunk_done)
   2781 		if ((error = tsleep(&sc->sc_fw, 0, "iwmfw", hz)) != 0)
   2782 			break;
   2783 
   2784 	return error;
   2785 }
   2786 
   2787 static int
   2788 iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
   2789 {
   2790 	struct iwm_fw_sects *fws;
   2791 	int error, i, w;
   2792 	void *data;
   2793 	uint32_t dlen;
   2794 	uint32_t offset;
   2795 
   2796 	sc->sc_uc.uc_intr = 0;
   2797 
   2798 	fws = &sc->sc_fw.fw_sects[ucode_type];
   2799 	for (i = 0; i < fws->fw_count; i++) {
   2800 		data = fws->fw_sect[i].fws_data;
   2801 		dlen = fws->fw_sect[i].fws_len;
   2802 		offset = fws->fw_sect[i].fws_devoff;
   2803 		DPRINTF(("LOAD FIRMWARE type %d offset %u len %d\n",
   2804 		    ucode_type, offset, dlen));
   2805 		error = iwm_firmware_load_chunk(sc, offset, data, dlen);
   2806 		if (error) {
   2807 			DPRINTF(("iwm_firmware_load_chunk() chunk %u of %u "
   2808 			    "returned error %02d\n", i, fws->fw_count, error));
   2809 			return error;
   2810 		}
   2811 	}
   2812 
   2813 	/* wait for the firmware to load */
   2814 	IWM_WRITE(sc, IWM_CSR_RESET, 0);
   2815 
   2816 	for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++) {
   2817 		error = tsleep(&sc->sc_uc, 0, "iwmuc", hz/10);
   2818 	}
   2819 
   2820 	return error;
   2821 }
   2822 
/* iwlwifi: pcie/trans.c */
/*
 * Bring the NIC up and load the requested ucode image: init the NIC,
 * clear the rfkill handshake bits, enable host interrupts and hand
 * off to iwm_load_firmware().  Returns 0 on success or the error from
 * NIC init / firmware load.
 */
static int
iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
{
	int error;

	/* Ack any pending interrupts before (re)initializing. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);

	if ((error = iwm_nic_init(sc)) != 0) {
		aprint_error_dev(sc->sc_dev, "Unable to init nic\n");
		return error;
	}

	/* make sure rfkill handshake bits are cleared */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);

	/* really make sure rfkill handshake bits are cleared */
	/* maybe we should write a few times more?  just to make sure */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);

	/* Load the given image to the HW */
	error = iwm_load_firmware(sc, ucode_type);
	if (error) {
		aprint_error_dev(sc->sc_dev, "failed to load firmware: %d\n",
		    error);
	}
	return error;
}
   2858 
/*
 * Called once the firmware has reported alive; finishes bring-up via
 * iwm_post_alive().  The sched_base argument is currently unused.
 */
static int
iwm_fw_alive(struct iwm_softc *sc, uint32_t sched_base)
{
	return iwm_post_alive(sc);
}
   2864 
   2865 static int
   2866 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
   2867 {
   2868 	struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
   2869 		.valid = htole32(valid_tx_ant),
   2870 	};
   2871 
   2872 	return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
   2873 	    IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
   2874 }
   2875 
   2876 /* iwlwifi: mvm/fw.c */
   2877 static int
   2878 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
   2879 {
   2880 	struct iwm_phy_cfg_cmd phy_cfg_cmd;
   2881 	enum iwm_ucode_type ucode_type = sc->sc_uc_current;
   2882 
   2883 	/* Set parameters */
   2884 	phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
   2885 	phy_cfg_cmd.calib_control.event_trigger =
   2886 	    sc->sc_default_calib[ucode_type].event_trigger;
   2887 	phy_cfg_cmd.calib_control.flow_trigger =
   2888 	    sc->sc_default_calib[ucode_type].flow_trigger;
   2889 
   2890 	DPRINTFN(10, ("Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg));
   2891 	return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
   2892 	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
   2893 }
   2894 
   2895 static int
   2896 iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
   2897 	enum iwm_ucode_type ucode_type)
   2898 {
   2899 	enum iwm_ucode_type old_type = sc->sc_uc_current;
   2900 	int error;
   2901 
   2902 	if ((error = iwm_read_firmware(sc)) != 0)
   2903 		return error;
   2904 
   2905 	sc->sc_uc_current = ucode_type;
   2906 	error = iwm_start_fw(sc, ucode_type);
   2907 	if (error) {
   2908 		sc->sc_uc_current = old_type;
   2909 		return error;
   2910 	}
   2911 
   2912 	return iwm_fw_alive(sc, sc->sched_base);
   2913 }
   2914 
   2915 /*
   2916  * mvm misc bits
   2917  */
   2918 
   2919 /*
   2920  * follows iwlwifi/fw.c
   2921  */
/*
 * Run the INIT ucode image.  With justnvm != 0 this only reads the
 * NVM (MAC address, channels) and allocates the scan command buffer;
 * otherwise it also triggers the firmware's internal calibrations and
 * sleeps until the init-complete notification arrives.
 */
static int
iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
{
	int error;

	/* do not operate with rfkill switch turned on */
	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
		aprint_error_dev(sc->sc_dev,
		    "radio is disabled by hardware switch\n");
		return EPERM;
	}

	/* Cleared here; the notification handler sets it and wakes us. */
	sc->sc_init_complete = 0;
	if ((error = iwm_mvm_load_ucode_wait_alive(sc,
	    IWM_UCODE_TYPE_INIT)) != 0)
		return error;

	if (justnvm) {
		if ((error = iwm_nvm_init(sc)) != 0) {
			aprint_error_dev(sc->sc_dev, "failed to read nvm\n");
			return error;
		}
		memcpy(&sc->sc_ic.ic_myaddr,
		    &sc->sc_nvm.hw_addr, ETHER_ADDR_LEN);

		/* Size the scan command for the maximum probe request. */
		sc->sc_scan_cmd_len = sizeof(struct iwm_scan_cmd)
		    + sc->sc_capa_max_probe_len
		    + IWM_MAX_NUM_SCAN_CHANNELS
		    * sizeof(struct iwm_scan_channel);
		sc->sc_scan_cmd = kmem_alloc(sc->sc_scan_cmd_len, KM_SLEEP);

		return 0;
	}

	/* Send TX valid antennas before triggering calibrations */
	if ((error = iwm_send_tx_ant_cfg(sc, IWM_FW_VALID_TX_ANT(sc))) != 0)
		return error;

	/*
	 * Send phy configurations command to init uCode
	 * to start the 16.0 uCode init image internal calibrations.
	 */
	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0 ) {
		DPRINTF(("%s: failed to run internal calibration: %d\n",
		    DEVNAME(sc), error));
		return error;
	}

	/*
	 * Nothing to do but wait for the init complete notification
	 * from the firmware (2s timeout per sleep).
	 */
	while (!sc->sc_init_complete)
		if ((error = tsleep(&sc->sc_init_complete,
		    0, "iwminit", 2*hz)) != 0)
			break;

	return error;
}
   2981 
   2982 /*
   2983  * receive side
   2984  */
   2985 
/* (re)stock rx ring, called at init-time and at runtime */
/*
 * Allocate an mbuf (cluster or MEXTMALLOC storage depending on size),
 * DMA-load it, and write its bus address into RX descriptor slot idx.
 * Returns 0 on success, ENOBUFS when no mbuf/storage is available, or
 * the bus_dmamap_load_mbuf() error.
 */
static int
iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
{
	struct iwm_rx_ring *ring = &sc->rxq;
	struct iwm_rx_data *data = &ring->data[idx];
	struct mbuf *m;
	int error;
	int fatal = 0;

	m = m_gethdr(M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;

	if (size <= MCLBYTES) {
		MCLGET(m, M_DONTWAIT);
	} else {
		MEXTMALLOC(m, IWM_RBUF_SIZE, M_DONTWAIT);
	}
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return ENOBUFS;
	}

	/*
	 * If the slot already held a buffer we are replacing it; once
	 * the old map is unloaded, a later failure leaves the slot with
	 * no DMA mapping, which the hardware may still write to --
	 * hence "fatal".
	 */
	if (data->m != NULL) {
		bus_dmamap_unload(sc->sc_dmat, data->map);
		fatal = 1;
	}

	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	if ((error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT)) != 0) {
		/* XXX */
		if (fatal)
			panic("iwm: could not load RX mbuf");
		m_freem(m);
		return error;
	}
	data->m = m;
	bus_dmamap_sync(sc->sc_dmat, data->map, 0, size, BUS_DMASYNC_PREREAD);

	/* Update RX descriptor. */
	/* Hardware takes the DMA address shifted right by 8 bits. */
	ring->desc[idx] = htole32(data->map->dm_segs[0].ds_addr >> 8);
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
	    idx * sizeof(uint32_t), sizeof(uint32_t), BUS_DMASYNC_PREWRITE);

	return 0;
}
   3034 
   3035 /* iwlwifi: mvm/rx.c */
   3036 #define IWM_RSSI_OFFSET 50
   3037 static int
   3038 iwm_mvm_calc_rssi(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
   3039 {
   3040 	int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
   3041 	uint32_t agc_a, agc_b;
   3042 	uint32_t val;
   3043 
   3044 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_AGC_IDX]);
   3045 	agc_a = (val & IWM_OFDM_AGC_A_MSK) >> IWM_OFDM_AGC_A_POS;
   3046 	agc_b = (val & IWM_OFDM_AGC_B_MSK) >> IWM_OFDM_AGC_B_POS;
   3047 
   3048 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_RSSI_AB_IDX]);
   3049 	rssi_a = (val & IWM_OFDM_RSSI_INBAND_A_MSK) >> IWM_OFDM_RSSI_A_POS;
   3050 	rssi_b = (val & IWM_OFDM_RSSI_INBAND_B_MSK) >> IWM_OFDM_RSSI_B_POS;
   3051 
   3052 	/*
   3053 	 * dBm = rssi dB - agc dB - constant.
   3054 	 * Higher AGC (higher radio gain) means lower signal.
   3055 	 */
   3056 	rssi_a_dbm = rssi_a - IWM_RSSI_OFFSET - agc_a;
   3057 	rssi_b_dbm = rssi_b - IWM_RSSI_OFFSET - agc_b;
   3058 	max_rssi_dbm = MAX(rssi_a_dbm, rssi_b_dbm);
   3059 
   3060 	DPRINTF(("Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
   3061 	    rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b));
   3062 
   3063 	return max_rssi_dbm;
   3064 }
   3065 
   3066 /* iwlwifi: mvm/rx.c */
   3067 /*
   3068  * iwm_mvm_get_signal_strength - use new rx PHY INFO API
   3069  * values are reported by the fw as positive values - need to negate
   3070  * to obtain their dBM.  Account for missing antennas by replacing 0
   3071  * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
   3072  */
   3073 static int
   3074 iwm_mvm_get_signal_strength(struct iwm_softc *sc,
   3075     struct iwm_rx_phy_info *phy_info)
   3076 {
   3077 	int energy_a, energy_b, energy_c, max_energy;
   3078 	uint32_t val;
   3079 
   3080 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
   3081 	energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
   3082 	    IWM_RX_INFO_ENERGY_ANT_A_POS;
   3083 	energy_a = energy_a ? -energy_a : -256;
   3084 	energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
   3085 	    IWM_RX_INFO_ENERGY_ANT_B_POS;
   3086 	energy_b = energy_b ? -energy_b : -256;
   3087 	energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
   3088 	    IWM_RX_INFO_ENERGY_ANT_C_POS;
   3089 	energy_c = energy_c ? -energy_c : -256;
   3090 	max_energy = MAX(energy_a, energy_b);
   3091 	max_energy = MAX(max_energy, energy_c);
   3092 
   3093 	DPRINTFN(12, ("energy In A %d B %d C %d, and max %d\n",
   3094 	    energy_a, energy_b, energy_c, max_energy));
   3095 
   3096 	return max_energy;
   3097 }
   3098 
   3099 static void
   3100 iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc,
   3101 	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
   3102 {
   3103 	struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
   3104 
   3105 	DPRINTFN(20, ("received PHY stats\n"));
   3106 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
   3107 	    sizeof(*phy_info), BUS_DMASYNC_POSTREAD);
   3108 
   3109 	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
   3110 }
   3111 
   3112 /*
   3113  * Retrieve the average noise (in dBm) among receivers.
   3114  */
   3115 static int
   3116 iwm_get_noise(const struct iwm_mvm_statistics_rx_non_phy *stats)
   3117 {
   3118 	int i, total, nbant, noise;
   3119 
   3120 	total = nbant = noise = 0;
   3121 	for (i = 0; i < 3; i++) {
   3122 		noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
   3123 		if (noise) {
   3124 			total += noise;
   3125 			nbant++;
   3126 		}
   3127 	}
   3128 
   3129 	/* There should be at least one antenna but check anyway. */
   3130 	return (nbant == 0) ? -127 : (total / nbant) - 107;
   3131 }
   3132 
   3133 /*
   3134  * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
   3135  *
   3136  * Handles the actual data of the Rx packet from the fw
   3137  */
   3138 static void
   3139 iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc,
   3140 	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
   3141 {
   3142 	struct ieee80211com *ic = &sc->sc_ic;
   3143 	struct ieee80211_frame *wh;
   3144 	struct ieee80211_node *ni;
   3145 	struct ieee80211_channel *c = NULL;
   3146 	struct mbuf *m;
   3147 	struct iwm_rx_phy_info *phy_info;
   3148 	struct iwm_rx_mpdu_res_start *rx_res;
   3149 	int device_timestamp;
   3150 	uint32_t len;
   3151 	uint32_t rx_pkt_status;
   3152 	int rssi;
   3153 
   3154 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
   3155 	    BUS_DMASYNC_POSTREAD);
   3156 
   3157 	phy_info = &sc->sc_last_phy_info;
   3158 	rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
   3159 	wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
   3160 	len = le16toh(rx_res->byte_count);
   3161 	rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
   3162 
   3163 	m = data->m;
   3164 	m->m_data = pkt->data + sizeof(*rx_res);
   3165 	m->m_pkthdr.len = m->m_len = len;
   3166 
   3167 	if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
   3168 		DPRINTF(("dsp size out of range [0,20]: %d\n",
   3169 		    phy_info->cfg_phy_cnt));
   3170 		return;
   3171 	}
   3172 
   3173 	if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
   3174 	    !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
   3175 		DPRINTF(("Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status));
   3176 		return; /* drop */
   3177 	}
   3178 
   3179 	device_timestamp = le32toh(phy_info->system_timestamp);
   3180 
   3181 	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_RX_ENERGY_API) {
   3182 		rssi = iwm_mvm_get_signal_strength(sc, phy_info);
   3183 	} else {
   3184 		rssi = iwm_mvm_calc_rssi(sc, phy_info);
   3185 	}
   3186 	rssi = -rssi;
   3187 
   3188 	if (ic->ic_state == IEEE80211_S_SCAN)
   3189 		iwm_fix_channel(ic, m);
   3190 
   3191 	/* replenish ring for the buffer we're going to feed to the sharks */
   3192 	if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0)
   3193 		return;
   3194 
   3195 	m->m_pkthdr.rcvif = IC2IFP(ic);
   3196 
   3197 	if (sc->sc_scanband == IEEE80211_CHAN_5GHZ) {
   3198 		if (le32toh(phy_info->channel) < __arraycount(ic->ic_channels))
   3199 			c = &ic->ic_channels[le32toh(phy_info->channel)];
   3200 	}
   3201 
   3202 	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
   3203 	if (c)
   3204 		ni->ni_chan = c;
   3205 
   3206 	if (sc->sc_drvbpf != NULL) {
   3207 		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
   3208 
   3209 		tap->wr_flags = 0;
   3210 		if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
   3211 			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
   3212 		tap->wr_chan_freq =
   3213 		    htole16(ic->ic_channels[phy_info->channel].ic_freq);
   3214 		tap->wr_chan_flags =
   3215 		    htole16(ic->ic_channels[phy_info->channel].ic_flags);
   3216 		tap->wr_dbm_antsignal = (int8_t)rssi;
   3217 		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
   3218 		tap->wr_tsft = phy_info->system_timestamp;
   3219 		switch (phy_info->rate) {
   3220 		/* CCK rates. */
   3221 		case  10: tap->wr_rate =   2; break;
   3222 		case  20: tap->wr_rate =   4; break;
   3223 		case  55: tap->wr_rate =  11; break;
   3224 		case 110: tap->wr_rate =  22; break;
   3225 		/* OFDM rates. */
   3226 		case 0xd: tap->wr_rate =  12; break;
   3227 		case 0xf: tap->wr_rate =  18; break;
   3228 		case 0x5: tap->wr_rate =  24; break;
   3229 		case 0x7: tap->wr_rate =  36; break;
   3230 		case 0x9: tap->wr_rate =  48; break;
   3231 		case 0xb: tap->wr_rate =  72; break;
   3232 		case 0x1: tap->wr_rate =  96; break;
   3233 		case 0x3: tap->wr_rate = 108; break;
   3234 		/* Unknown rate: should not happen. */
   3235 		default:  tap->wr_rate =   0;
   3236 		}
   3237 
   3238 		bpf_mtap2(sc->sc_drvbpf, tap, sc->sc_rxtap_len, m);
   3239 	}
   3240 	ieee80211_input(ic, m, ni, rssi, device_timestamp);
   3241 	ieee80211_free_node(ni);
   3242 }
   3243 
   3244 static void
   3245 iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
   3246 	struct iwm_node *in)
   3247 {
   3248 	struct ieee80211com *ic = &sc->sc_ic;
   3249 	struct ifnet *ifp = IC2IFP(ic);
   3250 	struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
   3251 	int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
   3252 	int failack = tx_resp->failure_frame;
   3253 
   3254 	KASSERT(tx_resp->frame_count == 1);
   3255 
   3256 	/* Update rate control statistics. */
   3257 	in->in_amn.amn_txcnt++;
   3258 	if (failack > 0) {
   3259 		in->in_amn.amn_retrycnt++;
   3260 	}
   3261 
   3262 	if (status != IWM_TX_STATUS_SUCCESS &&
   3263 	    status != IWM_TX_STATUS_DIRECT_DONE)
   3264 		ifp->if_oerrors++;
   3265 	else
   3266 		ifp->if_opackets++;
   3267 }
   3268 
/*
 * TX-done notification handler: locate the TX ring slot from the
 * command header, account the result, release the frame's DMA map,
 * mbuf and node reference, and restart output if the ring drained
 * below the low-water mark.
 */
static void
iwm_mvm_rx_tx_cmd(struct iwm_softc *sc,
	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = IC2IFP(ic);
	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
	int idx = cmd_hdr->idx;
	int qid = cmd_hdr->qid;
	struct iwm_tx_ring *ring = &sc->txq[qid];
	struct iwm_tx_data *txd = &ring->data[idx];
	struct iwm_node *in = txd->in;

	if (txd->done) {
		DPRINTF(("%s: got tx interrupt that's already been handled!\n",
		    DEVNAME(sc)));
		return;
	}

	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
	    BUS_DMASYNC_POSTREAD);

	/* A completion arrived, so the watchdog can be disarmed. */
	sc->sc_tx_timer = 0;

	iwm_mvm_rx_tx_cmd_single(sc, pkt, in);

	/* Unmap and free mbuf. */
	bus_dmamap_sync(sc->sc_dmat, txd->map, 0, txd->map->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, txd->map);
	m_freem(txd->m);

	DPRINTFN(8, ("free txd %p, in %p\n", txd, txd->in));
	KASSERT(txd->done == 0);
	txd->done = 1;
	KASSERT(txd->in);

	/* Drop the node reference taken when the frame was queued. */
	txd->m = NULL;
	txd->in = NULL;
	ieee80211_free_node(&in->in_ni);

	if (--ring->queued < IWM_TX_RING_LOMARK) {
		sc->qfullmsk &= ~(1 << ring->qid);
		if (sc->qfullmsk == 0 && (ifp->if_flags & IFF_OACTIVE)) {
			ifp->if_flags &= ~IFF_OACTIVE;
			/*
			 * Well, we're in interrupt context, but then again
			 * I guess net80211 does all sorts of stunts in
			 * interrupt context, so maybe this is no biggie.
			 */
			(*ifp->if_start)(ifp);
		}
	}
}
   3323 
   3324 /*
   3325  * BEGIN iwlwifi/mvm/binding.c
   3326  */
   3327 
   3328 static int
   3329 iwm_mvm_binding_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action)
   3330 {
   3331 	struct iwm_binding_cmd cmd;
   3332 	struct iwm_mvm_phy_ctxt *phyctxt = in->in_phyctxt;
   3333 	int i, ret;
   3334 	uint32_t status;
   3335 
   3336 	memset(&cmd, 0, sizeof(cmd));
   3337 
   3338 	cmd.id_and_color
   3339 	    = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
   3340 	cmd.action = htole32(action);
   3341 	cmd.phy = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
   3342 
   3343 	cmd.macs[0] = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
   3344 	for (i = 1; i < IWM_MAX_MACS_IN_BINDING; i++)
   3345 		cmd.macs[i] = htole32(IWM_FW_CTXT_INVALID);
   3346 
   3347 	status = 0;
   3348 	ret = iwm_mvm_send_cmd_pdu_status(sc, IWM_BINDING_CONTEXT_CMD,
   3349 	    sizeof(cmd), &cmd, &status);
   3350 	if (ret) {
   3351 		DPRINTF(("%s: Failed to send binding (action:%d): %d\n",
   3352 		    DEVNAME(sc), action, ret));
   3353 		return ret;
   3354 	}
   3355 
   3356 	if (status) {
   3357 		DPRINTF(("%s: Binding command failed: %u\n", DEVNAME(sc),
   3358 		    status));
   3359 		ret = EIO;
   3360 	}
   3361 
   3362 	return ret;
   3363 }
   3364 
/*
 * NOTE(review): the "add" argument is ignored; the binding is always
 * sent with IWM_FW_CTXT_ACTION_ADD.  The only caller in this file
 * passes add != 0, so this is currently harmless -- confirm before
 * adding a remove path.
 */
static int
iwm_mvm_binding_update(struct iwm_softc *sc, struct iwm_node *in, int add)
{
	return iwm_mvm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD);
}
   3370 
/* Add the MAC/PHY binding for this node's virtual interface. */
static int
iwm_mvm_binding_add_vif(struct iwm_softc *sc, struct iwm_node *in)
{
	return iwm_mvm_binding_update(sc, in, IWM_FW_CTXT_ACTION_ADD);
}
   3376 
   3377 /*
   3378  * END iwlwifi/mvm/binding.c
   3379  */
   3380 
   3381 /*
   3382  * BEGIN iwlwifi/mvm/phy-ctxt.c
   3383  */
   3384 
   3385 /*
   3386  * Construct the generic fields of the PHY context command
   3387  */
   3388 static void
   3389 iwm_mvm_phy_ctxt_cmd_hdr(struct iwm_softc *sc, struct iwm_mvm_phy_ctxt *ctxt,
   3390 	struct iwm_phy_context_cmd *cmd, uint32_t action, uint32_t apply_time)
   3391 {
   3392 	memset(cmd, 0, sizeof(struct iwm_phy_context_cmd));
   3393 
   3394 	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(ctxt->id,
   3395 	    ctxt->color));
   3396 	cmd->action = htole32(action);
   3397 	cmd->apply_time = htole32(apply_time);
   3398 }
   3399 
   3400 /*
   3401  * Add the phy configuration to the PHY context command
   3402  */
   3403 static void
   3404 iwm_mvm_phy_ctxt_cmd_data(struct iwm_softc *sc,
   3405 	struct iwm_phy_context_cmd *cmd, struct ieee80211_channel *chan,
   3406 	uint8_t chains_static, uint8_t chains_dynamic)
   3407 {
   3408 	struct ieee80211com *ic = &sc->sc_ic;
   3409 	uint8_t active_cnt, idle_cnt;
   3410 
   3411 	cmd->ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
   3412 	    IWM_PHY_BAND_24 : IWM_PHY_BAND_5;
   3413 
   3414 	cmd->ci.channel = ieee80211_chan2ieee(ic, chan);
   3415 	cmd->ci.width = IWM_PHY_VHT_CHANNEL_MODE20;
   3416 	cmd->ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
   3417 
   3418 	/* Set rx the chains */
   3419 	idle_cnt = chains_static;
   3420 	active_cnt = chains_dynamic;
   3421 
   3422 	cmd->rxchain_info = htole32(IWM_FW_VALID_RX_ANT(sc) <<
   3423 					IWM_PHY_RX_CHAIN_VALID_POS);
   3424 	cmd->rxchain_info |= htole32(idle_cnt << IWM_PHY_RX_CHAIN_CNT_POS);
   3425 	cmd->rxchain_info |= htole32(active_cnt <<
   3426 	    IWM_PHY_RX_CHAIN_MIMO_CNT_POS);
   3427 
   3428 	cmd->txchain_info = htole32(IWM_FW_VALID_TX_ANT(sc));
   3429 }
   3430 
   3431 /*
   3432  * Send a command
   3433  * only if something in the configuration changed: in case that this is the
   3434  * first time that the phy configuration is applied or in case that the phy
   3435  * configuration changed from the previous apply.
   3436  */
   3437 static int
   3438 iwm_mvm_phy_ctxt_apply(struct iwm_softc *sc,
   3439 	struct iwm_mvm_phy_ctxt *ctxt,
   3440 	uint8_t chains_static, uint8_t chains_dynamic,
   3441 	uint32_t action, uint32_t apply_time)
   3442 {
   3443 	struct iwm_phy_context_cmd cmd;
   3444 	int ret;
   3445 
   3446 	/* Set the command header fields */
   3447 	iwm_mvm_phy_ctxt_cmd_hdr(sc, ctxt, &cmd, action, apply_time);
   3448 
   3449 	/* Set the command data */
   3450 	iwm_mvm_phy_ctxt_cmd_data(sc, &cmd, ctxt->channel,
   3451 	    chains_static, chains_dynamic);
   3452 
   3453 	ret = iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONTEXT_CMD, IWM_CMD_SYNC,
   3454 	    sizeof(struct iwm_phy_context_cmd), &cmd);
   3455 	if (ret) {
   3456 		DPRINTF(("PHY ctxt cmd error. ret=%d\n", ret));
   3457 	}
   3458 	return ret;
   3459 }
   3460 
   3461 /*
   3462  * Send a command to add a PHY context based on the current HW configuration.
   3463  */
   3464 static int
   3465 iwm_mvm_phy_ctxt_add(struct iwm_softc *sc, struct iwm_mvm_phy_ctxt *ctxt,
   3466 	struct ieee80211_channel *chan,
   3467 	uint8_t chains_static, uint8_t chains_dynamic)
   3468 {
   3469 	ctxt->channel = chan;
   3470 	return iwm_mvm_phy_ctxt_apply(sc, ctxt,
   3471 	    chains_static, chains_dynamic, IWM_FW_CTXT_ACTION_ADD, 0);
   3472 }
   3473 
   3474 /*
   3475  * Send a command to modify the PHY context based on the current HW
   3476  * configuration. Note that the function does not check that the configuration
   3477  * changed.
   3478  */
   3479 static int
   3480 iwm_mvm_phy_ctxt_changed(struct iwm_softc *sc,
   3481 	struct iwm_mvm_phy_ctxt *ctxt, struct ieee80211_channel *chan,
   3482 	uint8_t chains_static, uint8_t chains_dynamic)
   3483 {
   3484 	ctxt->channel = chan;
   3485 	return iwm_mvm_phy_ctxt_apply(sc, ctxt,
   3486 	    chains_static, chains_dynamic, IWM_FW_CTXT_ACTION_MODIFY, 0);
   3487 }
   3488 
   3489 /*
   3490  * END iwlwifi/mvm/phy-ctxt.c
   3491  */
   3492 
   3493 /*
   3494  * transmit side
   3495  */
   3496 
   3497 /*
   3498  * Send a command to the firmware.  We try to implement the Linux
   3499  * driver interface for the routine.
   3500  * mostly from if_iwn (iwn_cmd()).
   3501  *
   3502  * For now, we always copy the first part and map the second one (if it exists).
   3503  */
   3504 static int
   3505 iwm_send_cmd(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
   3506 {
   3507 	struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
   3508 	struct iwm_tfd *desc;
   3509 	struct iwm_tx_data *data;
   3510 	struct iwm_device_cmd *cmd;
   3511 	struct mbuf *m;
   3512 	bus_addr_t paddr;
   3513 	uint32_t addr_lo;
   3514 	int error = 0, i, paylen, off, s;
   3515 	int code;
   3516 	int async, wantresp;
   3517 
   3518 	code = hcmd->id;
   3519 	async = hcmd->flags & IWM_CMD_ASYNC;
   3520 	wantresp = hcmd->flags & IWM_CMD_WANT_SKB;
   3521 
   3522 	for (i = 0, paylen = 0; i < __arraycount(hcmd->len); i++) {
   3523 		paylen += hcmd->len[i];
   3524 	}
   3525 
   3526 	/* if the command wants an answer, busy sc_cmd_resp */
   3527 	if (wantresp) {
   3528 		KASSERT(!async);
   3529 		while (sc->sc_wantresp != -1)
   3530 			tsleep(&sc->sc_wantresp, 0, "iwmcmdsl", 0);
   3531 		sc->sc_wantresp = ring->qid << 16 | ring->cur;
   3532 		DPRINTFN(12, ("wantresp is %x\n", sc->sc_wantresp));
   3533 	}
   3534 
   3535 	/*
   3536 	 * Is the hardware still available?  (after e.g. above wait).
   3537 	 */
   3538 	s = splnet();
   3539 	if (sc->sc_flags & IWM_FLAG_STOPPED) {
   3540 		error = ENXIO;
   3541 		goto out;
   3542 	}
   3543 
   3544 	desc = &ring->desc[ring->cur];
   3545 	data = &ring->data[ring->cur];
   3546 
   3547 	if (paylen > sizeof(cmd->data)) {
   3548 		/* Command is too large */
   3549 		if (sizeof(cmd->hdr) + paylen > IWM_RBUF_SIZE) {
   3550 			error = EINVAL;
   3551 			goto out;
   3552 		}
   3553 		m = m_gethdr(M_DONTWAIT, MT_DATA);
   3554 		if (m == NULL) {
   3555 			error = ENOMEM;
   3556 			goto out;
   3557 		}
   3558 		MEXTMALLOC(m, IWM_RBUF_SIZE, M_DONTWAIT);
   3559 		if (!(m->m_flags & M_EXT)) {
   3560 			m_freem(m);
   3561 			error = ENOMEM;
   3562 			goto out;
   3563 		}
   3564 		cmd = mtod(m, struct iwm_device_cmd *);
   3565 		error = bus_dmamap_load(sc->sc_dmat, data->map, cmd,
   3566 		    IWM_RBUF_SIZE, NULL, BUS_DMA_NOWAIT | BUS_DMA_WRITE);
   3567 		if (error != 0) {
   3568 			m_freem(m);
   3569 			goto out;
   3570 		}
   3571 		data->m = m;
   3572 		paddr = data->map->dm_segs[0].ds_addr;
   3573 	} else {
   3574 		cmd = &ring->cmd[ring->cur];
   3575 		paddr = data->cmd_paddr;
   3576 	}
   3577 
   3578 	cmd->hdr.code = code;
   3579 	cmd->hdr.flags = 0;
   3580 	cmd->hdr.qid = ring->qid;
   3581 	cmd->hdr.idx = ring->cur;
   3582 
   3583 	for (i = 0, off = 0; i < __arraycount(hcmd->data); i++) {
   3584 		if (hcmd->len[i] == 0)
   3585 			continue;
   3586 		memcpy(cmd->data + off, hcmd->data[i], hcmd->len[i]);
   3587 		off += hcmd->len[i];
   3588 	}
   3589 	KASSERT(off == paylen);
   3590 
   3591 	/* lo field is not aligned */
   3592 	addr_lo = htole32((uint32_t)paddr);
   3593 	memcpy(&desc->tbs[0].lo, &addr_lo, sizeof(uint32_t));
   3594 	desc->tbs[0].hi_n_len  = htole16(iwm_get_dma_hi_addr(paddr)
   3595 	    | ((sizeof(cmd->hdr) + paylen) << 4));
   3596 	desc->num_tbs = 1;
   3597 
   3598 	DPRINTFN(8, ("iwm_send_cmd 0x%x size=%zu %s\n",
   3599 	    code, sizeof(cmd->hdr) + paylen, async ? " (async)" : ""));
   3600 
   3601 	if (paylen > sizeof(cmd->data)) {
   3602 		bus_dmamap_sync(sc->sc_dmat, data->map, 0,
   3603 		    sizeof(cmd->hdr) + paylen, BUS_DMASYNC_PREWRITE);
   3604 	} else {
   3605 		bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
   3606 		    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
   3607 		    sizeof(cmd->hdr) + paylen, BUS_DMASYNC_PREWRITE);
   3608 	}
   3609 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
   3610 	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
   3611 	    sizeof (*desc), BUS_DMASYNC_PREWRITE);
   3612 
   3613 	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
   3614 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
   3615 	if (!iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
   3616 	    IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
   3617 	    (IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
   3618 	     IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000)) {
   3619 		DPRINTF(("%s: acquiring device failed\n", DEVNAME(sc)));
   3620 		error = EBUSY;
   3621 		goto out;
   3622 	}
   3623 
   3624 #if 0
   3625 	iwm_update_sched(sc, ring->qid, ring->cur, 0, 0);
   3626 #endif
   3627 	DPRINTF(("sending command 0x%x qid %d, idx %d\n",
   3628 	    code, ring->qid, ring->cur));
   3629 
   3630 	/* Kick command ring. */
   3631 	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
   3632 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
   3633 
   3634 	if (!async) {
   3635 		/* m..m-mmyy-mmyyyy-mym-ym m-my generation */
   3636 		int generation = sc->sc_generation;
   3637 		error = tsleep(desc, PCATCH, "iwmcmd", hz);
   3638 		if (error == 0) {
   3639 			/* if hardware is no longer up, return error */
   3640 			if (generation != sc->sc_generation) {
   3641 				error = ENXIO;
   3642 			} else {
   3643 				hcmd->resp_pkt = (void *)sc->sc_cmd_resp;
   3644 			}
   3645 		}
   3646 	}
   3647  out:
   3648 	if (wantresp && error != 0) {
   3649 		iwm_free_resp(sc, hcmd);
   3650 	}
   3651 	splx(s);
   3652 
   3653 	return error;
   3654 }
   3655 
   3656 /* iwlwifi: mvm/utils.c */
   3657 static int
   3658 iwm_mvm_send_cmd_pdu(struct iwm_softc *sc, uint8_t id,
   3659 	uint32_t flags, uint16_t len, const void *data)
   3660 {
   3661 	struct iwm_host_cmd cmd = {
   3662 		.id = id,
   3663 		.len = { len, },
   3664 		.data = { data, },
   3665 		.flags = flags,
   3666 	};
   3667 
   3668 	return iwm_send_cmd(sc, &cmd);
   3669 }
   3670 
   3671 /* iwlwifi: mvm/utils.c */
/*
 * Send a host command synchronously and extract the 32-bit status word
 * from the firmware response.  Takes ownership of the response buffer
 * (IWM_CMD_WANT_SKB) internally and always releases it before returning.
 */
static int
iwm_mvm_send_cmd_status(struct iwm_softc *sc,
	struct iwm_host_cmd *cmd, uint32_t *status)
{
	struct iwm_rx_packet *pkt;
	struct iwm_cmd_response *resp;
	int error, resp_len;

	//lockdep_assert_held(&mvm->mutex);

	/* The caller must not have claimed the response buffer itself. */
	KASSERT((cmd->flags & IWM_CMD_WANT_SKB) == 0);
	cmd->flags |= IWM_CMD_SYNC | IWM_CMD_WANT_SKB;

	if ((error = iwm_send_cmd(sc, cmd)) != 0)
		return error;
	pkt = cmd->resp_pkt;

	/* Can happen if RFKILL is asserted */
	if (!pkt) {
		/*
		 * NOTE(review): returns 0 without writing *status on this
		 * path -- callers should pre-initialize *status.
		 */
		error = 0;
		goto out_free_resp;
	}

	if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
		error = EIO;
		goto out_free_resp;
	}

	/* The response must be exactly one iwm_cmd_response. */
	resp_len = iwm_rx_packet_payload_len(pkt);
	if (resp_len != sizeof(*resp)) {
		error = EIO;
		goto out_free_resp;
	}

	resp = (void *)pkt->data;
	*status = le32toh(resp->status);
 out_free_resp:
	iwm_free_resp(sc, cmd);
	return error;
}
   3712 
   3713 /* iwlwifi/mvm/utils.c */
   3714 static int
   3715 iwm_mvm_send_cmd_pdu_status(struct iwm_softc *sc, uint8_t id,
   3716 	uint16_t len, const void *data, uint32_t *status)
   3717 {
   3718 	struct iwm_host_cmd cmd = {
   3719 		.id = id,
   3720 		.len = { len, },
   3721 		.data = { data, },
   3722 	};
   3723 
   3724 	return iwm_mvm_send_cmd_status(sc, &cmd, status);
   3725 }
   3726 
/*
 * Release the single shared sc_cmd_resp response buffer claimed by a
 * synchronous IWM_CMD_WANT_SKB command, and wake any thread blocked in
 * iwm_send_cmd() waiting to claim it.
 */
static void
iwm_free_resp(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
{
	KASSERT(sc->sc_wantresp != -1);
	KASSERT((hcmd->flags & (IWM_CMD_WANT_SKB|IWM_CMD_SYNC))
	    == (IWM_CMD_WANT_SKB|IWM_CMD_SYNC));
	/* -1 marks the response slot as free again. */
	sc->sc_wantresp = -1;
	wakeup(&sc->sc_wantresp);
}
   3736 
   3737 /*
   3738  * Process a "command done" firmware notification.  This is where we wakeup
   3739  * processes waiting for a synchronous command completion.
   3740  * from if_iwn
   3741  */
static void
iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
{
	struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
	struct iwm_tx_data *data;

	if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
		return;	/* Not a command ack. */
	}

	data = &ring->data[pkt->hdr.idx];

	/* If the command was mapped in an mbuf, free it. */
	if (data->m != NULL) {
		bus_dmamap_sync(sc->sc_dmat, data->map, 0,
		    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, data->map);
		m_freem(data->m);
		data->m = NULL;
	}
	/* Wake the thread sleeping on this descriptor in iwm_send_cmd(). */
	wakeup(&ring->desc[pkt->hdr.idx]);
}
   3764 
#if 0
/*
 * necessary only for block ack mode
 *
 * Update the byte-count table the TX scheduler reads for queue "qid",
 * slot "idx".  The length is stored with the station id in the top
 * nibble; entries below IWM_TFD_QUEUE_SIZE_BC_DUP are mirrored past
 * IWM_TFD_QUEUE_SIZE_MAX so hardware wrap-around reads stay valid.
 *
 * Fixed: the original referenced an undeclared variable "w" in both
 * bus_dmamap_sync() offset computations and would not have compiled
 * had this block been enabled.
 */
void
iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
	uint16_t len)
{
	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
	uint16_t *w;
	uint16_t w_val;

	scd_bc_tbl = sc->sched_dma.vaddr;

	len += 8; /* magic numbers came naturally from paris */
	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
		len = roundup(len, 4) / 4;

	w_val = htole16(sta_id << 12 | len);

	/* Update TX scheduler. */
	w = &scd_bc_tbl[qid].tfd_offset[idx];
	*w = w_val;
	bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
	    (char *)(void *)w - (char *)(void *)sc->sched_dma.vaddr,
	    sizeof(uint16_t), BUS_DMASYNC_PREWRITE);

	/* I really wonder what this is ?!? */
	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
		bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
		    (char *)(void *)(w + IWM_TFD_QUEUE_SIZE_MAX) -
		    (char *)(void *)sc->sched_dma.vaddr,
		    sizeof (uint16_t), BUS_DMASYNC_PREWRITE);
	}
}
#endif
   3800 
   3801 /*
   3802  * Fill in various bit for management frames, and leave them
   3803  * unfilled for data frames (firmware takes care of that).
   3804  * Return the selected TX rate.
   3805  */
static const struct iwm_rate *
iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
	struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
{
	const struct iwm_rate *rinfo;
	int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	int ridx, rate_flags;
	int nrates = in->in_ni.ni_rates.rs_nrates;

	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
	tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;

	/* for data frames, use RS table */
	if (type == IEEE80211_FC0_TYPE_DATA) {
		if (sc->sc_ic.ic_fixed_rate != -1) {
			/* User pinned a rate; use the precomputed index. */
			tx->initial_rate_index = sc->sc_fixed_ridx;
		} else {
			/*
			 * Firmware rate table is indexed from the highest
			 * rate down, hence the (nrates-1) - txrate flip.
			 */
			tx->initial_rate_index = (nrates-1) - in->in_ni.ni_txrate;
		}
		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
		DPRINTFN(12, ("start with txrate %d\n", tx->initial_rate_index));
		return &iwm_rates[tx->initial_rate_index];
	}

	/* for non-data, use the lowest supported rate */
	ridx = in->in_ridx[0];
	rinfo = &iwm_rates[ridx];

	/* Antenna selection: first antenna (bit 0 of the MCS ant field). */
	rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
	if (IWM_RIDX_IS_CCK(ridx))
		rate_flags |= IWM_RATE_MCS_CCK_MSK;
	tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);

	return rinfo;
}
   3841 
#define TB0_SIZE 16
/*
 * Encapsulate an outgoing frame for ring "ac": build the TX command,
 * DMA-map the payload, fill the TFD and kick the firmware write pointer.
 * The mbuf is consumed on all paths (freed on error, owned by the ring
 * slot on success until iwm_cmd_done/tx-done reclaims it).
 */
static int
iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (void *)ni;
	struct iwm_tx_ring *ring;
	struct iwm_tx_data *data;
	struct iwm_tfd *desc;
	struct iwm_device_cmd *cmd;
	struct iwm_tx_cmd *tx;
	struct ieee80211_frame *wh;
	struct ieee80211_key *k = NULL;
	struct mbuf *m1;
	const struct iwm_rate *rinfo;
	uint32_t flags;
	u_int hdrlen;
	bus_dma_segment_t *seg;
	uint8_t tid, type;
	int i, totlen, error, pad;
	int hdrlen2;

	wh = mtod(m, struct ieee80211_frame *);
	hdrlen = ieee80211_anyhdrsize(wh);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;

	/* Sanity cross-check of the computed 802.11 header length. */
	hdrlen2 = (ieee80211_has_qos(wh)) ?
	    sizeof (struct ieee80211_qosframe) :
	    sizeof (struct ieee80211_frame);

	if (hdrlen != hdrlen2)
		DPRINTF(("%s: hdrlen error (%d != %d)\n",
		    DEVNAME(sc), hdrlen, hdrlen2));

	tid = 0;

	ring = &sc->txq[ac];
	desc = &ring->desc[ring->cur];
	memset(desc, 0, sizeof(*desc));
	data = &ring->data[ring->cur];

	/* Fill out iwm_tx_cmd to send to the firmware */
	cmd = &ring->cmd[ring->cur];
	cmd->hdr.code = IWM_TX_CMD;
	cmd->hdr.flags = 0;
	cmd->hdr.qid = ring->qid;
	cmd->hdr.idx = ring->cur;

	tx = (void *)cmd->data;
	memset(tx, 0, sizeof(*tx));

	rinfo = iwm_tx_fill_cmd(sc, in, wh, tx);

	/* Radiotap TX tap for bpf listeners, before encryption. */
	if (sc->sc_drvbpf != NULL) {
		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;

		tap->wt_flags = 0;
		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
		tap->wt_rate = rinfo->rate;
		tap->wt_hwqueue = ac;
		if (wh->i_fc[1] & IEEE80211_FC1_WEP)
			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;

		bpf_mtap2(sc->sc_drvbpf, tap, sc->sc_txtap_len, m);
	}

	/* Encrypt the frame if need be. */
	if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
		k = ieee80211_crypto_encap(ic, ni, m);
		if (k == NULL) {
			m_freem(m);
			return ENOBUFS;
		}
		/* Packet header may have moved, reset our local pointer. */
		wh = mtod(m, struct ieee80211_frame *);
	}
	totlen = m->m_pkthdr.len;

	flags = 0;
	/* Unicast frames expect an ACK from the peer. */
	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_ACK;
	}

	/* Request RTS/CTS protection for large non-data unicast frames. */
	if (type != IEEE80211_FC0_TYPE_DATA
	    && (totlen + IEEE80211_CRC_LEN > ic->ic_rtsthreshold)
	    && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
	}

	/* Multicast and management frames go out via the aux station. */
	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
	    type != IEEE80211_FC0_TYPE_DATA)
		tx->sta_id = sc->sc_aux_sta.sta_id;
	else
		tx->sta_id = IWM_STATION_ID;

	if (type == IEEE80211_FC0_TYPE_MGT) {
		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

		/* (Re)association frames get a slightly longer PM timeout. */
		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
			tx->pm_frame_timeout = htole16(3);
		else
			tx->pm_frame_timeout = htole16(2);
	} else {
		tx->pm_frame_timeout = htole16(0);
	}

	if (hdrlen & 3) {
		/* First segment length must be a multiple of 4. */
		flags |= IWM_TX_CMD_FLG_MH_PAD;
		pad = 4 - (hdrlen & 3);
	} else
		pad = 0;

	tx->driver_txop = 0;
	tx->next_frame_len = 0;

	tx->len = htole16(totlen);
	tx->tid_tspec = tid;
	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);

	/* Set physical address of "scratch area". */
	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);

	/* Copy 802.11 header in TX command. */
	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);

	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;

	tx->sec_ctl = 0;
	tx->tx_flags |= htole32(flags);

	/* Trim 802.11 header. */
	m_adj(m, hdrlen);

	error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
	    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
	if (error != 0) {
		if (error != EFBIG) {
			aprint_error_dev(sc->sc_dev,
			    "can't map mbuf (error %d)\n", error);
			m_freem(m);
			return error;
		}
		/* Too many DMA segments, linearize mbuf. */
		MGETHDR(m1, M_DONTWAIT, MT_DATA);
		if (m1 == NULL) {
			m_freem(m);
			return ENOBUFS;
		}
		if (m->m_pkthdr.len > MHLEN) {
			MCLGET(m1, M_DONTWAIT);
			if (!(m1->m_flags & M_EXT)) {
				m_freem(m);
				m_freem(m1);
				return ENOBUFS;
			}
		}
		m_copydata(m, 0, m->m_pkthdr.len, mtod(m1, void *));
		m1->m_pkthdr.len = m1->m_len = m->m_pkthdr.len;
		m_freem(m);
		m = m1;

		/* Retry the mapping with the single contiguous mbuf. */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
		    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "can't map mbuf (error %d)\n", error);
			m_freem(m);
			return error;
		}
	}
	data->m = m;
	data->in = in;
	data->done = 0;

	DPRINTFN(8, ("sending txd %p, in %p\n", data, data->in));
	KASSERT(data->in != NULL);

	DPRINTFN(8, ("sending data: qid=%d idx=%d len=%d nsegs=%d\n",
	    ring->qid, ring->cur, totlen, data->map->dm_nsegs));

	/* Fill TX descriptor. */
	desc->num_tbs = 2 + data->map->dm_nsegs;

	/* TB0/TB1 cover the command header + TX command + 802.11 header. */
	desc->tbs[0].lo = htole32(data->cmd_paddr);
	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    (TB0_SIZE << 4);
	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
	      + hdrlen + pad - TB0_SIZE) << 4);

	/* Other DMA segments are for data payload. */
	seg = data->map->dm_segs;
	for (i = 0; i < data->map->dm_nsegs; i++, seg++) {
		desc->tbs[i+2].lo = htole32(seg->ds_addr);
		desc->tbs[i+2].hi_n_len = \
		    htole16(iwm_get_dma_hi_addr(seg->ds_addr))
		    | ((seg->ds_len) << 4);
	}

	/* Flush payload, command and descriptor before the doorbell. */
	bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
	    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
	    sizeof (*cmd), BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
	    sizeof (*desc), BUS_DMASYNC_PREWRITE);

#if 0
	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
#endif

	/* Kick TX ring. */
	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	/* Mark TX ring as full if we reach a certain threshold. */
	if (++ring->queued > IWM_TX_RING_HIMARK) {
		sc->qfullmsk |= 1 << ring->qid;
	}

	return 0;
}
   4070 
#if 0
/* not necessary? */
/*
 * Ask the firmware to flush the TX FIFOs selected by "tfd_msk",
 * synchronously or asynchronously depending on "sync".
 * Currently compiled out.
 */
static int
iwm_mvm_flush_tx_path(struct iwm_softc *sc, int tfd_msk, int sync)
{
	struct iwm_tx_path_flush_cmd flush_cmd = {
		.queues_ctl = htole32(tfd_msk),
		.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
	};
	int ret;

	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH,
	    sync ? IWM_CMD_SYNC : IWM_CMD_ASYNC,
	    sizeof(flush_cmd), &flush_cmd);
	if (ret)
		aprint_error_dev(sc->sc_dev, "Flushing tx queue failed: %d\n",
		    ret);
	return ret;
}
#endif
   4091 
   4092 
   4093 /*
   4094  * BEGIN mvm/power.c
   4095  */
   4096 
   4097 #define IWM_POWER_KEEP_ALIVE_PERIOD_SEC    25
   4098 
/*
 * Push a beacon filter configuration to the firmware and, on success,
 * dump the configuration for debugging.
 */
static int
iwm_mvm_beacon_filter_send_cmd(struct iwm_softc *sc,
	struct iwm_beacon_filter_cmd *cmd)
{
	int ret;

	ret = iwm_mvm_send_cmd_pdu(sc, IWM_REPLY_BEACON_FILTERING_CMD,
	    IWM_CMD_SYNC, sizeof(struct iwm_beacon_filter_cmd), cmd);

	if (!ret) {
		DPRINTF(("ba_enable_beacon_abort is: %d\n",
		    le32toh(cmd->ba_enable_beacon_abort)));
		DPRINTF(("ba_escape_timer is: %d\n",
		    le32toh(cmd->ba_escape_timer)));
		DPRINTF(("bf_debug_flag is: %d\n",
		    le32toh(cmd->bf_debug_flag)));
		DPRINTF(("bf_enable_beacon_filter is: %d\n",
		    le32toh(cmd->bf_enable_beacon_filter)));
		DPRINTF(("bf_energy_delta is: %d\n",
		    le32toh(cmd->bf_energy_delta)));
		DPRINTF(("bf_escape_timer is: %d\n",
		    le32toh(cmd->bf_escape_timer)));
		DPRINTF(("bf_roaming_energy_delta is: %d\n",
		    le32toh(cmd->bf_roaming_energy_delta)));
		DPRINTF(("bf_roaming_state is: %d\n",
		    le32toh(cmd->bf_roaming_state)));
		DPRINTF(("bf_temp_threshold is: %d\n",
		    le32toh(cmd->bf_temp_threshold)));
		DPRINTF(("bf_temp_fast_filter is: %d\n",
		    le32toh(cmd->bf_temp_fast_filter)));
		DPRINTF(("bf_temp_slow_filter is: %d\n",
		    le32toh(cmd->bf_temp_slow_filter)));
	}
	return ret;
}
   4134 
/*
 * Propagate the driver's current beacon-abort state into a beacon
 * filter command.  ("in" is unused here but kept for parity with the
 * iwlwifi original.)
 */
static void
iwm_mvm_beacon_filter_set_cqm_params(struct iwm_softc *sc,
	struct iwm_node *in, struct iwm_beacon_filter_cmd *cmd)
{
	cmd->ba_enable_beacon_abort = htole32(sc->sc_bf.ba_enabled);
}
   4141 
/*
 * Enable or disable beacon abort in the firmware's beacon filter.
 * No-op unless beacon filtering is currently enabled.
 */
static int
iwm_mvm_update_beacon_abort(struct iwm_softc *sc, struct iwm_node *in,
	int enable)
{
	struct iwm_beacon_filter_cmd cmd = {
		IWM_BF_CMD_CONFIG_DEFAULTS,
		.bf_enable_beacon_filter = htole32(1),
		.ba_enable_beacon_abort = htole32(enable),
	};

	if (!sc->sc_bf.bf_enabled)
		return 0;

	/* Record the new state before telling the firmware. */
	sc->sc_bf.ba_enabled = enable;
	iwm_mvm_beacon_filter_set_cqm_params(sc, in, &cmd);
	return iwm_mvm_beacon_filter_send_cmd(sc, &cmd);
}
   4159 
/*
 * Debug-dump a MAC power table command.  Only CAM (continuously active
 * mode, i.e. power management disabled) is supported by this driver;
 * any other configuration trips the KASSERT below.
 */
static void
iwm_mvm_power_log(struct iwm_softc *sc, struct iwm_mac_power_cmd *cmd)
{
	DPRINTF(("Sending power table command on mac id 0x%X for "
	    "power level %d, flags = 0x%X\n",
	    cmd->id_and_color, IWM_POWER_SCHEME_CAM, le16toh(cmd->flags)));
	DPRINTF(("Keep alive = %u sec\n", le16toh(cmd->keep_alive_seconds)));

	if (!(cmd->flags & htole16(IWM_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK))) {
		DPRINTF(("Disable power management\n"));
		return;
	}
	/* Power management enabled: unreachable in this driver. */
	KASSERT(0);

#if 0
	DPRINTF(mvm, "Rx timeout = %u usec\n",
			le32_to_cpu(cmd->rx_data_timeout));
	DPRINTF(mvm, "Tx timeout = %u usec\n",
			le32_to_cpu(cmd->tx_data_timeout));
	if (cmd->flags & cpu_to_le16(IWM_POWER_FLAGS_SKIP_OVER_DTIM_MSK))
		DPRINTF(mvm, "DTIM periods to skip = %u\n",
				cmd->skip_dtim_periods);
	if (cmd->flags & cpu_to_le16(IWM_POWER_FLAGS_LPRX_ENA_MSK))
		DPRINTF(mvm, "LP RX RSSI threshold = %u\n",
				cmd->lprx_rssi_threshold);
	if (cmd->flags & cpu_to_le16(IWM_POWER_FLAGS_ADVANCE_PM_ENA_MSK)) {
		DPRINTF(mvm, "uAPSD enabled\n");
		DPRINTF(mvm, "Rx timeout (uAPSD) = %u usec\n",
				le32_to_cpu(cmd->rx_data_timeout_uapsd));
		DPRINTF(mvm, "Tx timeout (uAPSD) = %u usec\n",
				le32_to_cpu(cmd->tx_data_timeout_uapsd));
		DPRINTF(mvm, "QNDP TID = %d\n", cmd->qndp_tid);
		DPRINTF(mvm, "ACs flags = 0x%x\n", cmd->uapsd_ac_flags);
		DPRINTF(mvm, "Max SP = %d\n", cmd->uapsd_max_sp);
	}
#endif
}
   4197 
   4198 static void
   4199 iwm_mvm_power_build_cmd(struct iwm_softc *sc, struct iwm_node *in,
   4200 	struct iwm_mac_power_cmd *cmd)
   4201 {
   4202 	struct ieee80211com *ic = &sc->sc_ic;
   4203 	struct ieee80211_node *ni = &in->in_ni;
   4204 	int dtimper, dtimper_msec;
   4205 	int keep_alive;
   4206 
   4207 	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
   4208 	    in->in_color));
   4209 	dtimper = ic->ic_dtim_period ?: 1;
   4210 
   4211 	/*
   4212 	 * Regardless of power management state the driver must set
   4213 	 * keep alive period. FW will use it for sending keep alive NDPs
   4214 	 * immediately after association. Check that keep alive period
   4215 	 * is at least 3 * DTIM
   4216 	 */
   4217 	dtimper_msec = dtimper * ni->ni_intval;
   4218 	keep_alive
   4219 	    = MAX(3 * dtimper_msec, 1000 * IWM_POWER_KEEP_ALIVE_PERIOD_SEC);
   4220 	keep_alive = roundup(keep_alive, 1000) / 1000;
   4221 	cmd->keep_alive_seconds = htole16(keep_alive);
   4222 }
   4223 
/*
 * Build and send the per-MAC power command, then update beacon abort
 * to match whether power management ended up enabled.
 */
static int
iwm_mvm_power_mac_update_mode(struct iwm_softc *sc, struct iwm_node *in)
{
	int ret;
	int ba_enable;
	struct iwm_mac_power_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));

	iwm_mvm_power_build_cmd(sc, in, &cmd);
	iwm_mvm_power_log(sc, &cmd);

	if ((ret = iwm_mvm_send_cmd_pdu(sc, IWM_MAC_PM_POWER_TABLE,
	    IWM_CMD_SYNC, sizeof(cmd), &cmd)) != 0)
		return ret;

	/* Beacon abort only makes sense with power management on. */
	ba_enable = !!(cmd.flags &
	    htole16(IWM_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
	return iwm_mvm_update_beacon_abort(sc, in, ba_enable);
}
   4244 
   4245 static int
   4246 iwm_mvm_power_update_device(struct iwm_softc *sc)
   4247 {
   4248 	struct iwm_device_power_cmd cmd = {
   4249 		.flags = htole16(IWM_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK),
   4250 	};
   4251 
   4252 	if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
   4253 		return 0;
   4254 
   4255 	cmd.flags |= htole16(IWM_DEVICE_POWER_FLAGS_CAM_MSK);
   4256 	DPRINTF(("Sending device power command with flags = 0x%X\n", cmd.flags));
   4257 
   4258 	return iwm_mvm_send_cmd_pdu(sc,
   4259 	    IWM_POWER_TABLE_CMD, IWM_CMD_SYNC, sizeof(cmd), &cmd);
   4260 }
   4261 
/*
 * Turn on firmware beacon filtering for this node and remember the
 * state in sc_bf on success.
 */
static int
iwm_mvm_enable_beacon_filter(struct iwm_softc *sc, struct iwm_node *in)
{
	struct iwm_beacon_filter_cmd cmd = {
		IWM_BF_CMD_CONFIG_DEFAULTS,
		.bf_enable_beacon_filter = htole32(1),
	};
	int ret;

	iwm_mvm_beacon_filter_set_cqm_params(sc, in, &cmd);
	ret = iwm_mvm_beacon_filter_send_cmd(sc, &cmd);

	if (ret == 0)
		sc->sc_bf.bf_enabled = 1;

	return ret;
}
   4279 
/*
 * Turn off firmware beacon filtering by sending an all-zero filter
 * command.  No-op unless the firmware advertises BF_UPDATED support.
 */
static int
iwm_mvm_disable_beacon_filter(struct iwm_softc *sc, struct iwm_node *in)
{
	struct iwm_beacon_filter_cmd cmd;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	if ((sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_BF_UPDATED) == 0)
		return 0;

	ret = iwm_mvm_beacon_filter_send_cmd(sc, &cmd);
	if (ret == 0)
		sc->sc_bf.bf_enabled = 0;

	return ret;
}
   4296 
#if 0
/*
 * Re-send the current beacon filter configuration if filtering is
 * enabled.  Currently compiled out (no caller).
 */
static int
iwm_mvm_update_beacon_filter(struct iwm_softc *sc, struct iwm_node *in)
{
	if (!sc->sc_bf.bf_enabled)
		return 0;

	return iwm_mvm_enable_beacon_filter(sc, in);
}
#endif
   4307 
   4308 /*
   4309  * END mvm/power.c
   4310  */
   4311 
   4312 /*
   4313  * BEGIN mvm/sta.c
   4314  */
   4315 
/*
 * Convert a v6 ADD_STA command to the v5 layout for older firmware
 * that lacks the STA_KEY_CMD capability.  Field-for-field copy; any
 * v6-only fields are simply dropped.
 */
static void
iwm_mvm_add_sta_cmd_v6_to_v5(struct iwm_mvm_add_sta_cmd_v6 *cmd_v6,
	struct iwm_mvm_add_sta_cmd_v5 *cmd_v5)
{
	memset(cmd_v5, 0, sizeof(*cmd_v5));

	cmd_v5->add_modify = cmd_v6->add_modify;
	cmd_v5->tid_disable_tx = cmd_v6->tid_disable_tx;
	cmd_v5->mac_id_n_color = cmd_v6->mac_id_n_color;
	memcpy(cmd_v5->addr, cmd_v6->addr, ETHER_ADDR_LEN);
	cmd_v5->sta_id = cmd_v6->sta_id;
	cmd_v5->modify_mask = cmd_v6->modify_mask;
	cmd_v5->station_flags = cmd_v6->station_flags;
	cmd_v5->station_flags_msk = cmd_v6->station_flags_msk;
	cmd_v5->add_immediate_ba_tid = cmd_v6->add_immediate_ba_tid;
	cmd_v5->remove_immediate_ba_tid = cmd_v6->remove_immediate_ba_tid;
	cmd_v5->add_immediate_ba_ssn = cmd_v6->add_immediate_ba_ssn;
	cmd_v5->sleep_tx_count = cmd_v6->sleep_tx_count;
	cmd_v5->sleep_state_flags = cmd_v6->sleep_state_flags;
	cmd_v5->assoc_id = cmd_v6->assoc_id;
	cmd_v5->beamform_flags = cmd_v6->beamform_flags;
	cmd_v5->tfd_queue_msk = cmd_v6->tfd_queue_msk;
}
   4339 
/*
 * Send an ADD_STA command, picking the v6 or v5 wire format based on
 * the firmware's STA_KEY_CMD capability, and return its status word.
 */
static int
iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *sc,
	struct iwm_mvm_add_sta_cmd_v6 *cmd, int *status)
{
	struct iwm_mvm_add_sta_cmd_v5 cmd_v5;

	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_STA_KEY_CMD) {
		return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA,
		    sizeof(*cmd), cmd, status);
	}

	/* Older firmware: downgrade to the v5 layout. */
	iwm_mvm_add_sta_cmd_v6_to_v5(cmd, &cmd_v5);

	return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(cmd_v5),
	    &cmd_v5, status);
}
   4356 
   4357 /* send station add/update command to firmware */
/* send station add/update command to firmware */
static int
iwm_mvm_sta_send_to_fw(struct iwm_softc *sc, struct iwm_node *in, int update)
{
	struct iwm_mvm_add_sta_cmd_v6 add_sta_cmd;
	int ret;
	uint32_t status;

	memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));

	add_sta_cmd.sta_id = IWM_STATION_ID;
	add_sta_cmd.mac_id_n_color
	    = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
	/* Queue mask and address only matter for the initial add. */
	if (!update) {
		add_sta_cmd.tfd_queue_msk = htole32(0xf);
		IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
	}
	add_sta_cmd.add_modify = update ? 1 : 0;
	add_sta_cmd.station_flags_msk
	    |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);

	status = IWM_ADD_STA_SUCCESS;
	ret = iwm_mvm_send_add_sta_cmd_status(sc, &add_sta_cmd, &status);
	if (ret)
		return ret;

	/* Map any non-success firmware status onto EIO. */
	switch (status) {
	case IWM_ADD_STA_SUCCESS:
		break;
	default:
		ret = EIO;
		DPRINTF(("IWM_ADD_STA failed\n"));
		break;
	}

	return ret;
}
   4394 
static int
iwm_mvm_add_sta(struct iwm_softc *sc, struct iwm_node *in)
{
	/* A brand-new station entry is an "add" (update == 0). */
	return iwm_mvm_sta_send_to_fw(sc, in, 0);
}
   4406 
static int
iwm_mvm_update_sta(struct iwm_softc *sc, struct iwm_node *in)
{
	int err;

	/* An existing station entry is a "modify" (update == 1). */
	err = iwm_mvm_sta_send_to_fw(sc, in, 1);
	return err;
}
   4412 
/*
 * Add an internal (driver-managed, non-peer) station such as the aux
 * station.  "addr" may be NULL for stations without a MAC address.
 */
static int
iwm_mvm_add_int_sta_common(struct iwm_softc *sc, struct iwm_int_sta *sta,
	const uint8_t *addr, uint16_t mac_id, uint16_t color)
{
	struct iwm_mvm_add_sta_cmd_v6 cmd;
	int ret;
	uint32_t status;

	memset(&cmd, 0, sizeof(cmd));
	cmd.sta_id = sta->sta_id;
	cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(mac_id, color));

	cmd.tfd_queue_msk = htole32(sta->tfd_queue_msk);

	if (addr)
		memcpy(cmd.addr, addr, ETHER_ADDR_LEN);

	ret = iwm_mvm_send_add_sta_cmd_status(sc, &cmd, &status);
	if (ret)
		return ret;

	switch (status) {
	case IWM_ADD_STA_SUCCESS:
		DPRINTF(("Internal station added.\n"));
		return 0;
	default:
		DPRINTF(("%s: Add internal station failed, status=0x%x\n",
		    DEVNAME(sc), status));
		ret = EIO;
		break;
	}
	return ret;
}
   4446 
   4447 static int
   4448 iwm_mvm_add_aux_sta(struct iwm_softc *sc)
   4449 {
   4450 	int ret;
   4451 
   4452 	sc->sc_aux_sta.sta_id = 3;
   4453 	sc->sc_aux_sta.tfd_queue_msk = 0;
   4454 
   4455 	ret = iwm_mvm_add_int_sta_common(sc,
   4456 	    &sc->sc_aux_sta, NULL, IWM_MAC_INDEX_AUX, 0);
   4457 
   4458 	if (ret)
   4459 		memset(&sc->sc_aux_sta, 0, sizeof(sc->sc_aux_sta));
   4460 	return ret;
   4461 }
   4462 
   4463 /*
   4464  * END mvm/sta.c
   4465  */
   4466 
   4467 /*
   4468  * BEGIN mvm/scan.c
   4469  */
   4470 
   4471 #define IWM_PLCP_QUIET_THRESH 1
   4472 #define IWM_ACTIVE_QUIET_TIME 10
   4473 #define LONG_OUT_TIME_PERIOD 600
   4474 #define SHORT_OUT_TIME_PERIOD 200
   4475 #define SUSPEND_TIME_PERIOD 100
   4476 
   4477 static uint16_t
   4478 iwm_mvm_scan_rx_chain(struct iwm_softc *sc)
   4479 {
   4480 	uint16_t rx_chain;
   4481 	uint8_t rx_ant;
   4482 
   4483 	rx_ant = IWM_FW_VALID_RX_ANT(sc);
   4484 	rx_chain = rx_ant << IWM_PHY_RX_CHAIN_VALID_POS;
   4485 	rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
   4486 	rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_SEL_POS;
   4487 	rx_chain |= 0x1 << IWM_PHY_RX_CHAIN_DRIVER_FORCE_POS;
   4488 	return htole16(rx_chain);
   4489 }
   4490 
   4491 #define ieee80211_tu_to_usec(a) (1024*(a))
   4492 
   4493 static uint32_t
   4494 iwm_mvm_scan_max_out_time(struct iwm_softc *sc, uint32_t flags, int is_assoc)
   4495 {
   4496 	if (!is_assoc)
   4497 		return 0;
   4498 	if (flags & 0x1)
   4499 		return htole32(ieee80211_tu_to_usec(SHORT_OUT_TIME_PERIOD));
   4500 	return htole32(ieee80211_tu_to_usec(LONG_OUT_TIME_PERIOD));
   4501 }
   4502 
   4503 static uint32_t
   4504 iwm_mvm_scan_suspend_time(struct iwm_softc *sc, int is_assoc)
   4505 {
   4506 	if (!is_assoc)
   4507 		return 0;
   4508 	return htole32(ieee80211_tu_to_usec(SUSPEND_TIME_PERIOD));
   4509 }
   4510 
   4511 static uint32_t
   4512 iwm_mvm_scan_rxon_flags(struct iwm_softc *sc, int flags)
   4513 {
   4514 	if (flags & IEEE80211_CHAN_2GHZ)
   4515 		return htole32(IWM_PHY_BAND_24);
   4516 	else
   4517 		return htole32(IWM_PHY_BAND_5);
   4518 }
   4519 
   4520 static uint32_t
   4521 iwm_mvm_scan_rate_n_flags(struct iwm_softc *sc, int flags, int no_cck)
   4522 {
   4523 	uint32_t tx_ant;
   4524 	int i, ind;
   4525 
   4526 	for (i = 0, ind = sc->sc_scan_last_antenna;
   4527 	    i < IWM_RATE_MCS_ANT_NUM; i++) {
   4528 		ind = (ind + 1) % IWM_RATE_MCS_ANT_NUM;
   4529 		if (IWM_FW_VALID_TX_ANT(sc) & (1 << ind)) {
   4530 			sc->sc_scan_last_antenna = ind;
   4531 			break;
   4532 		}
   4533 	}
   4534 	tx_ant = (1 << sc->sc_scan_last_antenna) << IWM_RATE_MCS_ANT_POS;
   4535 
   4536 	if ((flags & IEEE80211_CHAN_2GHZ) && !no_cck)
   4537 		return htole32(IWM_RATE_1M_PLCP | IWM_RATE_MCS_CCK_MSK |
   4538 				   tx_ant);
   4539 	else
   4540 		return htole32(IWM_RATE_6M_PLCP | tx_ant);
   4541 }
   4542 
   4543 /*
   4544  * If req->n_ssids > 0, it means we should do an active scan.
   4545  * In case of active scan w/o directed scan, we receive a zero-length SSID
   4546  * just to notify that this scan is active and not passive.
   4547  * In order to notify the FW of the number of SSIDs we wish to scan (including
   4548  * the zero-length one), we need to set the corresponding bits in chan->type,
 * one for each SSID, and set the active bit (first).  Since the first
 * SSID is already included in the probe template, we need to set only
 * req->n_ssids - 1 bits in addition to the first bit.
   4552  */
   4553 static uint16_t
   4554 iwm_mvm_get_active_dwell(struct iwm_softc *sc, int flags, int n_ssids)
   4555 {
   4556 	if (flags & IEEE80211_CHAN_2GHZ)
   4557 		return 30  + 3 * (n_ssids + 1);
   4558 	return 20  + 2 * (n_ssids + 1);
   4559 }
   4560 
   4561 static uint16_t
   4562 iwm_mvm_get_passive_dwell(struct iwm_softc *sc, int flags)
   4563 {
   4564 	return (flags & IEEE80211_CHAN_2GHZ) ? 100 + 20 : 100 + 10;
   4565 }
   4566 
/*
 * Append one iwm_scan_channel entry, placed directly behind the probe
 * request frame in cmd->data, for every net80211 channel whose
 * ic_flags contain all bits in "flags" (the requested band).  Returns
 * the number of channel entries written.
 */
static int
iwm_mvm_scan_fill_channels(struct iwm_softc *sc, struct iwm_scan_cmd *cmd,
	int flags, int n_ssids, int basic_ssid)
{
	struct ieee80211com *ic = &sc->sc_ic;
	uint16_t passive_dwell = iwm_mvm_get_passive_dwell(sc, flags);
	uint16_t active_dwell = iwm_mvm_get_active_dwell(sc, flags, n_ssids);
	/* Channel array starts right after the probe request template. */
	struct iwm_scan_channel *chan = (struct iwm_scan_channel *)
		(cmd->data + le16toh(cmd->tx_cmd.len));
	/* One active-probe bit per SSID; see the comment further above. */
	int type = (1 << n_ssids) - 1;
	struct ieee80211_channel *c;
	int nchan;

	/* Extra bit for the broadcast SSID not in the probe template. */
	if (!basic_ssid)
		type |= (1 << n_ssids);

	/* ic_channels[0] is unused in net80211; start at index 1. */
	for (nchan = 0, c = &ic->ic_channels[1];
	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX];
	    c++) {
		if ((c->ic_flags & flags) != flags)
			continue;

		chan->channel = htole16(ieee80211_mhz2ieee(c->ic_freq, flags));
		chan->type = htole32(type);
		/* Never probe actively on passive-only channels. */
		if (c->ic_flags & IEEE80211_CHAN_PASSIVE)
			chan->type &= htole32(~IWM_SCAN_CHANNEL_TYPE_ACTIVE);
		chan->active_dwell = htole16(active_dwell);
		chan->passive_dwell = htole16(passive_dwell);
		chan->iteration_count = htole16(1);
		chan++;
		nchan++;
	}
	if (nchan == 0)
		DPRINTF(("%s: NO CHANNEL!\n", DEVNAME(sc)));
	return nchan;
}
   4603 
   4604 /*
   4605  * Fill in probe request with the following parameters:
   4606  * TA is our vif HW address, which mac80211 ensures we have.
   4607  * Packet is broadcasted, so this is both SA and DA.
   4608  * The probe request IE is made out of two: first comes the most prioritized
   4609  * SSID if a directed scan is requested. Second comes whatever extra
   4610  * information was given to us as the scan request IE.
   4611  */
static uint16_t
iwm_mvm_fill_probe_req(struct iwm_softc *sc, struct ieee80211_frame *frame,
	const uint8_t *ta, int n_ssids, const uint8_t *ssid, int ssid_len,
	const uint8_t *ie, int ie_len, int left)
{
	/* Running total of bytes written into "frame"; returned. */
	int len = 0;
	uint8_t *pos = NULL;

	/* Make sure there is enough space for the probe request,
	 * two mandatory IEs and the data */
	left -= sizeof(*frame);
	if (left < 0)
		return 0;

	/* Broadcast probe request; "ta" is our own MAC address. */
	frame->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
	frame->i_fc[1] = IEEE80211_FC1_DIR_NODS;
	IEEE80211_ADDR_COPY(frame->i_addr1, etherbroadcastaddr);
	memcpy(frame->i_addr2, ta, ETHER_ADDR_LEN);
	IEEE80211_ADDR_COPY(frame->i_addr3, etherbroadcastaddr);

	len += sizeof(*frame);
	CTASSERT(sizeof(*frame) == 24);

	/* for passive scans, no need to fill anything */
	if (n_ssids == 0)
		return (uint16_t)len;

	/* points to the payload of the request */
	pos = (uint8_t *)frame + sizeof(*frame);

	/* fill in our SSID IE */
	left -= ssid_len + 2;
	if (left < 0)
		return 0;
	/* Element header: ID + length (a zero-length SSID is valid and
	 * marks the scan as active without directing it). */
	*pos++ = IEEE80211_ELEMID_SSID;
	*pos++ = ssid_len;
	if (ssid && ssid_len) { /* ssid_len may be == 0 even if ssid is valid */
		memcpy(pos, ssid, ssid_len);
		pos += ssid_len;
	}

	len += ssid_len + 2;

	/* If the extra IEs don't fit, send the frame without them. */
	if (left < ie_len)
		return len;

	if (ie && ie_len) {
		memcpy(pos, ie, ie_len);
		len += ie_len;
	}

	return (uint16_t)len;
}
   4666 
/*
 * Build and synchronously send a one-shot scan request for the given
 * band.  "ssid"/"ssid_len" name one SSID to probe actively when
 * n_ssids > 0; with n_ssids == 0 the scan is passive.  Returns 0 on
 * success, or EIO if the firmware rejected or failed to schedule the
 * scan (sc_scanband is cleared in that case).
 */
static int
iwm_mvm_scan_request(struct iwm_softc *sc, int flags,
	int n_ssids, uint8_t *ssid, int ssid_len)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_host_cmd hcmd = {
		.id = IWM_SCAN_REQUEST_CMD,
		.len = { 0, },
		.data = { sc->sc_scan_cmd, },
		.flags = IWM_CMD_SYNC,
		.dataflags = { IWM_HCMD_DFL_NOCOPY, },
	};
	struct iwm_scan_cmd *cmd = sc->sc_scan_cmd;
	/* Always scan as if unassociated: no out-of-channel time limit. */
	int is_assoc = 0;
	int ret;
	uint32_t status;
	int basic_ssid = !(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_NO_BASIC_SSID);

	//lockdep_assert_held(&mvm->mutex);

	/* Remember which band is in flight for the completion handler. */
	sc->sc_scanband = flags & (IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_5GHZ);

	DPRINTF(("Handling ieee80211 scan request\n"));
	memset(cmd, 0, sc->sc_scan_cmd_len);

	cmd->quiet_time = htole16(IWM_ACTIVE_QUIET_TIME);
	cmd->quiet_plcp_th = htole16(IWM_PLCP_QUIET_THRESH);
	cmd->rxchain_sel_flags = iwm_mvm_scan_rx_chain(sc);
	cmd->max_out_time = iwm_mvm_scan_max_out_time(sc, 0, is_assoc);
	cmd->suspend_time = iwm_mvm_scan_suspend_time(sc, is_assoc);
	cmd->rxon_flags = iwm_mvm_scan_rxon_flags(sc, flags);
	cmd->filter_flags = htole32(IWM_MAC_FILTER_ACCEPT_GRP |
	    IWM_MAC_FILTER_IN_BEACON);

	cmd->type = htole32(IWM_SCAN_TYPE_FORCED);
	cmd->repeats = htole32(1);

	/*
	 * If the user asked for passive scan, don't change to active scan if
	 * you see any activity on the channel - remain passive.
	 */
	if (n_ssids > 0) {
		cmd->passive2active = htole16(1);
		cmd->scan_flags |= IWM_SCAN_FLAGS_PASSIVE2ACTIVE;
#if 0
		if (basic_ssid) {
			ssid = req->ssids[0].ssid;
			ssid_len = req->ssids[0].ssid_len;
		}
#endif
	} else {
		cmd->passive2active = 0;
		cmd->scan_flags &= ~IWM_SCAN_FLAGS_PASSIVE2ACTIVE;
	}

	/* TX command for the probe request embedded in cmd->data,
	 * sent via the aux station. */
	cmd->tx_cmd.tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
	    IWM_TX_CMD_FLG_BT_DIS);
	cmd->tx_cmd.sta_id = sc->sc_aux_sta.sta_id;
	cmd->tx_cmd.life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
	cmd->tx_cmd.rate_n_flags = iwm_mvm_scan_rate_n_flags(sc, flags, 1/*XXX*/);

	cmd->tx_cmd.len = htole16(iwm_mvm_fill_probe_req(sc,
			    (struct ieee80211_frame *)cmd->data,
			    ic->ic_myaddr, n_ssids, ssid, ssid_len,
			    NULL, 0, sc->sc_capa_max_probe_len));

	/* Channel entries follow the probe request in cmd->data. */
	cmd->channel_count
	    = iwm_mvm_scan_fill_channels(sc, cmd, flags, n_ssids, basic_ssid);

	/* Total command size: header + probe request + channel list. */
	cmd->len = htole16(sizeof(struct iwm_scan_cmd) +
		le16toh(cmd->tx_cmd.len) +
		(cmd->channel_count * sizeof(struct iwm_scan_channel)));
	hcmd.len[0] = le16toh(cmd->len);

	status = IWM_SCAN_RESPONSE_OK;
	ret = iwm_mvm_send_cmd_status(sc, &hcmd, &status);
	if (!ret && status == IWM_SCAN_RESPONSE_OK) {
		DPRINTF(("Scan request was sent successfully\n"));
	} else {
		/*
		 * If the scan failed, it usually means that the FW was unable
		 * to allocate the time events. Warn on it, but maybe we
		 * should try to send the command again with different params.
		 */
		sc->sc_scanband = 0;
		ret = EIO;
	}
	return ret;
}
   4756 
   4757 /*
   4758  * END mvm/scan.c
   4759  */
   4760 
   4761 /*
   4762  * BEGIN mvm/mac-ctxt.c
   4763  */
   4764 
/*
 * Compute the CCK and OFDM basic/ACK rate bitmaps for a MAC context
 * command.
 *
 * NOTE(review): the two loops below mark *every* CCK rate and every
 * non-HT OFDM rate as basic, rather than consulting the negotiated
 * basic rate set; the mandatory-rate fixups that follow are then
 * effectively no-ops.  Presumably a deliberate simplification of the
 * Linux original -- confirm against iwlwifi mvm/mac-ctxt.c.
 */
static void
iwm_mvm_ack_rates(struct iwm_softc *sc, struct iwm_node *in,
	int *cck_rates, int *ofdm_rates)
{
	int lowest_present_ofdm = 100;
	int lowest_present_cck = 100;
	uint8_t cck = 0;
	uint8_t ofdm = 0;
	int i;

	for (i = 0; i <= IWM_LAST_CCK_RATE; i++) {
		cck |= (1 << i);
		if (lowest_present_cck > i)
			lowest_present_cck = i;
	}
	for (i = IWM_FIRST_OFDM_RATE; i <= IWM_LAST_NON_HT_RATE; i++) {
		/* OFDM bitmap is indexed from the first OFDM rate. */
		int adj = i - IWM_FIRST_OFDM_RATE;
		ofdm |= (1 << adj);
		if (lowest_present_ofdm > i)
			lowest_present_ofdm = i;
	}

	/*
	 * Now we've got the basic rates as bitmaps in the ofdm and cck
	 * variables. This isn't sufficient though, as there might not
	 * be all the right rates in the bitmap. E.g. if the only basic
	 * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
	 * and 6 Mbps because the 802.11-2007 standard says in 9.6:
	 *
	 *    [...] a STA responding to a received frame shall transmit
	 *    its Control Response frame [...] at the highest rate in the
	 *    BSSBasicRateSet parameter that is less than or equal to the
	 *    rate of the immediately previous frame in the frame exchange
	 *    sequence ([...]) and that is of the same modulation class
	 *    ([...]) as the received frame. If no rate contained in the
	 *    BSSBasicRateSet parameter meets these conditions, then the
	 *    control frame sent in response to a received frame shall be
	 *    transmitted at the highest mandatory rate of the PHY that is
	 *    less than or equal to the rate of the received frame, and
	 *    that is of the same modulation class as the received frame.
	 *
	 * As a consequence, we need to add all mandatory rates that are
	 * lower than all of the basic rates to these bitmaps.
	 */

	if (IWM_RATE_24M_INDEX < lowest_present_ofdm)
		ofdm |= IWM_RATE_BIT_MSK(24) >> IWM_FIRST_OFDM_RATE;
	if (IWM_RATE_12M_INDEX < lowest_present_ofdm)
		ofdm |= IWM_RATE_BIT_MSK(12) >> IWM_FIRST_OFDM_RATE;
	/* 6M already there or needed so always add */
	ofdm |= IWM_RATE_BIT_MSK(6) >> IWM_FIRST_OFDM_RATE;

	/*
	 * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
	 * Note, however:
	 *  - if no CCK rates are basic, it must be ERP since there must
	 *    be some basic rates at all, so they're OFDM => ERP PHY
	 *    (or we're in 5 GHz, and the cck bitmap will never be used)
	 *  - if 11M is a basic rate, it must be ERP as well, so add 5.5M
	 *  - if 5.5M is basic, 1M and 2M are mandatory
	 *  - if 2M is basic, 1M is mandatory
	 *  - if 1M is basic, that's the only valid ACK rate.
	 * As a consequence, it's not as complicated as it sounds, just add
	 * any lower rates to the ACK rate bitmap.
	 */
	if (IWM_RATE_11M_INDEX < lowest_present_cck)
		cck |= IWM_RATE_BIT_MSK(11) >> IWM_FIRST_CCK_RATE;
	if (IWM_RATE_5M_INDEX < lowest_present_cck)
		cck |= IWM_RATE_BIT_MSK(5) >> IWM_FIRST_CCK_RATE;
	if (IWM_RATE_2M_INDEX < lowest_present_cck)
		cck |= IWM_RATE_BIT_MSK(2) >> IWM_FIRST_CCK_RATE;
	/* 1M already there or needed so always add */
	cck |= IWM_RATE_BIT_MSK(1) >> IWM_FIRST_CCK_RATE;

	*cck_rates = cck;
	*ofdm_rates = ofdm;
}
   4842 
/*
 * Fill the fields of a MAC context command that are common to all
 * context types: identity/color, BSSID, ACK rate bitmaps, preamble
 * and slot-time flags, and per-AC EDCA parameters.
 */
static void
iwm_mvm_mac_ctxt_cmd_common(struct iwm_softc *sc, struct iwm_node *in,
	struct iwm_mac_ctx_cmd *cmd, uint32_t action)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = ic->ic_bss;
	int cck_ack_rates, ofdm_ack_rates;
	int i;

	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
	    in->in_color));
	cmd->action = htole32(action);

	cmd->mac_type = htole32(IWM_FW_MAC_TYPE_BSS_STA);
	cmd->tsf_id = htole32(in->in_tsfid);

	IEEE80211_ADDR_COPY(cmd->node_addr, ic->ic_myaddr);
	/* Advertise the BSSID only once associated; zero otherwise. */
	if (in->in_assoc) {
		IEEE80211_ADDR_COPY(cmd->bssid_addr, ni->ni_bssid);
	} else {
		memset(cmd->bssid_addr, 0, sizeof(cmd->bssid_addr));
	}
	iwm_mvm_ack_rates(sc, in, &cck_ack_rates, &ofdm_ack_rates);
	cmd->cck_rates = htole32(cck_ack_rates);
	cmd->ofdm_rates = htole32(ofdm_ack_rates);

	cmd->cck_short_preamble
	    = htole32((ic->ic_flags & IEEE80211_F_SHPREAMBLE)
	      ? IWM_MAC_FLG_SHORT_PREAMBLE : 0);
	cmd->short_slot
	    = htole32((ic->ic_flags & IEEE80211_F_SHSLOT)
	      ? IWM_MAC_FLG_SHORT_SLOT : 0);

	/*
	 * Fixed default EDCA parameters for every queue.
	 * NOTE(review): loops over IWM_AC_NUM+1 entries -- presumably
	 * the extra slot covers an additional firmware fifo; confirm
	 * against the size of cmd->ac[].
	 */
	for (i = 0; i < IWM_AC_NUM+1; i++) {
		int txf = i;

		cmd->ac[txf].cw_min = htole16(0x0f);
		cmd->ac[txf].cw_max = htole16(0x3f);
		cmd->ac[txf].aifsn = 1;
		cmd->ac[txf].fifos_mask = (1 << txf);
		cmd->ac[txf].edca_txop = 0;
	}

	/* ERP protection when net80211 has enabled it. */
	if (ic->ic_flags & IEEE80211_F_USEPROT)
		cmd->protection_flags |= htole32(IWM_MAC_PROT_FLG_TGG_PROTECT);

	cmd->filter_flags = htole32(IWM_MAC_FILTER_ACCEPT_GRP);
}
   4891 
   4892 static int
   4893 iwm_mvm_mac_ctxt_send_cmd(struct iwm_softc *sc, struct iwm_mac_ctx_cmd *cmd)
   4894 {
   4895 	int ret = iwm_mvm_send_cmd_pdu(sc, IWM_MAC_CONTEXT_CMD, IWM_CMD_SYNC,
   4896 				       sizeof(*cmd), cmd);
   4897 	if (ret)
   4898 		DPRINTF(("%s: Failed to send MAC context (action:%d): %d\n",
   4899 		    DEVNAME(sc), le32toh(cmd->action), ret));
   4900 	return ret;
   4901 }
   4902 
   4903 /*
   4904  * Fill the specific data for mac context of type station or p2p client
   4905  */
static void
iwm_mvm_mac_ctxt_cmd_fill_sta(struct iwm_softc *sc, struct iwm_node *in,
	struct iwm_mac_data_sta *ctxt_sta, int force_assoc_off)
{
	struct ieee80211_node *ni = &in->in_ni;
	unsigned dtim_period, dtim_count;

	dtim_period = ni->ni_dtim_period;
	dtim_count = ni->ni_dtim_count;

	/* We need the dtim_period to set the MAC as associated */
	if (in->in_assoc && dtim_period && !force_assoc_off) {
		uint64_t tsf;
		uint32_t dtim_offs;

		/*
		 * The DTIM count counts down, so when it is N that means N
		 * more beacon intervals happen until the DTIM TBTT. Therefore
		 * add this to the current time. If that ends up being in the
		 * future, the firmware will handle it.
		 *
		 * Also note that the system_timestamp (which we get here as
		 * "sync_device_ts") and TSF timestamp aren't at exactly the
		 * same offset in the frame -- the TSF is at the first symbol
		 * of the TSF, the system timestamp is at signal acquisition
		 * time. This means there's an offset between them of at most
		 * a few hundred microseconds (24 * 8 bits + PLCP time gives
		 * 384us in the longest case), this is currently not relevant
		 * as the firmware wakes up around 2ms before the TBTT.
		 */
		dtim_offs = dtim_count * ni->ni_intval;
		/* convert TU to usecs */
		dtim_offs *= 1024;

		/* TSF from the last beacon of our BSS. */
		tsf = ni->ni_tstamp.tsf;

		ctxt_sta->dtim_tsf = htole64(tsf + dtim_offs);
		ctxt_sta->dtim_time = htole64(ni->ni_rstamp + dtim_offs);

		DPRINTF(("DTIM TBTT is 0x%llx/0x%x, offset %d\n",
		    (long long)le64toh(ctxt_sta->dtim_tsf),
		    le32toh(ctxt_sta->dtim_time), dtim_offs));

		ctxt_sta->is_assoc = htole32(1);
	} else {
		ctxt_sta->is_assoc = htole32(0);
	}

	/* Beacon/DTIM intervals plus firmware-friendly reciprocals. */
	ctxt_sta->bi = htole32(ni->ni_intval);
	ctxt_sta->bi_reciprocal = htole32(iwm_mvm_reciprocal(ni->ni_intval));
	ctxt_sta->dtim_interval = htole32(ni->ni_intval * dtim_period);
	ctxt_sta->dtim_reciprocal =
	    htole32(iwm_mvm_reciprocal(ni->ni_intval * dtim_period));

	/* 10 = CONN_MAX_LISTEN_INTERVAL */
	ctxt_sta->listen_interval = htole32(10);
	ctxt_sta->assoc_id = htole32(ni->ni_associd);
}
   4964 
/*
 * Build and send a station (BSS client) MAC context command.  For
 * ACTION_ADD the station-specific part is forced to unassociated
 * state so the context is first uploaded cleanly.
 */
static int
iwm_mvm_mac_ctxt_cmd_station(struct iwm_softc *sc, struct iwm_node *in,
	uint32_t action)
{
	struct iwm_mac_ctx_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));

	/* Fill the common data for all mac context types */
	iwm_mvm_mac_ctxt_cmd_common(sc, in, &cmd, action);

	/* Let beacons through the filter only while associated. */
	if (in->in_assoc)
		cmd.filter_flags |= htole32(IWM_MAC_FILTER_IN_BEACON);
	else
		cmd.filter_flags &= ~htole32(IWM_MAC_FILTER_IN_BEACON);

	/* Fill the data specific for station mode */
	iwm_mvm_mac_ctxt_cmd_fill_sta(sc, in,
	    &cmd.sta, action == IWM_FW_CTXT_ACTION_ADD);

	return iwm_mvm_mac_ctxt_send_cmd(sc, &cmd);
}
   4987 
/*
 * Dispatch a MAC context command.  Only station mode is supported, so
 * this forwards directly to the station variant.
 */
static int
iwm_mvm_mac_ctx_send(struct iwm_softc *sc, struct iwm_node *in, uint32_t action)
{
	return iwm_mvm_mac_ctxt_cmd_station(sc, in, action);
}
   4993 
   4994 static int
   4995 iwm_mvm_mac_ctxt_add(struct iwm_softc *sc, struct iwm_node *in)
   4996 {
   4997 	int ret;
   4998 
   4999 	ret = iwm_mvm_mac_ctx_send(sc, in, IWM_FW_CTXT_ACTION_ADD);
   5000 	if (ret)
   5001 		return ret;
   5002 
   5003 	return 0;
   5004 }
   5005 
/*
 * Modify an already-uploaded MAC context (e.g. after the association
 * state changed).
 */
static int
iwm_mvm_mac_ctxt_changed(struct iwm_softc *sc, struct iwm_node *in)
{
	return iwm_mvm_mac_ctx_send(sc, in, IWM_FW_CTXT_ACTION_MODIFY);
}
   5011 
#if 0
/*
 * Remove the MAC context for "in" from the firmware.  Currently
 * disabled: see the comment in iwm_release() about the orderly
 * teardown sequence wedging the device.
 */
static int
iwm_mvm_mac_ctxt_remove(struct iwm_softc *sc, struct iwm_node *in)
{
	struct iwm_mac_ctx_cmd cmd;
	int ret;

	if (!in->in_uploaded) {
		/*
		 * Was "print()", which does not exist; report through
		 * the same channel as the failure path below.
		 */
		aprint_error_dev(sc->sc_dev,
		    "attempt to remove !uploaded node %p\n", in);
		return EIO;
	}

	memset(&cmd, 0, sizeof(cmd));

	cmd.id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
	    in->in_color));
	cmd.action = htole32(IWM_FW_CTXT_ACTION_REMOVE);

	ret = iwm_mvm_send_cmd_pdu(sc,
	    IWM_MAC_CONTEXT_CMD, IWM_CMD_SYNC, sizeof(cmd), &cmd);
	if (ret) {
		aprint_error_dev(sc->sc_dev,
		    "Failed to remove MAC context: %d\n", ret);
		return ret;
	}
	/* Only clear the uploaded flag once the firmware agreed. */
	in->in_uploaded = 0;

	return 0;
}
#endif
   5042 
   5043 #define IWM_MVM_MISSED_BEACONS_THRESHOLD 8
   5044 
/*
 * Handle a missed-beacons notification from the firmware: when too
 * many consecutive beacons were missed since the last RX, tell
 * net80211 so it can probe the AP or roam.  "data" is unused here.
 */
static void
iwm_mvm_rx_missed_beacons_notif(struct iwm_softc *sc,
	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
{
	struct iwm_missed_beacons_notif *mb = (void *)pkt->data;

	DPRINTF(("missed bcn mac_id=%u, consecutive=%u (%u, %u, %u)\n",
	    le32toh(mb->mac_id),
	    le32toh(mb->consec_missed_beacons),
	    le32toh(mb->consec_missed_beacons_since_last_rx),
	    le32toh(mb->num_recvd_beacons),
	    le32toh(mb->num_expected_beacons)));

	/*
	 * TODO: the threshold should be adjusted based on latency conditions,
	 * and/or in case of a CS flow on one of the other AP vifs.
	 */
	if (le32toh(mb->consec_missed_beacons_since_last_rx) >
	    IWM_MVM_MISSED_BEACONS_THRESHOLD)
		ieee80211_beacon_miss(&sc->sc_ic);
}
   5066 
   5067 /*
   5068  * END mvm/mac-ctxt.c
   5069  */
   5070 
   5071 /*
   5072  * BEGIN mvm/quota.c
   5073  */
   5074 
/*
 * Distribute the firmware scheduler's IWM_MVM_MAX_QUOTA time fragments
 * over the active bindings.  With this driver's single station
 * interface, "in" (when non-NULL) receives the entire session;
 * passing NULL clears all quotas.
 */
static int
iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_node *in)
{
	struct iwm_time_quota_cmd cmd;
	int i, idx, ret, num_active_macs, quota, quota_rem;
	/* colors[i] < 0 marks binding i as unused. */
	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
	int n_ifs[IWM_MAX_BINDINGS] = {0, };
	uint16_t id;

	memset(&cmd, 0, sizeof(cmd));

	/* currently, PHY ID == binding ID */
	if (in) {
		id = in->in_phyctxt->id;
		KASSERT(id < IWM_MAX_BINDINGS);
		colors[id] = in->in_phyctxt->color;

		/* NOTE(review): placeholder for a "vif needs quota" test. */
		if (1)
			n_ifs[id] = 1;
	}

	/*
	 * The FW's scheduling session consists of
	 * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
	 * equally between all the bindings that require quota
	 */
	num_active_macs = 0;
	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
		num_active_macs += n_ifs[i];
	}

	quota = 0;
	quota_rem = 0;
	if (num_active_macs) {
		quota = IWM_MVM_MAX_QUOTA / num_active_macs;
		quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
	}

	/* Emit one quota entry per active binding, packed from index 0. */
	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
		if (colors[i] < 0)
			continue;

		cmd.quotas[idx].id_and_color =
			htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));

		if (n_ifs[i] <= 0) {
			cmd.quotas[idx].quota = htole32(0);
			cmd.quotas[idx].max_duration = htole32(0);
		} else {
			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
			cmd.quotas[idx].max_duration = htole32(0);
		}
		idx++;
	}

	/* Give the remainder of the session to the first binding */
	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);

	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
	    sizeof(cmd), &cmd);
	if (ret)
		DPRINTF(("%s: Failed to send quota: %d\n", DEVNAME(sc), ret));
	return ret;
}
   5140 
   5141 /*
   5142  * END mvm/quota.c
   5143  */
   5144 
   5145 /*
   5146  * aieee80211 routines
   5147  */
   5148 
   5149 /*
   5150  * Change to AUTH state in 80211 state machine.  Roughly matches what
   5151  * Linux does in bss_info_changed().
   5152  */
   5153 static int
   5154 iwm_auth(struct iwm_softc *sc)
   5155 {
   5156 	struct ieee80211com *ic = &sc->sc_ic;
   5157 	struct iwm_node *in = (void *)ic->ic_bss;
   5158 	uint32_t duration;
   5159 	uint32_t min_duration;
   5160 	int error;
   5161 
   5162 	in->in_assoc = 0;
   5163 
   5164 	if ((error = iwm_allow_mcast(sc)) != 0)
   5165 		return error;
   5166 
   5167 	if ((error = iwm_mvm_mac_ctxt_add(sc, in)) != 0) {
   5168 		DPRINTF(("%s: failed to add MAC\n", DEVNAME(sc)));
   5169 		return error;
   5170 	}
   5171 
   5172 	if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
   5173 	    in->in_ni.ni_chan, 1, 1)) != 0) {
   5174 		DPRINTF(("%s: failed add phy ctxt\n", DEVNAME(sc)));
   5175 		return error;
   5176 	}
   5177 	in->in_phyctxt = &sc->sc_phyctxt[0];
   5178 
   5179 	if ((error = iwm_mvm_binding_add_vif(sc, in)) != 0) {
   5180 		DPRINTF(("%s: binding cmd\n", DEVNAME(sc)));
   5181 		return error;
   5182 	}
   5183 
   5184 	if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
   5185 		DPRINTF(("%s: failed to add MAC\n", DEVNAME(sc)));
   5186 		return error;
   5187 	}
   5188 
   5189 	/* a bit superfluous? */
   5190 	while (sc->sc_auth_prot)
   5191 		tsleep(&sc->sc_auth_prot, 0, "iwmauth", 0);
   5192 	sc->sc_auth_prot = 1;
   5193 
   5194 	duration = min(IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS,
   5195 	    200 + in->in_ni.ni_intval);
   5196 	min_duration = min(IWM_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS,
   5197 	    100 + in->in_ni.ni_intval);
   5198 	iwm_mvm_protect_session(sc, in, duration, min_duration, 500);
   5199 
   5200 	while (sc->sc_auth_prot != 2) {
   5201 		/*
   5202 		 * well, meh, but if the kernel is sleeping for half a
   5203 		 * second, we have bigger problems
   5204 		 */
   5205 		if (sc->sc_auth_prot == 0) {
   5206 			DPRINTF(("%s: missed auth window!\n", DEVNAME(sc)));
   5207 			return ETIMEDOUT;
   5208 		} else if (sc->sc_auth_prot == -1) {
   5209 			DPRINTF(("%s: no time event, denied!\n", DEVNAME(sc)));
   5210 			sc->sc_auth_prot = 0;
   5211 			return EAUTH;
   5212 		}
   5213 		tsleep(&sc->sc_auth_prot, 0, "iwmau2", 0);
   5214 	}
   5215 
   5216 	return 0;
   5217 }
   5218 
   5219 static int
   5220 iwm_assoc(struct iwm_softc *sc)
   5221 {
   5222 	struct ieee80211com *ic = &sc->sc_ic;
   5223 	struct iwm_node *in = (void *)ic->ic_bss;
   5224 	int error;
   5225 
   5226 	if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
   5227 		DPRINTF(("%s: failed to update STA\n", DEVNAME(sc)));
   5228 		return error;
   5229 	}
   5230 
   5231 	in->in_assoc = 1;
   5232 	if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
   5233 		DPRINTF(("%s: failed to update MAC\n", DEVNAME(sc)));
   5234 		return error;
   5235 	}
   5236 
   5237 	return 0;
   5238 }
   5239 
/*
 * Release firmware state when leaving RUN state.  A full device reset
 * is performed instead of the orderly teardown sequence, which wedges
 * the hardware (see the comment below).
 */
static int
iwm_release(struct iwm_softc *sc, struct iwm_node *in)
{
	/*
	 * Ok, so *technically* the proper set of calls for going
	 * from RUN back to SCAN is:
	 *
	 * iwm_mvm_power_mac_disable(sc, in);
	 * iwm_mvm_mac_ctxt_changed(sc, in);
	 * iwm_mvm_rm_sta(sc, in);
	 * iwm_mvm_update_quotas(sc, NULL);
	 * iwm_mvm_mac_ctxt_changed(sc, in);
	 * iwm_mvm_binding_remove_vif(sc, in);
	 * iwm_mvm_mac_ctxt_remove(sc, in);
	 *
	 * However, that freezes the device no matter which permutations
	 * and modifications are attempted.  Obviously, this driver is missing
	 * something since it works in the Linux driver, but figuring out what
	 * is missing is a little more complicated.  Now, since we're going
	 * back to nothing anyway, we'll just do a complete device reset.
	 * Up yours, device!
	 */
	//iwm_mvm_flush_tx_path(sc, 0xf, 1);
	iwm_stop_device(sc);
	iwm_init_hw(sc);
	if (in)
		in->in_assoc = 0;
	return 0;

#if 0
	/* Dead code: the "proper" teardown sequence described above. */
	int error;

	iwm_mvm_power_mac_disable(sc, in);

	if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
		aprint_error_dev(sc->sc_dev, "mac ctxt change fail 1 %d\n",
		    error);
		return error;
	}

	if ((error = iwm_mvm_rm_sta(sc, in)) != 0) {
		aprint_error_dev(sc->sc_dev, "sta remove fail %d\n", error);
		return error;
	}
	/*
	 * NOTE(review): duplicate removal -- the station was already
	 * removed just above; drop one of the calls if this path is
	 * ever enabled.
	 */
	error = iwm_mvm_rm_sta(sc, in);
	in->in_assoc = 0;
	iwm_mvm_update_quotas(sc, NULL);
	if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
		aprint_error_dev(sc->sc_dev, "mac ctxt change fail 2 %d\n",
		    error);
		return error;
	}
	iwm_mvm_binding_remove_vif(sc, in);

	iwm_mvm_mac_ctxt_remove(sc, in);

	return error;
#endif
}
   5299 
   5300 
   5301 static struct ieee80211_node *
   5302 iwm_node_alloc(struct ieee80211_node_table *nt)
   5303 {
   5304 	return malloc(sizeof(struct iwm_node), M_80211_NODE, M_NOWAIT | M_ZERO);
   5305 }
   5306 
   5307 static void
   5308 iwm_calib_timeout(void *arg)
   5309 {
   5310 	struct iwm_softc *sc = arg;
   5311 	struct ieee80211com *ic = &sc->sc_ic;
   5312 	int s;
   5313 
   5314 	s = splnet();
   5315 	if (ic->ic_fixed_rate == -1
   5316 	    && ic->ic_opmode == IEEE80211_M_STA
   5317 	    && ic->ic_bss) {
   5318 		struct iwm_node *in = (void *)ic->ic_bss;
   5319 		ieee80211_amrr_choose(&sc->sc_amrr, &in->in_ni, &in->in_amn);
   5320 	}
   5321 	splx(s);
   5322 
   5323 	callout_schedule(&sc->sc_calib_to, hz/2);
   5324 }
   5325 
/*
 * Build the link-quality (rate selection) command for a node: map the
 * negotiated net80211 rate set onto hardware rate indices and fill
 * in_lq's rate table highest-rate first, cycling TX antennas.  AMRR
 * state is (re)initialized at the end; the command itself is sent
 * elsewhere.
 */
static void
iwm_setrates(struct iwm_node *in)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211com *ic = ni->ni_ic;
	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
	struct iwm_lq_cmd *lq = &in->in_lq;
	int nrates = ni->ni_rates.rs_nrates;
	int i, ridx, tab = 0;
	int txant = 0;

	if (nrates > __arraycount(lq->rs_table) ||
	    nrates > IEEE80211_RATE_MAXSIZE) {
		DPRINTF(("%s: node supports %d rates, driver handles only "
		    "%zu\n", DEVNAME(sc), nrates, __arraycount(lq->rs_table)));
		return;
	}

	/* first figure out which rates we should support */
	memset(&in->in_ridx, -1, sizeof(in->in_ridx));
	for (i = 0; i < nrates; i++) {
		int rate = ni->ni_rates.rs_rates[i] & IEEE80211_RATE_VAL;

		/* Map 802.11 rate to HW rate index. */
		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
			if (iwm_rates[ridx].rate == rate)
				break;
		if (ridx > IWM_RIDX_MAX)
			DPRINTF(("%s: WARNING: device rate for %d not found!\n",
			    DEVNAME(sc), rate));
		else
			in->in_ridx[i] = ridx;
	}

	/* then construct a lq_cmd based on those */
	memset(lq, 0, sizeof(*lq));
	lq->sta_id = IWM_STATION_ID;

	/*
	 * are these used? (we don't do SISO or MIMO)
	 * need to set them to non-zero, though, or we get an error.
	 */
	lq->single_stream_ant_msk = 1;
	lq->dual_stream_ant_msk = 1;

	/*
	 * Build the actual rate selection table.
	 * The lowest bits are the rates.  Additionally,
	 * CCK needs bit 9 to be set.  The rest of the bits
	 * we add to the table select the tx antenna
	 * Note that we add the rates in the highest rate first
	 * (opposite of ni_rates).
	 */
	for (i = 0; i < nrates; i++) {
		int nextant;

		/* Cycle through the valid TX antennas, one per entry. */
		if (txant == 0)
			txant = IWM_FW_VALID_TX_ANT(sc);
		nextant = 1<<(ffs(txant)-1);
		txant &= ~nextant;

		ridx = in->in_ridx[(nrates-1)-i];
		tab = iwm_rates[ridx].plcp;
		tab |= nextant << IWM_RATE_MCS_ANT_POS;
		if (IWM_RIDX_IS_CCK(ridx))
			tab |= IWM_RATE_MCS_CCK_MSK;
		DPRINTFN(2, ("station rate %d %x\n", i, tab));
		lq->rs_table[i] = htole32(tab);
	}
	/* then fill the rest with the lowest possible rate */
	for (i = nrates; i < __arraycount(lq->rs_table); i++) {
		KASSERT(tab != 0);
		lq->rs_table[i] = htole32(tab);
	}

	/* init amrr */
	ieee80211_amrr_node_init(&sc->sc_amrr, &in->in_amn);
	/* Start at lowest available bit-rate, AMRR will raise. */
	ni->ni_txrate = 0;
}
   5406 
   5407 static int
   5408 iwm_media_change(struct ifnet *ifp)
   5409 {
   5410 	struct iwm_softc *sc = ifp->if_softc;
   5411 	struct ieee80211com *ic = &sc->sc_ic;
   5412 	uint8_t rate, ridx;
   5413 	int error;
   5414 
   5415 	error = ieee80211_media_change(ifp);
   5416 	if (error != ENETRESET)
   5417 		return error;
   5418 
   5419 	if (ic->ic_fixed_rate != -1) {
   5420 		rate = ic->ic_sup_rates[ic->ic_curmode].
   5421 		    rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL;
   5422 		/* Map 802.11 rate to HW rate index. */
   5423 		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
   5424 			if (iwm_rates[ridx].rate == rate)
   5425 				break;
   5426 		sc->sc_fixed_ridx = ridx;
   5427 	}
   5428 
   5429 	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
   5430 	    (IFF_UP | IFF_RUNNING)) {
   5431 		iwm_stop(ifp, 0);
   5432 		error = iwm_init(ifp);
   5433 	}
   5434 	return error;
   5435 }
   5436 
/*
 * Workqueue callback that performs the 802.11 state transition queued
 * by iwm_newstate() on sc_nswq.  Issues the firmware commands needed
 * to enter the requested state, then chains to the stacked net80211
 * state handler (sc_newstate) unless a step failed or was deferred.
 */
static void
iwm_newstate_cb(struct work *wk, void *v)
{
	struct iwm_softc *sc = v;
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_newstate_state *iwmns = (void *)wk;
	enum ieee80211_state nstate = iwmns->ns_nstate;
	int generation = iwmns->ns_generation;
	struct iwm_node *in;
	int arg = iwmns->ns_arg;
	int error;

	/* The request record is consumed; all fields were copied above. */
	kmem_free(iwmns, sizeof(*iwmns));

	DPRINTF(("Prepare to switch state %d->%d\n", ic->ic_state, nstate));
	/*
	 * sc_generation is bumped by iwm_init()/iwm_stop(); a mismatch
	 * means the interface was reset after this work was queued, so
	 * the transition is stale.  Only a transition to INIT is still
	 * forwarded to the stacked handler.
	 */
	if (sc->sc_generation != generation) {
		DPRINTF(("newstate_cb: someone pulled the plug meanwhile\n"));
		if (nstate == IEEE80211_S_INIT) {
			DPRINTF(("newstate_cb: nstate == IEEE80211_S_INIT: calling sc_newstate()\n"));
			sc->sc_newstate(ic, nstate, arg);
		}
		return;
	}

	DPRINTF(("switching state %d->%d\n", ic->ic_state, nstate));

	/* disable beacon filtering if we're hopping out of RUN */
	if (ic->ic_state == IEEE80211_S_RUN && nstate != ic->ic_state) {
		iwm_mvm_disable_beacon_filter(sc, (void *)ic->ic_bss);

		if (((in = (void *)ic->ic_bss) != NULL))
			in->in_assoc = 0;
		iwm_release(sc, NULL);

		/*
		 * It's impossible to directly go RUN->SCAN. If we iwm_release()
		 * above then the card will be completely reinitialized,
		 * so the driver must do everything necessary to bring the card
		 * from INIT to SCAN.
		 *
		 * Additionally, upon receiving deauth frame from AP,
		 * OpenBSD 802.11 stack puts the driver in IEEE80211_S_AUTH
		 * state. This will also fail with this driver, so bring the FSM
		 * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
		 */
		if (nstate == IEEE80211_S_SCAN ||
		    nstate == IEEE80211_S_AUTH ||
		    nstate == IEEE80211_S_ASSOC) {
			DPRINTF(("Force transition to INIT; MGT=%d\n", arg));
			sc->sc_newstate(ic, IEEE80211_S_INIT, arg);
			DPRINTF(("Going INIT->SCAN\n"));
			nstate = IEEE80211_S_SCAN;
		}
	}

	switch (nstate) {
	case IEEE80211_S_INIT:
		sc->sc_scanband = 0;
		break;

	case IEEE80211_S_SCAN:
		/* A scan is already in progress on one band; nothing to do. */
		if (sc->sc_scanband)
			break;

		if ((error = iwm_mvm_scan_request(sc, IEEE80211_CHAN_2GHZ,
		    ic->ic_des_esslen != 0,
		    ic->ic_des_essid, ic->ic_des_esslen)) != 0) {
			DPRINTF(("%s: could not initiate scan\n", DEVNAME(sc)));
			return;
		}
		/*
		 * Early return: ic_state is set by hand and sc_newstate()
		 * is deliberately not called for the SCAN transition.
		 */
		ic->ic_state = nstate;
		return;

	case IEEE80211_S_AUTH:
		if ((error = iwm_auth(sc)) != 0) {
			DPRINTF(("%s: could not move to auth state: %d\n",
			    DEVNAME(sc), error));
			return;
		}

		break;

	case IEEE80211_S_ASSOC:
		if ((error = iwm_assoc(sc)) != 0) {
			DPRINTF(("%s: failed to associate: %d\n", DEVNAME(sc),
			    error));
			return;
		}
		break;

	case IEEE80211_S_RUN: {
		/* Push the link-quality (rate table) command built below. */
		struct iwm_host_cmd cmd = {
			.id = IWM_LQ_CMD,
			.len = { sizeof(in->in_lq), },
			.flags = IWM_CMD_SYNC,
		};

		in = (struct iwm_node *)ic->ic_bss;
		iwm_mvm_power_mac_update_mode(sc, in);
		iwm_mvm_enable_beacon_filter(sc, in);
		iwm_mvm_update_quotas(sc, in);
		/* Fills in->in_lq (see iwm_setrates()). */
		iwm_setrates(in);

		cmd.data[0] = &in->in_lq;
		if ((error = iwm_send_cmd(sc, &cmd)) != 0) {
			DPRINTF(("%s: IWM_LQ_CMD failed\n", DEVNAME(sc)));
		}

		/* Kick off periodic calibration. */
		callout_schedule(&sc->sc_calib_to, hz/2);

		break; }

	default:
		DPRINTF(("%s: unsupported state %d\n", DEVNAME(sc), nstate));
		break;
	}

	sc->sc_newstate(ic, nstate, arg);
}
   5556 
   5557 static int
   5558 iwm_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
   5559 {
   5560 	struct iwm_newstate_state *iwmns;
   5561 	struct ifnet *ifp = IC2IFP(ic);
   5562 	struct iwm_softc *sc = ifp->if_softc;
   5563 
   5564 	callout_stop(&sc->sc_calib_to);
   5565 
   5566 	iwmns = kmem_intr_alloc(sizeof(*iwmns), KM_NOSLEEP);
   5567 	if (!iwmns) {
   5568 		DPRINTF(("%s: allocating state cb mem failed\n", DEVNAME(sc)));
   5569 		return ENOMEM;
   5570 	}
   5571 
   5572 	iwmns->ns_nstate = nstate;
   5573 	iwmns->ns_arg = arg;
   5574 	iwmns->ns_generation = sc->sc_generation;
   5575 
   5576 	workqueue_enqueue(sc->sc_nswq, &iwmns->ns_wk, NULL);
   5577 
   5578 	return 0;
   5579 }
   5580 
   5581 static void
   5582 iwm_endscan_cb(struct work *work __unused, void *arg)
   5583 {
   5584 	struct iwm_softc *sc = arg;
   5585 	struct ieee80211com *ic = &sc->sc_ic;
   5586 	int done;
   5587 
   5588 	DPRINTF(("scan ended\n"));
   5589 
   5590 	if (sc->sc_scanband == IEEE80211_CHAN_2GHZ) {
   5591 #ifndef IWM_NO_5GHZ
   5592 		int error;
   5593 		done = 0;
   5594 		if ((error = iwm_mvm_scan_request(sc,
   5595 		    IEEE80211_CHAN_5GHZ, ic->ic_des_esslen != 0,
   5596 		    ic->ic_des_essid, ic->ic_des_esslen)) != 0) {
   5597 			DPRINTF(("%s: could not initiate scan\n", DEVNAME(sc)));
   5598 			done = 1;
   5599 		}
   5600 #else
   5601 		done = 1;
   5602 #endif
   5603 	} else {
   5604 		done = 1;
   5605 	}
   5606 
   5607 	if (done) {
   5608 		if (!sc->sc_scanband) {
   5609 			ieee80211_cancel_scan(ic);
   5610 		} else {
   5611 			ieee80211_end_scan(ic);
   5612 		}
   5613 		sc->sc_scanband = 0;
   5614 	}
   5615 }
   5616 
/*
 * Bring the device fully up: run the INIT firmware image (for
 * calibration), restart the hardware, load the regular runtime
 * firmware, then configure antennas, PHY DB, PHY contexts, power,
 * and the TX queues.  The ordering of these steps is significant.
 */
static int
iwm_init_hw(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	int error, i, qid;

	if ((error = iwm_preinit(sc)) != 0)
		return error;

	if ((error = iwm_start_hw(sc)) != 0)
		return error;

	if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
		return error;
	}

	/*
	 * should stop and start HW since that INIT
	 * image just loaded
	 */
	iwm_stop_device(sc);
	if ((error = iwm_start_hw(sc)) != 0) {
		aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
		return error;
	}

	/* omstart, this time with the regular firmware */
	error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_REGULAR);
	if (error) {
		aprint_error_dev(sc->sc_dev, "could not load firmware\n");
		goto error;
	}

	if ((error = iwm_send_tx_ant_cfg(sc, IWM_FW_VALID_TX_ANT(sc))) != 0)
		goto error;

	/* Send phy db control command and then phy db calibration*/
	if ((error = iwm_send_phy_db_data(sc)) != 0)
		goto error;

	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0)
		goto error;

	/* Add auxiliary station for scanning */
	if ((error = iwm_mvm_add_aux_sta(sc)) != 0)
		goto error;

	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
		/*
		 * The channel used here isn't relevant as it's
		 * going to be overwritten in the other flows.
		 * For now use the first channel we have.
		 */
		if ((error = iwm_mvm_phy_ctxt_add(sc,
		    &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
			goto error;
	}

	error = iwm_mvm_power_update_device(sc);
	if (error)
		goto error;

	/* Mark TX rings as active. */
	for (qid = 0; qid < 4; qid++) {
		iwm_enable_txq(sc, qid, qid);
	}

	return 0;

	/* Unified error path: any failure after firmware load shuts
	 * the device back down before returning. */
 error:
	iwm_stop_device(sc);
	return error;
}
   5690 
   5691 /* Allow multicast from our BSSID. */
   5692 static int
   5693 iwm_allow_mcast(struct iwm_softc *sc)
   5694 {
   5695 	struct ieee80211com *ic = &sc->sc_ic;
   5696 	struct ieee80211_node *ni = ic->ic_bss;
   5697 	struct iwm_mcast_filter_cmd *cmd;
   5698 	size_t size;
   5699 	int error;
   5700 
   5701 	size = roundup(sizeof(*cmd), 4);
   5702 	cmd = kmem_intr_zalloc(size, KM_NOSLEEP);
   5703 	if (cmd == NULL)
   5704 		return ENOMEM;
   5705 	cmd->filter_own = 1;
   5706 	cmd->port_id = 0;
   5707 	cmd->count = 0;
   5708 	cmd->pass_all = 1;
   5709 	IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
   5710 
   5711 	error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
   5712 	    IWM_CMD_SYNC, size, cmd);
   5713 	kmem_intr_free(cmd, size);
   5714 	return error;
   5715 }
   5716 
   5717 /*
   5718  * ifnet interfaces
   5719  */
   5720 
   5721 static int
   5722 iwm_init(struct ifnet *ifp)
   5723 {
   5724 	struct iwm_softc *sc = ifp->if_softc;
   5725 	int error;
   5726 
   5727 	if (sc->sc_flags & IWM_FLAG_HW_INITED) {
   5728 		return 0;
   5729 	}
   5730 	sc->sc_generation++;
   5731 	sc->sc_flags &= ~IWM_FLAG_STOPPED;
   5732 
   5733 	if ((error = iwm_init_hw(sc)) != 0) {
   5734 		iwm_stop(ifp, 1);
   5735 		return error;
   5736 	}
   5737 
   5738 	/*
   5739  	 * Ok, firmware loaded and we are jogging
   5740 	 */
   5741 
   5742 	ifp->if_flags &= ~IFF_OACTIVE;
   5743 	ifp->if_flags |= IFF_RUNNING;
   5744 
   5745 	ieee80211_begin_scan(&sc->sc_ic, 0);
   5746 	sc->sc_flags |= IWM_FLAG_HW_INITED;
   5747 
   5748 	return 0;
   5749 }
   5750 
   5751 /*
   5752  * Dequeue packets from sendq and call send.
   5753  * mostly from iwn
   5754  */
static void
iwm_start(struct ifnet *ifp)
{
	struct iwm_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni;
	struct ether_header *eh;
	struct mbuf *m;
	int ac;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		/* why isn't this done per-queue? */
		if (sc->qfullmsk != 0) {
			/* At least one TX ring is full; back off. */
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* need to send management frames even if we're not RUNning */
		IF_DEQUEUE(&ic->ic_mgtq, m);
		if (m) {
			/* Management frames carry their destination node
			 * in the rcvif pointer and always use AC 0. */
			ni = (void *)m->m_pkthdr.rcvif;
			ac = 0;
			goto sendit;
		}
		if (ic->ic_state != IEEE80211_S_RUN) {
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (!m)
			break;
		/* Make sure the Ethernet header is contiguous. */
		if (m->m_len < sizeof (*eh) &&
		   (m = m_pullup(m, sizeof (*eh))) == NULL) {
			ifp->if_oerrors++;
			continue;
		}
		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp, m);

		eh = mtod(m, struct ether_header *);
		ni = ieee80211_find_txnode(ic, eh->ether_dhost);
		if (ni == NULL) {
			m_freem(m);
			ifp->if_oerrors++;
			continue;
		}
		/* classify mbuf so we can find which tx ring to use */
		if (ieee80211_classify(ic, m, ni) != 0) {
			m_freem(m);
			ieee80211_free_node(ni);
			ifp->if_oerrors++;
			continue;
		}

		/* No QoS encapsulation for EAPOL frames. */
		ac = (eh->ether_type != htons(ETHERTYPE_PAE)) ?
		    M_WME_GETAC(m) : WME_AC_BE;

		/* NOTE(review): no m_freem() here on failure, so m is
		 * presumably consumed by ieee80211_encap() -- confirm. */
		if ((m = ieee80211_encap(ic, m, ni)) == NULL) {
			ieee80211_free_node(ni);
			ifp->if_oerrors++;
			continue;
		}

 sendit:
		if (ic->ic_rawbpf != NULL)
			bpf_mtap3(ic->ic_rawbpf, m);
		/* NOTE(review): likewise, iwm_tx() appears to own m on
		 * failure (no m_freem() here) -- confirm. */
		if (iwm_tx(sc, m, ni, ac) != 0) {
			ieee80211_free_node(ni);
			ifp->if_oerrors++;
			continue;
		}

		if (ifp->if_flags & IFF_UP) {
			/* Arm the TX watchdog (see iwm_watchdog()). */
			sc->sc_tx_timer = 15;
			ifp->if_timer = 1;
		}
	}

	return;
}
   5839 
   5840 static void
   5841 iwm_stop(struct ifnet *ifp, int disable)
   5842 {
   5843 	struct iwm_softc *sc = ifp->if_softc;
   5844 	struct ieee80211com *ic = &sc->sc_ic;
   5845 
   5846 	sc->sc_flags &= ~IWM_FLAG_HW_INITED;
   5847 	sc->sc_flags |= IWM_FLAG_STOPPED;
   5848 	sc->sc_generation++;
   5849 	sc->sc_scanband = 0;
   5850 	sc->sc_auth_prot = 0;
   5851 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   5852 
   5853 	if (ic->ic_state != IEEE80211_S_INIT)
   5854 		ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
   5855 
   5856 	ifp->if_timer = sc->sc_tx_timer = 0;
   5857 	iwm_stop_device(sc);
   5858 }
   5859 
   5860 static void
   5861 iwm_watchdog(struct ifnet *ifp)
   5862 {
   5863 	struct iwm_softc *sc = ifp->if_softc;
   5864 
   5865 	ifp->if_timer = 0;
   5866 	if (sc->sc_tx_timer > 0) {
   5867 		if (--sc->sc_tx_timer == 0) {
   5868 			aprint_error_dev(sc->sc_dev, "device timeout\n");
   5869 #ifdef IWM_DEBUG
   5870 			iwm_nic_error(sc);
   5871 #endif
   5872 			ifp->if_flags &= ~IFF_UP;
   5873 			iwm_stop(ifp, 1);
   5874 			ifp->if_oerrors++;
   5875 			return;
   5876 		}
   5877 		ifp->if_timer = 1;
   5878 	}
   5879 
   5880 	ieee80211_watchdog(&sc->sc_ic);
   5881 }
   5882 
   5883 static int
   5884 iwm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   5885 {
   5886 	struct iwm_softc *sc = ifp->if_softc;
   5887 	struct ieee80211com *ic = &sc->sc_ic;
   5888 	const struct sockaddr *sa;
   5889 	int s, error = 0;
   5890 
   5891 	s = splnet();
   5892 
   5893 	switch (cmd) {
   5894 	case SIOCSIFADDR:
   5895 		ifp->if_flags |= IFF_UP;
   5896 		/* FALLTHROUGH */
   5897 	case SIOCSIFFLAGS:
   5898 		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
   5899 			break;
   5900 		if (ifp->if_flags & IFF_UP) {
   5901 			if (!(ifp->if_flags & IFF_RUNNING)) {
   5902 				if ((error = iwm_init(ifp)) != 0)
   5903 					ifp->if_flags &= ~IFF_UP;
   5904 			}
   5905 		} else {
   5906 			if (ifp->if_flags & IFF_RUNNING)
   5907 				iwm_stop(ifp, 1);
   5908 		}
   5909 		break;
   5910 
   5911 	case SIOCADDMULTI:
   5912 	case SIOCDELMULTI:
   5913 		sa = ifreq_getaddr(SIOCADDMULTI, (struct ifreq *)data);
   5914 		error = (cmd == SIOCADDMULTI) ?
   5915 		    ether_addmulti(sa, &sc->sc_ec) :
   5916 		    ether_delmulti(sa, &sc->sc_ec);
   5917 
   5918 		if (error == ENETRESET)
   5919 			error = 0;
   5920 		break;
   5921 
   5922 	default:
   5923 		error = ieee80211_ioctl(ic, cmd, data);
   5924 	}
   5925 
   5926 	if (error == ENETRESET) {
   5927 		error = 0;
   5928 		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
   5929 		    (IFF_UP | IFF_RUNNING)) {
   5930 			iwm_stop(ifp, 0);
   5931 			error = iwm_init(ifp);
   5932 		}
   5933 	}
   5934 
   5935 	splx(s);
   5936 	return error;
   5937 }
   5938 
   5939 /*
   5940  * The interrupt side of things
   5941  */
   5942 
   5943 /*
   5944  * error dumping routines are from iwlwifi/mvm/utils.c
   5945  */
   5946 
   5947 /*
   5948  * Note: This structure is read from the device with IO accesses,
   5949  * and the reading already does the endian conversion. As it is
   5950  * read with uint32_t-sized accesses, any members with a different size
   5951  * need to be ordered correctly though!
   5952  */
/*
 * NOTE(review): field order and sizes must exactly match the layout
 * the firmware writes to device memory (see comment above about
 * uint32_t-sized accesses) -- do not reorder or resize members.
 */
struct iwm_error_event_table {
	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
	uint32_t error_id;		/* type of error */
	uint32_t pc;			/* program counter */
	uint32_t blink1;		/* branch link */
	uint32_t blink2;		/* branch link */
	uint32_t ilink1;		/* interrupt link */
	uint32_t ilink2;		/* interrupt link */
	uint32_t data1;		/* error-specific data */
	uint32_t data2;		/* error-specific data */
	uint32_t data3;		/* error-specific data */
	uint32_t bcon_time;		/* beacon timer */
	uint32_t tsf_low;		/* network timestamp function timer */
	uint32_t tsf_hi;		/* network timestamp function timer */
	uint32_t gp1;		/* GP1 timer register */
	uint32_t gp2;		/* GP2 timer register */
	uint32_t gp3;		/* GP3 timer register */
	uint32_t ucode_ver;		/* uCode version */
	uint32_t hw_ver;		/* HW Silicon version */
	uint32_t brd_ver;		/* HW board version */
	uint32_t log_pc;		/* log program counter */
	uint32_t frame_ptr;		/* frame pointer */
	uint32_t stack_ptr;		/* stack pointer */
	uint32_t hcmd;		/* last host command header */
	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
				 * rxtx_flag */
	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
				 * host_flag */
	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
				 * enc_flag */
	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
				 * time_flag */
	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
				 * wico interrupt */
	uint32_t isr_pref;		/* isr status register LMPM_NIC_PREF_STAT */
	uint32_t wait_event;		/* wait event() caller address */
	uint32_t l2p_control;	/* L2pControlField */
	uint32_t l2p_duration;	/* L2pDurationField */
	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
				 * (LMPM_PMG_SEL) */
	uint32_t u_timestamp;	/* indicate when the date and time of the
				 * compilation */
	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
} __packed;
   5999 
/* Offset of the first log record within the error table. */
#define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
/* Size of a single error-log record. */
#define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
   6002 
   6003 #ifdef IWM_DEBUG
   6004 static const struct {
   6005 	const char *name;
   6006 	uint8_t num;
   6007 } advanced_lookup[] = {
   6008 	{ "NMI_INTERRUPT_WDG", 0x34 },
   6009 	{ "SYSASSERT", 0x35 },
   6010 	{ "UCODE_VERSION_MISMATCH", 0x37 },
   6011 	{ "BAD_COMMAND", 0x38 },
   6012 	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
   6013 	{ "FATAL_ERROR", 0x3D },
   6014 	{ "NMI_TRM_HW_ERR", 0x46 },
   6015 	{ "NMI_INTERRUPT_TRM", 0x4C },
   6016 	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
   6017 	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
   6018 	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
   6019 	{ "NMI_INTERRUPT_HOST", 0x66 },
   6020 	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
   6021 	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
   6022 	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
   6023 	{ "ADVANCED_SYSASSERT", 0 },
   6024 };
   6025 
   6026 static const char *
   6027 iwm_desc_lookup(uint32_t num)
   6028 {
   6029 	int i;
   6030 
   6031 	for (i = 0; i < __arraycount(advanced_lookup) - 1; i++)
   6032 		if (advanced_lookup[i].num == num)
   6033 			return advanced_lookup[i].name;
   6034 
   6035 	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
   6036 	return advanced_lookup[i].name;
   6037 }
   6038 
   6039 /*
   6040  * Support for dumping the error log seemed like a good idea ...
   6041  * but it's mostly hex junk and the only sensible thing is the
   6042  * hw/ucode revision (which we know anyway).  Since it's here,
   6043  * I'll just leave it in, just in case e.g. the Intel guys want to
   6044  * help us decipher some "ADVANCED_SYSASSERT" later.
   6045  */
static void
iwm_nic_error(struct iwm_softc *sc)
{
	struct iwm_error_event_table table;
	uint32_t base;

	aprint_error_dev(sc->sc_dev, "dumping device error log\n");
	/* Table pointer was reported by the firmware's alive response. */
	base = sc->sc_uc.uc_error_event_table;
	/* Sanity-check the firmware-supplied pointer (presumably the
	 * valid device SRAM window -- confirm bounds). */
	if (base < 0x800000 || base >= 0x80C000) {
		aprint_error_dev(sc->sc_dev,
		    "Not valid error log pointer 0x%08x\n", base);
		return;
	}

	/* The length argument is sizeof(table) in uint32_t units. */
	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t)) != 0) {
		aprint_error_dev(sc->sc_dev, "reading errlog failed\n");
		return;
	}

	if (!table.valid) {
		aprint_error_dev(sc->sc_dev, "errlog not found, skipping\n");
		return;
	}

	/* NOTE(review): 'valid' is also used as a record count in this
	 * check (mirroring iwlwifi); confirm before changing it. */
	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		aprint_error_dev(sc->sc_dev, "Start IWL Error Log Dump:\n");
		aprint_error_dev(sc->sc_dev, "Status: 0x%x, count: %d\n",
		    sc->sc_flags, table.valid);
	}

	aprint_error_dev(sc->sc_dev, "%08X | %-28s\n", table.error_id,
		iwm_desc_lookup(table.error_id));
	aprint_error_dev(sc->sc_dev, "%08X | uPc\n", table.pc);
	aprint_error_dev(sc->sc_dev, "%08X | branchlink1\n", table.blink1);
	aprint_error_dev(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
	aprint_error_dev(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
	aprint_error_dev(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
	aprint_error_dev(sc->sc_dev, "%08X | data1\n", table.data1);
	aprint_error_dev(sc->sc_dev, "%08X | data2\n", table.data2);
	aprint_error_dev(sc->sc_dev, "%08X | data3\n", table.data3);
	aprint_error_dev(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
	aprint_error_dev(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
	aprint_error_dev(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
	aprint_error_dev(sc->sc_dev, "%08X | time gp1\n", table.gp1);
	aprint_error_dev(sc->sc_dev, "%08X | time gp2\n", table.gp2);
	aprint_error_dev(sc->sc_dev, "%08X | time gp3\n", table.gp3);
	aprint_error_dev(sc->sc_dev, "%08X | uCode version\n", table.ucode_ver);
	aprint_error_dev(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
	aprint_error_dev(sc->sc_dev, "%08X | board version\n", table.brd_ver);
	aprint_error_dev(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
	aprint_error_dev(sc->sc_dev, "%08X | isr0\n", table.isr0);
	aprint_error_dev(sc->sc_dev, "%08X | isr1\n", table.isr1);
	aprint_error_dev(sc->sc_dev, "%08X | isr2\n", table.isr2);
	aprint_error_dev(sc->sc_dev, "%08X | isr3\n", table.isr3);
	aprint_error_dev(sc->sc_dev, "%08X | isr4\n", table.isr4);
	aprint_error_dev(sc->sc_dev, "%08X | isr_pref\n", table.isr_pref);
	aprint_error_dev(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
	aprint_error_dev(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
	aprint_error_dev(sc->sc_dev, "%08X | l2p_duration\n",
	    table.l2p_duration);
	aprint_error_dev(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
	aprint_error_dev(sc->sc_dev, "%08X | l2p_addr_match\n",
	    table.l2p_addr_match);
	aprint_error_dev(sc->sc_dev, "%08X | lmpm_pmg_sel\n",
	    table.lmpm_pmg_sel);
	aprint_error_dev(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
	aprint_error_dev(sc->sc_dev, "%08X | flow_handler\n",
	    table.flow_handler);
}
   6115 #endif
   6116 
/*
 * Sync the RX buffer region that follows the packet header so a
 * response structure of type *(_var_) can be read safely, then point
 * _var_ at it.
 */
#define SYNC_RESP_STRUCT(_var_, _pkt_)					\
do {									\
	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
	    sizeof(*(_var_)), BUS_DMASYNC_POSTREAD);			\
	_var_ = (void *)((_pkt_)+1);					\
} while (/*CONSTCOND*/0)

/*
 * Like SYNC_RESP_STRUCT, but for a variable-length payload of _len_
 * bytes.  Fixed to sync (_len_) bytes: the previous version passed
 * sizeof(len) -- the size of whatever call-site variable happened to
 * be named "len" (typically 2 or 4 bytes) -- leaving the rest of the
 * payload unsynced before it was read.
 */
#define SYNC_RESP_PTR(_ptr_, _len_, _pkt_)				\
do {									\
	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
	    (_len_), BUS_DMASYNC_POSTREAD);				\
	_ptr_ = (void *)((_pkt_)+1);					\
} while (/*CONSTCOND*/0)

/* Advance the RX ring read index, wrapping at IWM_RX_RING_COUNT. */
#define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % IWM_RX_RING_COUNT);
   6132 
   6133 /*
   6134  * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
   6135  * Basic structure from if_iwn
   6136  */
static void
iwm_notif_intr(struct iwm_softc *sc)
{
	uint16_t hw;

	bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
	    0, sc->rxq.stat_dma.size, BUS_DMASYNC_POSTREAD);

	/* Index of the most recent RX buffer closed by the firmware. */
	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
	while (sc->rxq.cur != hw) {
		struct iwm_rx_data *data = &sc->rxq.data[sc->rxq.cur];
		struct iwm_rx_packet *pkt, tmppkt;
		struct iwm_cmd_response *cresp;
		int qid, idx;

		bus_dmamap_sync(sc->sc_dmat, data->map, 0, sizeof(*pkt),
		    BUS_DMASYNC_POSTREAD);
		pkt = mtod(data->m, struct iwm_rx_packet *);

		/* Bit 7 of qid flags firmware-originated notifications
		 * (see the comment at the bottom of this loop). */
		qid = pkt->hdr.qid & ~0x80;
		idx = pkt->hdr.idx;

		DPRINTFN(12, ("rx packet qid=%d idx=%d flags=%x type=%x %d %d\n",
		    pkt->hdr.qid & ~0x80, pkt->hdr.idx, pkt->hdr.flags,
		    pkt->hdr.code, sc->rxq.cur, hw));

		/*
		 * randomly get these from the firmware, no idea why.
		 * they at least seem harmless, so just ignore them for now
		 */
		if (__predict_false((pkt->hdr.code == 0 && qid == 0 && idx == 0)
		    || pkt->len_n_flags == htole32(0x55550000))) {
			ADVANCE_RXQ(sc);
			continue;
		}

		switch (pkt->hdr.code) {
		case IWM_REPLY_RX_PHY_CMD:
			iwm_mvm_rx_rx_phy_cmd(sc, pkt, data);
			break;

		case IWM_REPLY_RX_MPDU_CMD:
			/* Copy the header first: the mbuf backing pkt may
			 * be handed up the stack by the MPDU handler. */
			tmppkt = *pkt; // XXX m is freed by ieee80211_input()
			iwm_mvm_rx_rx_mpdu(sc, pkt, data);
			pkt = &tmppkt;
			break;

		case IWM_TX_CMD:
			iwm_mvm_rx_tx_cmd(sc, pkt, data);
			break;

		case IWM_MISSED_BEACONS_NOTIFICATION:
			iwm_mvm_rx_missed_beacons_notif(sc, pkt, data);
			break;

		case IWM_MVM_ALIVE: {
			/* Firmware has booted: record the table pointers it
			 * reports and wake any thread sleeping on sc_uc. */
			struct iwm_mvm_alive_resp *resp;
			SYNC_RESP_STRUCT(resp, pkt);

			sc->sc_uc.uc_error_event_table
			    = le32toh(resp->error_event_table_ptr);
			sc->sc_uc.uc_log_event_table
			    = le32toh(resp->log_event_table_ptr);
			sc->sched_base = le32toh(resp->scd_base_ptr);
			sc->sc_uc.uc_ok = resp->status == IWM_ALIVE_STATUS_OK;

			sc->sc_uc.uc_intr = 1;
			wakeup(&sc->sc_uc);
			break; }

		case IWM_CALIB_RES_NOTIF_PHY_DB: {
			/* Calibration result: stash the section in the PHY DB
			 * for later upload (see iwm_send_phy_db_data()). */
			struct iwm_calib_res_notif_phy_db *phy_db_notif;
			SYNC_RESP_STRUCT(phy_db_notif, pkt);

			uint16_t size = le16toh(phy_db_notif->length);
			/* Sync the variable-length payload separately. */
			bus_dmamap_sync(sc->sc_dmat, data->map,
			    sizeof(*pkt) + sizeof(*phy_db_notif),
			    size, BUS_DMASYNC_POSTREAD);
			iwm_phy_db_set_section(sc, phy_db_notif, size);

			break; }

		case IWM_STATISTICS_NOTIFICATION: {
			struct iwm_notif_statistics *stats;
			SYNC_RESP_STRUCT(stats, pkt);
			memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
			sc->sc_noise = iwm_get_noise(&stats->rx.general);
			break; }

		case IWM_NVM_ACCESS_CMD:
			/* Synchronous command response: copy it out for the
			 * waiter identified by sc_wantresp. */
			if (sc->sc_wantresp == ((qid << 16) | idx)) {
				bus_dmamap_sync(sc->sc_dmat, data->map, 0,
				    sizeof(sc->sc_cmd_resp),
				    BUS_DMASYNC_POSTREAD);
				memcpy(sc->sc_cmd_resp,
				    pkt, sizeof(sc->sc_cmd_resp));
			}
			break;

		/* Generic responses: only the status word is interesting. */
		case IWM_PHY_CONFIGURATION_CMD:
		case IWM_TX_ANT_CONFIGURATION_CMD:
		case IWM_ADD_STA:
		case IWM_MAC_CONTEXT_CMD:
		case IWM_REPLY_SF_CFG_CMD:
		case IWM_POWER_TABLE_CMD:
		case IWM_PHY_CONTEXT_CMD:
		case IWM_BINDING_CONTEXT_CMD:
		case IWM_TIME_EVENT_CMD:
		case IWM_SCAN_REQUEST_CMD:
		case IWM_REPLY_BEACON_FILTERING_CMD:
		case IWM_MAC_PM_POWER_TABLE:
		case IWM_TIME_QUOTA_CMD:
		case IWM_REMOVE_STA:
		case IWM_TXPATH_FLUSH:
		case IWM_LQ_CMD:
			SYNC_RESP_STRUCT(cresp, pkt);
			if (sc->sc_wantresp == ((qid << 16) | idx)) {
				memcpy(sc->sc_cmd_resp,
				    pkt, sizeof(*pkt)+sizeof(*cresp));
			}
			break;

		/* ignore */
		case 0x6c: /* IWM_PHY_DB_CMD, no idea why it's not in fw-api.h */
			break;

		case IWM_INIT_COMPLETE_NOTIF:
			/* INIT firmware finished; wake the waiter in
			 * iwm_run_init_mvm_ucode(). */
			sc->sc_init_complete = 1;
			wakeup(&sc->sc_init_complete);
			break;

		case IWM_SCAN_COMPLETE_NOTIFICATION: {
			/* Defer scan-end processing to iwm_endscan_cb(). */
			struct iwm_scan_complete_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);

			workqueue_enqueue(sc->sc_eswq, &sc->sc_eswk, NULL);
			break; }

		case IWM_REPLY_ERROR: {
			struct iwm_error_resp *resp;
			SYNC_RESP_STRUCT(resp, pkt);

			aprint_error_dev(sc->sc_dev,
			    "firmware error 0x%x, cmd 0x%x\n",
			    le32toh(resp->error_type), resp->cmd_id);
			break; }

		case IWM_TIME_EVENT_NOTIFICATION: {
			/* Track auth protection state and wake anyone
			 * sleeping on sc_auth_prot (-1 signals failure). */
			struct iwm_time_event_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);

			if (notif->status) {
				if (le32toh(notif->action) &
				    IWM_TE_V2_NOTIF_HOST_EVENT_START)
					sc->sc_auth_prot = 2;
				else
					sc->sc_auth_prot = 0;
			} else {
				sc->sc_auth_prot = -1;
			}
			wakeup(&sc->sc_auth_prot);
			break; }

		case IWM_MCAST_FILTER_CMD:
			break;

		default:
			aprint_error_dev(sc->sc_dev,
			    "code %02x frame %d/%d %x UNHANDLED "
			    "(this should not happen)\n",
			    pkt->hdr.code, qid, idx, pkt->len_n_flags);
			break;
		}

		/*
		 * Why test bit 0x80?  The Linux driver:
		 *
		 * There is one exception:  uCode sets bit 15 when it
		 * originates the response/notification, i.e. when the
		 * response/notification is not a direct response to a
		 * command sent by the driver.  For example, uCode issues
		 * IWM_REPLY_RX when it sends a received frame to the driver;
		 * it is not a direct response to any driver command.
		 *
		 * Ok, so since when is 7 == 15?  Well, the Linux driver
		 * uses a slightly different format for pkt->hdr, and "qid"
		 * is actually the upper byte of a two-byte field.
		 */
		if (!(pkt->hdr.qid & (1 << 7))) {
			iwm_cmd_done(sc, pkt);
		}

		ADVANCE_RXQ(sc);
	}

	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/*
	 * Tell the firmware what we have processed.
	 * Seems like the hardware gets upset unless we align
	 * the write by 8??
	 */
	hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
}
   6343 
/*
 * Primary PCI interrupt handler.
 *
 * Reads the interrupt cause either from the ICT (interrupt cause table,
 * a DMA-coherent array the firmware writes into) or directly from the
 * CSR interrupt registers, acknowledges it, and dispatches to the
 * individual event handlers (firmware errors, HW errors, firmware-load
 * chunk completion, rfkill, and RX/notification processing).
 *
 * Returns 1 if the interrupt was ours, 0 otherwise (shared-IRQ protocol).
 */
static int
iwm_intr(void *arg)
{
	struct iwm_softc *sc = arg;
	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
	int handled = 0;
	int r1, r2, rv = 0;
	int isperiodic = 0;

	/* Mask all interrupts while we service this one. */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
		uint32_t *ict = sc->ict_dma.vaddr;
		int tmp;

		tmp = htole32(ict[sc->ict_cur]);
		if (!tmp)
			goto out_ena;	/* spurious: not our interrupt */

		/*
		 * ok, there was something.  keep plowing until we have all.
		 */
		/*
		 * Drain every pending entry in the ICT ring, OR-ing the
		 * causes together and zeroing each slot as we consume it.
		 */
		r1 = r2 = 0;
		while (tmp) {
			r1 |= tmp;
			ict[sc->ict_cur] = 0;
			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
			tmp = htole32(ict[sc->ict_cur]);
		}

		/* this is where the fun begins.  don't ask */
		if (r1 == 0xffffffff)
			r1 = 0;

		/* i am not expected to understand this */
		/*
		 * NOTE(review): this bit-shuffle expands the packed ICT
		 * cause format back into CSR_INT bit positions; it is
		 * copied from the Linux iwlwifi driver — do not "simplify".
		 */
		if (r1 & 0xc0000)
			r1 |= 0x8000;
		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
	} else {
		/* Non-ICT mode: read causes straight from the CSRs. */
		r1 = IWM_READ(sc, IWM_CSR_INT);
		/* "hardware gone" (where, fishing?) */
		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
			goto out;
		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
	}
	if (r1 == 0 && r2 == 0) {
		goto out_ena;	/* nothing pending after all */
	}

	/* Acknowledge the causes we are about to handle. */
	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);

	/* ignored */
	handled |= (r1 & (IWM_CSR_INT_BIT_ALIVE /*| IWM_CSR_INT_BIT_SCD*/));

	/* Firmware detected a fatal (software) error: dump state and stop. */
	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
#ifdef IWM_DEBUG
		int i;

		iwm_nic_error(sc);

		/* Dump driver status (TX and RX rings) while we're here. */
		DPRINTF(("driver status:\n"));
		for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
			struct iwm_tx_ring *ring = &sc->txq[i];
			DPRINTF(("  tx ring %2d: qid=%-2d cur=%-3d "
			    "queued=%-3d\n",
			    i, ring->qid, ring->cur, ring->queued));
		}
		DPRINTF(("  rx ring: cur=%d\n", sc->rxq.cur));
		DPRINTF(("  802.11 state %d\n", sc->sc_ic.ic_state));
#endif

		aprint_error_dev(sc->sc_dev, "fatal firmware error\n");
		ifp->if_flags &= ~IFF_UP;
		iwm_stop(ifp, 1);
		rv = 1;
		goto out;

	}

	/* Hardware error (e.g. bus problem): also fatal, stop the device. */
	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
		handled |= IWM_CSR_INT_BIT_HW_ERR;
		aprint_error_dev(sc->sc_dev,
		    "hardware error, stopping device\n");
		ifp->if_flags &= ~IFF_UP;
		iwm_stop(ifp, 1);
		rv = 1;
		goto out;
	}

	/* firmware chunk loaded */
	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
		handled |= IWM_CSR_INT_BIT_FH_TX;

		/* Wake the firmware-load thread sleeping on sc_fw. */
		sc->sc_fw_chunk_done = 1;
		wakeup(&sc->sc_fw);
	}

	/* RF-kill switch toggled: take the interface down if it is up. */
	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
		handled |= IWM_CSR_INT_BIT_RF_KILL;
		if (iwm_check_rfkill(sc) && (ifp->if_flags & IFF_UP)) {
			DPRINTF(("%s: rfkill switch, disabling interface\n",
			    DEVNAME(sc)));
			ifp->if_flags &= ~IFF_UP;
			iwm_stop(ifp, 1);
		}
	}

	/*
	 * The Linux driver uses periodic interrupts to avoid races.
	 * We cargo-cult like it's going out of fashion.
	 */
	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
		handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
		/* Disable periodic ints unless a real RX cause is pending. */
		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
			IWM_WRITE_1(sc,
			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
		isperiodic = 1;
	}

	/* RX data / firmware notifications: process the RX ring. */
	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
		handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);

		iwm_notif_intr(sc);

		/* enable periodic interrupt, see above */
		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
			    IWM_CSR_INT_PERIODIC_ENA);
	}

	if (__predict_false(r1 & ~handled))
		DPRINTF(("%s: unhandled interrupts: %x\n", DEVNAME(sc), r1));
	rv = 1;

 out_ena:
	/* Re-enable the interrupts we masked on entry. */
	iwm_restore_interrupts(sc);
 out:
	return rv;
}
   6487 
   6488 /*
   6489  * Autoconf glue-sniffing
   6490  */
   6491 
/*
 * PCI product IDs of all supported adapters: the 7260, 3160 and 7265
 * families.  The vendor (Intel) is checked separately in iwm_match().
 */
static const pci_product_id_t iwm_devices[] = {
	PCI_PRODUCT_INTEL_WIFI_LINK_7260_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_7260_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_3160_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_3160_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_7265_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_7265_2,
};
   6500 
   6501 static int
   6502 iwm_match(device_t parent, cfdata_t match __unused, void *aux)
   6503 {
   6504 	struct pci_attach_args *pa = aux;
   6505 
   6506 	if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_INTEL)
   6507 		return 0;
   6508 
   6509 	for (size_t i = 0; i < __arraycount(iwm_devices); i++)
   6510 		if (PCI_PRODUCT(pa->pa_id) == iwm_devices[i])
   6511 			return 1;
   6512 
   6513 	return 0;
   6514 }
   6515 
   6516 static int
   6517 iwm_preinit(struct iwm_softc *sc)
   6518 {
   6519 	int error;
   6520 
   6521 	if ((error = iwm_prepare_card_hw(sc)) != 0) {
   6522 		aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
   6523 		return error;
   6524 	}
   6525 
   6526 	if (sc->sc_flags & IWM_FLAG_ATTACHED)
   6527 		return 0;
   6528 
   6529 	if ((error = iwm_start_hw(sc)) != 0) {
   6530 		aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
   6531 		return error;
   6532 	}
   6533 
   6534 	error = iwm_run_init_mvm_ucode(sc, 1);
   6535 	iwm_stop_device(sc);
   6536 	return error;
   6537 }
   6538 
/*
 * Deferred attach, run from config_mountroot() once the root file system
 * is available (the firmware file must be readable to learn the MAC
 * address).  Completes 802.11 setup: capabilities, rate sets, ifnet
 * wiring, net80211 attachment, and the state-machine override.
 */
static void
iwm_attach_hook(device_t dev)
{
	struct iwm_softc *sc = device_private(dev);
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &sc->sc_ec.ec_if;

	/* config_mountroot() guarantees we run after autoconfiguration. */
	KASSERT(!cold);

	if (iwm_preinit(sc) != 0)
		return;

	sc->sc_flags |= IWM_FLAG_ATTACHED;

	aprint_normal_dev(sc->sc_dev,
	    "hw rev: 0x%x, fw ver %d.%d (API ver %d), address %s\n",
	    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
	    IWM_UCODE_MAJOR(sc->sc_fwver),
	    IWM_UCODE_MINOR(sc->sc_fwver),
	    IWM_UCODE_API(sc->sc_fwver),
	    ether_sprintf(sc->sc_nvm.hw_addr));

	ic->ic_ifp = ifp;
	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
	ic->ic_state = IEEE80211_S_INIT;

	/* Set device capabilities. */
	ic->ic_caps =
	    IEEE80211_C_WEP |		/* WEP */
	    IEEE80211_C_WPA |		/* 802.11i */
	    IEEE80211_C_SHSLOT |	/* short slot time supported */
	    IEEE80211_C_SHPREAMBLE;	/* short preamble supported */

#ifndef IWM_NO_5GHZ
	ic->ic_sup_rates[IEEE80211_MODE_11A] = ieee80211_std_rateset_11a;
#endif
	ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b;
	ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g;

	/* Give each PHY context its index as its identifier. */
	for (int i = 0; i < __arraycount(sc->sc_phyctxt); i++) {
		sc->sc_phyctxt[i].id = i;
	}

	/* AMRR rate-adaptation tuning. */
	sc->sc_amrr.amrr_min_success_threshold =  1;
	sc->sc_amrr.amrr_max_success_threshold = 15;

	/* IBSS channel undefined for now. */
	ic->ic_ibss_chan = &ic->ic_channels[1];

#if 0
	/* Max RSSI */
	ic->ic_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
#endif

	/* Wire up the ifnet callbacks to this driver. */
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = iwm_init;
	ifp->if_stop = iwm_stop;
	ifp->if_ioctl = iwm_ioctl;
	ifp->if_start = iwm_start;
	ifp->if_watchdog = iwm_watchdog;
	IFQ_SET_READY(&ifp->if_snd);
	memcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);

	/* Order matters: initialize, attach to net80211, then register. */
	if_initialize(ifp);
	ieee80211_ifattach(ic);
	if_register(ifp);

	ic->ic_node_alloc = iwm_node_alloc;

	/* Override 802.11 state transition machine. */
	sc->sc_newstate = ic->ic_newstate;
	ic->ic_newstate = iwm_newstate;
	ieee80211_media_init(ic, iwm_media_change, ieee80211_media_status);
	ieee80211_announce(ic);

	iwm_radiotap_attach(sc);
	callout_init(&sc->sc_calib_to, 0);
	callout_setfunc(&sc->sc_calib_to, iwm_calib_timeout, sc);

	//task_set(&sc->init_task, iwm_init_task, sc);
}
   6622 
   6623 static void
   6624 iwm_attach(device_t parent, device_t self, void *aux)
   6625 {
   6626 	struct iwm_softc *sc = device_private(self);
   6627 	struct pci_attach_args *pa = aux;
   6628 	pci_intr_handle_t ih;
   6629 	pcireg_t reg, memtype;
   6630 	const char *intrstr;
   6631 	int error;
   6632 	int txq_i;
   6633 
   6634 	sc->sc_dev = self;
   6635 	sc->sc_pct = pa->pa_pc;
   6636 	sc->sc_pcitag = pa->pa_tag;
   6637 	sc->sc_dmat = pa->pa_dmat;
   6638 	sc->sc_pciid = pa->pa_id;
   6639 
   6640 	pci_aprint_devinfo(pa, NULL);
   6641 
   6642 	/*
   6643 	 * Get the offset of the PCI Express Capability Structure in PCI
   6644 	 * Configuration Space.
   6645 	 */
   6646 	error = pci_get_capability(sc->sc_pct, sc->sc_pcitag,
   6647 	    PCI_CAP_PCIEXPRESS, &sc->sc_cap_off, NULL);
   6648 	if (error == 0) {
   6649 		aprint_error_dev(self,
   6650 		    "PCIe capability structure not found!\n");
   6651 		return;
   6652 	}
   6653 
   6654 	/* Clear device-specific "PCI retry timeout" register (41h). */
   6655 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
   6656 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
   6657 
   6658 	/* Enable bus-mastering and hardware bug workaround. */
   6659 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
   6660 	reg |= PCI_COMMAND_MASTER_ENABLE;
   6661 	/* if !MSI */
   6662 	if (reg & PCI_COMMAND_INTERRUPT_DISABLE) {
   6663 		reg &= ~PCI_COMMAND_INTERRUPT_DISABLE;
   6664 	}
   6665 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, reg);
   6666 
   6667 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START);
   6668 	error = pci_mapreg_map(pa, PCI_MAPREG_START, memtype, 0,
   6669 	    &sc->sc_st, &sc->sc_sh, NULL, &sc->sc_sz);
   6670 	if (error != 0) {
   6671 		aprint_error_dev(self, "can't map mem space\n");
   6672 		return;
   6673 	}
   6674 
   6675 	/* Install interrupt handler. */
   6676 	if (pci_intr_map(pa, &ih)) {
   6677 		aprint_error_dev(self, "can't map interrupt\n");
   6678 		return;
   6679 	}
   6680 
   6681 	char intrbuf[PCI_INTRSTR_LEN];
   6682 	intrstr = pci_intr_string(sc->sc_pct, ih, intrbuf, sizeof(intrbuf));
   6683 	sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET, iwm_intr, sc);
   6684 	if (sc->sc_ih == NULL) {
   6685 		aprint_error_dev(self, "can't establish interrupt");
   6686 		if (intrstr != NULL)
   6687 			aprint_error(" at %s", intrstr);
   6688 		aprint_error("\n");
   6689 		return;
   6690 	}
   6691 	aprint_normal_dev(self, "interrupting at %s\n", intrstr);
   6692 
   6693 	sc->sc_wantresp = -1;
   6694 
   6695 	switch (PCI_PRODUCT(sc->sc_pciid)) {
   6696 	case PCI_PRODUCT_INTEL_WIFI_LINK_7260_1:
   6697 	case PCI_PRODUCT_INTEL_WIFI_LINK_7260_2:
   6698 		sc->sc_fwname = "iwlwifi-7260-9.ucode";
   6699 		sc->host_interrupt_operation_mode = 1;
   6700 		break;
   6701 	case PCI_PRODUCT_INTEL_WIFI_LINK_3160_1:
   6702 	case PCI_PRODUCT_INTEL_WIFI_LINK_3160_2:
   6703 		sc->sc_fwname = "iwlwifi-3160-9.ucode";
   6704 		sc->host_interrupt_operation_mode = 1;
   6705 		break;
   6706 	case PCI_PRODUCT_INTEL_WIFI_LINK_7265_1:
   6707 	case PCI_PRODUCT_INTEL_WIFI_LINK_7265_2:
   6708 		sc->sc_fwname = "iwlwifi-7265-9.ucode";
   6709 		sc->host_interrupt_operation_mode = 0;
   6710 		break;
   6711 	default:
   6712 		aprint_error_dev(self, "unknown product %#x",
   6713 		    PCI_PRODUCT(sc->sc_pciid));
   6714 		return;
   6715 	}
   6716 	DPRINTF(("%s: firmware=%s\n", DEVNAME(sc), sc->sc_fwname));
   6717 	sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
   6718 
   6719 	/*
   6720 	 * We now start fiddling with the hardware
   6721 	 */
   6722 
   6723 	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
   6724 	if (iwm_prepare_card_hw(sc) != 0) {
   6725 		aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
   6726 		return;
   6727 	}
   6728 
   6729 	/* Allocate DMA memory for firmware transfers. */
   6730 	if ((error = iwm_alloc_fwmem(sc)) != 0) {
   6731 		aprint_error_dev(sc->sc_dev,
   6732 		    "could not allocate memory for firmware\n");
   6733 		return;
   6734 	}
   6735 
   6736 	/* Allocate "Keep Warm" page. */
   6737 	if ((error = iwm_alloc_kw(sc)) != 0) {
   6738 		aprint_error_dev(sc->sc_dev,
   6739 		    "could not allocate keep warm page\n");
   6740 		goto fail1;
   6741 	}
   6742 
   6743 	/* We use ICT interrupts */
   6744 	if ((error = iwm_alloc_ict(sc)) != 0) {
   6745 		aprint_error_dev(sc->sc_dev, "could not allocate ICT table\n");
   6746 		goto fail2;
   6747 	}
   6748 
   6749 	/* Allocate TX scheduler "rings". */
   6750 	if ((error = iwm_alloc_sched(sc)) != 0) {
   6751 		aprint_error_dev(sc->sc_dev,
   6752 		    "could not allocate TX scheduler rings\n");
   6753 		goto fail3;
   6754 	}
   6755 
   6756 	/* Allocate TX rings */
   6757 	for (txq_i = 0; txq_i < __arraycount(sc->txq); txq_i++) {
   6758 		if ((error = iwm_alloc_tx_ring(sc,
   6759 		    &sc->txq[txq_i], txq_i)) != 0) {
   6760 			aprint_error_dev(sc->sc_dev,
   6761 			    "could not allocate TX ring %d\n", txq_i);
   6762 			goto fail4;
   6763 		}
   6764 	}
   6765 
   6766 	/* Allocate RX ring. */
   6767 	if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
   6768 		aprint_error_dev(sc->sc_dev, "could not allocate RX ring\n");
   6769 		goto fail4;
   6770 	}
   6771 
   6772 	workqueue_create(&sc->sc_eswq, "iwmes",
   6773 	    iwm_endscan_cb, sc, PRI_NONE, IPL_NET, 0);
   6774 	workqueue_create(&sc->sc_nswq, "iwmns",
   6775 	    iwm_newstate_cb, sc, PRI_NONE, IPL_NET, 0);
   6776 
   6777 	/* Clear pending interrupts. */
   6778 	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
   6779 
   6780 	/*
   6781 	 * We can't do normal attach before the file system is mounted
   6782 	 * because we cannot read the MAC address without loading the
   6783 	 * firmware from disk.  So we postpone until mountroot is done.
   6784 	 * Notably, this will require a full driver unload/load cycle
   6785 	 * (or reboot) in case the firmware is not present when the
   6786 	 * hook runs.
   6787 	 */
   6788 	config_mountroot(self, iwm_attach_hook);
   6789 
   6790 	return;
   6791 
   6792 	/* Free allocated memory if something failed during attachment. */
   6793 fail4:	while (--txq_i >= 0)
   6794 		iwm_free_tx_ring(sc, &sc->txq[txq_i]);
   6795 	iwm_free_sched(sc);
   6796 fail3:	if (sc->ict_dma.vaddr != NULL)
   6797 		iwm_free_ict(sc);
   6798 fail2:	iwm_free_kw(sc);
   6799 fail1:	iwm_free_fwmem(sc);
   6800 }
   6801 
   6802 /*
   6803  * Attach the interface to 802.11 radiotap.
   6804  */
   6805 void
   6806 iwm_radiotap_attach(struct iwm_softc *sc)
   6807 {
   6808 	struct ifnet *ifp = sc->sc_ic.ic_ifp;
   6809 
   6810 	bpf_attach2(ifp, DLT_IEEE802_11_RADIO,
   6811 	    sizeof (struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN,
   6812 	    &sc->sc_drvbpf);
   6813 
   6814 	sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
   6815 	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
   6816 	sc->sc_rxtap.wr_ihdr.it_present = htole32(IWM_RX_RADIOTAP_PRESENT);
   6817 
   6818 	sc->sc_txtap_len = sizeof sc->sc_txtapu;
   6819 	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
   6820 	sc->sc_txtap.wt_ihdr.it_present = htole32(IWM_TX_RADIOTAP_PRESENT);
   6821 }
   6822 
   6823 #if 0
/*
 * (Currently compiled out under #if 0.)
 * Restart task: serializes on IWM_FLAG_BUSY, stops the interface, and
 * re-initializes it if it was administratively up but not running.
 */
static void
iwm_init_task(void *arg1)
{
	struct iwm_softc *sc = arg1;
	struct ifnet *ifp = &sc->sc_ic.ic_if;
	int s;

	s = splnet();
	/* Wait for any other holder of the softc busy flag. */
	while (sc->sc_flags & IWM_FLAG_BUSY)
		tsleep(&sc->sc_flags, 0, "iwmpwr", 0);
	sc->sc_flags |= IWM_FLAG_BUSY;

	iwm_stop(ifp, 0);
	/* Restart only if marked up but not currently running. */
	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP)
		iwm_init(ifp);

	sc->sc_flags &= ~IWM_FLAG_BUSY;
	wakeup(&sc->sc_flags);
	splx(s);
}
   6844 
/*
 * (Currently compiled out under #if 0.)
 * Resume-from-suspend helper: re-applies the PCI retry-timeout
 * workaround, then restarts the interface via iwm_init_task().
 */
static void
iwm_wakeup(struct iwm_softc *sc)
{
	pcireg_t reg;

	/* Clear device-specific "PCI retry timeout" register (41h). */
	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);

	iwm_init_task(sc);
}
   6856 
/*
 * (Currently compiled out under #if 0.)
 * Autoconf activate hook: on deactivation, stop the interface if it is
 * running.  All other actions are unsupported.
 */
static int
iwm_activate(device_t self, enum devact act)
{
	struct iwm_softc *sc = device_private(self);
	struct ifnet *ifp = IC2IFP(&sc->sc_ic);

	switch (act) {
	case DVACT_DEACTIVATE:
		if (ifp->if_flags & IFF_RUNNING)
			iwm_stop(ifp, 0);
		return 0;
	default:
		return EOPNOTSUPP;
	}
}
   6872 #endif
   6873 
/* Autoconfiguration glue; no detach or activate hooks are provided yet. */
CFATTACH_DECL_NEW(iwm, sizeof(struct iwm_softc), iwm_match, iwm_attach,
	NULL, NULL);
   6876