      1 /*	$NetBSD: if_iwm.c,v 1.8 2015/02/16 13:22:19 nonaka Exp $	*/
      2 /*	OpenBSD: if_iwm.c,v 1.18 2015/02/11 01:12:42 brad Exp	*/
      3 
      4 /*
      5  * Copyright (c) 2014 genua mbh <info@genua.de>
      6  * Copyright (c) 2014 Fixup Software Ltd.
      7  *
      8  * Permission to use, copy, modify, and distribute this software for any
      9  * purpose with or without fee is hereby granted, provided that the above
     10  * copyright notice and this permission notice appear in all copies.
     11  *
     12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
     13  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
     14  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
     15  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
     16  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
     17  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
     18  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
     19  */
     20 
     21 /*-
     22  * Based on BSD-licensed source modules in the Linux iwlwifi driver,
     23  * which were used as the reference documentation for this implementation.
     24  *
     25  * Driver version we are currently based off of is
     26  * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
     27  *
     28  ***********************************************************************
     29  *
     30  * This file is provided under a dual BSD/GPLv2 license.  When using or
     31  * redistributing this file, you may do so under either license.
     32  *
     33  * GPL LICENSE SUMMARY
     34  *
     35  * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
     36  *
     37  * This program is free software; you can redistribute it and/or modify
     38  * it under the terms of version 2 of the GNU General Public License as
     39  * published by the Free Software Foundation.
     40  *
     41  * This program is distributed in the hope that it will be useful, but
     42  * WITHOUT ANY WARRANTY; without even the implied warranty of
     43  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
     44  * General Public License for more details.
     45  *
     46  * You should have received a copy of the GNU General Public License
     47  * along with this program; if not, write to the Free Software
     48  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
     49  * USA
     50  *
     51  * The full GNU General Public License is included in this distribution
     52  * in the file called COPYING.
     53  *
     54  * Contact Information:
     55  *  Intel Linux Wireless <ilw@linux.intel.com>
     56  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
     57  *
     58  *
     59  * BSD LICENSE
     60  *
     61  * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
     62  * All rights reserved.
     63  *
     64  * Redistribution and use in source and binary forms, with or without
     65  * modification, are permitted provided that the following conditions
     66  * are met:
     67  *
     68  *  * Redistributions of source code must retain the above copyright
     69  *    notice, this list of conditions and the following disclaimer.
     70  *  * Redistributions in binary form must reproduce the above copyright
     71  *    notice, this list of conditions and the following disclaimer in
     72  *    the documentation and/or other materials provided with the
     73  *    distribution.
     74  *  * Neither the name Intel Corporation nor the names of its
     75  *    contributors may be used to endorse or promote products derived
     76  *    from this software without specific prior written permission.
     77  *
     78  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
     79  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
     80  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
     81  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
     82  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
     83  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
     84  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     85  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     86  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     87  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
     88  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     89  */
     90 
     91 /*-
     92  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
     93  *
     94  * Permission to use, copy, modify, and distribute this software for any
     95  * purpose with or without fee is hereby granted, provided that the above
     96  * copyright notice and this permission notice appear in all copies.
     97  *
     98  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
     99  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    100  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    101  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    102  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    103  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    104  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
    105  */
    106 
    107 #include <sys/cdefs.h>
    108 __KERNEL_RCSID(0, "$NetBSD: if_iwm.c,v 1.8 2015/02/16 13:22:19 nonaka Exp $");
    109 
    110 #include <sys/param.h>
    111 #include <sys/conf.h>
    112 #include <sys/kernel.h>
    113 #include <sys/kmem.h>
    114 #include <sys/mbuf.h>
    115 #include <sys/mutex.h>
    116 #include <sys/proc.h>
    117 #include <sys/socket.h>
    118 #include <sys/sockio.h>
    119 #include <sys/systm.h>
    120 
    121 #include <sys/cpu.h>
    122 #include <sys/bus.h>
    123 #include <sys/workqueue.h>
    124 #include <machine/endian.h>
    125 #include <machine/intr.h>
    126 
    127 #include <dev/pci/pcireg.h>
    128 #include <dev/pci/pcivar.h>
    129 #include <dev/pci/pcidevs.h>
    130 #include <dev/firmload.h>
    131 
    132 #include <net/bpf.h>
    133 #include <net/if.h>
    134 #include <net/if_arp.h>
    135 #include <net/if_dl.h>
    136 #include <net/if_media.h>
    137 #include <net/if_types.h>
    138 #include <net/if_ether.h>
    139 
    140 #include <netinet/in.h>
    141 #include <netinet/in_systm.h>
    142 #include <netinet/ip.h>
    143 
    144 #include <net80211/ieee80211_var.h>
    145 #include <net80211/ieee80211_amrr.h>
    146 #include <net80211/ieee80211_radiotap.h>
    147 
    148 #define DEVNAME(_s)	device_xname((_s)->sc_dev)
    149 #define IC2IFP(_ic_)	((_ic_)->ic_ifp)
    150 
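         /*
          * Linux-style helpers: load a little-endian 16/32-bit value through
          * a pointer and convert it to host byte order.
          */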
    151 #define le16_to_cpup(_a_) (le16toh(*(const uint16_t *)(_a_)))
    152 #define le32_to_cpup(_a_) (le32toh(*(const uint32_t *)(_a_)))
    153 
    154 #ifdef IWM_DEBUG
    155 #define DPRINTF(x)	do { if (iwm_debug > 0) printf x; } while (0)
    156 #define DPRINTFN(n, x)	do { if (iwm_debug >= (n)) printf x; } while (0)
    157 int iwm_debug = 1;
    158 #else
    159 #define DPRINTF(x)	do { ; } while (0)
    160 #define DPRINTFN(n, x)	do { ; } while (0)
    161 #endif
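         /*
          * Note the double parentheses in DPRINTF usage, e.g.
          *	DPRINTF(("%s: firmware loaded\n", DEVNAME(sc)));
          * the whole printf() argument list is passed as a single macro
          * argument so the call compiles away when IWM_DEBUG is not set.
          */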
    162 
    163 #include <dev/pci/if_iwmreg.h>
    164 #include <dev/pci/if_iwmvar.h>
    165 
    166 static const uint8_t iwm_nvm_channels[] = {
    167 	/* 2.4 GHz */
    168 	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
    169 	/* 5 GHz */
     170 	36, 40, 44, 48, 52, 56, 60, 64,
    171 	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
    172 	149, 153, 157, 161, 165
    173 };
    174 #define IWM_NUM_2GHZ_CHANNELS	14
    175 
    176 /* It looks like 11a TX is broken, unfortunately. */
    177 #define IWM_NO_5GHZ		1
    178 
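         /*
          * Rate table: `rate' is in units of 500 kb/s (2 = 1 Mb/s CCK,
          * 108 = 54 Mb/s OFDM); `plcp' is the corresponding IWM_RATE_*_PLCP
          * code used when talking to the firmware.
          */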
    179 static const struct iwm_rate {
    180 	uint8_t rate;
    181 	uint8_t plcp;
    182 } iwm_rates[] = {
    183 	{   2,	IWM_RATE_1M_PLCP  },
    184 	{   4,	IWM_RATE_2M_PLCP  },
    185 	{  11,	IWM_RATE_5M_PLCP  },
    186 	{  22,	IWM_RATE_11M_PLCP },
    187 	{  12,	IWM_RATE_6M_PLCP  },
    188 	{  18,	IWM_RATE_9M_PLCP  },
    189 	{  24,	IWM_RATE_12M_PLCP },
    190 	{  36,	IWM_RATE_18M_PLCP },
    191 	{  48,	IWM_RATE_24M_PLCP },
    192 	{  72,	IWM_RATE_36M_PLCP },
    193 	{  96,	IWM_RATE_48M_PLCP },
    194 	{ 108,	IWM_RATE_54M_PLCP },
    195 };
    196 #define IWM_RIDX_CCK	0
    197 #define IWM_RIDX_OFDM	4
    198 #define IWM_RIDX_MAX	(__arraycount(iwm_rates)-1)
    199 #define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
    200 #define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
    201 
    202 struct iwm_newstate_state {
    203 	struct work ns_wk;
    204 	enum ieee80211_state ns_nstate;
    205 	int ns_arg;
    206 	int ns_generation;
    207 };
    208 
    209 static int	iwm_store_cscheme(struct iwm_softc *, uint8_t *, size_t);
    210 static int	iwm_firmware_store_section(struct iwm_softc *,
    211 		    enum iwm_ucode_type, uint8_t *, size_t);
    212 static int	iwm_set_default_calib(struct iwm_softc *, const void *);
    213 static int	iwm_read_firmware(struct iwm_softc *);
    214 static uint32_t iwm_read_prph(struct iwm_softc *, uint32_t);
    215 static void	iwm_write_prph(struct iwm_softc *, uint32_t, uint32_t);
    216 #ifdef IWM_DEBUG
    217 static int	iwm_read_mem(struct iwm_softc *, uint32_t, void *, int);
    218 #endif
    219 static int	iwm_write_mem(struct iwm_softc *, uint32_t, const void *, int);
    220 static int	iwm_write_mem32(struct iwm_softc *, uint32_t, uint32_t);
    221 static int	iwm_poll_bit(struct iwm_softc *, int, uint32_t, uint32_t, int);
    222 static int	iwm_nic_lock(struct iwm_softc *);
    223 static void	iwm_nic_unlock(struct iwm_softc *);
    224 static void	iwm_set_bits_mask_prph(struct iwm_softc *, uint32_t, uint32_t,
    225 		    uint32_t);
    226 static void	iwm_set_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
    227 static void	iwm_clear_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
    228 static int	iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *,
    229 		    bus_size_t, bus_size_t);
    230 static void	iwm_dma_contig_free(struct iwm_dma_info *);
    231 static int	iwm_alloc_fwmem(struct iwm_softc *);
    232 static void	iwm_free_fwmem(struct iwm_softc *);
    233 static int	iwm_alloc_sched(struct iwm_softc *);
    234 static void	iwm_free_sched(struct iwm_softc *);
    235 static int	iwm_alloc_kw(struct iwm_softc *);
    236 static void	iwm_free_kw(struct iwm_softc *);
    237 static int	iwm_alloc_ict(struct iwm_softc *);
    238 static void	iwm_free_ict(struct iwm_softc *);
    239 static int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
    240 static void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
    241 static void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
    242 static int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
    243 		    int);
    244 static void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
    245 static void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
    246 static void	iwm_enable_rfkill_int(struct iwm_softc *);
    247 static int	iwm_check_rfkill(struct iwm_softc *);
    248 static void	iwm_enable_interrupts(struct iwm_softc *);
    249 static void	iwm_restore_interrupts(struct iwm_softc *);
    250 static void	iwm_disable_interrupts(struct iwm_softc *);
    251 static void	iwm_ict_reset(struct iwm_softc *);
    252 static int	iwm_set_hw_ready(struct iwm_softc *);
    253 static int	iwm_prepare_card_hw(struct iwm_softc *);
    254 static void	iwm_apm_config(struct iwm_softc *);
    255 static int	iwm_apm_init(struct iwm_softc *);
    256 static void	iwm_apm_stop(struct iwm_softc *);
    257 static int	iwm_start_hw(struct iwm_softc *);
    258 static void	iwm_stop_device(struct iwm_softc *);
    259 static void	iwm_set_pwr(struct iwm_softc *);
    260 static void	iwm_mvm_nic_config(struct iwm_softc *);
    261 static int	iwm_nic_rx_init(struct iwm_softc *);
    262 static int	iwm_nic_tx_init(struct iwm_softc *);
    263 static int	iwm_nic_init(struct iwm_softc *);
    264 static void	iwm_enable_txq(struct iwm_softc *, int, int);
    265 static int	iwm_post_alive(struct iwm_softc *);
    266 static int	iwm_is_valid_channel(uint16_t);
    267 static uint8_t	iwm_ch_id_to_ch_index(uint16_t);
    268 static uint16_t iwm_channel_id_to_papd(uint16_t);
    269 static uint16_t iwm_channel_id_to_txp(struct iwm_softc *, uint16_t);
    270 static int	iwm_phy_db_get_section_data(struct iwm_softc *, uint32_t,
    271 		    uint8_t **, uint16_t *, uint16_t);
    272 static int	iwm_send_phy_db_cmd(struct iwm_softc *, uint16_t, uint16_t,
    273 		    void *);
     274 static int	iwm_send_phy_db_data(struct iwm_softc *);
    276 static void	iwm_mvm_te_v2_to_v1(const struct iwm_time_event_cmd_v2 *,
    277 		    struct iwm_time_event_cmd_v1 *);
    278 static int	iwm_mvm_send_time_event_cmd(struct iwm_softc *,
    279 		    const struct iwm_time_event_cmd_v2 *);
    280 static int	iwm_mvm_time_event_send_add(struct iwm_softc *,
    281 		    struct iwm_node *, void *, struct iwm_time_event_cmd_v2 *);
    282 static void	iwm_mvm_protect_session(struct iwm_softc *, struct iwm_node *,
    283 		    uint32_t, uint32_t, uint32_t);
    284 static int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
    285 		    uint16_t, uint8_t *, uint16_t *);
    286 static int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
    287 		    uint16_t *);
    288 static void	iwm_init_channel_map(struct iwm_softc *,
    289 		    const uint16_t * const);
    290 static int	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
    291 		    const uint16_t *, const uint16_t *, uint8_t, uint8_t);
    292 static int	iwm_nvm_init(struct iwm_softc *);
    293 static int	iwm_firmware_load_chunk(struct iwm_softc *, uint32_t,
    294 		    const uint8_t *, uint32_t);
    295 static int	iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
    296 static int	iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
    297 static int	iwm_fw_alive(struct iwm_softc *, uint32_t);
    298 static int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
    299 static int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
    300 static int	iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
    301 		    enum iwm_ucode_type);
    302 static int	iwm_run_init_mvm_ucode(struct iwm_softc *, int);
    303 static int	iwm_rx_addbuf(struct iwm_softc *, int, int);
    304 static int	iwm_mvm_calc_rssi(struct iwm_softc *, struct iwm_rx_phy_info *);
    305 static int	iwm_mvm_get_signal_strength(struct iwm_softc *,
    306 		    struct iwm_rx_phy_info *);
    307 static void	iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
    308 		    struct iwm_rx_packet *, struct iwm_rx_data *);
    309 static int	iwm_get_noise(const struct iwm_mvm_statistics_rx_non_phy *);
    310 static void	iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct iwm_rx_packet *,
    311 		    struct iwm_rx_data *);
    312 static void	iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
    313 		    struct iwm_rx_packet *, struct iwm_node *);
    314 static void	iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
    315 		    struct iwm_rx_data *);
    316 static int	iwm_mvm_binding_cmd(struct iwm_softc *, struct iwm_node *,
    317 		    uint32_t);
    318 static int	iwm_mvm_binding_update(struct iwm_softc *, struct iwm_node *,
    319 		    int);
    320 static int	iwm_mvm_binding_add_vif(struct iwm_softc *, struct iwm_node *);
    321 static void	iwm_mvm_phy_ctxt_cmd_hdr(struct iwm_softc *,
    322 		    struct iwm_mvm_phy_ctxt *, struct iwm_phy_context_cmd *,
    323 		    uint32_t, uint32_t);
    324 static void	iwm_mvm_phy_ctxt_cmd_data(struct iwm_softc *,
    325 		    struct iwm_phy_context_cmd *, struct ieee80211_channel *,
    326 		    uint8_t, uint8_t);
    327 static int	iwm_mvm_phy_ctxt_apply(struct iwm_softc *,
    328 		    struct iwm_mvm_phy_ctxt *, uint8_t, uint8_t, uint32_t,
    329 		    uint32_t);
    330 static int	iwm_mvm_phy_ctxt_add(struct iwm_softc *,
    331 		    struct iwm_mvm_phy_ctxt *, struct ieee80211_channel *,
    332 		    uint8_t, uint8_t);
    333 static int	iwm_mvm_phy_ctxt_changed(struct iwm_softc *,
    334 		    struct iwm_mvm_phy_ctxt *, struct ieee80211_channel *,
    335 		    uint8_t, uint8_t);
    336 static int	iwm_send_cmd(struct iwm_softc *, struct iwm_host_cmd *);
    337 static int	iwm_mvm_send_cmd_pdu(struct iwm_softc *, uint8_t, uint32_t,
    338 		    uint16_t, const void *);
    339 static int	iwm_mvm_send_cmd_status(struct iwm_softc *,
    340 		    struct iwm_host_cmd *, uint32_t *);
    341 static int	iwm_mvm_send_cmd_pdu_status(struct iwm_softc *, uint8_t,
    342 		    uint16_t, const void *, uint32_t *);
    343 static void	iwm_free_resp(struct iwm_softc *, struct iwm_host_cmd *);
    344 static void	iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
    345 #if 0
    346 static void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
    347 		    uint16_t);
    348 #endif
    349 static const struct iwm_rate *iwm_tx_fill_cmd(struct iwm_softc *,
    350 		    struct iwm_node *, struct ieee80211_frame *,
    351 		    struct iwm_tx_cmd *);
    352 static int	iwm_tx(struct iwm_softc *, struct mbuf *,
    353 		    struct ieee80211_node *, int);
    354 static int	iwm_mvm_beacon_filter_send_cmd(struct iwm_softc *,
    355 		    struct iwm_beacon_filter_cmd *);
    356 static void	iwm_mvm_beacon_filter_set_cqm_params(struct iwm_softc *,
    357 		    struct iwm_node *, struct iwm_beacon_filter_cmd *);
    358 static int	iwm_mvm_update_beacon_abort(struct iwm_softc *,
    359 		    struct iwm_node *, int);
    360 static void	iwm_mvm_power_log(struct iwm_softc *,
    361 		    struct iwm_mac_power_cmd *);
    362 static void	iwm_mvm_power_build_cmd(struct iwm_softc *, struct iwm_node *,
    363 		    struct iwm_mac_power_cmd *);
    364 static int	iwm_mvm_power_mac_update_mode(struct iwm_softc *,
    365 		    struct iwm_node *);
    366 static int	iwm_mvm_power_update_device(struct iwm_softc *);
    367 static int	iwm_mvm_enable_beacon_filter(struct iwm_softc *,
    368 		    struct iwm_node *);
    369 static int	iwm_mvm_disable_beacon_filter(struct iwm_softc *,
    370 		    struct iwm_node *);
    371 static void	iwm_mvm_add_sta_cmd_v6_to_v5(struct iwm_mvm_add_sta_cmd_v6 *,
    372 		    struct iwm_mvm_add_sta_cmd_v5 *);
    373 static int	iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *,
    374 		    struct iwm_mvm_add_sta_cmd_v6 *, int *);
    375 static int	iwm_mvm_sta_send_to_fw(struct iwm_softc *, struct iwm_node *,
    376 		    int);
    377 static int	iwm_mvm_add_sta(struct iwm_softc *, struct iwm_node *);
    378 static int	iwm_mvm_update_sta(struct iwm_softc *, struct iwm_node *);
    379 static int	iwm_mvm_add_int_sta_common(struct iwm_softc *,
    380 		    struct iwm_int_sta *, const uint8_t *, uint16_t, uint16_t);
    381 static int	iwm_mvm_add_aux_sta(struct iwm_softc *);
    382 static uint16_t iwm_mvm_scan_rx_chain(struct iwm_softc *);
    383 static uint32_t iwm_mvm_scan_max_out_time(struct iwm_softc *, uint32_t, int);
    384 static uint32_t iwm_mvm_scan_suspend_time(struct iwm_softc *, int);
    385 static uint32_t iwm_mvm_scan_rxon_flags(struct iwm_softc *, int);
    386 static uint32_t iwm_mvm_scan_rate_n_flags(struct iwm_softc *, int, int);
    387 static uint16_t iwm_mvm_get_active_dwell(struct iwm_softc *, int, int);
    388 static uint16_t iwm_mvm_get_passive_dwell(struct iwm_softc *, int);
    389 static int	iwm_mvm_scan_fill_channels(struct iwm_softc *,
    390 		    struct iwm_scan_cmd *, int, int, int);
    391 static uint16_t iwm_mvm_fill_probe_req(struct iwm_softc *,
    392 		    struct ieee80211_frame *, const uint8_t *, int,
    393 		    const uint8_t *, int, const uint8_t *, int, int);
    394 static int	iwm_mvm_scan_request(struct iwm_softc *, int, int, uint8_t *,
    395 		    int);
    396 static void	iwm_mvm_ack_rates(struct iwm_softc *, struct iwm_node *, int *,
    397 		    int *);
    398 static void	iwm_mvm_mac_ctxt_cmd_common(struct iwm_softc *,
    399 		    struct iwm_node *, struct iwm_mac_ctx_cmd *, uint32_t);
    400 static int	iwm_mvm_mac_ctxt_send_cmd(struct iwm_softc *,
    401 		    struct iwm_mac_ctx_cmd *);
    402 static void	iwm_mvm_mac_ctxt_cmd_fill_sta(struct iwm_softc *,
    403 		    struct iwm_node *, struct iwm_mac_data_sta *, int);
    404 static int	iwm_mvm_mac_ctxt_cmd_station(struct iwm_softc *,
    405 		    struct iwm_node *, uint32_t);
    406 static int	iwm_mvm_mac_ctx_send(struct iwm_softc *, struct iwm_node *,
    407 		    uint32_t);
    408 static int	iwm_mvm_mac_ctxt_add(struct iwm_softc *, struct iwm_node *);
    409 static int	iwm_mvm_mac_ctxt_changed(struct iwm_softc *, struct iwm_node *);
    410 static int	iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_node *);
    411 static int	iwm_auth(struct iwm_softc *);
    412 static int	iwm_assoc(struct iwm_softc *);
    413 static int	iwm_release(struct iwm_softc *, struct iwm_node *);
    414 static void	iwm_calib_timeout(void *);
    415 static void	iwm_setrates(struct iwm_node *);
    416 static int	iwm_media_change(struct ifnet *);
    417 static void	iwm_newstate_cb(struct work *, void *);
    418 static int	iwm_newstate(struct ieee80211com *, enum ieee80211_state, int);
    419 static void	iwm_endscan_cb(struct work *, void *);
    420 static int	iwm_init_hw(struct iwm_softc *);
    421 static int	iwm_init(struct ifnet *);
    422 static void	iwm_start(struct ifnet *);
    423 static void	iwm_stop(struct ifnet *, int);
    424 static void	iwm_watchdog(struct ifnet *);
    425 static int	iwm_ioctl(struct ifnet *, u_long, void *);
    426 #ifdef IWM_DEBUG
    427 static const char *iwm_desc_lookup(uint32_t);
    428 static void	iwm_nic_error(struct iwm_softc *);
    429 #endif
    430 static void	iwm_notif_intr(struct iwm_softc *);
    431 static int	iwm_intr(void *);
    432 static int	iwm_preinit(struct iwm_softc *);
    433 static void	iwm_attach_hook(device_t);
    434 static void	iwm_attach(device_t, device_t, void *);
    435 #if 0
    436 static void	iwm_init_task(void *);
    437 static int	iwm_activate(device_t, enum devact);
    438 static void	iwm_wakeup(struct iwm_softc *);
    439 #endif
    440 static void	iwm_radiotap_attach(struct iwm_softc *);
    441 
    442 static int
    443 iwm_firmload(struct iwm_softc *sc)
    444 {
    445 	struct iwm_fw_info *fw = &sc->sc_fw;
    446 	firmware_handle_t fwh;
    447 	int error;
    448 
    449 	/* Open firmware image. */
    450 	if ((error = firmware_open("if_iwm", sc->sc_fwname, &fwh)) != 0) {
    451 		aprint_error_dev(sc->sc_dev,
    452 		    "could not get firmware handle %s\n", sc->sc_fwname);
    453 		return error;
    454 	}
    455 
    456 	fw->fw_rawsize = firmware_get_size(fwh);
    457 	/*
    458 	 * Well, this is how the Linux driver checks it ....
    459 	 */
    460 	if (fw->fw_rawsize < sizeof(uint32_t)) {
    461 		aprint_error_dev(sc->sc_dev,
    462 		    "firmware too short: %zd bytes\n", fw->fw_rawsize);
    463 		error = EINVAL;
    464 		goto out;
    465 	}
    466 
    467 	/* some sanity */
    468 	if (fw->fw_rawsize > IWM_FWMAXSIZE) {
    469 		aprint_error_dev(sc->sc_dev,
    470 		    "firmware size is ridiculous: %zd bytes\n",
     471 		    fw->fw_rawsize);
    472 		error = EINVAL;
    473 		goto out;
    474 	}
    475 
    476 	/* Read the firmware. */
    477 	fw->fw_rawdata = kmem_alloc(fw->fw_rawsize, KM_SLEEP);
    478 	if (fw->fw_rawdata == NULL) {
    479 		aprint_error_dev(sc->sc_dev,
    480 		    "not enough memory to stock firmware %s\n", sc->sc_fwname);
    481 		error = ENOMEM;
    482 		goto out;
    483 	}
    484 	error = firmware_read(fwh, 0, fw->fw_rawdata, fw->fw_rawsize);
    485 	if (error) {
    486 		aprint_error_dev(sc->sc_dev,
    487 		    "could not read firmware %s\n", sc->sc_fwname);
    488 		goto out;
    489 	}
    490 
    491  out:
    492 	/* caller will release memory, if necessary */
    493 
    494 	firmware_close(fwh);
    495 	return error;
    496 }
    497 
     498 /*
     499  * Just maintaining the status quo: frames received during a scan may
     500  * belong to a channel other than ic_curchan, so recover the channel
          * from the DS Parameter Set IE of beacons and probe responses.
          */
    501 static void
    502 iwm_fix_channel(struct ieee80211com *ic, struct mbuf *m)
    503 {
    504 	struct ieee80211_frame *wh;
    505 	uint8_t subtype;
    506 	uint8_t *frm, *efrm;
    507 
    508 	wh = mtod(m, struct ieee80211_frame *);
    509 
    510 	if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_MGT)
    511 		return;
    512 
    513 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
    514 
    515 	if (subtype != IEEE80211_FC0_SUBTYPE_BEACON &&
    516 	    subtype != IEEE80211_FC0_SUBTYPE_PROBE_RESP)
    517 		return;
    518 
    519 	frm = (uint8_t *)(wh + 1);
    520 	efrm = mtod(m, uint8_t *) + m->m_len;
    521 
    522 	frm += 12;      /* skip tstamp, bintval and capinfo fields */
    523 	while (frm < efrm) {
    524 		if (*frm == IEEE80211_ELEMID_DSPARMS) {
    525 #if IEEE80211_CHAN_MAX < 255
    526 			if (frm[2] <= IEEE80211_CHAN_MAX)
    527 #endif
    528 				ic->ic_curchan = &ic->ic_channels[frm[2]];
    529 		}
    530 		frm += frm[1] + 2;
    531 	}
    532 }
    533 
    534 /*
    535  * Firmware parser.
    536  */
    537 
    538 static int
    539 iwm_store_cscheme(struct iwm_softc *sc, uint8_t *data, size_t dlen)
    540 {
    541 	struct iwm_fw_cscheme_list *l = (void *)data;
    542 
    543 	if (dlen < sizeof(*l) ||
    544 	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
    545 		return EINVAL;
    546 
    547 	/* we don't actually store anything for now, always use s/w crypto */
    548 
    549 	return 0;
    550 }
    551 
    552 static int
    553 iwm_firmware_store_section(struct iwm_softc *sc,
    554 	enum iwm_ucode_type type, uint8_t *data, size_t dlen)
    555 {
    556 	struct iwm_fw_sects *fws;
    557 	struct iwm_fw_onesect *fwone;
    558 
    559 	if (type >= IWM_UCODE_TYPE_MAX)
    560 		return EINVAL;
    561 	if (dlen < sizeof(uint32_t))
    562 		return EINVAL;
    563 
    564 	fws = &sc->sc_fw.fw_sects[type];
    565 	if (fws->fw_count >= IWM_UCODE_SECT_MAX)
    566 		return EINVAL;
    567 
    568 	fwone = &fws->fw_sect[fws->fw_count];
    569 
     570 	/* first 32 bits are the device load offset */
    571 	memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));
    572 
    573 	/* rest is data */
    574 	fwone->fws_data = data + sizeof(uint32_t);
    575 	fwone->fws_len = dlen - sizeof(uint32_t);
    576 
    577 	/* for freeing the buffer during driver unload */
    578 	fwone->fws_alloc = data;
    579 	fwone->fws_allocsize = dlen;
    580 
    581 	fws->fw_count++;
    582 	fws->fw_totlen += fwone->fws_len;
    583 
    584 	return 0;
    585 }
    586 
    587 /* iwlwifi: iwl-drv.c */
    588 struct iwm_tlv_calib_data {
    589 	uint32_t ucode_type;
    590 	struct iwm_tlv_calib_ctrl calib;
    591 } __packed;
    592 
    593 static int
    594 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
    595 {
    596 	const struct iwm_tlv_calib_data *def_calib = data;
    597 	uint32_t ucode_type = le32toh(def_calib->ucode_type);
    598 
    599 	if (ucode_type >= IWM_UCODE_TYPE_MAX) {
    600 		DPRINTF(("%s: Wrong ucode_type %u for default "
    601 		    "calibration.\n", DEVNAME(sc), ucode_type));
    602 		return EINVAL;
    603 	}
    604 
    605 	sc->sc_default_calib[ucode_type].flow_trigger =
    606 	    def_calib->calib.flow_trigger;
    607 	sc->sc_default_calib[ucode_type].event_trigger =
    608 	    def_calib->calib.event_trigger;
    609 
    610 	return 0;
    611 }
    612 
    613 static int
    614 iwm_read_firmware(struct iwm_softc *sc)
    615 {
    616 	struct iwm_fw_info *fw = &sc->sc_fw;
    617 	struct iwm_tlv_ucode_header *uhdr;
    618 	struct iwm_ucode_tlv tlv;
    619 	enum iwm_ucode_tlv_type tlv_type;
    620 	uint8_t *data;
    621 	int error, status;
    622 	size_t len;
    623 
    624 	if (fw->fw_status == IWM_FW_STATUS_NONE) {
    625 		fw->fw_status = IWM_FW_STATUS_INPROGRESS;
    626 	} else {
    627 		while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
    628 			tsleep(&sc->sc_fw, 0, "iwmfwp", 0);
    629 	}
    630 	status = fw->fw_status;
    631 
    632 	if (status == IWM_FW_STATUS_DONE)
    633 		return 0;
    634 
    635 	/*
    636 	 * Load firmware into driver memory.
    637 	 * fw_rawdata and fw_rawsize will be set.
    638 	 */
    639 	error = iwm_firmload(sc);
    640 	if (error != 0) {
    641 		aprint_error_dev(sc->sc_dev,
    642 		    "could not read firmware %s (error %d)\n",
    643 		    sc->sc_fwname, error);
    644 		goto out;
    645 	}
    646 
    647 	/*
    648 	 * Parse firmware contents
    649 	 */
    650 
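         	/*
         	 * Image layout, as consumed below: a header with a leading
         	 * zero word, the IWM_TLV_UCODE_MAGIC marker and the ucode
         	 * version, followed by (type, length, data) TLV records,
         	 * each padded to a 4-byte boundary.
         	 */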
    651 	uhdr = (void *)fw->fw_rawdata;
    652 	if (*(uint32_t *)fw->fw_rawdata != 0
    653 	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
    654 		aprint_error_dev(sc->sc_dev, "invalid firmware %s\n",
    655 		    sc->sc_fwname);
    656 		error = EINVAL;
    657 		goto out;
    658 	}
    659 
    660 	sc->sc_fwver = le32toh(uhdr->ver);
    661 	data = uhdr->data;
    662 	len = fw->fw_rawsize - sizeof(*uhdr);
    663 
    664 	while (len >= sizeof(tlv)) {
    665 		size_t tlv_len;
    666 		void *tlv_data;
    667 
    668 		memcpy(&tlv, data, sizeof(tlv));
    669 		tlv_len = le32toh(tlv.length);
    670 		tlv_type = le32toh(tlv.type);
    671 
    672 		len -= sizeof(tlv);
    673 		data += sizeof(tlv);
    674 		tlv_data = data;
    675 
    676 		if (len < tlv_len) {
    677 			aprint_error_dev(sc->sc_dev,
    678 			    "firmware too short: %zu bytes\n", len);
    679 			error = EINVAL;
    680 			goto parse_out;
    681 		}
    682 
    683 		switch ((int)tlv_type) {
    684 		case IWM_UCODE_TLV_PROBE_MAX_LEN:
    685 			if (tlv_len < sizeof(uint32_t)) {
    686 				error = EINVAL;
    687 				goto parse_out;
    688 			}
    689 			sc->sc_capa_max_probe_len
    690 			    = le32toh(*(uint32_t *)tlv_data);
    691 			/* limit it to something sensible */
    692 			if (sc->sc_capa_max_probe_len > (1<<16)) {
    693 				DPRINTF(("%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
    694 				    "ridiculous\n", DEVNAME(sc)));
    695 				error = EINVAL;
    696 				goto parse_out;
    697 			}
    698 			break;
    699 		case IWM_UCODE_TLV_PAN:
    700 			if (tlv_len) {
    701 				error = EINVAL;
    702 				goto parse_out;
    703 			}
    704 			sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
    705 			break;
    706 		case IWM_UCODE_TLV_FLAGS:
    707 			if (tlv_len < sizeof(uint32_t)) {
    708 				error = EINVAL;
    709 				goto parse_out;
    710 			}
    711 			/*
    712 			 * Apparently there can be many flags, but Linux driver
    713 			 * parses only the first one, and so do we.
    714 			 *
    715 			 * XXX: why does this override IWM_UCODE_TLV_PAN?
    716 			 * Intentional or a bug?  Observations from
    717 			 * current firmware file:
    718 			 *  1) TLV_PAN is parsed first
    719 			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
    720 			 * ==> this resets TLV_PAN to itself... hnnnk
    721 			 */
    722 			sc->sc_capaflags = le32toh(*(uint32_t *)tlv_data);
    723 			break;
    724 		case IWM_UCODE_TLV_CSCHEME:
    725 			if ((error = iwm_store_cscheme(sc,
    726 			    tlv_data, tlv_len)) != 0)
    727 				goto parse_out;
    728 			break;
    729 		case IWM_UCODE_TLV_NUM_OF_CPU:
    730 			if (tlv_len != sizeof(uint32_t)) {
    731 				error = EINVAL;
    732 				goto parse_out;
    733 			}
    734 			if (le32toh(*(uint32_t*)tlv_data) != 1) {
    735 				DPRINTF(("%s: driver supports "
     736 				    "only TLV_NUM_OF_CPU == 1\n", DEVNAME(sc)));
    737 				error = EINVAL;
    738 				goto parse_out;
    739 			}
    740 			break;
    741 		case IWM_UCODE_TLV_SEC_RT:
    742 			if ((error = iwm_firmware_store_section(sc,
    743 			    IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len)) != 0)
    744 				goto parse_out;
    745 			break;
    746 		case IWM_UCODE_TLV_SEC_INIT:
    747 			if ((error = iwm_firmware_store_section(sc,
    748 			    IWM_UCODE_TYPE_INIT, tlv_data, tlv_len)) != 0)
    749 				goto parse_out;
    750 			break;
    751 		case IWM_UCODE_TLV_SEC_WOWLAN:
    752 			if ((error = iwm_firmware_store_section(sc,
    753 			    IWM_UCODE_TYPE_WOW, tlv_data, tlv_len)) != 0)
    754 				goto parse_out;
    755 			break;
    756 		case IWM_UCODE_TLV_DEF_CALIB:
    757 			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
    758 				error = EINVAL;
    759 				goto parse_out;
    760 			}
    761 			if ((error = iwm_set_default_calib(sc, tlv_data)) != 0)
    762 				goto parse_out;
    763 			break;
    764 		case IWM_UCODE_TLV_PHY_SKU:
    765 			if (tlv_len != sizeof(uint32_t)) {
    766 				error = EINVAL;
    767 				goto parse_out;
    768 			}
    769 			sc->sc_fw_phy_config = le32toh(*(uint32_t *)tlv_data);
    770 			break;
    771 
    772 		case IWM_UCODE_TLV_API_CHANGES_SET:
    773 		case IWM_UCODE_TLV_ENABLED_CAPABILITIES:
    774 			/* ignore, not used by current driver */
    775 			break;
    776 
    777 		default:
    778 			DPRINTF(("%s: unknown firmware section %d, abort\n",
    779 			    DEVNAME(sc), tlv_type));
    780 			error = EINVAL;
    781 			goto parse_out;
    782 		}
    783 
    784 		len -= roundup(tlv_len, 4);
    785 		data += roundup(tlv_len, 4);
    786 	}
    787 
    788 	KASSERT(error == 0);
    789 
    790  parse_out:
    791 	if (error) {
    792 		aprint_error_dev(sc->sc_dev,
    793 		    "firmware parse error, section type %d\n", tlv_type);
    794 	}
    795 
    796 	if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
    797 		aprint_error_dev(sc->sc_dev,
    798 		    "device uses unsupported power ops\n");
    799 		error = ENOTSUP;
    800 	}
    801 
    802  out:
    803 	if (error)
    804 		fw->fw_status = IWM_FW_STATUS_NONE;
    805 	else
    806 		fw->fw_status = IWM_FW_STATUS_DONE;
    807 	wakeup(&sc->sc_fw);
    808 
    809 	if (error) {
    810 		kmem_free(fw->fw_rawdata, fw->fw_rawsize);
    811 		fw->fw_rawdata = NULL;
    812 	}
    813 	return error;
    814 }
    815 
    816 /*
    817  * basic device access
    818  */
    819 
    820 static uint32_t
    821 iwm_read_prph(struct iwm_softc *sc, uint32_t addr)
    822 {
    823 	IWM_WRITE(sc,
    824 	    IWM_HBUS_TARG_PRPH_RADDR, ((addr & 0x000fffff) | (3 << 24)));
    825 	IWM_BARRIER_READ_WRITE(sc);
    826 	return IWM_READ(sc, IWM_HBUS_TARG_PRPH_RDAT);
    827 }
    828 
    829 static void
    830 iwm_write_prph(struct iwm_softc *sc, uint32_t addr, uint32_t val)
    831 {
    832 	IWM_WRITE(sc,
    833 	    IWM_HBUS_TARG_PRPH_WADDR, ((addr & 0x000fffff) | (3 << 24)));
    834 	IWM_BARRIER_WRITE(sc);
    835 	IWM_WRITE(sc, IWM_HBUS_TARG_PRPH_WDAT, val);
    836 }
    837 
    838 #ifdef IWM_DEBUG
    839 /* iwlwifi: pcie/trans.c */
    840 static int
    841 iwm_read_mem(struct iwm_softc *sc, uint32_t addr, void *buf, int dwords)
    842 {
    843 	int offs, ret = 0;
    844 	uint32_t *vals = buf;
    845 
    846 	if (iwm_nic_lock(sc)) {
    847 		IWM_WRITE(sc, IWM_HBUS_TARG_MEM_RADDR, addr);
    848 		for (offs = 0; offs < dwords; offs++)
    849 			vals[offs] = IWM_READ(sc, IWM_HBUS_TARG_MEM_RDAT);
    850 		iwm_nic_unlock(sc);
    851 	} else {
    852 		ret = EBUSY;
    853 	}
    854 	return ret;
    855 }
    856 #endif
    857 
    858 /* iwlwifi: pcie/trans.c */
    859 static int
    860 iwm_write_mem(struct iwm_softc *sc, uint32_t addr, const void *buf, int dwords)
    861 {
    862 	int offs;
    863 	const uint32_t *vals = buf;
    864 
    865 	if (iwm_nic_lock(sc)) {
    866 		IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WADDR, addr);
    867 		/* WADDR auto-increments */
    868 		for (offs = 0; offs < dwords; offs++) {
    869 			uint32_t val = vals ? vals[offs] : 0;
    870 			IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WDAT, val);
    871 		}
    872 		iwm_nic_unlock(sc);
    873 	} else {
    874 		DPRINTF(("%s: write_mem failed\n", DEVNAME(sc)));
    875 		return EBUSY;
    876 	}
    877 	return 0;
    878 }
    879 
    880 static int
    881 iwm_write_mem32(struct iwm_softc *sc, uint32_t addr, uint32_t val)
    882 {
    883 	return iwm_write_mem(sc, addr, &val, 1);
    884 }
    885 
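         /*
          * Poll a CSR register until (value & mask) == (bits & mask),
          * checking every 10us for at most `timo' microseconds.
          * Returns 1 on success, 0 on timeout.
          */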
    886 static int
    887 iwm_poll_bit(struct iwm_softc *sc, int reg,
    888 	uint32_t bits, uint32_t mask, int timo)
    889 {
    890 	for (;;) {
    891 		if ((IWM_READ(sc, reg) & mask) == (bits & mask)) {
    892 			return 1;
    893 		}
    894 		if (timo < 10) {
    895 			return 0;
    896 		}
    897 		timo -= 10;
    898 		DELAY(10);
    899 	}
    900 }
    901 
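         /*
          * Request MAC access (keeping the NIC awake) and wait for the
          * clocks to come up.  Returns 1 if access was granted, 0 otherwise;
          * every successful call must be paired with iwm_nic_unlock().
          */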
    902 static int
    903 iwm_nic_lock(struct iwm_softc *sc)
    904 {
    905 	int rv = 0;
    906 
    907 	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
    908 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
    909 
    910 	if (iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
    911 	    IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
    912 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
    913 	     | IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP, 15000)) {
     914 		rv = 1;
    915 	} else {
    916 		/* jolt */
    917 		IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_FORCE_NMI);
    918 	}
    919 
    920 	return rv;
    921 }
    922 
    923 static void
    924 iwm_nic_unlock(struct iwm_softc *sc)
    925 {
    926 	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
    927 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
    928 }
    929 
    930 static void
    931 iwm_set_bits_mask_prph(struct iwm_softc *sc,
    932 	uint32_t reg, uint32_t bits, uint32_t mask)
    933 {
    934 	uint32_t val;
    935 
    936 	/* XXX: no error path? */
    937 	if (iwm_nic_lock(sc)) {
    938 		val = iwm_read_prph(sc, reg) & mask;
    939 		val |= bits;
    940 		iwm_write_prph(sc, reg, val);
    941 		iwm_nic_unlock(sc);
    942 	}
    943 }
    944 
    945 static void
    946 iwm_set_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
    947 {
    948 	iwm_set_bits_mask_prph(sc, reg, bits, ~0);
    949 }
    950 
    951 static void
    952 iwm_clear_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
    953 {
    954 	iwm_set_bits_mask_prph(sc, reg, 0, ~bits);
    955 }
    956 
    957 /*
    958  * DMA resource routines
    959  */
    960 
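         /*
          * Allocate a single physically contiguous DMA buffer of `size'
          * bytes with the requested alignment, map it into kernel virtual
          * space, zero it, and record its bus address in dma->paddr.
          */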
    961 static int
    962 iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
    963     bus_size_t size, bus_size_t alignment)
    964 {
    965 	int nsegs, error;
    966 	void *va;
    967 
    968 	dma->tag = tag;
    969 	dma->size = size;
    970 
    971 	error = bus_dmamap_create(tag, size, 1, size, 0, BUS_DMA_NOWAIT,
    972 	    &dma->map);
    973 	if (error != 0)
    974 		goto fail;
    975 
    976 	error = bus_dmamem_alloc(tag, size, alignment, 0, &dma->seg, 1, &nsegs,
    977 	    BUS_DMA_NOWAIT);
    978 	if (error != 0)
    979 		goto fail;
    980 
    981 	error = bus_dmamem_map(tag, &dma->seg, 1, size, &va,
    982 	    BUS_DMA_NOWAIT);
    983 	if (error != 0)
    984 		goto fail;
    985 	dma->vaddr = va;
    986 
    987 	error = bus_dmamap_load(tag, dma->map, dma->vaddr, size, NULL,
    988 	    BUS_DMA_NOWAIT);
    989 	if (error != 0)
    990 		goto fail;
    991 
    992 	memset(dma->vaddr, 0, size);
    993 	bus_dmamap_sync(tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE);
    994 	dma->paddr = dma->map->dm_segs[0].ds_addr;
    995 
    996 	return 0;
    997 
    998 fail:	iwm_dma_contig_free(dma);
    999 	return error;
   1000 }
   1001 
   1002 static void
   1003 iwm_dma_contig_free(struct iwm_dma_info *dma)
   1004 {
   1005 	if (dma->map != NULL) {
   1006 		if (dma->vaddr != NULL) {
   1007 			bus_dmamap_sync(dma->tag, dma->map, 0, dma->size,
   1008 			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   1009 			bus_dmamap_unload(dma->tag, dma->map);
   1010 			bus_dmamem_unmap(dma->tag, dma->vaddr, dma->size);
   1011 			bus_dmamem_free(dma->tag, &dma->seg, 1);
   1012 			dma->vaddr = NULL;
   1013 		}
   1014 		bus_dmamap_destroy(dma->tag, dma->map);
   1015 		dma->map = NULL;
   1016 	}
   1017 }
   1018 
   1019 /* fwmem is used to load firmware onto the card */
   1020 static int
   1021 iwm_alloc_fwmem(struct iwm_softc *sc)
   1022 {
   1023 	/* Must be aligned on a 16-byte boundary. */
   1024 	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
   1025 	    sc->sc_fwdmasegsz, 16);
   1026 }
   1027 
   1028 static void
   1029 iwm_free_fwmem(struct iwm_softc *sc)
   1030 {
   1031 	iwm_dma_contig_free(&sc->fw_dma);
   1032 }
   1033 
   1034 /* tx scheduler rings.  not used? */
   1035 static int
   1036 iwm_alloc_sched(struct iwm_softc *sc)
   1037 {
   1038 	int rv;
   1039 
   1040 	/* TX scheduler rings must be aligned on a 1KB boundary. */
   1041 	rv = iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
   1042 	    __arraycount(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
   1043 	return rv;
   1044 }
   1045 
   1046 static void
   1047 iwm_free_sched(struct iwm_softc *sc)
   1048 {
   1049 	iwm_dma_contig_free(&sc->sched_dma);
   1050 }
   1051 
   1052 /* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
   1053 static int
   1054 iwm_alloc_kw(struct iwm_softc *sc)
   1055 {
   1056 	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
   1057 }
   1058 
   1059 static void
   1060 iwm_free_kw(struct iwm_softc *sc)
   1061 {
   1062 	iwm_dma_contig_free(&sc->kw_dma);
   1063 }
   1064 
   1065 /* interrupt cause table */
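         /*
          * The device deposits interrupt causes here when ICT mode is in
          * use; the table's physical address is programmed shifted right by
          * IWM_ICT_PADDR_SHIFT (see iwm_ict_reset()), hence the alignment.
          */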
   1066 static int
   1067 iwm_alloc_ict(struct iwm_softc *sc)
   1068 {
   1069 	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
   1070 	    IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
   1071 }
   1072 
   1073 static void
   1074 iwm_free_ict(struct iwm_softc *sc)
   1075 {
   1076 	iwm_dma_contig_free(&sc->ict_dma);
   1077 }
   1078 
   1079 static int
   1080 iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
   1081 {
   1082 	bus_size_t size;
   1083 	int i, error;
   1084 
   1085 	ring->cur = 0;
   1086 
   1087 	/* Allocate RX descriptors (256-byte aligned). */
   1088 	size = IWM_RX_RING_COUNT * sizeof(uint32_t);
   1089 	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
   1090 	if (error != 0) {
   1091 		aprint_error_dev(sc->sc_dev,
   1092 		    "could not allocate RX ring DMA memory\n");
   1093 		goto fail;
   1094 	}
   1095 	ring->desc = ring->desc_dma.vaddr;
   1096 
   1097 	/* Allocate RX status area (16-byte aligned). */
   1098 	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
   1099 	    sizeof(*ring->stat), 16);
   1100 	if (error != 0) {
   1101 		aprint_error_dev(sc->sc_dev,
   1102 		    "could not allocate RX status DMA memory\n");
   1103 		goto fail;
   1104 	}
   1105 	ring->stat = ring->stat_dma.vaddr;
   1106 
   1107 	/*
   1108 	 * Allocate and map RX buffers.
   1109 	 */
   1110 	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
   1111 		struct iwm_rx_data *data = &ring->data[i];
   1112 
   1113 		memset(data, 0, sizeof(*data));
   1114 		error = bus_dmamap_create(sc->sc_dmat, IWM_RBUF_SIZE, 1,
   1115 		    IWM_RBUF_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
   1116 		    &data->map);
   1117 		if (error != 0) {
   1118 			aprint_error_dev(sc->sc_dev,
   1119 			    "could not create RX buf DMA map\n");
   1120 			goto fail;
   1121 		}
   1122 
   1123 		if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
   1124 			goto fail;
   1125 		}
   1126 	}
   1127 	return 0;
   1128 
   1129 fail:	iwm_free_rx_ring(sc, ring);
   1130 	return error;
   1131 }
   1132 
   1133 static void
   1134 iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
   1135 {
   1136 	int ntries;
   1137 
   1138 	if (iwm_nic_lock(sc)) {
   1139 		IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
   1140 		for (ntries = 0; ntries < 1000; ntries++) {
   1141 			if (IWM_READ(sc, IWM_FH_MEM_RSSR_RX_STATUS_REG) &
   1142 			    IWM_FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE)
   1143 				break;
   1144 			DELAY(10);
   1145 		}
   1146 		iwm_nic_unlock(sc);
   1147 	}
   1148 	ring->cur = 0;
   1149 }
   1150 
   1151 static void
   1152 iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
   1153 {
   1154 	int i;
   1155 
   1156 	iwm_dma_contig_free(&ring->desc_dma);
   1157 	iwm_dma_contig_free(&ring->stat_dma);
   1158 
   1159 	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
   1160 		struct iwm_rx_data *data = &ring->data[i];
   1161 
   1162 		if (data->m != NULL) {
   1163 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
   1164 			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
   1165 			bus_dmamap_unload(sc->sc_dmat, data->map);
   1166 			m_freem(data->m);
   1167 		}
   1168 		if (data->map != NULL)
   1169 			bus_dmamap_destroy(sc->sc_dmat, data->map);
   1170 	}
   1171 }
   1172 
   1173 static int
   1174 iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
   1175 {
   1176 	bus_addr_t paddr;
   1177 	bus_size_t size;
   1178 	int i, error;
   1179 
   1180 	ring->qid = qid;
   1181 	ring->queued = 0;
   1182 	ring->cur = 0;
   1183 
   1184 	/* Allocate TX descriptors (256-byte aligned). */
   1185 	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
   1186 	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
   1187 	if (error != 0) {
   1188 		aprint_error_dev(sc->sc_dev,
   1189 		    "could not allocate TX ring DMA memory\n");
   1190 		goto fail;
   1191 	}
   1192 	ring->desc = ring->desc_dma.vaddr;
   1193 
   1194 	/*
   1195 	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
    1196 	 * to allocate command space for other rings.
   1197 	 */
   1198 	if (qid > IWM_MVM_CMD_QUEUE)
   1199 		return 0;
   1200 
   1201 	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
   1202 	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
   1203 	if (error != 0) {
   1204 		aprint_error_dev(sc->sc_dev,
   1205 		    "could not allocate TX cmd DMA memory\n");
   1206 		goto fail;
   1207 	}
   1208 	ring->cmd = ring->cmd_dma.vaddr;
   1209 
   1210 	paddr = ring->cmd_dma.paddr;
   1211 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
   1212 		struct iwm_tx_data *data = &ring->data[i];
   1213 
   1214 		data->cmd_paddr = paddr;
   1215 		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
   1216 		    + offsetof(struct iwm_tx_cmd, scratch);
   1217 		paddr += sizeof(struct iwm_device_cmd);
   1218 
   1219 		error = bus_dmamap_create(sc->sc_dmat, IWM_RBUF_SIZE,
   1220 		    IWM_NUM_OF_TBS, IWM_RBUF_SIZE, 0, BUS_DMA_NOWAIT,
   1221 		    &data->map);
   1222 		if (error != 0) {
   1223 			aprint_error_dev(sc->sc_dev,
   1224 			    "could not create TX buf DMA map\n");
   1225 			goto fail;
   1226 		}
   1227 	}
   1228 	KASSERT(paddr == ring->cmd_dma.paddr + size);
   1229 	return 0;
   1230 
   1231 fail:	iwm_free_tx_ring(sc, ring);
   1232 	return error;
   1233 }
   1234 
   1235 static void
   1236 iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
   1237 {
   1238 	int i;
   1239 
   1240 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
   1241 		struct iwm_tx_data *data = &ring->data[i];
   1242 
   1243 		if (data->m != NULL) {
   1244 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
   1245 			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   1246 			bus_dmamap_unload(sc->sc_dmat, data->map);
   1247 			m_freem(data->m);
   1248 			data->m = NULL;
   1249 		}
   1250 	}
   1251 	/* Clear TX descriptors. */
   1252 	memset(ring->desc, 0, ring->desc_dma.size);
   1253 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0,
   1254 	    ring->desc_dma.size, BUS_DMASYNC_PREWRITE);
   1255 	sc->qfullmsk &= ~(1 << ring->qid);
   1256 	ring->queued = 0;
   1257 	ring->cur = 0;
   1258 }
   1259 
   1260 static void
   1261 iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
   1262 {
   1263 	int i;
   1264 
   1265 	iwm_dma_contig_free(&ring->desc_dma);
   1266 	iwm_dma_contig_free(&ring->cmd_dma);
   1267 
   1268 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
   1269 		struct iwm_tx_data *data = &ring->data[i];
   1270 
   1271 		if (data->m != NULL) {
   1272 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
   1273 			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   1274 			bus_dmamap_unload(sc->sc_dmat, data->map);
   1275 			m_freem(data->m);
   1276 		}
   1277 		if (data->map != NULL)
   1278 			bus_dmamap_destroy(sc->sc_dmat, data->map);
   1279 	}
   1280 }
   1281 
   1282 /*
   1283  * High-level hardware frobbing routines
   1284  */
   1285 
   1286 static void
   1287 iwm_enable_rfkill_int(struct iwm_softc *sc)
   1288 {
   1289 	sc->sc_intmask = IWM_CSR_INT_BIT_RF_KILL;
   1290 	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
   1291 }
   1292 
   1293 static int
   1294 iwm_check_rfkill(struct iwm_softc *sc)
   1295 {
   1296 	uint32_t v;
   1297 	int s;
   1298 	int rv;
   1299 
   1300 	s = splnet();
   1301 
   1302 	/*
   1303 	 * "documentation" is not really helpful here:
   1304 	 *  27:	HW_RF_KILL_SW
   1305 	 *	Indicates state of (platform's) hardware RF-Kill switch
   1306 	 *
   1307 	 * But apparently when it's off, it's on ...
   1308 	 */
   1309 	v = IWM_READ(sc, IWM_CSR_GP_CNTRL);
   1310 	rv = (v & IWM_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) == 0;
   1311 	if (rv) {
   1312 		sc->sc_flags |= IWM_FLAG_RFKILL;
   1313 	} else {
   1314 		sc->sc_flags &= ~IWM_FLAG_RFKILL;
   1315 	}
   1316 
   1317 	splx(s);
   1318 	return rv;
   1319 }
   1320 
   1321 static void
   1322 iwm_enable_interrupts(struct iwm_softc *sc)
   1323 {
   1324 	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
   1325 	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
   1326 }
   1327 
   1328 static void
   1329 iwm_restore_interrupts(struct iwm_softc *sc)
   1330 {
   1331 	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
   1332 }
   1333 
   1334 static void
   1335 iwm_disable_interrupts(struct iwm_softc *sc)
   1336 {
   1337 	int s = splnet();
   1338 
   1339 	/* disable interrupts */
   1340 	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
   1341 
   1342 	/* acknowledge all interrupts */
   1343 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
   1344 	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
   1345 
   1346 	splx(s);
   1347 }
   1348 
   1349 static void
   1350 iwm_ict_reset(struct iwm_softc *sc)
   1351 {
   1352 	iwm_disable_interrupts(sc);
   1353 
   1354 	/* Reset ICT table. */
   1355 	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
   1356 	sc->ict_cur = 0;
   1357 
   1358 	/* Set physical address of ICT table (4KB aligned). */
   1359 	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
   1360 	    IWM_CSR_DRAM_INT_TBL_ENABLE
   1361 	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
   1362 	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);
   1363 
   1364 	/* Switch to ICT interrupt mode in driver. */
   1365 	sc->sc_flags |= IWM_FLAG_USE_ICT;
   1366 
   1367 	/* Re-enable interrupts. */
   1368 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
   1369 	iwm_enable_interrupts(sc);
   1370 }
   1371 
   1372 #define IWM_HW_READY_TIMEOUT 50
   1373 static int
   1374 iwm_set_hw_ready(struct iwm_softc *sc)
   1375 {
   1376 	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
   1377 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
   1378 
   1379 	return iwm_poll_bit(sc, IWM_CSR_HW_IF_CONFIG_REG,
   1380 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
   1381 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
   1382 	    IWM_HW_READY_TIMEOUT);
   1383 }
   1384 #undef IWM_HW_READY_TIMEOUT
   1385 
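         /*
          * Ask the NIC to become ready: if the initial readiness check
          * fails, set the PREPARE bit and keep re-polling for up to ~150ms.
          * Returns 0 on success, ETIMEDOUT otherwise.
          */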
   1386 static int
   1387 iwm_prepare_card_hw(struct iwm_softc *sc)
   1388 {
   1389 	int rv = 0;
   1390 	int t = 0;
   1391 
    1392 	if (iwm_set_hw_ready(sc))
   1393 		goto out;
   1394 
   1395 	/* If HW is not ready, prepare the conditions to check again */
   1396 	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
   1397 	    IWM_CSR_HW_IF_CONFIG_REG_PREPARE);
   1398 
   1399 	do {
   1400 		if (iwm_set_hw_ready(sc))
   1401 			goto out;
   1402 		DELAY(200);
   1403 		t += 200;
   1404 	} while (t < 150000);
   1405 
   1406 	rv = ETIMEDOUT;
   1407 
   1408  out:
   1409 	return rv;
   1410 }
   1411 
   1412 static void
   1413 iwm_apm_config(struct iwm_softc *sc)
   1414 {
   1415 	pcireg_t reg;
   1416 
   1417 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
   1418 	    sc->sc_cap_off + PCIE_LCSR);
   1419 	if (reg & PCIE_LCSR_ASPM_L1) {
    1420 		/* Um, the Linux driver prints "Disabling L0S" for this one ... */
   1421 		IWM_SETBITS(sc, IWM_CSR_GIO_REG,
   1422 		    IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
   1423 	} else {
    1424 		/* ... and "Enabling L0S" here. */
   1425 		IWM_CLRBITS(sc, IWM_CSR_GIO_REG,
   1426 		    IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
   1427 	}
   1428 }
   1429 
   1430 /*
   1431  * Start up NIC's basic functionality after it has been reset
   1432  * (e.g. after platform boot, or shutdown via iwm_pcie_apm_stop())
   1433  * NOTE:  This does not load uCode nor start the embedded processor
   1434  */
   1435 static int
   1436 iwm_apm_init(struct iwm_softc *sc)
   1437 {
   1438 	int error = 0;
   1439 
   1440 	DPRINTF(("iwm apm start\n"));
   1441 
   1442 	/* Disable L0S exit timer (platform NMI Work/Around) */
   1443 	IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
   1444 	    IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
   1445 
   1446 	/*
   1447 	 * Disable L0s without affecting L1;
   1448 	 *  don't wait for ICH L0s (ICH bug W/A)
   1449 	 */
   1450 	IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
   1451 	    IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
   1452 
   1453 	/* Set FH wait threshold to maximum (HW error during stress W/A) */
   1454 	IWM_SETBITS(sc, IWM_CSR_DBG_HPET_MEM_REG, IWM_CSR_DBG_HPET_MEM_REG_VAL);
   1455 
   1456 	/*
   1457 	 * Enable HAP INTA (interrupt from management bus) to
   1458 	 * wake device's PCI Express link L1a -> L0s
   1459 	 */
   1460 	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
   1461 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
   1462 
   1463 	iwm_apm_config(sc);
   1464 
   1465 #if 0 /* not for 7k */
   1466 	/* Configure analog phase-lock-loop before activating to D0A */
   1467 	if (trans->cfg->base_params->pll_cfg_val)
   1468 		IWM_SETBITS(trans, IWM_CSR_ANA_PLL_CFG,
   1469 		    trans->cfg->base_params->pll_cfg_val);
   1470 #endif
   1471 
   1472 	/*
   1473 	 * Set "initialization complete" bit to move adapter from
   1474 	 * D0U* --> D0A* (powered-up active) state.
   1475 	 */
   1476 	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL, IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
   1477 
   1478 	/*
   1479 	 * Wait for clock stabilization; once stabilized, access to
   1480 	 * device-internal resources is supported, e.g. iwm_write_prph()
   1481 	 * and accesses to uCode SRAM.
   1482 	 */
   1483 	if (!iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
   1484 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
   1485 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
   1486 		aprint_error_dev(sc->sc_dev,
   1487 		    "timeout waiting for clock stabilization\n");
         		error = ETIMEDOUT;
    1488 		goto out;
   1489 	}
   1490 
   1491 	/*
    1492 	 * This is a bit of an abuse: the workaround is needed for the
    1493 	 * 7260 / 3160 only, so the Linux driver keys it off
    1494 	 * host_interrupt_operation_mode although it is unrelated to it.
   1495 	 *
   1496 	 * Enable the oscillator to count wake up time for L1 exit. This
   1497 	 * consumes slightly more power (100uA) - but allows to be sure
   1498 	 * that we wake up from L1 on time.
   1499 	 *
   1500 	 * This looks weird: read twice the same register, discard the
   1501 	 * value, set a bit, and yet again, read that same register
   1502 	 * just to discard the value. But that's the way the hardware
   1503 	 * seems to like it.
   1504 	 */
   1505 	iwm_read_prph(sc, IWM_OSC_CLK);
   1506 	iwm_read_prph(sc, IWM_OSC_CLK);
   1507 	iwm_set_bits_prph(sc, IWM_OSC_CLK, IWM_OSC_CLK_FORCE_CONTROL);
   1508 	iwm_read_prph(sc, IWM_OSC_CLK);
   1509 	iwm_read_prph(sc, IWM_OSC_CLK);
   1510 
   1511 	/*
   1512 	 * Enable DMA clock and wait for it to stabilize.
   1513 	 *
   1514 	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
   1515 	 * do not disable clocks.  This preserves any hardware bits already
   1516 	 * set by default in "CLK_CTRL_REG" after reset.
   1517 	 */
   1518 	iwm_write_prph(sc, IWM_APMG_CLK_EN_REG, IWM_APMG_CLK_VAL_DMA_CLK_RQT);
   1519 	//kpause("iwmapm", 0, mstohz(20), NULL);
   1520 	DELAY(20);
   1521 
   1522 	/* Disable L1-Active */
   1523 	iwm_set_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
   1524 	    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
   1525 
   1526 	/* Clear the interrupt in APMG if the NIC is in RFKILL */
   1527 	iwm_write_prph(sc, IWM_APMG_RTC_INT_STT_REG,
   1528 	    IWM_APMG_RTC_INT_STT_RFKILL);
   1529 
   1530  out:
   1531 	if (error)
   1532 		aprint_error_dev(sc->sc_dev, "apm init error %d\n", error);
   1533 	return error;
   1534 }
   1535 
   1536 /* iwlwifi/pcie/trans.c */
   1537 static void
   1538 iwm_apm_stop(struct iwm_softc *sc)
   1539 {
   1540 	/* stop device's busmaster DMA activity */
   1541 	IWM_SETBITS(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_STOP_MASTER);
   1542 
   1543 	if (!iwm_poll_bit(sc, IWM_CSR_RESET,
   1544 	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED,
   1545 	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100))
   1546 		aprint_error_dev(sc->sc_dev, "timeout waiting for master\n");
   1547 	DPRINTF(("iwm apm stop\n"));
   1548 }
   1549 
   1550 /* iwlwifi pcie/trans.c */
   1551 static int
   1552 iwm_start_hw(struct iwm_softc *sc)
   1553 {
   1554 	int error;
   1555 
   1556 	if ((error = iwm_prepare_card_hw(sc)) != 0)
   1557 		return error;
   1558 
   1559 	/* Reset the entire device */
   1560 	IWM_WRITE(sc, IWM_CSR_RESET,
   1561 	    IWM_CSR_RESET_REG_FLAG_SW_RESET |
   1562 	    IWM_CSR_RESET_REG_FLAG_NEVO_RESET);
   1563 	DELAY(10);
   1564 
   1565 	if ((error = iwm_apm_init(sc)) != 0)
   1566 		return error;
   1567 
   1568 	iwm_enable_rfkill_int(sc);
   1569 	iwm_check_rfkill(sc);
   1570 
   1571 	return 0;
   1572 }
   1573 
   1574 /* iwlwifi pcie/trans.c */
   1575 
   1576 static void
   1577 iwm_stop_device(struct iwm_softc *sc)
   1578 {
   1579 	int chnl, ntries;
   1580 	int qid;
   1581 
   1582 	/* tell the device to stop sending interrupts */
   1583 	iwm_disable_interrupts(sc);
   1584 
   1585 	/* device going down, Stop using ICT table */
   1586 	sc->sc_flags &= ~IWM_FLAG_USE_ICT;
   1587 
   1588 	/* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */
   1589 
   1590 	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
   1591 
   1592 	/* Stop all DMA channels. */
   1593 	if (iwm_nic_lock(sc)) {
   1594 		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
   1595 			IWM_WRITE(sc,
   1596 			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
   1597 			for (ntries = 0; ntries < 200; ntries++) {
   1598 				uint32_t r;
   1599 
   1600 				r = IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG);
   1601 				if (r & IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(
   1602 				    chnl))
   1603 					break;
   1604 				DELAY(20);
   1605 			}
   1606 		}
   1607 		iwm_nic_unlock(sc);
   1608 	}
   1609 
   1610 	/* Stop RX ring. */
   1611 	iwm_reset_rx_ring(sc, &sc->rxq);
   1612 
   1613 	/* Reset all TX rings. */
   1614 	for (qid = 0; qid < __arraycount(sc->txq); qid++)
   1615 		iwm_reset_tx_ring(sc, &sc->txq[qid]);
   1616 
   1617 	/*
   1618 	 * Power-down device's busmaster DMA clocks
   1619 	 */
   1620 	iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG, IWM_APMG_CLK_VAL_DMA_CLK_RQT);
   1621 	DELAY(5);
   1622 
   1623 	/* Make sure (redundant) we've released our request to stay awake */
   1624 	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
   1625 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
   1626 
   1627 	/* Stop the device, and put it in low power state */
   1628 	iwm_apm_stop(sc);
   1629 
   1630 	/* Upon stop, the APM issues an interrupt if HW RF kill is set.
   1631 	 * Clear the interrupt again here
   1632 	 */
   1633 	iwm_disable_interrupts(sc);
   1634 	/* stop and reset the on-board processor */
   1635 	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_NEVO_RESET);
   1636 
   1637 	/*
   1638 	 * Even if we stop the HW, we still want the RF kill
   1639 	 * interrupt
   1640 	 */
   1641 	iwm_enable_rfkill_int(sc);
   1642 	iwm_check_rfkill(sc);
   1643 }
   1644 
   1645 /* iwlwifi pcie/trans.c (always main power) */
   1646 static void
   1647 iwm_set_pwr(struct iwm_softc *sc)
   1648 {
   1649 	iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
   1650 	    IWM_APMG_PS_CTRL_VAL_PWR_SRC_VMAIN, ~IWM_APMG_PS_CTRL_MSK_PWR_SRC);
   1651 }
   1652 
   1653 /* iwlwifi: mvm/ops.c */
   1654 static void
   1655 iwm_mvm_nic_config(struct iwm_softc *sc)
   1656 {
   1657 	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
   1658 	uint32_t reg_val = 0;
   1659 
   1660 	radio_cfg_type = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
   1661 	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
   1662 	radio_cfg_step = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
   1663 	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
   1664 	radio_cfg_dash = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
   1665 	    IWM_FW_PHY_CFG_RADIO_DASH_POS;
   1666 
   1667 	/* SKU control */
   1668 	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
   1669 	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
   1670 	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
   1671 	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
   1672 
   1673 	/* radio configuration */
   1674 	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
   1675 	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
   1676 	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
   1677 
   1678 	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);
   1679 
   1680 	DPRINTF(("Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
   1681 	    radio_cfg_step, radio_cfg_dash));
   1682 
   1683 	/*
   1684 	 * W/A : NIC is stuck in a reset state after Early PCIe power off
   1685 	 * (PCIe power is lost before PERST# is asserted), causing ME FW
   1686 	 * to lose ownership and not be able to obtain it back.
   1687 	 */
   1688 	iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
   1689 	    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
   1690 	    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
   1691 }
   1692 
   1693 static int
   1694 iwm_nic_rx_init(struct iwm_softc *sc)
   1695 {
   1696 	if (!iwm_nic_lock(sc))
   1697 		return EBUSY;
   1698 
   1699 	/*
   1700 	 * Initialize RX ring.  This is from the iwn driver.
   1701 	 */
   1702 	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
   1703 
   1704 	/* stop DMA */
   1705 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
   1706 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
   1707 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
   1708 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
   1709 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
   1710 
   1711 	/* Set physical address of RX ring (256-byte aligned). */
   1712 	IWM_WRITE(sc,
   1713 	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);
   1714 
   1715 	/* Set physical address of RX status (16-byte aligned). */
   1716 	IWM_WRITE(sc,
   1717 	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);
   1718 
   1719 	/* Enable RX. */
   1720 	/*
   1721 	 * Note: Linux driver also sets this:
   1722 	 *  (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
   1723 	 *
   1724 	 * It causes weird behavior.  YMMV.
   1725 	 */
   1726 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
   1727 	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL		|
   1728 	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY		|  /* HW bug */
   1729 	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL	|
   1730 	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K		|
   1731 	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);
   1732 
   1733 	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
   1734 	IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);
   1735 
   1736 	/*
   1737 	 * Thus sayeth el jefe (iwlwifi) via a comment:
   1738 	 *
   1739 	 * This value should initially be 0 (before preparing any
   1740 	 * RBs), and should be 8 after preparing the first 8 RBs (for example).
   1741 	 */
   1742 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);
   1743 
   1744 	iwm_nic_unlock(sc);
   1745 
   1746 	return 0;
   1747 }
   1748 
   1749 static int
   1750 iwm_nic_tx_init(struct iwm_softc *sc)
   1751 {
   1752 	int qid;
   1753 
   1754 	if (!iwm_nic_lock(sc))
   1755 		return EBUSY;
   1756 
   1757 	/* Deactivate TX scheduler. */
   1758 	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
   1759 
   1760 	/* Set physical address of "keep warm" page (16-byte aligned). */
   1761 	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);
   1762 
   1763 	/* Initialize TX rings. */
   1764 	for (qid = 0; qid < __arraycount(sc->txq); qid++) {
   1765 		struct iwm_tx_ring *txq = &sc->txq[qid];
   1766 
   1767 		/* Set physical address of TX ring (256-byte aligned). */
   1768 		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
   1769 		    txq->desc_dma.paddr >> 8);
   1770 		DPRINTF(("loading ring %d descriptors (%p) at %lx\n",
   1771 		    qid, txq->desc, txq->desc_dma.paddr >> 8));
   1772 	}
   1773 	iwm_nic_unlock(sc);
   1774 
   1775 	return 0;
   1776 }
   1777 
   1778 static int
   1779 iwm_nic_init(struct iwm_softc *sc)
   1780 {
   1781 	int error;
   1782 
   1783 	iwm_apm_init(sc);
   1784 	iwm_set_pwr(sc);
   1785 
   1786 	iwm_mvm_nic_config(sc);
   1787 
   1788 	if ((error = iwm_nic_rx_init(sc)) != 0)
   1789 		return error;
   1790 
   1791 	/*
   1792 	 * Ditto for TX, from iwn
   1793 	 */
   1794 	if ((error = iwm_nic_tx_init(sc)) != 0)
   1795 		return error;
   1796 
   1797 	DPRINTF(("shadow registers enabled\n"));
   1798 	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
   1799 
   1800 	return 0;
   1801 }
   1802 
   1803 #if 0
   1804 enum iwm_mvm_tx_fifo {
   1805 	IWM_MVM_TX_FIFO_BK = 0,
   1806 	IWM_MVM_TX_FIFO_BE,
   1807 	IWM_MVM_TX_FIFO_VI,
   1808 	IWM_MVM_TX_FIFO_VO,
   1809 	IWM_MVM_TX_FIFO_MCAST = 5,
   1810 };
   1811 
   1812 static const uint8_t iwm_mvm_ac_to_tx_fifo[] = {
   1813 	IWM_MVM_TX_FIFO_VO,
   1814 	IWM_MVM_TX_FIFO_VI,
   1815 	IWM_MVM_TX_FIFO_BE,
   1816 	IWM_MVM_TX_FIFO_BK,
   1817 };
   1818 #endif
   1819 
   1820 static void
   1821 iwm_enable_txq(struct iwm_softc *sc, int qid, int fifo)
   1822 {
   1823 	if (!iwm_nic_lock(sc)) {
   1824 		DPRINTF(("%s: cannot enable txq %d\n", DEVNAME(sc), qid));
   1825 		return; /* XXX return EBUSY */
   1826 	}
   1827 
   1828 	/* deactivate before configuration */
   1829 	iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
   1830 	    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
   1831 	    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
   1832 
   1833 	if (qid != IWM_MVM_CMD_QUEUE) {
   1834 		iwm_set_bits_prph(sc, IWM_SCD_QUEUECHAIN_SEL, (1 << qid));
   1835 	}
   1836 
   1837 	iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));
   1838 
   1839 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
   1840 	iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
   1841 
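        	/*
        	 * Two 32-bit words of this queue's scheduler context in SRAM
        	 * are written here: the first is cleared, the second carries
        	 * the window size and frame limit set just below.
        	 */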
   1842 	iwm_write_mem32(sc, sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
   1843 	/* Set scheduler window size and frame limit. */
   1844 	iwm_write_mem32(sc,
   1845 	    sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
   1846 	    sizeof(uint32_t),
   1847 	    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
   1848 	    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
   1849 	    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
   1850 	    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
   1851 
   1852 	iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
   1853 	    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
   1854 	    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
   1855 	    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
   1856 	    IWM_SCD_QUEUE_STTS_REG_MSK);
   1857 
   1858 	iwm_nic_unlock(sc);
   1859 
   1860 	DPRINTF(("enabled txq %d FIFO %d\n", qid, fifo));
   1861 }
   1862 
   1863 static int
   1864 iwm_post_alive(struct iwm_softc *sc)
   1865 {
   1866 	int nwords;
   1867 	int error, chnl;
   1868 
   1869 	if (!iwm_nic_lock(sc))
   1870 		return EBUSY;
   1871 
   1872 	if (sc->sched_base != iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR)) {
   1873 		DPRINTF(("%s: sched addr mismatch\n", DEVNAME(sc)));
   1874 		error = EINVAL;
   1875 		goto out;
   1876 	}
   1877 
   1878 	iwm_ict_reset(sc);
   1879 
   1880 	/* Clear TX scheduler state in SRAM. */
   1881 	nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
   1882 	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND)
   1883 	    / sizeof(uint32_t);
   1884 	error = iwm_write_mem(sc,
   1885 	    sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
   1886 	    NULL, nwords);
   1887 	if (error)
   1888 		goto out;
   1889 
   1890 	/* Set physical address of TX scheduler rings (1KB aligned). */
   1891 	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);
   1892 
   1893 	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);
   1894 
   1895 	/* enable command channel */
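        	/*
        	 * FIFO 7 is assumed to be the firmware's command FIFO
        	 * (IWM_MVM_TX_FIFO_CMD in iwlwifi terms).
        	 */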
   1896 	iwm_enable_txq(sc, IWM_MVM_CMD_QUEUE, 7);
   1897 
   1898 	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
   1899 
   1900 	/* Enable DMA channels. */
   1901 	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
   1902 		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
   1903 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
   1904 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
   1905 	}
   1906 
   1907 	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
   1908 	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
   1909 
   1910 	/* Enable L1-Active */
   1911 	iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
   1912 	    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
   1913 
   1914  out:
   1915  	iwm_nic_unlock(sc);
   1916 	return error;
   1917 }
   1918 
   1919 /*
   1920  * PHY db
   1921  * iwlwifi/iwl-phy-db.c
   1922  */
   1923 
   1924 /*
   1925  * BEGIN iwl-phy-db.c
   1926  */
   1927 
   1928 enum iwm_phy_db_section_type {
   1929 	IWM_PHY_DB_CFG = 1,
   1930 	IWM_PHY_DB_CALIB_NCH,
   1931 	IWM_PHY_DB_UNUSED,
   1932 	IWM_PHY_DB_CALIB_CHG_PAPD,
   1933 	IWM_PHY_DB_CALIB_CHG_TXP,
   1934 	IWM_PHY_DB_MAX
   1935 };
   1936 
   1937 #define IWM_PHY_DB_CMD 0x6c /* TEMP API - The actual is 0x8c */
   1938 
   1939 /*
   1940  * phy db - configure operational ucode
   1941  */
   1942 struct iwm_phy_db_cmd {
   1943 	uint16_t type;
   1944 	uint16_t length;
   1945 	uint8_t data[];
   1946 } __packed;
   1947 
   1948 /* for parsing of tx power channel group data that comes from the firmware*/
   1949 struct iwm_phy_db_chg_txp {
   1950 	uint32_t space;
   1951 	uint16_t max_channel_idx;
   1952 } __packed;
   1953 
   1954 /*
   1955  * phy db - Receive phy db chunk after calibrations
   1956  */
   1957 struct iwm_calib_res_notif_phy_db {
   1958 	uint16_t type;
   1959 	uint16_t length;
   1960 	uint8_t data[];
   1961 } __packed;
   1962 
   1963 /*
   1964  * get phy db section: returns a pointer to a phy db section specified by
   1965  * type and channel group id.
   1966  */
   1967 static struct iwm_phy_db_entry *
   1968 iwm_phy_db_get_section(struct iwm_softc *sc,
   1969 	enum iwm_phy_db_section_type type, uint16_t chg_id)
   1970 {
   1971 	struct iwm_phy_db *phy_db = &sc->sc_phy_db;
   1972 
   1973 	if (type >= IWM_PHY_DB_MAX)
   1974 		return NULL;
   1975 
   1976 	switch (type) {
   1977 	case IWM_PHY_DB_CFG:
   1978 		return &phy_db->cfg;
   1979 	case IWM_PHY_DB_CALIB_NCH:
   1980 		return &phy_db->calib_nch;
   1981 	case IWM_PHY_DB_CALIB_CHG_PAPD:
   1982 		if (chg_id >= IWM_NUM_PAPD_CH_GROUPS)
   1983 			return NULL;
   1984 		return &phy_db->calib_ch_group_papd[chg_id];
   1985 	case IWM_PHY_DB_CALIB_CHG_TXP:
   1986 		if (chg_id >= IWM_NUM_TXP_CH_GROUPS)
   1987 			return NULL;
   1988 		return &phy_db->calib_ch_group_txp[chg_id];
   1989 	default:
   1990 		return NULL;
   1991 	}
   1992 	return NULL;
   1993 }
   1994 
   1995 static int
   1996 iwm_phy_db_set_section(struct iwm_softc *sc,
   1997     struct iwm_calib_res_notif_phy_db *phy_db_notif, uint16_t size)
   1998 {
   1999 	enum iwm_phy_db_section_type type = le16toh(phy_db_notif->type);
   2000 	struct iwm_phy_db_entry *entry;
   2001 	uint16_t chg_id = 0;
   2002 
   2003 	if (type == IWM_PHY_DB_CALIB_CHG_PAPD ||
   2004 	    type == IWM_PHY_DB_CALIB_CHG_TXP)
   2005 		chg_id = le16toh(*(uint16_t *)phy_db_notif->data);
   2006 
   2007 	entry = iwm_phy_db_get_section(sc, type, chg_id);
   2008 	if (!entry)
   2009 		return EINVAL;
   2010 
   2011 	if (entry->data)
   2012 		kmem_intr_free(entry->data, entry->size);
   2013 	entry->data = kmem_intr_alloc(size, KM_NOSLEEP);
   2014 	if (!entry->data) {
   2015 		entry->size = 0;
   2016 		return ENOMEM;
   2017 	}
   2018 	memcpy(entry->data, phy_db_notif->data, size);
   2019 	entry->size = size;
   2020 
   2021 	DPRINTFN(10, ("%s(%d): [PHYDB]SET: Type %d, Size: %d, data: %p\n",
   2022 	    __func__, __LINE__, type, size, entry->data));
   2023 
   2024 	return 0;
   2025 }
   2026 
   2027 static int
   2028 iwm_is_valid_channel(uint16_t ch_id)
   2029 {
   2030 	if (ch_id <= 14 ||
   2031 	    (36 <= ch_id && ch_id <= 64 && ch_id % 4 == 0) ||
   2032 	    (100 <= ch_id && ch_id <= 140 && ch_id % 4 == 0) ||
   2033 	    (145 <= ch_id && ch_id <= 165 && ch_id % 4 == 1))
   2034 		return 1;
   2035 	return 0;
   2036 }
   2037 
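        /*
         * Worked examples for the channel-number to index mapping below
         * (derived from the arithmetic in iwm_ch_id_to_ch_index()):
         *   2.4 GHz: ch 1 -> 0, ch 14 -> 13
         *   5 GHz:   ch 36 -> (36 + 20) / 4 = 14,   ch 64 -> 21,
         *            ch 100 -> (100 - 12) / 4 = 22, ch 140 -> 32,
         *            ch 149 -> (149 - 13) / 4 = 34, ch 165 -> 38
         */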
   2038 static uint8_t
   2039 iwm_ch_id_to_ch_index(uint16_t ch_id)
   2040 {
   2041 	if (!iwm_is_valid_channel(ch_id))
   2042 		return 0xff;
   2043 
   2044 	if (ch_id <= 14)
   2045 		return ch_id - 1;
   2046 	if (ch_id <= 64)
   2047 		return (ch_id + 20) / 4;
   2048 	if (ch_id <= 140)
   2049 		return (ch_id - 12) / 4;
   2050 	return (ch_id - 13) / 4;
   2051 }
   2052 
   2053 
   2054 static uint16_t
   2055 iwm_channel_id_to_papd(uint16_t ch_id)
   2056 {
   2057 	if (!iwm_is_valid_channel(ch_id))
   2058 		return 0xff;
   2059 
   2060 	if (1 <= ch_id && ch_id <= 14)
   2061 		return 0;
   2062 	if (36 <= ch_id && ch_id <= 64)
   2063 		return 1;
   2064 	if (100 <= ch_id && ch_id <= 140)
   2065 		return 2;
   2066 	return 3;
   2067 }
   2068 
   2069 static uint16_t
   2070 iwm_channel_id_to_txp(struct iwm_softc *sc, uint16_t ch_id)
   2071 {
   2072 	struct iwm_phy_db *phy_db = &sc->sc_phy_db;
   2073 	struct iwm_phy_db_chg_txp *txp_chg;
   2074 	int i;
   2075 	uint8_t ch_index = iwm_ch_id_to_ch_index(ch_id);
   2076 
   2077 	if (ch_index == 0xff)
   2078 		return 0xff;
   2079 
   2080 	for (i = 0; i < IWM_NUM_TXP_CH_GROUPS; i++) {
   2081 		txp_chg = (void *)phy_db->calib_ch_group_txp[i].data;
   2082 		if (!txp_chg)
   2083 			return 0xff;
   2084 		/*
   2085 		 * Look for the first channel group whose max channel is
   2086 		 * higher than the wanted channel.
   2087 		 */
   2088 		if (le16toh(txp_chg->max_channel_idx) >= ch_index)
   2089 			return i;
   2090 	}
   2091 	return 0xff;
   2092 }
   2093 
   2094 static int
   2095 iwm_phy_db_get_section_data(struct iwm_softc *sc,
   2096 	uint32_t type, uint8_t **data, uint16_t *size, uint16_t ch_id)
   2097 {
   2098 	struct iwm_phy_db_entry *entry;
   2099 	uint16_t ch_group_id = 0;
   2100 
   2101 	/* find wanted channel group */
   2102 	if (type == IWM_PHY_DB_CALIB_CHG_PAPD)
   2103 		ch_group_id = iwm_channel_id_to_papd(ch_id);
   2104 	else if (type == IWM_PHY_DB_CALIB_CHG_TXP)
   2105 		ch_group_id = iwm_channel_id_to_txp(sc, ch_id);
   2106 
   2107 	entry = iwm_phy_db_get_section(sc, type, ch_group_id);
   2108 	if (!entry)
   2109 		return EINVAL;
   2110 
   2111 	*data = entry->data;
   2112 	*size = entry->size;
   2113 
   2114 	DPRINTFN(10, ("%s(%d): [PHYDB] GET: Type %d , Size: %d\n",
   2115 		       __func__, __LINE__, type, *size));
   2116 
   2117 	return 0;
   2118 }
   2119 
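        /*
         * Send one PHY DB section to the firmware.  The host command is
         * built from two fragments: data[0] is the fixed iwm_phy_db_cmd
         * header, copied into the command buffer, and data[1] is the
         * variable-length section payload, flagged IWM_HCMD_DFL_NOCOPY
         * so that it is not copied.
         */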
   2120 static int
   2121 iwm_send_phy_db_cmd(struct iwm_softc *sc, uint16_t type,
   2122 	uint16_t length, void *data)
   2123 {
   2124 	struct iwm_phy_db_cmd phy_db_cmd;
   2125 	struct iwm_host_cmd cmd = {
   2126 		.id = IWM_PHY_DB_CMD,
   2127 		.flags = IWM_CMD_SYNC,
   2128 	};
   2129 
   2130 	DPRINTFN(10, ("Sending PHY-DB hcmd of type %d, of length %d\n",
   2131 	    type, length));
   2132 
   2133 	/* Set phy db cmd variables */
   2134 	phy_db_cmd.type = htole16(type);
   2135 	phy_db_cmd.length = htole16(length);
   2136 
   2137 	/* Set hcmd variables */
   2138 	cmd.data[0] = &phy_db_cmd;
   2139 	cmd.len[0] = sizeof(struct iwm_phy_db_cmd);
   2140 	cmd.data[1] = data;
   2141 	cmd.len[1] = length;
   2142 	cmd.dataflags[1] = IWM_HCMD_DFL_NOCOPY;
   2143 
   2144 	return iwm_send_cmd(sc, &cmd);
   2145 }
   2146 
   2147 static int
   2148 iwm_phy_db_send_all_channel_groups(struct iwm_softc *sc,
   2149 	enum iwm_phy_db_section_type type, uint8_t max_ch_groups)
   2150 {
   2151 	uint16_t i;
   2152 	int err;
   2153 	struct iwm_phy_db_entry *entry;
   2154 
   2155 	/* Send all the channel-specific groups to operational fw */
   2156 	for (i = 0; i < max_ch_groups; i++) {
   2157 		entry = iwm_phy_db_get_section(sc, type, i);
   2158 		if (!entry)
   2159 			return EINVAL;
   2160 
   2161 		if (!entry->size)
   2162 			continue;
   2163 
   2164 		/* Send the requested PHY DB section */
   2165 		err = iwm_send_phy_db_cmd(sc, type, entry->size, entry->data);
   2166 		if (err) {
   2167 			DPRINTF(("%s: Can't SEND phy_db section %d (%d), "
   2168 			    "err %d\n", DEVNAME(sc), type, i, err));
   2169 			return err;
   2170 		}
   2171 
   2172 		DPRINTFN(10, ("Sent PHY_DB HCMD, type = %d num = %d\n", type, i));
   2173 	}
   2174 
   2175 	return 0;
   2176 }
   2177 
   2178 static int
   2179 iwm_send_phy_db_data(struct iwm_softc *sc)
   2180 {
   2181 	uint8_t *data = NULL;
   2182 	uint16_t size = 0;
   2183 	int err;
   2184 
   2185 	DPRINTF(("Sending phy db data and configuration to runtime image\n"));
   2186 
   2187 	/* Send PHY DB CFG section */
   2188 	err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CFG, &data, &size, 0);
   2189 	if (err) {
   2190 		DPRINTF(("%s: Cannot get Phy DB cfg section, %d\n",
   2191 		    DEVNAME(sc), err));
   2192 		return err;
   2193 	}
   2194 
   2195 	err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CFG, size, data);
   2196 	if (err) {
   2197 		DPRINTF(("%s: Cannot send HCMD of Phy DB cfg section, %d\n",
   2198 		    DEVNAME(sc), err));
   2199 		return err;
   2200 	}
   2201 
   2202 	err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CALIB_NCH,
   2203 	    &data, &size, 0);
   2204 	if (err) {
   2205 		DPRINTF(("%s: Cannot get Phy DB non specific channel section, "
   2206 		    "%d\n", DEVNAME(sc), err));
   2207 		return err;
   2208 	}
   2209 
   2210 	err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CALIB_NCH, size, data);
   2211 	if (err) {
   2212 		DPRINTF(("%s: Cannot send HCMD of Phy DB non specific channel "
   2213 		    "sect, %d\n", DEVNAME(sc), err));
   2214 		return err;
   2215 	}
   2216 
   2217 	/* Send all the TXP channel specific data */
   2218 	err = iwm_phy_db_send_all_channel_groups(sc,
   2219 	    IWM_PHY_DB_CALIB_CHG_PAPD, IWM_NUM_PAPD_CH_GROUPS);
   2220 	if (err) {
   2221 		DPRINTF(("%s: Cannot send channel specific PAPD groups, %d\n",
   2222 		    DEVNAME(sc), err));
   2223 		return err;
   2224 	}
   2225 
   2226 	/* Send all the TXP channel specific data */
   2227 	err = iwm_phy_db_send_all_channel_groups(sc,
   2228 	    IWM_PHY_DB_CALIB_CHG_TXP, IWM_NUM_TXP_CH_GROUPS);
   2229 	if (err) {
   2230 		DPRINTF(("%s: Cannot send channel specific TX power groups, "
   2231 		    "%d\n", DEVNAME(sc), err));
   2232 		return err;
   2233 	}
   2234 
   2235 	DPRINTF(("Finished sending phy db non channel data\n"));
   2236 	return 0;
   2237 }
   2238 
   2239 /*
   2240  * END iwl-phy-db.c
   2241  */
   2242 
   2243 /*
   2244  * BEGIN iwlwifi/mvm/time-event.c
   2245  */
   2246 
   2247 /*
   2248  * For the high priority TE use a time event type that has similar priority to
   2249  * the FW's action scan priority.
   2250  */
   2251 #define IWM_MVM_ROC_TE_TYPE_NORMAL IWM_TE_P2P_DEVICE_DISCOVERABLE
   2252 #define IWM_MVM_ROC_TE_TYPE_MGMT_TX IWM_TE_P2P_CLIENT_ASSOC
   2253 
   2254 /* used to convert from time event API v2 to v1 */
   2255 #define IWM_TE_V2_DEP_POLICY_MSK (IWM_TE_V2_DEP_OTHER | IWM_TE_V2_DEP_TSF |\
   2256 			     IWM_TE_V2_EVENT_SOCIOPATHIC)
   2257 static inline uint16_t
   2258 iwm_te_v2_get_notify(uint16_t policy)
   2259 {
   2260 	return le16toh(policy) & IWM_TE_V2_NOTIF_MSK;
   2261 }
   2262 
   2263 static inline uint16_t
   2264 iwm_te_v2_get_dep_policy(uint16_t policy)
   2265 {
   2266 	return (le16toh(policy) & IWM_TE_V2_DEP_POLICY_MSK) >>
   2267 		IWM_TE_V2_PLACEMENT_POS;
   2268 }
   2269 
   2270 static inline uint16_t
   2271 iwm_te_v2_get_absence(uint16_t policy)
   2272 {
   2273 	return (le16toh(policy) & IWM_TE_V2_ABSENCE) >> IWM_TE_V2_ABSENCE_POS;
   2274 }
   2275 
   2276 static void
   2277 iwm_mvm_te_v2_to_v1(const struct iwm_time_event_cmd_v2 *cmd_v2,
   2278 	struct iwm_time_event_cmd_v1 *cmd_v1)
   2279 {
   2280 	cmd_v1->id_and_color = cmd_v2->id_and_color;
   2281 	cmd_v1->action = cmd_v2->action;
   2282 	cmd_v1->id = cmd_v2->id;
   2283 	cmd_v1->apply_time = cmd_v2->apply_time;
   2284 	cmd_v1->max_delay = cmd_v2->max_delay;
   2285 	cmd_v1->depends_on = cmd_v2->depends_on;
   2286 	cmd_v1->interval = cmd_v2->interval;
   2287 	cmd_v1->duration = cmd_v2->duration;
   2288 	if (cmd_v2->repeat == IWM_TE_V2_REPEAT_ENDLESS)
   2289 		cmd_v1->repeat = htole32(IWM_TE_V1_REPEAT_ENDLESS);
   2290 	else
   2291 		cmd_v1->repeat = htole32(cmd_v2->repeat);
   2292 	cmd_v1->max_frags = htole32(cmd_v2->max_frags);
   2293 	cmd_v1->interval_reciprocal = 0; /* unused */
   2294 
   2295 	cmd_v1->dep_policy = htole32(iwm_te_v2_get_dep_policy(cmd_v2->policy));
   2296 	cmd_v1->is_present = htole32(!iwm_te_v2_get_absence(cmd_v2->policy));
   2297 	cmd_v1->notify = htole32(iwm_te_v2_get_notify(cmd_v2->policy));
   2298 }
   2299 
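        /*
         * Send a time event command to the firmware.  If the firmware
         * advertises the v2 time event API
         * (IWM_UCODE_TLV_FLAGS_TIME_EVENT_API_V2) the v2 command is sent
         * as-is; otherwise it is first converted to the v1 layout with
         * iwm_mvm_te_v2_to_v1().
         */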
   2300 static int
   2301 iwm_mvm_send_time_event_cmd(struct iwm_softc *sc,
   2302 	const struct iwm_time_event_cmd_v2 *cmd)
   2303 {
   2304 	struct iwm_time_event_cmd_v1 cmd_v1;
   2305 
   2306 	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_TIME_EVENT_API_V2)
   2307 		return iwm_mvm_send_cmd_pdu(sc, IWM_TIME_EVENT_CMD,
   2308 		    IWM_CMD_SYNC, sizeof(*cmd), cmd);
   2309 
   2310 	iwm_mvm_te_v2_to_v1(cmd, &cmd_v1);
   2311 	return iwm_mvm_send_cmd_pdu(sc, IWM_TIME_EVENT_CMD, IWM_CMD_SYNC,
   2312 	    sizeof(cmd_v1), &cmd_v1);
   2313 }
   2314 
   2315 static int
   2316 iwm_mvm_time_event_send_add(struct iwm_softc *sc, struct iwm_node *in,
   2317 	void *te_data, struct iwm_time_event_cmd_v2 *te_cmd)
   2318 {
   2319 	int ret;
   2320 
   2321 	DPRINTF(("Add new TE, duration %d TU\n", le32toh(te_cmd->duration)));
   2322 
   2323 	ret = iwm_mvm_send_time_event_cmd(sc, te_cmd);
   2324 	if (ret) {
   2325 		DPRINTF(("%s: Couldn't send IWM_TIME_EVENT_CMD: %d\n",
   2326 		    DEVNAME(sc), ret));
   2327 	}
   2328 
   2329 	return ret;
   2330 }
   2331 
   2332 static void
   2333 iwm_mvm_protect_session(struct iwm_softc *sc, struct iwm_node *in,
   2334 	uint32_t duration, uint32_t min_duration, uint32_t max_delay)
   2335 {
   2336 	struct iwm_time_event_cmd_v2 time_cmd;
   2337 
   2338 	memset(&time_cmd, 0, sizeof(time_cmd));
   2339 
   2340 	time_cmd.action = htole32(IWM_FW_CTXT_ACTION_ADD);
   2341 	time_cmd.id_and_color =
   2342 	    htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
   2343 	time_cmd.id = htole32(IWM_TE_BSS_STA_AGGRESSIVE_ASSOC);
   2344 
   2345 	time_cmd.apply_time = htole32(iwm_read_prph(sc,
   2346 	    IWM_DEVICE_SYSTEM_TIME_REG));
   2347 
   2348 	time_cmd.max_frags = IWM_TE_V2_FRAG_NONE;
   2349 	time_cmd.max_delay = htole32(max_delay);
   2350 	/* TODO: why do we need to set interval = bi if it is not periodic? */
   2351 	time_cmd.interval = htole32(1);
   2352 	time_cmd.duration = htole32(duration);
   2353 	time_cmd.repeat = 1;
   2354 	time_cmd.policy
   2355 	    = htole32(IWM_TE_V2_NOTIF_HOST_EVENT_START |
   2356 	        IWM_TE_V2_NOTIF_HOST_EVENT_END);
   2357 
   2358 	iwm_mvm_time_event_send_add(sc, in, /*te_data*/NULL, &time_cmd);
   2359 }
   2360 
   2361 /*
   2362  * END iwlwifi/mvm/time-event.c
   2363  */
   2364 
   2365 /*
   2366  * NVM read access and content parsing.  We do not support
   2367  * external NVM or writing NVM.
   2368  * iwlwifi/mvm/nvm.c
   2369  */
   2370 
   2371 /* list of NVM sections we are allowed/need to read */
   2372 static const int nvm_to_read[] = {
   2373 	IWM_NVM_SECTION_TYPE_HW,
   2374 	IWM_NVM_SECTION_TYPE_SW,
   2375 	IWM_NVM_SECTION_TYPE_CALIBRATION,
   2376 	IWM_NVM_SECTION_TYPE_PRODUCTION,
   2377 };
   2378 
   2379 /* Default NVM size to read */
   2380 #define IWM_NVM_DEFAULT_CHUNK_SIZE (2*1024)
   2381 #define IWM_MAX_NVM_SECTION_SIZE 7000
   2382 
   2383 #define IWM_NVM_WRITE_OPCODE 1
   2384 #define IWM_NVM_READ_OPCODE 0
   2385 
   2386 static int
   2387 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
   2388 	uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
   2389 {
   2390 	offset = 0;
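        	/*
        	 * XXX: the caller-supplied offset is overridden below, so
        	 * every chunk is requested at offset 0 within the section.
        	 */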
   2391 	struct iwm_nvm_access_cmd nvm_access_cmd = {
   2392 		.offset = htole16(offset),
   2393 		.length = htole16(length),
   2394 		.type = htole16(section),
   2395 		.op_code = IWM_NVM_READ_OPCODE,
   2396 	};
   2397 	struct iwm_nvm_access_resp *nvm_resp;
   2398 	struct iwm_rx_packet *pkt;
   2399 	struct iwm_host_cmd cmd = {
   2400 		.id = IWM_NVM_ACCESS_CMD,
   2401 		.flags = IWM_CMD_SYNC | IWM_CMD_WANT_SKB |
   2402 		    IWM_CMD_SEND_IN_RFKILL,
   2403 		.data = { &nvm_access_cmd, },
   2404 	};
   2405 	int ret, bytes_read, offset_read;
   2406 	uint8_t *resp_data;
   2407 
   2408 	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
   2409 
   2410 	ret = iwm_send_cmd(sc, &cmd);
   2411 	if (ret)
   2412 		return ret;
   2413 
   2414 	pkt = cmd.resp_pkt;
   2415 	if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
   2416 		DPRINTF(("%s: Bad return from IWM_NVM_ACCESS_CMD (0x%08X)\n",
   2417 		    DEVNAME(sc), pkt->hdr.flags));
   2418 		ret = EIO;
   2419 		goto exit;
   2420 	}
   2421 
   2422 	/* Extract NVM response */
   2423 	nvm_resp = (void *)pkt->data;
   2424 
   2425 	ret = le16toh(nvm_resp->status);
   2426 	bytes_read = le16toh(nvm_resp->length);
   2427 	offset_read = le16toh(nvm_resp->offset);
   2428 	resp_data = nvm_resp->data;
   2429 	if (ret) {
   2430 		DPRINTF(("%s: NVM access command failed with status %d\n",
   2431 		    DEVNAME(sc), ret));
   2432 		ret = EINVAL;
   2433 		goto exit;
   2434 	}
   2435 
   2436 	if (offset_read != offset) {
   2437 		DPRINTF(("%s: NVM ACCESS response with invalid offset %d\n",
   2438 		    DEVNAME(sc), offset_read));
   2439 		ret = EINVAL;
   2440 		goto exit;
   2441 	}
   2442 
   2443 	memcpy(data + offset, resp_data, bytes_read);
   2444 	*len = bytes_read;
   2445 
   2446  exit:
   2447 	iwm_free_resp(sc, &cmd);
   2448 	return ret;
   2449 }
   2450 
   2451 /*
   2452  * Reads an NVM section completely.
   2453  * NICs prior to the 7000 family don't have a real NVM, but just read
   2454  * section 0, which is the EEPROM.  Because EEPROM reads are not limited
   2455  * by the uCode, we need to manually check in this case that we don't
   2456  * overflow and try to read more than the EEPROM size.
   2457  * For 7000 family NICs, we supply the maximal size we can read, and
   2458  * the uCode fills the response with as much data as it can
   2459  * without overflowing, so no check is needed.
   2460  */
   2461 static int
   2462 iwm_nvm_read_section(struct iwm_softc *sc,
   2463 	uint16_t section, uint8_t *data, uint16_t *len)
   2464 {
   2465 	uint16_t length, seglen;
   2466 	int error;
   2467 
   2468 	/* Set nvm section read length */
   2469 	length = seglen = IWM_NVM_DEFAULT_CHUNK_SIZE;
   2470 	*len = 0;
   2471 
   2472 	/* Read the NVM until exhausted (reading less than requested) */
   2473 	while (seglen == length) {
   2474 		error = iwm_nvm_read_chunk(sc,
   2475 		    section, *len, length, data, &seglen);
   2476 		if (error) {
   2477 			aprint_error_dev(sc->sc_dev,
   2478 			    "Cannot read NVM from section %d offset %d, "
   2479 			    "length %d\n", section, *len, length);
   2480 			return error;
   2481 		}
   2482 		*len += seglen;
   2483 	}
   2484 
   2485 	DPRINTFN(4, ("NVM section %d read completed\n", section));
   2486 	return 0;
   2487 }
   2488 
   2489 /*
   2490  * BEGIN IWM_NVM_PARSE
   2491  */
   2492 
   2493 /* iwlwifi/iwl-nvm-parse.c */
   2494 
   2495 /* NVM offsets (in words) definitions */
   2496 enum wkp_nvm_offsets {
   2497 	/* NVM HW-Section offset (in words) definitions */
   2498 	IWM_HW_ADDR = 0x15,
   2499 
   2500 /* NVM SW-Section offset (in words) definitions */
   2501 	IWM_NVM_SW_SECTION = 0x1C0,
   2502 	IWM_NVM_VERSION = 0,
   2503 	IWM_RADIO_CFG = 1,
   2504 	IWM_SKU = 2,
   2505 	IWM_N_HW_ADDRS = 3,
   2506 	IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,
   2507 
   2508 /* NVM calibration section offset (in words) definitions */
   2509 	IWM_NVM_CALIB_SECTION = 0x2B8,
   2510 	IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
   2511 };
   2512 
   2513 /* SKU Capabilities (actual values from NVM definition) */
   2514 enum nvm_sku_bits {
   2515 	IWM_NVM_SKU_CAP_BAND_24GHZ	= (1 << 0),
   2516 	IWM_NVM_SKU_CAP_BAND_52GHZ	= (1 << 1),
   2517 	IWM_NVM_SKU_CAP_11N_ENABLE	= (1 << 2),
   2518 	IWM_NVM_SKU_CAP_11AC_ENABLE	= (1 << 3),
   2519 };
   2520 
   2521 /* radio config bits (actual values from NVM definition) */
   2522 #define IWM_NVM_RF_CFG_DASH_MSK(x)   (x & 0x3)         /* bits 0-1   */
   2523 #define IWM_NVM_RF_CFG_STEP_MSK(x)   ((x >> 2)  & 0x3) /* bits 2-3   */
   2524 #define IWM_NVM_RF_CFG_TYPE_MSK(x)   ((x >> 4)  & 0x3) /* bits 4-5   */
   2525 #define IWM_NVM_RF_CFG_PNUM_MSK(x)   ((x >> 6)  & 0x3) /* bits 6-7   */
   2526 #define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8)  & 0xF) /* bits 8-11  */
   2527 #define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
   2528 
   2529 #define DEFAULT_MAX_TX_POWER 16
   2530 
   2531 /**
   2532  * enum iwm_nvm_channel_flags - channel flags in NVM
   2533  * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
   2534  * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
   2535  * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
   2536  * @IWM_NVM_CHANNEL_RADAR: radar detection required
   2537  * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
   2538  * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
   2539  * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
   2540  * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
   2541  * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
   2542  */
   2543 enum iwm_nvm_channel_flags {
   2544 	IWM_NVM_CHANNEL_VALID = (1 << 0),
   2545 	IWM_NVM_CHANNEL_IBSS = (1 << 1),
   2546 	IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
   2547 	IWM_NVM_CHANNEL_RADAR = (1 << 4),
   2548 	IWM_NVM_CHANNEL_DFS = (1 << 7),
   2549 	IWM_NVM_CHANNEL_WIDE = (1 << 8),
   2550 	IWM_NVM_CHANNEL_40MHZ = (1 << 9),
   2551 	IWM_NVM_CHANNEL_80MHZ = (1 << 10),
   2552 	IWM_NVM_CHANNEL_160MHZ = (1 << 11),
   2553 };
   2554 
   2555 static void
   2556 iwm_init_channel_map(struct iwm_softc *sc, const uint16_t * const nvm_ch_flags)
   2557 {
   2558 	struct ieee80211com *ic = &sc->sc_ic;
   2559 	struct iwm_nvm_data *data = &sc->sc_nvm;
   2560 	int ch_idx;
   2561 	struct ieee80211_channel *channel;
   2562 	uint16_t ch_flags;
   2563 	int is_5ghz;
   2564 	int flags, hw_value;
   2565 
   2566 	for (ch_idx = 0; ch_idx < __arraycount(iwm_nvm_channels); ch_idx++) {
   2567 		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
   2568 
   2569 		if (ch_idx >= IWM_NUM_2GHZ_CHANNELS &&
   2570 		    !data->sku_cap_band_52GHz_enable)
   2571 			ch_flags &= ~IWM_NVM_CHANNEL_VALID;
   2572 
   2573 		if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
   2574 			DPRINTF(("Ch. %d Flags %x [%sGHz] - No traffic\n",
   2575 			    iwm_nvm_channels[ch_idx],
   2576 			    ch_flags,
   2577 			    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
   2578 			    "5.2" : "2.4"));
   2579 			continue;
   2580 		}
   2581 
   2582 		hw_value = iwm_nvm_channels[ch_idx];
   2583 		channel = &ic->ic_channels[hw_value];
   2584 
   2585 		is_5ghz = ch_idx >= IWM_NUM_2GHZ_CHANNELS;
   2586 		if (!is_5ghz) {
   2587 			flags = IEEE80211_CHAN_2GHZ;
   2588 			channel->ic_flags
   2589 			    = IEEE80211_CHAN_CCK
   2590 			    | IEEE80211_CHAN_OFDM
   2591 			    | IEEE80211_CHAN_DYN
   2592 			    | IEEE80211_CHAN_2GHZ;
   2593 		} else {
   2594 			flags = IEEE80211_CHAN_5GHZ;
   2595 			channel->ic_flags =
   2596 			    IEEE80211_CHAN_A;
   2597 		}
   2598 		channel->ic_freq = ieee80211_ieee2mhz(hw_value, flags);
   2599 
   2600 		if (!(ch_flags & IWM_NVM_CHANNEL_ACTIVE))
   2601 			channel->ic_flags |= IEEE80211_CHAN_PASSIVE;
   2602 	}
   2603 }
   2604 
   2605 static int
   2606 iwm_parse_nvm_data(struct iwm_softc *sc,
   2607 	const uint16_t *nvm_hw, const uint16_t *nvm_sw,
   2608 	const uint16_t *nvm_calib, uint8_t tx_chains, uint8_t rx_chains)
   2609 {
   2610 	struct iwm_nvm_data *data = &sc->sc_nvm;
   2611 	uint8_t hw_addr[ETHER_ADDR_LEN];
   2612 	uint16_t radio_cfg, sku;
   2613 
   2614 	data->nvm_version = le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
   2615 
   2616 	radio_cfg = le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
   2617 	data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
   2618 	data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
   2619 	data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
   2620 	data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
   2621 	data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK(radio_cfg);
   2622 	data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK(radio_cfg);
   2623 
   2624 	sku = le16_to_cpup(nvm_sw + IWM_SKU);
   2625 	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
   2626 #ifndef IWM_NO_5GHZ
   2627 	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
   2628 #else
   2629 	data->sku_cap_band_52GHz_enable = 0;
   2630 #endif
   2631 	data->sku_cap_11n_enable = 0;
   2632 
   2633 	if (!data->valid_tx_ant || !data->valid_rx_ant) {
   2634 		DPRINTF(("%s: invalid antennas (0x%x, 0x%x)\n", DEVNAME(sc),
   2635 		    data->valid_tx_ant, data->valid_rx_ant));
   2636 		return EINVAL;
   2637 	}
   2638 
   2639 	data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
   2640 
   2641 	data->xtal_calib[0] = *(nvm_calib + IWM_XTAL_CALIB);
   2642 	data->xtal_calib[1] = *(nvm_calib + IWM_XTAL_CALIB + 1);
   2643 
   2644 	/* The byte order is little endian 16 bit, meaning 214365 */
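        	/*
        	 * For example, NVM bytes 11 00 33 22 55 44 (as stored) yield
        	 * the MAC address 00:11:22:33:44:55 after the pairwise swap
        	 * below.
        	 */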
   2645 	memcpy(hw_addr, nvm_hw + IWM_HW_ADDR, ETHER_ADDR_LEN);
   2646 	data->hw_addr[0] = hw_addr[1];
   2647 	data->hw_addr[1] = hw_addr[0];
   2648 	data->hw_addr[2] = hw_addr[3];
   2649 	data->hw_addr[3] = hw_addr[2];
   2650 	data->hw_addr[4] = hw_addr[5];
   2651 	data->hw_addr[5] = hw_addr[4];
   2652 
   2653 	iwm_init_channel_map(sc, &nvm_sw[IWM_NVM_CHANNELS]);
   2654 	data->calib_version = 255;   /* TODO:
   2655 					this value will prevent some checks from
   2656 					failing; we need to check whether this
   2657 					field is still needed, and if so,
   2658 					where it lives in the NVM */
   2659 
   2660 	return 0;
   2661 }
   2662 
   2663 /*
   2664  * END NVM PARSE
   2665  */
   2666 
   2667 struct iwm_nvm_section {
   2668 	uint16_t length;
   2669 	const uint8_t *data;
   2670 };
   2671 
   2672 #define IWM_FW_VALID_TX_ANT(sc) \
   2673     ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_TX_CHAIN) \
   2674     >> IWM_FW_PHY_CFG_TX_CHAIN_POS)
   2675 #define IWM_FW_VALID_RX_ANT(sc) \
   2676     ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RX_CHAIN) \
   2677     >> IWM_FW_PHY_CFG_RX_CHAIN_POS)
   2678 
   2679 static int
   2680 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
   2681 {
   2682 	const uint16_t *hw, *sw, *calib;
   2683 
   2684 	/* Checking for required sections */
   2685 	if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
   2686 	    !sections[IWM_NVM_SECTION_TYPE_HW].data) {
   2687 		DPRINTF(("%s: Can't parse empty NVM sections\n", DEVNAME(sc)));
   2688 		return ENOENT;
   2689 	}
   2690 
   2691 	hw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_HW].data;
   2692 	sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
   2693 	calib = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
   2694 	return iwm_parse_nvm_data(sc, hw, sw, calib,
   2695 	    IWM_FW_VALID_TX_ANT(sc), IWM_FW_VALID_RX_ANT(sc));
   2696 }
   2697 
   2698 static int
   2699 iwm_nvm_init(struct iwm_softc *sc)
   2700 {
   2701 	struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
   2702 	int i, section, error;
   2703 	uint16_t len;
   2704 	uint8_t *nvm_buffer, *temp;
   2705 
   2706 	/* Read From FW NVM */
   2707 	DPRINTF(("Read NVM\n"));
   2708 
   2709 	/* TODO: find correct NVM max size for a section */
   2710 	nvm_buffer = kmem_alloc(IWM_OTP_LOW_IMAGE_SIZE, KM_SLEEP);
   2711 	for (i = 0; i < __arraycount(nvm_to_read); i++) {
   2712 		section = nvm_to_read[i];
   2713 		KASSERT(section < __arraycount(nvm_sections));
   2714 
   2715 		error = iwm_nvm_read_section(sc, section, nvm_buffer, &len);
   2716 		if (error)
   2717 			break;
   2718 
   2719 		temp = kmem_alloc(len, KM_SLEEP);
   2720 		memcpy(temp, nvm_buffer, len);
   2721 		nvm_sections[section].data = temp;
   2722 		nvm_sections[section].length = len;
   2723 	}
   2724 	kmem_free(nvm_buffer, IWM_OTP_LOW_IMAGE_SIZE);
   2725 	if (error)
   2726 		return error;
   2727 
   2728 	return iwm_parse_nvm_sections(sc, nvm_sections);
   2729 }
   2730 
   2731 /*
   2732  * Firmware loading gunk.  This is kind of a weird hybrid between the
   2733  * iwn driver and the Linux iwlwifi driver.
   2734  */
   2735 
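        /*
         * Load one firmware chunk into device memory: copy the chunk into
         * the pre-allocated DMA-safe buffer, program the FH service
         * channel to transfer it to dst_addr on the device, then sleep
         * until the "chunk done" interrupt fires or a roughly one-second
         * timeout expires.
         */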
   2736 static int
   2737 iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
   2738 	const uint8_t *section, uint32_t byte_cnt)
   2739 {
   2740 	struct iwm_dma_info *dma = &sc->fw_dma;
   2741 	int error;
   2742 
   2743 	/* Copy firmware section into pre-allocated DMA-safe memory. */
   2744 	memcpy(dma->vaddr, section, byte_cnt);
   2745 	bus_dmamap_sync(sc->sc_dmat,
   2746 	    dma->map, 0, byte_cnt, BUS_DMASYNC_PREWRITE);
   2747 
   2748 	if (!iwm_nic_lock(sc))
   2749 		return EBUSY;
   2750 
   2751 	sc->sc_fw_chunk_done = 0;
   2752 
   2753 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
   2754 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
   2755 	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
   2756 	    dst_addr);
   2757 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
   2758 	    dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
   2759 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
   2760 	    (iwm_get_dma_hi_addr(dma->paddr)
   2761 	      << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
   2762 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
   2763 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
   2764 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
   2765 	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
   2766 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
   2767 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
   2768 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
   2769 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
   2770 
   2771 	iwm_nic_unlock(sc);
   2772 
   2773 	/* wait 1s for this segment to load */
   2774 	while (!sc->sc_fw_chunk_done)
   2775 		if ((error = tsleep(&sc->sc_fw, 0, "iwmfw", hz)) != 0)
   2776 			break;
   2777 
   2778 	return error;
   2779 }
   2780 
   2781 static int
   2782 iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
   2783 {
   2784 	struct iwm_fw_sects *fws;
   2785 	int error, i, w;
   2786 	void *data;
   2787 	uint32_t dlen;
   2788 	uint32_t offset;
   2789 
   2790 	sc->sc_uc.uc_intr = 0;
   2791 
   2792 	fws = &sc->sc_fw.fw_sects[ucode_type];
   2793 	for (i = 0; i < fws->fw_count; i++) {
   2794 		data = fws->fw_sect[i].fws_data;
   2795 		dlen = fws->fw_sect[i].fws_len;
   2796 		offset = fws->fw_sect[i].fws_devoff;
   2797 		DPRINTF(("LOAD FIRMWARE type %d offset %u len %d\n",
   2798 		    ucode_type, offset, dlen));
   2799 		error = iwm_firmware_load_chunk(sc, offset, data, dlen);
   2800 		if (error) {
   2801 			DPRINTF(("iwm_firmware_load_chunk() chunk %u of %u "
   2802 			    "returned error %02d\n", i, fws->fw_count, error));
   2803 			return error;
   2804 		}
   2805 	}
   2806 
   2807 	/* wait for the firmware to load */
   2808 	IWM_WRITE(sc, IWM_CSR_RESET, 0);
   2809 
   2810 	for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++) {
   2811 		error = tsleep(&sc->sc_uc, 0, "iwmuc", hz/10);
   2812 	}
   2813 
   2814 	return error;
   2815 }
   2816 
   2817 /* iwlwifi: pcie/trans.c */
   2818 static int
   2819 iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
   2820 {
   2821 	int error;
   2822 
   2823 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
   2824 
   2825 	if ((error = iwm_nic_init(sc)) != 0) {
   2826 		aprint_error_dev(sc->sc_dev, "Unable to init nic\n");
   2827 		return error;
   2828 	}
   2829 
   2830 	/* make sure rfkill handshake bits are cleared */
   2831 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
   2832 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
   2833 	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
   2834 
   2835 	/* clear (again), then enable host interrupts */
   2836 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
   2837 	iwm_enable_interrupts(sc);
   2838 
   2839 	/* really make sure rfkill handshake bits are cleared */
   2840 	/* maybe we should write a few times more?  just to make sure */
   2841 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
   2842 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
   2843 
   2844 	/* Load the given image to the HW */
   2845 	error = iwm_load_firmware(sc, ucode_type);
   2846 	if (error) {
   2847 		aprint_error_dev(sc->sc_dev, "failed to load firmware: %d\n",
   2848 		    error);
   2849 	}
   2850 	return error;
   2851 }
   2852 
   2853 static int
   2854 iwm_fw_alive(struct iwm_softc *sc, uint32_t sched_base)
   2855 {
   2856 	return iwm_post_alive(sc);
   2857 }
   2858 
   2859 static int
   2860 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
   2861 {
   2862 	struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
   2863 		.valid = htole32(valid_tx_ant),
   2864 	};
   2865 
   2866 	return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
   2867 	    IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
   2868 }
   2869 
   2870 /* iwlwifi: mvm/fw.c */
   2871 static int
   2872 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
   2873 {
   2874 	struct iwm_phy_cfg_cmd phy_cfg_cmd;
   2875 	enum iwm_ucode_type ucode_type = sc->sc_uc_current;
   2876 
   2877 	/* Set parameters */
   2878 	phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
   2879 	phy_cfg_cmd.calib_control.event_trigger =
   2880 	    sc->sc_default_calib[ucode_type].event_trigger;
   2881 	phy_cfg_cmd.calib_control.flow_trigger =
   2882 	    sc->sc_default_calib[ucode_type].flow_trigger;
   2883 
   2884 	DPRINTFN(10, ("Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg));
   2885 	return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
   2886 	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
   2887 }
   2888 
   2889 static int
   2890 iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
   2891 	enum iwm_ucode_type ucode_type)
   2892 {
   2893 	enum iwm_ucode_type old_type = sc->sc_uc_current;
   2894 	int error;
   2895 
   2896 	if ((error = iwm_read_firmware(sc)) != 0)
   2897 		return error;
   2898 
   2899 	sc->sc_uc_current = ucode_type;
   2900 	error = iwm_start_fw(sc, ucode_type);
   2901 	if (error) {
   2902 		sc->sc_uc_current = old_type;
   2903 		return error;
   2904 	}
   2905 
   2906 	return iwm_fw_alive(sc, sc->sched_base);
   2907 }
   2908 
   2909 /*
   2910  * mvm misc bits
   2911  */
   2912 
   2913 /*
   2914  * follows iwlwifi/fw.c
   2915  */
   2916 static int
   2917 iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
   2918 {
   2919 	int error;
   2920 
   2921 	/* do not operate with rfkill switch turned on */
   2922 	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
   2923 		aprint_error_dev(sc->sc_dev,
   2924 		    "radio is disabled by hardware switch\n");
   2925 		return EPERM;
   2926 	}
   2927 
   2928 	sc->sc_init_complete = 0;
   2929 	if ((error = iwm_mvm_load_ucode_wait_alive(sc,
   2930 	    IWM_UCODE_TYPE_INIT)) != 0)
   2931 		return error;
   2932 
   2933 	if (justnvm) {
   2934 		if ((error = iwm_nvm_init(sc)) != 0) {
   2935 			aprint_error_dev(sc->sc_dev, "failed to read nvm\n");
   2936 			return error;
   2937 		}
   2938 		memcpy(&sc->sc_ic.ic_myaddr,
   2939 		    &sc->sc_nvm.hw_addr, ETHER_ADDR_LEN);
   2940 
   2941 		sc->sc_scan_cmd_len = sizeof(struct iwm_scan_cmd)
   2942 		    + sc->sc_capa_max_probe_len
   2943 		    + IWM_MAX_NUM_SCAN_CHANNELS
   2944 		    * sizeof(struct iwm_scan_channel);
   2945 		sc->sc_scan_cmd = kmem_alloc(sc->sc_scan_cmd_len, KM_SLEEP);
   2946 
   2947 		return 0;
   2948 	}
   2949 
   2950 	/* Send TX valid antennas before triggering calibrations */
   2951 	if ((error = iwm_send_tx_ant_cfg(sc, IWM_FW_VALID_TX_ANT(sc))) != 0)
   2952 		return error;
   2953 
   2954 	/*
   2955 	 * Send the phy configuration command to the init uCode
   2956 	 * to start the 16.0 uCode init image internal calibrations.
   2957 	 */
   2958 	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
   2959 		DPRINTF(("%s: failed to run internal calibration: %d\n",
   2960 		    DEVNAME(sc), error));
   2961 		return error;
   2962 	}
   2963 
   2964 	/*
   2965 	 * Nothing to do but wait for the init complete notification
   2966 	 * from the firmware
   2967 	 */
   2968 	while (!sc->sc_init_complete)
   2969 		if ((error = tsleep(&sc->sc_init_complete,
   2970 		    0, "iwminit", 2*hz)) != 0)
   2971 			break;
   2972 
   2973 	return error;
   2974 }
   2975 
   2976 /*
   2977  * receive side
   2978  */
   2979 
   2980 /* (re)stock rx ring, called at init-time and at runtime */
   2981 static int
   2982 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
   2983 {
   2984 	struct iwm_rx_ring *ring = &sc->rxq;
   2985 	struct iwm_rx_data *data = &ring->data[idx];
   2986 	struct mbuf *m;
   2987 	int error;
   2988 	int fatal = 0;
   2989 
   2990 	m = m_gethdr(M_DONTWAIT, MT_DATA);
   2991 	if (m == NULL)
   2992 		return ENOBUFS;
   2993 
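        	/*
        	 * A request that fits a standard mbuf cluster uses MCLGET;
        	 * larger RX buffers get an external buffer of IWM_RBUF_SIZE
        	 * via MEXTMALLOC.
        	 */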
   2994 	if (size <= MCLBYTES) {
   2995 		MCLGET(m, M_DONTWAIT);
   2996 	} else {
   2997 		MEXTMALLOC(m, IWM_RBUF_SIZE, M_DONTWAIT);
   2998 	}
   2999 	if ((m->m_flags & M_EXT) == 0) {
   3000 		m_freem(m);
   3001 		return ENOBUFS;
   3002 	}
   3003 
   3004 	if (data->m != NULL) {
   3005 		bus_dmamap_unload(sc->sc_dmat, data->map);
   3006 		fatal = 1;
   3007 	}
   3008 
   3009 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   3010 	if ((error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
   3011 	    BUS_DMA_READ|BUS_DMA_NOWAIT)) != 0) {
   3012 		/* XXX */
   3013 		if (fatal)
   3014 			panic("iwm: could not load RX mbuf");
   3015 		m_freem(m);
   3016 		return error;
   3017 	}
   3018 	data->m = m;
   3019 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, size, BUS_DMASYNC_PREREAD);
   3020 
   3021 	/* Update RX descriptor. */
   3022 	ring->desc[idx] = htole32(data->map->dm_segs[0].ds_addr >> 8);
   3023 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
   3024 	    idx * sizeof(uint32_t), sizeof(uint32_t), BUS_DMASYNC_PREWRITE);
   3025 
   3026 	return 0;
   3027 }
   3028 
   3029 /* iwlwifi: mvm/rx.c */
   3030 #define IWM_RSSI_OFFSET 50
   3031 static int
   3032 iwm_mvm_calc_rssi(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
   3033 {
   3034 	int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
   3035 	uint32_t agc_a, agc_b;
   3036 	uint32_t val;
   3037 
   3038 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_AGC_IDX]);
   3039 	agc_a = (val & IWM_OFDM_AGC_A_MSK) >> IWM_OFDM_AGC_A_POS;
   3040 	agc_b = (val & IWM_OFDM_AGC_B_MSK) >> IWM_OFDM_AGC_B_POS;
   3041 
   3042 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_RSSI_AB_IDX]);
   3043 	rssi_a = (val & IWM_OFDM_RSSI_INBAND_A_MSK) >> IWM_OFDM_RSSI_A_POS;
   3044 	rssi_b = (val & IWM_OFDM_RSSI_INBAND_B_MSK) >> IWM_OFDM_RSSI_B_POS;
   3045 
   3046 	/*
   3047 	 * dBm = rssi dB - agc dB - constant.
   3048 	 * Higher AGC (higher radio gain) means lower signal.
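        	 * For example (illustrative numbers only): rssi_a = 40 and
        	 * agc_a = 30 give 40 - 50 - 30 = -40 dBm.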
   3049 	 */
   3050 	rssi_a_dbm = rssi_a - IWM_RSSI_OFFSET - agc_a;
   3051 	rssi_b_dbm = rssi_b - IWM_RSSI_OFFSET - agc_b;
   3052 	max_rssi_dbm = MAX(rssi_a_dbm, rssi_b_dbm);
   3053 
   3054 	DPRINTF(("Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
   3055 	    rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b));
   3056 
   3057 	return max_rssi_dbm;
   3058 }
   3059 
   3060 /* iwlwifi: mvm/rx.c */
   3061 /*
   3062  * iwm_mvm_get_signal_strength - use new rx PHY INFO API
   3063  * values are reported by the fw as positive values - need to negate
   3064  * to obtain their dBm.  Account for missing antennas by replacing 0
   3065  * values by -256 dBm: practically 0 power and a non-feasible 8 bit value.
   3066  */
   3067 static int
   3068 iwm_mvm_get_signal_strength(struct iwm_softc *sc,
   3069     struct iwm_rx_phy_info *phy_info)
   3070 {
   3071 	int energy_a, energy_b, energy_c, max_energy;
   3072 	uint32_t val;
   3073 
   3074 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
   3075 	energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
   3076 	    IWM_RX_INFO_ENERGY_ANT_A_POS;
   3077 	energy_a = energy_a ? -energy_a : -256;
   3078 	energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
   3079 	    IWM_RX_INFO_ENERGY_ANT_B_POS;
   3080 	energy_b = energy_b ? -energy_b : -256;
   3081 	energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
   3082 	    IWM_RX_INFO_ENERGY_ANT_C_POS;
   3083 	energy_c = energy_c ? -energy_c : -256;
   3084 	max_energy = MAX(energy_a, energy_b);
   3085 	max_energy = MAX(max_energy, energy_c);
   3086 
   3087 	DPRINTFN(12, ("energy In A %d B %d C %d, and max %d\n",
   3088 	    energy_a, energy_b, energy_c, max_energy));
   3089 
   3090 	return max_energy;
   3091 }
   3092 
   3093 static void
   3094 iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc,
   3095 	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
   3096 {
   3097 	struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
   3098 
   3099 	DPRINTFN(20, ("received PHY stats\n"));
   3100 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
   3101 	    sizeof(*phy_info), BUS_DMASYNC_POSTREAD);
   3102 
   3103 	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
   3104 }
   3105 
   3106 /*
   3107  * Retrieve the average noise (in dBm) among receivers.
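         * For example (illustrative numbers only): beacon-silence RSSI
         * readings of 40 and 42 on two antennas average to 41, giving
         * 41 - 107 = -66 dBm.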
   3108  */
   3109 static int
   3110 iwm_get_noise(const struct iwm_mvm_statistics_rx_non_phy *stats)
   3111 {
   3112 	int i, total, nbant, noise;
   3113 
   3114 	total = nbant = noise = 0;
   3115 	for (i = 0; i < 3; i++) {
   3116 		noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
   3117 		if (noise) {
   3118 			total += noise;
   3119 			nbant++;
   3120 		}
   3121 	}
   3122 
   3123 	/* There should be at least one antenna but check anyway. */
   3124 	return (nbant == 0) ? -127 : (total / nbant) - 107;
   3125 }
   3126 
   3127 /*
   3128  * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
   3129  *
   3130  * Handles the actual data of the Rx packet from the fw
   3131  */
   3132 static void
   3133 iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc,
   3134 	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
   3135 {
   3136 	struct ieee80211com *ic = &sc->sc_ic;
   3137 	struct ieee80211_frame *wh;
   3138 	struct ieee80211_node *ni;
   3139 	struct ieee80211_channel *c = NULL;
   3140 	struct mbuf *m;
   3141 	struct iwm_rx_phy_info *phy_info;
   3142 	struct iwm_rx_mpdu_res_start *rx_res;
   3143 	int device_timestamp;
   3144 	uint32_t len;
   3145 	uint32_t rx_pkt_status;
   3146 	int rssi;
   3147 
   3148 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
   3149 	    BUS_DMASYNC_POSTREAD);
   3150 
   3151 	phy_info = &sc->sc_last_phy_info;
   3152 	rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
   3153 	wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
   3154 	len = le16toh(rx_res->byte_count);
   3155 	rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
   3156 
   3157 	m = data->m;
   3158 	m->m_data = pkt->data + sizeof(*rx_res);
   3159 	m->m_pkthdr.len = m->m_len = len;
   3160 
   3161 	if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
   3162 		DPRINTF(("dsp size out of range [0,20]: %d\n",
   3163 		    phy_info->cfg_phy_cnt));
   3164 		return;
   3165 	}
   3166 
   3167 	if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
   3168 	    !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
   3169 		DPRINTF(("Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status));
   3170 		return; /* drop */
   3171 	}
   3172 
   3173 	device_timestamp = le32toh(phy_info->system_timestamp);
   3174 
   3175 	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_RX_ENERGY_API) {
   3176 		rssi = iwm_mvm_get_signal_strength(sc, phy_info);
   3177 	} else {
   3178 		rssi = iwm_mvm_calc_rssi(sc, phy_info);
   3179 	}
   3180 	rssi = -rssi;
   3181 
   3182 	if (ic->ic_state == IEEE80211_S_SCAN)
   3183 		iwm_fix_channel(ic, m);
   3184 
   3185 	/* replenish ring for the buffer we're going to feed to the sharks */
   3186 	if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0)
   3187 		return;
   3188 
   3189 	m->m_pkthdr.rcvif = IC2IFP(ic);
   3190 
   3191 	if (sc->sc_scanband == IEEE80211_CHAN_5GHZ) {
   3192 		if (le32toh(phy_info->channel) < __arraycount(ic->ic_channels))
   3193 			c = &ic->ic_channels[le32toh(phy_info->channel)];
   3194 	}
   3195 
   3196 	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
   3197 	if (c)
   3198 		ni->ni_chan = c;
   3199 
   3200 	if (sc->sc_drvbpf != NULL) {
   3201 		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
   3202 
   3203 		tap->wr_flags = 0;
   3204 		if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
   3205 			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
   3206 		tap->wr_chan_freq =
   3207 		    htole16(ic->ic_channels[phy_info->channel].ic_freq);
   3208 		tap->wr_chan_flags =
   3209 		    htole16(ic->ic_channels[phy_info->channel].ic_flags);
   3210 		tap->wr_dbm_antsignal = (int8_t)rssi;
   3211 		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
   3212 		tap->wr_tsft = phy_info->system_timestamp;
   3213 		switch (phy_info->rate) {
   3214 		/* CCK rates. */
   3215 		case  10: tap->wr_rate =   2; break;
   3216 		case  20: tap->wr_rate =   4; break;
   3217 		case  55: tap->wr_rate =  11; break;
   3218 		case 110: tap->wr_rate =  22; break;
   3219 		/* OFDM rates. */
   3220 		case 0xd: tap->wr_rate =  12; break;
   3221 		case 0xf: tap->wr_rate =  18; break;
   3222 		case 0x5: tap->wr_rate =  24; break;
   3223 		case 0x7: tap->wr_rate =  36; break;
   3224 		case 0x9: tap->wr_rate =  48; break;
   3225 		case 0xb: tap->wr_rate =  72; break;
   3226 		case 0x1: tap->wr_rate =  96; break;
   3227 		case 0x3: tap->wr_rate = 108; break;
   3228 		/* Unknown rate: should not happen. */
   3229 		default:  tap->wr_rate =   0;
   3230 		}
   3231 
   3232 		bpf_mtap2(sc->sc_drvbpf, tap, sc->sc_rxtap_len, m);
   3233 	}
   3234 	ieee80211_input(ic, m, ni, rssi, device_timestamp);
   3235 	ieee80211_free_node(ni);
   3236 }
   3237 
   3238 static void
   3239 iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
   3240 	struct iwm_node *in)
   3241 {
   3242 	struct ieee80211com *ic = &sc->sc_ic;
   3243 	struct ifnet *ifp = IC2IFP(ic);
   3244 	struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
   3245 	int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
   3246 	int failack = tx_resp->failure_frame;
   3247 
   3248 	KASSERT(tx_resp->frame_count == 1);
   3249 
   3250 	/* Update rate control statistics. */
   3251 	in->in_amn.amn_txcnt++;
   3252 	if (failack > 0) {
   3253 		in->in_amn.amn_retrycnt++;
   3254 	}
   3255 
   3256 	if (status != IWM_TX_STATUS_SUCCESS &&
   3257 	    status != IWM_TX_STATUS_DIRECT_DONE)
   3258 		ifp->if_oerrors++;
   3259 	else
   3260 		ifp->if_opackets++;
   3261 }
   3262 
   3263 static void
   3264 iwm_mvm_rx_tx_cmd(struct iwm_softc *sc,
   3265 	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
   3266 {
   3267 	struct ieee80211com *ic = &sc->sc_ic;
   3268 	struct ifnet *ifp = IC2IFP(ic);
   3269 	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
   3270 	int idx = cmd_hdr->idx;
   3271 	int qid = cmd_hdr->qid;
   3272 	struct iwm_tx_ring *ring = &sc->txq[qid];
   3273 	struct iwm_tx_data *txd = &ring->data[idx];
   3274 	struct iwm_node *in = txd->in;
   3275 
   3276 	if (txd->done) {
   3277 		DPRINTF(("%s: got tx interrupt that's already been handled!\n",
   3278 		    DEVNAME(sc)));
   3279 		return;
   3280 	}
   3281 
   3282 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
   3283 	    BUS_DMASYNC_POSTREAD);
   3284 
   3285 	sc->sc_tx_timer = 0;
   3286 
   3287 	iwm_mvm_rx_tx_cmd_single(sc, pkt, in);
   3288 
   3289 	/* Unmap and free mbuf. */
   3290 	bus_dmamap_sync(sc->sc_dmat, txd->map, 0, txd->map->dm_mapsize,
   3291 	    BUS_DMASYNC_POSTWRITE);
   3292 	bus_dmamap_unload(sc->sc_dmat, txd->map);
   3293 	m_freem(txd->m);
   3294 
   3295 	DPRINTFN(8, ("free txd %p, in %p\n", txd, txd->in));
   3296 	KASSERT(txd->done == 0);
   3297 	txd->done = 1;
   3298 	KASSERT(txd->in);
   3299 
   3300 	txd->m = NULL;
   3301 	txd->in = NULL;
   3302 	ieee80211_free_node(&in->in_ni);
   3303 
   3304 	if (--ring->queued < IWM_TX_RING_LOMARK) {
   3305 		sc->qfullmsk &= ~(1 << ring->qid);
   3306 		if (sc->qfullmsk == 0 && (ifp->if_flags & IFF_OACTIVE)) {
   3307 			ifp->if_flags &= ~IFF_OACTIVE;
   3308 			/*
   3309 			 * Well, we're in interrupt context, but then again
   3310 			 * I guess net80211 does all sorts of stunts in
   3311 			 * interrupt context, so maybe this is no biggie.
   3312 			 */
   3313 			(*ifp->if_start)(ifp);
   3314 		}
   3315 	}
   3316 }
   3317 
   3318 /*
   3319  * BEGIN iwlwifi/mvm/binding.c
   3320  */
   3321 
   3322 static int
   3323 iwm_mvm_binding_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action)
   3324 {
   3325 	struct iwm_binding_cmd cmd;
   3326 	struct iwm_mvm_phy_ctxt *phyctxt = in->in_phyctxt;
   3327 	int i, ret;
   3328 	uint32_t status;
   3329 
   3330 	memset(&cmd, 0, sizeof(cmd));
   3331 
   3332 	cmd.id_and_color
   3333 	    = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
   3334 	cmd.action = htole32(action);
   3335 	cmd.phy = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
   3336 
   3337 	cmd.macs[0] = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
   3338 	for (i = 1; i < IWM_MAX_MACS_IN_BINDING; i++)
   3339 		cmd.macs[i] = htole32(IWM_FW_CTXT_INVALID);
   3340 
   3341 	status = 0;
   3342 	ret = iwm_mvm_send_cmd_pdu_status(sc, IWM_BINDING_CONTEXT_CMD,
   3343 	    sizeof(cmd), &cmd, &status);
   3344 	if (ret) {
   3345 		DPRINTF(("%s: Failed to send binding (action:%d): %d\n",
   3346 		    DEVNAME(sc), action, ret));
   3347 		return ret;
   3348 	}
   3349 
   3350 	if (status) {
   3351 		DPRINTF(("%s: Binding command failed: %u\n", DEVNAME(sc),
   3352 		    status));
   3353 		ret = EIO;
   3354 	}
   3355 
   3356 	return ret;
   3357 }
   3358 
   3359 static int
   3360 iwm_mvm_binding_update(struct iwm_softc *sc, struct iwm_node *in, int add)
   3361 {
   3362 	return iwm_mvm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD);
   3363 }
   3364 
   3365 static int
   3366 iwm_mvm_binding_add_vif(struct iwm_softc *sc, struct iwm_node *in)
   3367 {
   3368 	return iwm_mvm_binding_update(sc, in, IWM_FW_CTXT_ACTION_ADD);
   3369 }
   3370 
   3371 /*
   3372  * END iwlwifi/mvm/binding.c
   3373  */
   3374 
   3375 /*
   3376  * BEGIN iwlwifi/mvm/phy-ctxt.c
   3377  */
   3378 
   3379 /*
   3380  * Construct the generic fields of the PHY context command
   3381  */
   3382 static void
   3383 iwm_mvm_phy_ctxt_cmd_hdr(struct iwm_softc *sc, struct iwm_mvm_phy_ctxt *ctxt,
   3384 	struct iwm_phy_context_cmd *cmd, uint32_t action, uint32_t apply_time)
   3385 {
   3386 	memset(cmd, 0, sizeof(struct iwm_phy_context_cmd));
   3387 
   3388 	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(ctxt->id,
   3389 	    ctxt->color));
   3390 	cmd->action = htole32(action);
   3391 	cmd->apply_time = htole32(apply_time);
   3392 }
   3393 
   3394 /*
   3395  * Add the phy configuration to the PHY context command
   3396  */
   3397 static void
   3398 iwm_mvm_phy_ctxt_cmd_data(struct iwm_softc *sc,
   3399 	struct iwm_phy_context_cmd *cmd, struct ieee80211_channel *chan,
   3400 	uint8_t chains_static, uint8_t chains_dynamic)
   3401 {
   3402 	struct ieee80211com *ic = &sc->sc_ic;
   3403 	uint8_t active_cnt, idle_cnt;
   3404 
   3405 	cmd->ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
   3406 	    IWM_PHY_BAND_24 : IWM_PHY_BAND_5;
   3407 
   3408 	cmd->ci.channel = ieee80211_chan2ieee(ic, chan);
   3409 	cmd->ci.width = IWM_PHY_VHT_CHANNEL_MODE20;
   3410 	cmd->ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
   3411 
    3412 	/* Set the Rx chains */
   3413 	idle_cnt = chains_static;
   3414 	active_cnt = chains_dynamic;
   3415 
   3416 	cmd->rxchain_info = htole32(IWM_FW_VALID_RX_ANT(sc) <<
   3417 					IWM_PHY_RX_CHAIN_VALID_POS);
   3418 	cmd->rxchain_info |= htole32(idle_cnt << IWM_PHY_RX_CHAIN_CNT_POS);
   3419 	cmd->rxchain_info |= htole32(active_cnt <<
   3420 	    IWM_PHY_RX_CHAIN_MIMO_CNT_POS);
   3421 
   3422 	cmd->txchain_info = htole32(IWM_FW_VALID_TX_ANT(sc));
   3423 }
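
/*
 * Illustrative sketch (not driver code): what rxchain_info above works out
 * to for a hypothetical two-antenna device whose valid RX antennas are A
 * and B (mask 0x3), with one idle and one active chain requested.  The
 * valid-antenna mask lands at IWM_PHY_RX_CHAIN_VALID_POS, the idle count
 * at IWM_PHY_RX_CHAIN_CNT_POS and the MIMO count at
 * IWM_PHY_RX_CHAIN_MIMO_CNT_POS.
 */
#if 0
static uint32_t
iwm_rxchain_example(void)
{
	uint8_t valid_rx_ant = 0x3;	/* hypothetical: antennas A and B */
	uint8_t idle_cnt = 1, active_cnt = 1;
	uint32_t v;

	v  = valid_rx_ant << IWM_PHY_RX_CHAIN_VALID_POS;
	v |= idle_cnt << IWM_PHY_RX_CHAIN_CNT_POS;
	v |= active_cnt << IWM_PHY_RX_CHAIN_MIMO_CNT_POS;
	return htole32(v);
}
#endif
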
   3424 
   3425 /*
    3426  * Send a PHY context command, but only when something in the
    3427  * configuration has changed: either this is the first time the phy
    3428  * configuration is applied, or the phy configuration changed from the
    3429  * previous apply.
   3430  */
   3431 static int
   3432 iwm_mvm_phy_ctxt_apply(struct iwm_softc *sc,
   3433 	struct iwm_mvm_phy_ctxt *ctxt,
   3434 	uint8_t chains_static, uint8_t chains_dynamic,
   3435 	uint32_t action, uint32_t apply_time)
   3436 {
   3437 	struct iwm_phy_context_cmd cmd;
   3438 	int ret;
   3439 
   3440 	/* Set the command header fields */
   3441 	iwm_mvm_phy_ctxt_cmd_hdr(sc, ctxt, &cmd, action, apply_time);
   3442 
   3443 	/* Set the command data */
   3444 	iwm_mvm_phy_ctxt_cmd_data(sc, &cmd, ctxt->channel,
   3445 	    chains_static, chains_dynamic);
   3446 
   3447 	ret = iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONTEXT_CMD, IWM_CMD_SYNC,
   3448 	    sizeof(struct iwm_phy_context_cmd), &cmd);
   3449 	if (ret) {
   3450 		DPRINTF(("PHY ctxt cmd error. ret=%d\n", ret));
   3451 	}
   3452 	return ret;
   3453 }
   3454 
   3455 /*
   3456  * Send a command to add a PHY context based on the current HW configuration.
   3457  */
   3458 static int
   3459 iwm_mvm_phy_ctxt_add(struct iwm_softc *sc, struct iwm_mvm_phy_ctxt *ctxt,
   3460 	struct ieee80211_channel *chan,
   3461 	uint8_t chains_static, uint8_t chains_dynamic)
   3462 {
   3463 	ctxt->channel = chan;
   3464 	return iwm_mvm_phy_ctxt_apply(sc, ctxt,
   3465 	    chains_static, chains_dynamic, IWM_FW_CTXT_ACTION_ADD, 0);
   3466 }
   3467 
   3468 /*
   3469  * Send a command to modify the PHY context based on the current HW
   3470  * configuration. Note that the function does not check that the configuration
   3471  * changed.
   3472  */
   3473 static int
   3474 iwm_mvm_phy_ctxt_changed(struct iwm_softc *sc,
   3475 	struct iwm_mvm_phy_ctxt *ctxt, struct ieee80211_channel *chan,
   3476 	uint8_t chains_static, uint8_t chains_dynamic)
   3477 {
   3478 	ctxt->channel = chan;
   3479 	return iwm_mvm_phy_ctxt_apply(sc, ctxt,
   3480 	    chains_static, chains_dynamic, IWM_FW_CTXT_ACTION_MODIFY, 0);
   3481 }
   3482 
   3483 /*
   3484  * END iwlwifi/mvm/phy-ctxt.c
   3485  */
   3486 
   3487 /*
   3488  * transmit side
   3489  */
   3490 
   3491 /*
   3492  * Send a command to the firmware.  We try to implement the Linux
    3493  * driver interface for the routine; the code is mostly adapted from
    3494  * if_iwn (iwn_cmd()).
   3495  *
   3496  * For now, we always copy the first part and map the second one (if it exists).
   3497  */
   3498 static int
   3499 iwm_send_cmd(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
   3500 {
   3501 	struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
   3502 	struct iwm_tfd *desc;
   3503 	struct iwm_tx_data *data;
   3504 	struct iwm_device_cmd *cmd;
   3505 	struct mbuf *m;
   3506 	bus_addr_t paddr;
   3507 	uint32_t addr_lo;
   3508 	int error, i, paylen, off, s;
   3509 	int code;
   3510 	int async, wantresp;
   3511 
   3512 	code = hcmd->id;
   3513 	async = hcmd->flags & IWM_CMD_ASYNC;
   3514 	wantresp = hcmd->flags & IWM_CMD_WANT_SKB;
   3515 
   3516 	for (i = 0, paylen = 0; i < __arraycount(hcmd->len); i++) {
   3517 		paylen += hcmd->len[i];
   3518 	}
   3519 
   3520 	/* if the command wants an answer, busy sc_cmd_resp */
   3521 	if (wantresp) {
   3522 		KASSERT(!async);
   3523 		while (sc->sc_wantresp != -1)
   3524 			tsleep(&sc->sc_wantresp, 0, "iwmcmdsl", 0);
   3525 		sc->sc_wantresp = ring->qid << 16 | ring->cur;
   3526 		DPRINTFN(12, ("wantresp is %x\n", sc->sc_wantresp));
   3527 	}
   3528 
   3529 	/*
   3530 	 * Is the hardware still available?  (after e.g. above wait).
   3531 	 */
   3532 	s = splnet();
   3533 	if (sc->sc_flags & IWM_FLAG_STOPPED) {
   3534 		error = ENXIO;
   3535 		goto out;
   3536 	}
   3537 
   3538 	desc = &ring->desc[ring->cur];
   3539 	data = &ring->data[ring->cur];
   3540 
   3541 	if (paylen > sizeof(cmd->data)) {
   3542 		/* Command is too large */
   3543 		if (sizeof(cmd->hdr) + paylen > IWM_RBUF_SIZE) {
   3544 			error = EINVAL;
   3545 			goto out;
   3546 		}
   3547 		m = m_gethdr(M_DONTWAIT, MT_DATA);
   3548 		if (m == NULL) {
   3549 			error = ENOMEM;
   3550 			goto out;
   3551 		}
   3552 		MEXTMALLOC(m, IWM_RBUF_SIZE, M_DONTWAIT);
   3553 		if (!(m->m_flags & M_EXT)) {
   3554 			m_freem(m);
   3555 			error = ENOMEM;
   3556 			goto out;
   3557 		}
   3558 		cmd = mtod(m, struct iwm_device_cmd *);
   3559 		error = bus_dmamap_load(sc->sc_dmat, data->map, cmd,
   3560 		    IWM_RBUF_SIZE, NULL, BUS_DMA_NOWAIT | BUS_DMA_WRITE);
   3561 		if (error != 0) {
   3562 			m_freem(m);
   3563 			goto out;
   3564 		}
   3565 		data->m = m;
   3566 		paddr = data->map->dm_segs[0].ds_addr;
   3567 	} else {
   3568 		cmd = &ring->cmd[ring->cur];
   3569 		paddr = data->cmd_paddr;
   3570 	}
   3571 
   3572 	cmd->hdr.code = code;
   3573 	cmd->hdr.flags = 0;
   3574 	cmd->hdr.qid = ring->qid;
   3575 	cmd->hdr.idx = ring->cur;
   3576 
   3577 	for (i = 0, off = 0; i < __arraycount(hcmd->data); i++) {
   3578 		if (hcmd->len[i] == 0)
   3579 			continue;
   3580 		memcpy(cmd->data + off, hcmd->data[i], hcmd->len[i]);
   3581 		off += hcmd->len[i];
   3582 	}
   3583 	KASSERT(off == paylen);
   3584 
   3585 	/* lo field is not aligned */
   3586 	addr_lo = htole32((uint32_t)paddr);
   3587 	memcpy(&desc->tbs[0].lo, &addr_lo, sizeof(uint32_t));
   3588 	desc->tbs[0].hi_n_len  = htole16(iwm_get_dma_hi_addr(paddr)
   3589 	    | ((sizeof(cmd->hdr) + paylen) << 4));
   3590 	desc->num_tbs = 1;
   3591 
   3592 	DPRINTFN(8, ("iwm_send_cmd 0x%x size=%lu %s\n",
   3593 	    code, sizeof(cmd->hdr) + paylen, async ? " (async)" : ""));
   3594 
   3595 	if (paylen > sizeof(cmd->data)) {
   3596 		bus_dmamap_sync(sc->sc_dmat, data->map, 0,
   3597 		    sizeof(cmd->hdr) + paylen, BUS_DMASYNC_PREWRITE);
   3598 	} else {
   3599 		bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
   3600 		    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
   3601 		    sizeof(cmd->hdr) + paylen, BUS_DMASYNC_PREWRITE);
   3602 	}
   3603 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
   3604 	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
   3605 	    sizeof (*desc), BUS_DMASYNC_PREWRITE);
   3606 
   3607 	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
   3608 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
   3609 	if (!iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
   3610 	    IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
   3611 	    (IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
   3612 	     IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000)) {
   3613 		DPRINTF(("%s: acquiring device failed\n", DEVNAME(sc)));
   3614 		error = EBUSY;
   3615 		goto out;
   3616 	}
   3617 
   3618 #if 0
   3619 	iwm_update_sched(sc, ring->qid, ring->cur, 0, 0);
   3620 #endif
   3621 	DPRINTF(("sending command 0x%x qid %d, idx %d\n",
   3622 	    code, ring->qid, ring->cur));
   3623 
   3624 	/* Kick command ring. */
   3625 	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
   3626 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
   3627 
   3628 	if (!async) {
   3629 		/* m..m-mmyy-mmyyyy-mym-ym m-my generation */
   3630 		int generation = sc->sc_generation;
   3631 		error = tsleep(desc, PCATCH, "iwmcmd", hz);
   3632 		if (error == 0) {
   3633 			/* if hardware is no longer up, return error */
   3634 			if (generation != sc->sc_generation) {
   3635 				error = ENXIO;
   3636 			} else {
   3637 				hcmd->resp_pkt = (void *)sc->sc_cmd_resp;
   3638 			}
   3639 		}
   3640 	}
   3641  out:
   3642 	if (wantresp && error != 0) {
   3643 		iwm_free_resp(sc, hcmd);
   3644 	}
   3645 	splx(s);
   3646 
   3647 	return error;
   3648 }
   3649 
   3650 /* iwlwifi: mvm/utils.c */
   3651 static int
   3652 iwm_mvm_send_cmd_pdu(struct iwm_softc *sc, uint8_t id,
   3653 	uint32_t flags, uint16_t len, const void *data)
   3654 {
   3655 	struct iwm_host_cmd cmd = {
   3656 		.id = id,
   3657 		.len = { len, },
   3658 		.data = { data, },
   3659 		.flags = flags,
   3660 	};
   3661 
   3662 	return iwm_send_cmd(sc, &cmd);
   3663 }
   3664 
   3665 /* iwlwifi: mvm/utils.c */
   3666 static int
   3667 iwm_mvm_send_cmd_status(struct iwm_softc *sc,
   3668 	struct iwm_host_cmd *cmd, uint32_t *status)
   3669 {
   3670 	struct iwm_rx_packet *pkt;
   3671 	struct iwm_cmd_response *resp;
   3672 	int error, resp_len;
   3673 
   3674 	//lockdep_assert_held(&mvm->mutex);
   3675 
   3676 	KASSERT((cmd->flags & IWM_CMD_WANT_SKB) == 0);
   3677 	cmd->flags |= IWM_CMD_SYNC | IWM_CMD_WANT_SKB;
   3678 
   3679 	if ((error = iwm_send_cmd(sc, cmd)) != 0)
   3680 		return error;
   3681 	pkt = cmd->resp_pkt;
   3682 
   3683 	/* Can happen if RFKILL is asserted */
   3684 	if (!pkt) {
   3685 		error = 0;
   3686 		goto out_free_resp;
   3687 	}
   3688 
   3689 	if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
   3690 		error = EIO;
   3691 		goto out_free_resp;
   3692 	}
   3693 
   3694 	resp_len = iwm_rx_packet_payload_len(pkt);
   3695 	if (resp_len != sizeof(*resp)) {
   3696 		error = EIO;
   3697 		goto out_free_resp;
   3698 	}
   3699 
   3700 	resp = (void *)pkt->data;
   3701 	*status = le32toh(resp->status);
   3702  out_free_resp:
   3703 	iwm_free_resp(sc, cmd);
   3704 	return error;
   3705 }
   3706 
   3707 /* iwlwifi/mvm/utils.c */
   3708 static int
   3709 iwm_mvm_send_cmd_pdu_status(struct iwm_softc *sc, uint8_t id,
   3710 	uint16_t len, const void *data, uint32_t *status)
   3711 {
   3712 	struct iwm_host_cmd cmd = {
   3713 		.id = id,
   3714 		.len = { len, },
   3715 		.data = { data, },
   3716 	};
   3717 
   3718 	return iwm_mvm_send_cmd_status(sc, &cmd, status);
   3719 }
   3720 
   3721 static void
   3722 iwm_free_resp(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
   3723 {
   3724 	KASSERT(sc->sc_wantresp != -1);
   3725 	KASSERT((hcmd->flags & (IWM_CMD_WANT_SKB|IWM_CMD_SYNC))
   3726 	    == (IWM_CMD_WANT_SKB|IWM_CMD_SYNC));
   3727 	sc->sc_wantresp = -1;
   3728 	wakeup(&sc->sc_wantresp);
   3729 }
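
/*
 * Illustrative sketch (not driver code): the synchronous "want response"
 * pattern the helpers above implement.  A caller sets IWM_CMD_WANT_SKB,
 * reads the reply out of hcmd.resp_pkt and must then call iwm_free_resp()
 * so sc_cmd_resp is released for the next waiter.  IWM_HYPOTHETICAL_CMD is
 * a placeholder command id, not a real firmware command.
 */
#if 0
static int
iwm_sync_cmd_example(struct iwm_softc *sc)
{
	struct iwm_host_cmd hcmd = {
		.id = IWM_HYPOTHETICAL_CMD,	/* placeholder only */
		.flags = IWM_CMD_SYNC | IWM_CMD_WANT_SKB,
	};
	struct iwm_rx_packet *pkt;
	int error;

	if ((error = iwm_send_cmd(sc, &hcmd)) != 0)
		return error;
	pkt = hcmd.resp_pkt;
	if (pkt != NULL && (pkt->hdr.flags & IWM_CMD_FAILED_MSK))
		error = EIO;
	iwm_free_resp(sc, &hcmd);	/* always release sc_cmd_resp */
	return error;
}
#endif
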
   3730 
   3731 /*
    3732  * Process a "command done" firmware notification.  This is where we wake up
    3733  * processes waiting for a synchronous command completion.
    3734  * Adapted from if_iwn.
   3735  */
   3736 static void
   3737 iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
   3738 {
   3739 	struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
   3740 	struct iwm_tx_data *data;
   3741 
   3742 	if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
   3743 		return;	/* Not a command ack. */
   3744 	}
   3745 
   3746 	data = &ring->data[pkt->hdr.idx];
   3747 
   3748 	/* If the command was mapped in an mbuf, free it. */
   3749 	if (data->m != NULL) {
   3750 		bus_dmamap_sync(sc->sc_dmat, data->map, 0,
   3751 		    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   3752 		bus_dmamap_unload(sc->sc_dmat, data->map);
   3753 		m_freem(data->m);
   3754 		data->m = NULL;
   3755 	}
   3756 	wakeup(&ring->desc[pkt->hdr.idx]);
   3757 }
   3758 
   3759 #if 0
   3760 /*
   3761  * necessary only for block ack mode
   3762  */
   3763 void
   3764 iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
   3765 	uint16_t len)
   3766 {
   3767 	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
   3768 	uint16_t w_val;
   3769 
   3770 	scd_bc_tbl = sc->sched_dma.vaddr;
   3771 
   3772 	len += 8; /* magic numbers came naturally from paris */
   3773 	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
   3774 		len = roundup(len, 4) / 4;
   3775 
   3776 	w_val = htole16(sta_id << 12 | len);
   3777 
   3778 	/* Update TX scheduler. */
   3779 	scd_bc_tbl[qid].tfd_offset[idx] = w_val;
   3780 	bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
    3781 	    (char *)(void *)&scd_bc_tbl[qid].tfd_offset[idx] -
   3782 	    sizeof(uint16_t), BUS_DMASYNC_PREWRITE);
   3783 
   3784 	/* I really wonder what this is ?!? */
   3785 	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
   3786 		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
   3787 		bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
    3788 		    (char *)(void *)&scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] -
    3789 		    (char *)(void *)sc->sched_dma.vaddr,
   3790 		    sizeof (uint16_t), BUS_DMASYNC_PREWRITE);
   3791 	}
   3792 }
   3793 #endif
   3794 
   3795 /*
    3796  * Fill in various bits for management frames, and leave them
   3797  * unfilled for data frames (firmware takes care of that).
   3798  * Return the selected TX rate.
   3799  */
   3800 static const struct iwm_rate *
   3801 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
   3802 	struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
   3803 {
   3804 	const struct iwm_rate *rinfo;
   3805 	int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
   3806 	int ridx, rate_flags;
   3807 	int nrates = in->in_ni.ni_rates.rs_nrates;
   3808 
   3809 	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
   3810 	tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
   3811 
   3812 	/* for data frames, use RS table */
   3813 	if (type == IEEE80211_FC0_TYPE_DATA) {
   3814 		if (sc->sc_fixed_ridx != -1) {
   3815 			tx->initial_rate_index = sc->sc_fixed_ridx;
   3816 		} else {
   3817 			tx->initial_rate_index = (nrates-1) - in->in_ni.ni_txrate;
   3818 		}
   3819 		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
   3820 		DPRINTFN(12, ("start with txrate %d\n", tx->initial_rate_index));
   3821 		return &iwm_rates[tx->initial_rate_index];
   3822 	}
   3823 
   3824 	/* for non-data, use the lowest supported rate */
   3825 	ridx = in->in_ridx[0];
   3826 	rinfo = &iwm_rates[ridx];
   3827 
   3828 	rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
   3829 	if (IWM_RIDX_IS_CCK(ridx))
   3830 		rate_flags |= IWM_RATE_MCS_CCK_MSK;
   3831 	tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
   3832 
   3833 	return rinfo;
   3834 }
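
/*
 * Illustrative sketch (not driver code): how the data-frame case above
 * turns net80211's ni_txrate (an index into the node's rate set, lowest
 * rate first) into a firmware rate index.  Assuming the firmware's rate
 * table is filled from the highest rate down, a hypothetical 8-entry rate
 * set with ni_txrate == 5 yields initial index (8 - 1) - 5 == 2, from
 * which the firmware falls back toward lower rates on retries.
 */
#if 0
static int
iwm_initial_ridx_example(void)
{
	int nrates = 8, ni_txrate = 5;	/* hypothetical rate set */

	return (nrates - 1) - ni_txrate;	/* 2 */
}
#endif
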
   3835 
   3836 #define TB0_SIZE 16
   3837 static int
   3838 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
   3839 {
   3840 	struct ieee80211com *ic = &sc->sc_ic;
   3841 	struct iwm_node *in = (void *)ni;
   3842 	struct iwm_tx_ring *ring;
   3843 	struct iwm_tx_data *data;
   3844 	struct iwm_tfd *desc;
   3845 	struct iwm_device_cmd *cmd;
   3846 	struct iwm_tx_cmd *tx;
   3847 	struct ieee80211_frame *wh;
   3848 	struct ieee80211_key *k = NULL;
   3849 	struct mbuf *m1;
   3850 	const struct iwm_rate *rinfo;
   3851 	uint32_t flags;
   3852 	u_int hdrlen;
   3853 	bus_dma_segment_t *seg;
   3854 	uint8_t tid, type;
   3855 	int i, totlen, error, pad;
   3856 	int hdrlen2;
   3857 
   3858 	wh = mtod(m, struct ieee80211_frame *);
   3859 	hdrlen = ieee80211_anyhdrsize(wh);
   3860 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
   3861 
   3862 	hdrlen2 = (ieee80211_has_qos(wh)) ?
   3863 	    sizeof (struct ieee80211_qosframe) :
   3864 	    sizeof (struct ieee80211_frame);
   3865 
   3866 	if (hdrlen != hdrlen2)
   3867 		DPRINTF(("%s: hdrlen error (%d != %d)\n",
   3868 		    DEVNAME(sc), hdrlen, hdrlen2));
   3869 
   3870 	tid = 0;
   3871 
   3872 	ring = &sc->txq[ac];
   3873 	desc = &ring->desc[ring->cur];
   3874 	memset(desc, 0, sizeof(*desc));
   3875 	data = &ring->data[ring->cur];
   3876 
   3877 	/* Fill out iwm_tx_cmd to send to the firmware */
   3878 	cmd = &ring->cmd[ring->cur];
   3879 	cmd->hdr.code = IWM_TX_CMD;
   3880 	cmd->hdr.flags = 0;
   3881 	cmd->hdr.qid = ring->qid;
   3882 	cmd->hdr.idx = ring->cur;
   3883 
   3884 	tx = (void *)cmd->data;
   3885 	memset(tx, 0, sizeof(*tx));
   3886 
   3887 	rinfo = iwm_tx_fill_cmd(sc, in, wh, tx);
   3888 
   3889 	if (sc->sc_drvbpf != NULL) {
   3890 		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
   3891 
   3892 		tap->wt_flags = 0;
   3893 		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
   3894 		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
   3895 		tap->wt_rate = rinfo->rate;
   3896 		tap->wt_hwqueue = ac;
   3897 		if (wh->i_fc[1] & IEEE80211_FC1_WEP)
   3898 			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
   3899 
   3900 		bpf_mtap2(sc->sc_drvbpf, tap, sc->sc_txtap_len, m);
   3901 	}
   3902 
   3903 	/* Encrypt the frame if need be. */
   3904 	if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
   3905 		k = ieee80211_crypto_encap(ic, ni, m);
   3906 		if (k == NULL) {
   3907 			m_freem(m);
   3908 			return ENOBUFS;
   3909 		}
   3910 		/* Packet header may have moved, reset our local pointer. */
   3911 		wh = mtod(m, struct ieee80211_frame *);
   3912 	}
   3913 	totlen = m->m_pkthdr.len;
   3914 
   3915 	flags = 0;
   3916 	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
   3917 		flags |= IWM_TX_CMD_FLG_ACK;
   3918 	}
   3919 
    3920 	if (type == IEEE80211_FC0_TYPE_DATA
   3921 	    && (totlen + IEEE80211_CRC_LEN > ic->ic_rtsthreshold)
   3922 	    && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
   3923 		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
   3924 	}
   3925 
   3926 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
   3927 	    type != IEEE80211_FC0_TYPE_DATA)
   3928 		tx->sta_id = sc->sc_aux_sta.sta_id;
   3929 	else
   3930 		tx->sta_id = IWM_STATION_ID;
   3931 
   3932 	if (type == IEEE80211_FC0_TYPE_MGT) {
   3933 		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
   3934 
   3935 		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
   3936 		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
   3937 			tx->pm_frame_timeout = htole16(3);
   3938 		else
   3939 			tx->pm_frame_timeout = htole16(2);
   3940 	} else {
   3941 		tx->pm_frame_timeout = htole16(0);
   3942 	}
   3943 
   3944 	if (hdrlen & 3) {
   3945 		/* First segment length must be a multiple of 4. */
   3946 		flags |= IWM_TX_CMD_FLG_MH_PAD;
   3947 		pad = 4 - (hdrlen & 3);
   3948 	} else
   3949 		pad = 0;
   3950 
   3951 	tx->driver_txop = 0;
   3952 	tx->next_frame_len = 0;
   3953 
   3954 	tx->len = htole16(totlen);
   3955 	tx->tid_tspec = tid;
   3956 	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
   3957 
   3958 	/* Set physical address of "scratch area". */
   3959 	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
   3960 	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
   3961 
   3962 	/* Copy 802.11 header in TX command. */
   3963 	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);
   3964 
   3965 	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;
   3966 
   3967 	tx->sec_ctl = 0;
   3968 	tx->tx_flags |= htole32(flags);
   3969 
   3970 	/* Trim 802.11 header. */
   3971 	m_adj(m, hdrlen);
   3972 
   3973 	error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
   3974 	    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
   3975 	if (error != 0) {
   3976 		if (error != EFBIG) {
   3977 			aprint_error_dev(sc->sc_dev,
   3978 			    "can't map mbuf (error %d)\n", error);
   3979 			m_freem(m);
   3980 			return error;
   3981 		}
   3982 		/* Too many DMA segments, linearize mbuf. */
   3983 		MGETHDR(m1, M_DONTWAIT, MT_DATA);
   3984 		if (m1 == NULL) {
   3985 			m_freem(m);
   3986 			return ENOBUFS;
   3987 		}
   3988 		if (m->m_pkthdr.len > MHLEN) {
   3989 			MCLGET(m1, M_DONTWAIT);
   3990 			if (!(m1->m_flags & M_EXT)) {
   3991 				m_freem(m);
   3992 				m_freem(m1);
   3993 				return ENOBUFS;
   3994 			}
   3995 		}
   3996 		m_copydata(m, 0, m->m_pkthdr.len, mtod(m1, void *));
   3997 		m1->m_pkthdr.len = m1->m_len = m->m_pkthdr.len;
   3998 		m_freem(m);
   3999 		m = m1;
   4000 
   4001 		error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
   4002 		    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
   4003 		if (error != 0) {
   4004 			aprint_error_dev(sc->sc_dev,
   4005 			    "can't map mbuf (error %d)\n", error);
   4006 			m_freem(m);
   4007 			return error;
   4008 		}
   4009 	}
   4010 	data->m = m;
   4011 	data->in = in;
   4012 	data->done = 0;
   4013 
   4014 	DPRINTFN(8, ("sending txd %p, in %p\n", data, data->in));
   4015 	KASSERT(data->in != NULL);
   4016 
   4017 	DPRINTFN(8, ("sending data: qid=%d idx=%d len=%d nsegs=%d\n",
   4018 	    ring->qid, ring->cur, totlen, data->map->dm_nsegs));
   4019 
   4020 	/* Fill TX descriptor. */
   4021 	desc->num_tbs = 2 + data->map->dm_nsegs;
   4022 
   4023 	desc->tbs[0].lo = htole32(data->cmd_paddr);
   4024 	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
   4025 	    (TB0_SIZE << 4);
   4026 	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
   4027 	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
   4028 	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
   4029 	      + hdrlen + pad - TB0_SIZE) << 4);
   4030 
   4031 	/* Other DMA segments are for data payload. */
   4032 	seg = data->map->dm_segs;
   4033 	for (i = 0; i < data->map->dm_nsegs; i++, seg++) {
   4034 		desc->tbs[i+2].lo = htole32(seg->ds_addr);
    4035 		desc->tbs[i+2].hi_n_len =
   4036 		    htole16(iwm_get_dma_hi_addr(seg->ds_addr))
   4037 		    | ((seg->ds_len) << 4);
   4038 	}
   4039 
   4040 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize,
   4041 	    BUS_DMASYNC_PREWRITE);
   4042 	bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
   4043 	    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
   4044 	    sizeof (*cmd), BUS_DMASYNC_PREWRITE);
   4045 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
   4046 	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
   4047 	    sizeof (*desc), BUS_DMASYNC_PREWRITE);
   4048 
   4049 #if 0
   4050 	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
   4051 #endif
   4052 
   4053 	/* Kick TX ring. */
   4054 	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
   4055 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
   4056 
   4057 	/* Mark TX ring as full if we reach a certain threshold. */
   4058 	if (++ring->queued > IWM_TX_RING_HIMARK) {
   4059 		sc->qfullmsk |= 1 << ring->qid;
   4060 	}
   4061 
   4062 	return 0;
   4063 }
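
/*
 * Illustrative sketch (not driver code): how the TX descriptor built above
 * is laid out.  TB0 covers the first 16 bytes of the device command (the
 * command header plus the start of struct iwm_tx_cmd), TB1 covers the rest
 * of the command, the copied 802.11 header and any pad, and TB2..TBn map
 * the mbuf data segments.  For a hypothetical 24-byte header (no pad
 * needed, since 24 is already a multiple of 4):
 */
#if 0
static int
iwm_tb1_len_example(void)
{
	int hdrlen = 24, pad = 0;	/* hypothetical 802.11 header */

	return sizeof(struct iwm_cmd_header) + sizeof(struct iwm_tx_cmd)
	    + hdrlen + pad - TB0_SIZE;
}
#endif
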
   4064 
   4065 #if 0
   4066 /* not necessary? */
   4067 static int
   4068 iwm_mvm_flush_tx_path(struct iwm_softc *sc, int tfd_msk, int sync)
   4069 {
   4070 	struct iwm_tx_path_flush_cmd flush_cmd = {
   4071 		.queues_ctl = htole32(tfd_msk),
   4072 		.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
   4073 	};
   4074 	int ret;
   4075 
   4076 	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH,
   4077 	    sync ? IWM_CMD_SYNC : IWM_CMD_ASYNC,
   4078 	    sizeof(flush_cmd), &flush_cmd);
   4079 	if (ret)
   4080 		aprint_error_dev(sc->sc_dev, "Flushing tx queue failed: %d\n",
   4081 		    ret);
   4082 	return ret;
   4083 }
   4084 #endif
   4085 
   4086 
   4087 /*
   4088  * BEGIN mvm/power.c
   4089  */
   4090 
   4091 #define IWM_POWER_KEEP_ALIVE_PERIOD_SEC    25
   4092 
   4093 static int
   4094 iwm_mvm_beacon_filter_send_cmd(struct iwm_softc *sc,
   4095 	struct iwm_beacon_filter_cmd *cmd)
   4096 {
   4097 	int ret;
   4098 
   4099 	ret = iwm_mvm_send_cmd_pdu(sc, IWM_REPLY_BEACON_FILTERING_CMD,
   4100 	    IWM_CMD_SYNC, sizeof(struct iwm_beacon_filter_cmd), cmd);
   4101 
   4102 	if (!ret) {
   4103 		DPRINTF(("ba_enable_beacon_abort is: %d\n",
   4104 		    le32toh(cmd->ba_enable_beacon_abort)));
   4105 		DPRINTF(("ba_escape_timer is: %d\n",
   4106 		    le32toh(cmd->ba_escape_timer)));
   4107 		DPRINTF(("bf_debug_flag is: %d\n",
   4108 		    le32toh(cmd->bf_debug_flag)));
   4109 		DPRINTF(("bf_enable_beacon_filter is: %d\n",
   4110 		    le32toh(cmd->bf_enable_beacon_filter)));
   4111 		DPRINTF(("bf_energy_delta is: %d\n",
   4112 		    le32toh(cmd->bf_energy_delta)));
   4113 		DPRINTF(("bf_escape_timer is: %d\n",
   4114 		    le32toh(cmd->bf_escape_timer)));
   4115 		DPRINTF(("bf_roaming_energy_delta is: %d\n",
   4116 		    le32toh(cmd->bf_roaming_energy_delta)));
   4117 		DPRINTF(("bf_roaming_state is: %d\n",
   4118 		    le32toh(cmd->bf_roaming_state)));
   4119 		DPRINTF(("bf_temp_threshold is: %d\n",
   4120 		    le32toh(cmd->bf_temp_threshold)));
   4121 		DPRINTF(("bf_temp_fast_filter is: %d\n",
   4122 		    le32toh(cmd->bf_temp_fast_filter)));
   4123 		DPRINTF(("bf_temp_slow_filter is: %d\n",
   4124 		    le32toh(cmd->bf_temp_slow_filter)));
   4125 	}
   4126 	return ret;
   4127 }
   4128 
   4129 static void
   4130 iwm_mvm_beacon_filter_set_cqm_params(struct iwm_softc *sc,
   4131 	struct iwm_node *in, struct iwm_beacon_filter_cmd *cmd)
   4132 {
   4133 	cmd->ba_enable_beacon_abort = htole32(sc->sc_bf.ba_enabled);
   4134 }
   4135 
   4136 static int
   4137 iwm_mvm_update_beacon_abort(struct iwm_softc *sc, struct iwm_node *in,
   4138 	int enable)
   4139 {
   4140 	struct iwm_beacon_filter_cmd cmd = {
   4141 		IWM_BF_CMD_CONFIG_DEFAULTS,
   4142 		.bf_enable_beacon_filter = htole32(1),
   4143 		.ba_enable_beacon_abort = htole32(enable),
   4144 	};
   4145 
   4146 	if (!sc->sc_bf.bf_enabled)
   4147 		return 0;
   4148 
   4149 	sc->sc_bf.ba_enabled = enable;
   4150 	iwm_mvm_beacon_filter_set_cqm_params(sc, in, &cmd);
   4151 	return iwm_mvm_beacon_filter_send_cmd(sc, &cmd);
   4152 }
   4153 
   4154 static void
   4155 iwm_mvm_power_log(struct iwm_softc *sc, struct iwm_mac_power_cmd *cmd)
   4156 {
   4157 	DPRINTF(("Sending power table command on mac id 0x%X for "
   4158 	    "power level %d, flags = 0x%X\n",
   4159 	    cmd->id_and_color, IWM_POWER_SCHEME_CAM, le16toh(cmd->flags)));
   4160 	DPRINTF(("Keep alive = %u sec\n", le16toh(cmd->keep_alive_seconds)));
   4161 
   4162 	if (!(cmd->flags & htole16(IWM_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK))) {
   4163 		DPRINTF(("Disable power management\n"));
   4164 		return;
   4165 	}
   4166 	KASSERT(0);
   4167 
   4168 #if 0
   4169 	DPRINTF(mvm, "Rx timeout = %u usec\n",
   4170 			le32_to_cpu(cmd->rx_data_timeout));
   4171 	DPRINTF(mvm, "Tx timeout = %u usec\n",
   4172 			le32_to_cpu(cmd->tx_data_timeout));
   4173 	if (cmd->flags & cpu_to_le16(IWM_POWER_FLAGS_SKIP_OVER_DTIM_MSK))
   4174 		DPRINTF(mvm, "DTIM periods to skip = %u\n",
   4175 				cmd->skip_dtim_periods);
   4176 	if (cmd->flags & cpu_to_le16(IWM_POWER_FLAGS_LPRX_ENA_MSK))
   4177 		DPRINTF(mvm, "LP RX RSSI threshold = %u\n",
   4178 				cmd->lprx_rssi_threshold);
   4179 	if (cmd->flags & cpu_to_le16(IWM_POWER_FLAGS_ADVANCE_PM_ENA_MSK)) {
   4180 		DPRINTF(mvm, "uAPSD enabled\n");
   4181 		DPRINTF(mvm, "Rx timeout (uAPSD) = %u usec\n",
   4182 				le32_to_cpu(cmd->rx_data_timeout_uapsd));
   4183 		DPRINTF(mvm, "Tx timeout (uAPSD) = %u usec\n",
   4184 				le32_to_cpu(cmd->tx_data_timeout_uapsd));
   4185 		DPRINTF(mvm, "QNDP TID = %d\n", cmd->qndp_tid);
   4186 		DPRINTF(mvm, "ACs flags = 0x%x\n", cmd->uapsd_ac_flags);
   4187 		DPRINTF(mvm, "Max SP = %d\n", cmd->uapsd_max_sp);
   4188 	}
   4189 #endif
   4190 }
   4191 
   4192 static void
   4193 iwm_mvm_power_build_cmd(struct iwm_softc *sc, struct iwm_node *in,
   4194 	struct iwm_mac_power_cmd *cmd)
   4195 {
   4196 	struct ieee80211com *ic = &sc->sc_ic;
   4197 	struct ieee80211_node *ni = &in->in_ni;
   4198 	int dtimper, dtimper_msec;
   4199 	int keep_alive;
   4200 
   4201 	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
   4202 	    in->in_color));
   4203 	dtimper = ic->ic_dtim_period ?: 1;
   4204 
   4205 	/*
   4206 	 * Regardless of power management state the driver must set
   4207 	 * keep alive period. FW will use it for sending keep alive NDPs
   4208 	 * immediately after association. Check that keep alive period
   4209 	 * is at least 3 * DTIM
   4210 	 */
   4211 	dtimper_msec = dtimper * ni->ni_intval;
   4212 	keep_alive
   4213 	    = MAX(3 * dtimper_msec, 1000 * IWM_POWER_KEEP_ALIVE_PERIOD_SEC);
   4214 	keep_alive = roundup(keep_alive, 1000) / 1000;
   4215 	cmd->keep_alive_seconds = htole16(keep_alive);
   4216 }
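
/*
 * Illustrative sketch (not driver code): a worked example of the keep-alive
 * computation above.  With a hypothetical DTIM period of 1 and a beacon
 * interval of 100, 3 * DTIM is only 300 ms, so the 25 second floor from
 * IWM_POWER_KEEP_ALIVE_PERIOD_SEC wins and keep_alive ends up as 25.
 */
#if 0
static int
iwm_keep_alive_example(void)
{
	int dtimper = 1, beacon_intval = 100;	/* hypothetical AP settings */
	int dtimper_msec = dtimper * beacon_intval;
	int keep_alive = MAX(3 * dtimper_msec,
	    1000 * IWM_POWER_KEEP_ALIVE_PERIOD_SEC);

	return roundup(keep_alive, 1000) / 1000;	/* 25 */
}
#endif
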
   4217 
   4218 static int
   4219 iwm_mvm_power_mac_update_mode(struct iwm_softc *sc, struct iwm_node *in)
   4220 {
   4221 	int ret;
   4222 	int ba_enable;
   4223 	struct iwm_mac_power_cmd cmd;
   4224 
   4225 	memset(&cmd, 0, sizeof(cmd));
   4226 
   4227 	iwm_mvm_power_build_cmd(sc, in, &cmd);
   4228 	iwm_mvm_power_log(sc, &cmd);
   4229 
   4230 	if ((ret = iwm_mvm_send_cmd_pdu(sc, IWM_MAC_PM_POWER_TABLE,
   4231 	    IWM_CMD_SYNC, sizeof(cmd), &cmd)) != 0)
   4232 		return ret;
   4233 
   4234 	ba_enable = !!(cmd.flags &
   4235 	    htole16(IWM_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
   4236 	return iwm_mvm_update_beacon_abort(sc, in, ba_enable);
   4237 }
   4238 
   4239 static int
   4240 iwm_mvm_power_update_device(struct iwm_softc *sc)
   4241 {
   4242 	struct iwm_device_power_cmd cmd = {
   4243 		.flags = htole16(IWM_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK),
   4244 	};
   4245 
   4246 	if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
   4247 		return 0;
   4248 
   4249 	cmd.flags |= htole16(IWM_DEVICE_POWER_FLAGS_CAM_MSK);
   4250 	DPRINTF(("Sending device power command with flags = 0x%X\n", cmd.flags));
   4251 
   4252 	return iwm_mvm_send_cmd_pdu(sc,
   4253 	    IWM_POWER_TABLE_CMD, IWM_CMD_SYNC, sizeof(cmd), &cmd);
   4254 }
   4255 
   4256 static int
   4257 iwm_mvm_enable_beacon_filter(struct iwm_softc *sc, struct iwm_node *in)
   4258 {
   4259 	struct iwm_beacon_filter_cmd cmd = {
   4260 		IWM_BF_CMD_CONFIG_DEFAULTS,
   4261 		.bf_enable_beacon_filter = htole32(1),
   4262 	};
   4263 	int ret;
   4264 
   4265 	iwm_mvm_beacon_filter_set_cqm_params(sc, in, &cmd);
   4266 	ret = iwm_mvm_beacon_filter_send_cmd(sc, &cmd);
   4267 
   4268 	if (ret == 0)
   4269 		sc->sc_bf.bf_enabled = 1;
   4270 
   4271 	return ret;
   4272 }
   4273 
   4274 static int
   4275 iwm_mvm_disable_beacon_filter(struct iwm_softc *sc, struct iwm_node *in)
   4276 {
   4277 	struct iwm_beacon_filter_cmd cmd;
   4278 	int ret;
   4279 
   4280 	memset(&cmd, 0, sizeof(cmd));
   4281 	if ((sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_BF_UPDATED) == 0)
   4282 		return 0;
   4283 
   4284 	ret = iwm_mvm_beacon_filter_send_cmd(sc, &cmd);
   4285 	if (ret == 0)
   4286 		sc->sc_bf.bf_enabled = 0;
   4287 
   4288 	return ret;
   4289 }
   4290 
   4291 #if 0
   4292 static int
   4293 iwm_mvm_update_beacon_filter(struct iwm_softc *sc, struct iwm_node *in)
   4294 {
   4295 	if (!sc->sc_bf.bf_enabled)
   4296 		return 0;
   4297 
   4298 	return iwm_mvm_enable_beacon_filter(sc, in);
   4299 }
   4300 #endif
   4301 
   4302 /*
   4303  * END mvm/power.c
   4304  */
   4305 
   4306 /*
   4307  * BEGIN mvm/sta.c
   4308  */
   4309 
   4310 static void
   4311 iwm_mvm_add_sta_cmd_v6_to_v5(struct iwm_mvm_add_sta_cmd_v6 *cmd_v6,
   4312 	struct iwm_mvm_add_sta_cmd_v5 *cmd_v5)
   4313 {
   4314 	memset(cmd_v5, 0, sizeof(*cmd_v5));
   4315 
   4316 	cmd_v5->add_modify = cmd_v6->add_modify;
   4317 	cmd_v5->tid_disable_tx = cmd_v6->tid_disable_tx;
   4318 	cmd_v5->mac_id_n_color = cmd_v6->mac_id_n_color;
   4319 	memcpy(cmd_v5->addr, cmd_v6->addr, ETHER_ADDR_LEN);
   4320 	cmd_v5->sta_id = cmd_v6->sta_id;
   4321 	cmd_v5->modify_mask = cmd_v6->modify_mask;
   4322 	cmd_v5->station_flags = cmd_v6->station_flags;
   4323 	cmd_v5->station_flags_msk = cmd_v6->station_flags_msk;
   4324 	cmd_v5->add_immediate_ba_tid = cmd_v6->add_immediate_ba_tid;
   4325 	cmd_v5->remove_immediate_ba_tid = cmd_v6->remove_immediate_ba_tid;
   4326 	cmd_v5->add_immediate_ba_ssn = cmd_v6->add_immediate_ba_ssn;
   4327 	cmd_v5->sleep_tx_count = cmd_v6->sleep_tx_count;
   4328 	cmd_v5->sleep_state_flags = cmd_v6->sleep_state_flags;
   4329 	cmd_v5->assoc_id = cmd_v6->assoc_id;
   4330 	cmd_v5->beamform_flags = cmd_v6->beamform_flags;
   4331 	cmd_v5->tfd_queue_msk = cmd_v6->tfd_queue_msk;
   4332 }
   4333 
   4334 static int
   4335 iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *sc,
   4336 	struct iwm_mvm_add_sta_cmd_v6 *cmd, int *status)
   4337 {
   4338 	struct iwm_mvm_add_sta_cmd_v5 cmd_v5;
   4339 
   4340 	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_STA_KEY_CMD) {
   4341 		return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA,
   4342 		    sizeof(*cmd), cmd, status);
   4343 	}
   4344 
   4345 	iwm_mvm_add_sta_cmd_v6_to_v5(cmd, &cmd_v5);
   4346 
   4347 	return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(cmd_v5),
   4348 	    &cmd_v5, status);
   4349 }
   4350 
   4351 /* send station add/update command to firmware */
   4352 static int
   4353 iwm_mvm_sta_send_to_fw(struct iwm_softc *sc, struct iwm_node *in, int update)
   4354 {
   4355 	struct iwm_mvm_add_sta_cmd_v6 add_sta_cmd;
   4356 	int ret;
   4357 	uint32_t status;
   4358 
   4359 	memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
   4360 
   4361 	add_sta_cmd.sta_id = IWM_STATION_ID;
   4362 	add_sta_cmd.mac_id_n_color
   4363 	    = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
   4364 	if (!update) {
   4365 		add_sta_cmd.tfd_queue_msk = htole32(0xf);
   4366 		IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
   4367 	}
   4368 	add_sta_cmd.add_modify = update ? 1 : 0;
   4369 	add_sta_cmd.station_flags_msk
   4370 	    |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
   4371 
   4372 	status = IWM_ADD_STA_SUCCESS;
   4373 	ret = iwm_mvm_send_add_sta_cmd_status(sc, &add_sta_cmd, &status);
   4374 	if (ret)
   4375 		return ret;
   4376 
   4377 	switch (status) {
   4378 	case IWM_ADD_STA_SUCCESS:
   4379 		break;
   4380 	default:
   4381 		ret = EIO;
   4382 		DPRINTF(("IWM_ADD_STA failed\n"));
   4383 		break;
   4384 	}
   4385 
   4386 	return ret;
   4387 }
   4388 
   4389 static int
   4390 iwm_mvm_add_sta(struct iwm_softc *sc, struct iwm_node *in)
   4391 {
   4392 	int ret;
   4393 
   4394 	ret = iwm_mvm_sta_send_to_fw(sc, in, 0);
   4395 	if (ret)
   4396 		return ret;
   4397 
   4398 	return 0;
   4399 }
   4400 
   4401 static int
   4402 iwm_mvm_update_sta(struct iwm_softc *sc, struct iwm_node *in)
   4403 {
   4404 	return iwm_mvm_sta_send_to_fw(sc, in, 1);
   4405 }
   4406 
   4407 static int
   4408 iwm_mvm_add_int_sta_common(struct iwm_softc *sc, struct iwm_int_sta *sta,
   4409 	const uint8_t *addr, uint16_t mac_id, uint16_t color)
   4410 {
   4411 	struct iwm_mvm_add_sta_cmd_v6 cmd;
   4412 	int ret;
   4413 	uint32_t status;
   4414 
   4415 	memset(&cmd, 0, sizeof(cmd));
   4416 	cmd.sta_id = sta->sta_id;
   4417 	cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(mac_id, color));
   4418 
   4419 	cmd.tfd_queue_msk = htole32(sta->tfd_queue_msk);
   4420 
   4421 	if (addr)
   4422 		memcpy(cmd.addr, addr, ETHER_ADDR_LEN);
   4423 
   4424 	ret = iwm_mvm_send_add_sta_cmd_status(sc, &cmd, &status);
   4425 	if (ret)
   4426 		return ret;
   4427 
   4428 	switch (status) {
   4429 	case IWM_ADD_STA_SUCCESS:
   4430 		DPRINTF(("Internal station added.\n"));
   4431 		return 0;
   4432 	default:
   4433 		DPRINTF(("%s: Add internal station failed, status=0x%x\n",
   4434 		    DEVNAME(sc), status));
   4435 		ret = EIO;
   4436 		break;
   4437 	}
   4438 	return ret;
   4439 }
   4440 
   4441 static int
   4442 iwm_mvm_add_aux_sta(struct iwm_softc *sc)
   4443 {
   4444 	int ret;
   4445 
   4446 	sc->sc_aux_sta.sta_id = 3;
   4447 	sc->sc_aux_sta.tfd_queue_msk = 0;
   4448 
   4449 	ret = iwm_mvm_add_int_sta_common(sc,
   4450 	    &sc->sc_aux_sta, NULL, IWM_MAC_INDEX_AUX, 0);
   4451 
   4452 	if (ret)
   4453 		memset(&sc->sc_aux_sta, 0, sizeof(sc->sc_aux_sta));
   4454 	return ret;
   4455 }
   4456 
   4457 /*
   4458  * END mvm/sta.c
   4459  */
   4460 
   4461 /*
   4462  * BEGIN mvm/scan.c
   4463  */
   4464 
   4465 #define IWM_PLCP_QUIET_THRESH 1
   4466 #define IWM_ACTIVE_QUIET_TIME 10
   4467 #define LONG_OUT_TIME_PERIOD 600
   4468 #define SHORT_OUT_TIME_PERIOD 200
   4469 #define SUSPEND_TIME_PERIOD 100
   4470 
   4471 static uint16_t
   4472 iwm_mvm_scan_rx_chain(struct iwm_softc *sc)
   4473 {
   4474 	uint16_t rx_chain;
   4475 	uint8_t rx_ant;
   4476 
   4477 	rx_ant = IWM_FW_VALID_RX_ANT(sc);
   4478 	rx_chain = rx_ant << IWM_PHY_RX_CHAIN_VALID_POS;
   4479 	rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
   4480 	rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_SEL_POS;
   4481 	rx_chain |= 0x1 << IWM_PHY_RX_CHAIN_DRIVER_FORCE_POS;
   4482 	return htole16(rx_chain);
   4483 }
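
/*
 * Illustrative sketch (not driver code): the rx_chain value above for a
 * hypothetical device whose valid RX antennas are A and B (mask 0x3).  The
 * same antenna mask is forced into the "valid", "force MIMO select" and
 * "force select" fields, and the driver-force bit is set.
 */
#if 0
static uint16_t
iwm_scan_rx_chain_example(void)
{
	uint8_t rx_ant = 0x3;	/* hypothetical valid antenna mask */
	uint16_t rx_chain;

	rx_chain  = rx_ant << IWM_PHY_RX_CHAIN_VALID_POS;
	rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
	rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_SEL_POS;
	rx_chain |= 0x1 << IWM_PHY_RX_CHAIN_DRIVER_FORCE_POS;
	return htole16(rx_chain);
}
#endif
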
   4484 
   4485 #define ieee80211_tu_to_usec(a) (1024*(a))
   4486 
   4487 static uint32_t
   4488 iwm_mvm_scan_max_out_time(struct iwm_softc *sc, uint32_t flags, int is_assoc)
   4489 {
   4490 	if (!is_assoc)
   4491 		return 0;
   4492 	if (flags & 0x1)
   4493 		return htole32(ieee80211_tu_to_usec(SHORT_OUT_TIME_PERIOD));
   4494 	return htole32(ieee80211_tu_to_usec(LONG_OUT_TIME_PERIOD));
   4495 }
   4496 
   4497 static uint32_t
   4498 iwm_mvm_scan_suspend_time(struct iwm_softc *sc, int is_assoc)
   4499 {
   4500 	if (!is_assoc)
   4501 		return 0;
   4502 	return htole32(ieee80211_tu_to_usec(SUSPEND_TIME_PERIOD));
   4503 }
   4504 
   4505 static uint32_t
   4506 iwm_mvm_scan_rxon_flags(struct iwm_softc *sc, int flags)
   4507 {
   4508 	if (flags & IEEE80211_CHAN_2GHZ)
   4509 		return htole32(IWM_PHY_BAND_24);
   4510 	else
   4511 		return htole32(IWM_PHY_BAND_5);
   4512 }
   4513 
   4514 static uint32_t
   4515 iwm_mvm_scan_rate_n_flags(struct iwm_softc *sc, int flags, int no_cck)
   4516 {
   4517 	uint32_t tx_ant;
   4518 	int i, ind;
   4519 
   4520 	for (i = 0, ind = sc->sc_scan_last_antenna;
   4521 	    i < IWM_RATE_MCS_ANT_NUM; i++) {
   4522 		ind = (ind + 1) % IWM_RATE_MCS_ANT_NUM;
   4523 		if (IWM_FW_VALID_TX_ANT(sc) & (1 << ind)) {
   4524 			sc->sc_scan_last_antenna = ind;
   4525 			break;
   4526 		}
   4527 	}
   4528 	tx_ant = (1 << sc->sc_scan_last_antenna) << IWM_RATE_MCS_ANT_POS;
   4529 
   4530 	if ((flags & IEEE80211_CHAN_2GHZ) && !no_cck)
   4531 		return htole32(IWM_RATE_1M_PLCP | IWM_RATE_MCS_CCK_MSK |
   4532 				   tx_ant);
   4533 	else
   4534 		return htole32(IWM_RATE_6M_PLCP | tx_ant);
   4535 }
   4536 
   4537 /*
   4538  * If req->n_ssids > 0, it means we should do an active scan.
   4539  * In case of active scan w/o directed scan, we receive a zero-length SSID
   4540  * just to notify that this scan is active and not passive.
   4541  * In order to notify the FW of the number of SSIDs we wish to scan (including
   4542  * the zero-length one), we need to set the corresponding bits in chan->type,
    4543  * one for each SSID, and set the active bit (first). The first SSID is
   4544  * already included in the probe template, so we need to set only
   4545  * req->n_ssids - 1 bits in addition to the first bit.
   4546  */
   4547 static uint16_t
   4548 iwm_mvm_get_active_dwell(struct iwm_softc *sc, int flags, int n_ssids)
   4549 {
   4550 	if (flags & IEEE80211_CHAN_2GHZ)
   4551 		return 30  + 3 * (n_ssids + 1);
   4552 	return 20  + 2 * (n_ssids + 1);
   4553 }
   4554 
   4555 static uint16_t
   4556 iwm_mvm_get_passive_dwell(struct iwm_softc *sc, int flags)
   4557 {
   4558 	return (flags & IEEE80211_CHAN_2GHZ) ? 100 + 20 : 100 + 10;
   4559 }
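
/*
 * Illustrative sketch (not driver code): the dwell times the two helpers
 * above produce for a hypothetical active scan with one directed SSID.
 * On 2 GHz the active dwell is 30 + 3 * (1 + 1) = 36 TU and the passive
 * dwell is 120 TU; on 5 GHz they are 24 TU and 110 TU respectively.
 */
#if 0
static void
iwm_dwell_example(struct iwm_softc *sc)
{
	uint16_t active_2g = iwm_mvm_get_active_dwell(sc,
	    IEEE80211_CHAN_2GHZ, 1);				/* 36 */
	uint16_t passive_2g = iwm_mvm_get_passive_dwell(sc,
	    IEEE80211_CHAN_2GHZ);				/* 120 */
	uint16_t active_5g = iwm_mvm_get_active_dwell(sc, 0, 1);   /* 24 */
	uint16_t passive_5g = iwm_mvm_get_passive_dwell(sc, 0);    /* 110 */

	(void)active_2g; (void)passive_2g; (void)active_5g; (void)passive_5g;
}
#endif
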
   4560 
   4561 static int
   4562 iwm_mvm_scan_fill_channels(struct iwm_softc *sc, struct iwm_scan_cmd *cmd,
   4563 	int flags, int n_ssids, int basic_ssid)
   4564 {
   4565 	struct ieee80211com *ic = &sc->sc_ic;
   4566 	uint16_t passive_dwell = iwm_mvm_get_passive_dwell(sc, flags);
   4567 	uint16_t active_dwell = iwm_mvm_get_active_dwell(sc, flags, n_ssids);
   4568 	struct iwm_scan_channel *chan = (struct iwm_scan_channel *)
   4569 		(cmd->data + le16toh(cmd->tx_cmd.len));
   4570 	int type = (1 << n_ssids) - 1;
   4571 	struct ieee80211_channel *c;
   4572 	int nchan;
   4573 
   4574 	if (!basic_ssid)
   4575 		type |= (1 << n_ssids);
   4576 
   4577 	for (nchan = 0, c = &ic->ic_channels[1];
   4578 	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX];
   4579 	    c++) {
   4580 		if ((c->ic_flags & flags) != flags)
   4581 			continue;
   4582 
   4583 		chan->channel = htole16(ieee80211_mhz2ieee(c->ic_freq, flags));
   4584 		chan->type = htole32(type);
   4585 		if (c->ic_flags & IEEE80211_CHAN_PASSIVE)
   4586 			chan->type &= htole32(~IWM_SCAN_CHANNEL_TYPE_ACTIVE);
   4587 		chan->active_dwell = htole16(active_dwell);
   4588 		chan->passive_dwell = htole16(passive_dwell);
   4589 		chan->iteration_count = htole16(1);
   4590 		chan++;
   4591 		nchan++;
   4592 	}
   4593 	if (nchan == 0)
   4594 		DPRINTF(("%s: NO CHANNEL!\n", DEVNAME(sc)));
   4595 	return nchan;
   4596 }
   4597 
   4598 /*
   4599  * Fill in probe request with the following parameters:
   4600  * TA is our vif HW address, which mac80211 ensures we have.
    4601  * The packet is broadcast, so this is both SA and DA.
    4602  * The probe request IE is made out of two parts: first comes the most prioritized
   4603  * SSID if a directed scan is requested. Second comes whatever extra
   4604  * information was given to us as the scan request IE.
   4605  */
   4606 static uint16_t
   4607 iwm_mvm_fill_probe_req(struct iwm_softc *sc, struct ieee80211_frame *frame,
   4608 	const uint8_t *ta, int n_ssids, const uint8_t *ssid, int ssid_len,
   4609 	const uint8_t *ie, int ie_len, int left)
   4610 {
   4611 	int len = 0;
   4612 	uint8_t *pos = NULL;
   4613 
   4614 	/* Make sure there is enough space for the probe request,
   4615 	 * two mandatory IEs and the data */
   4616 	left -= sizeof(*frame);
   4617 	if (left < 0)
   4618 		return 0;
   4619 
   4620 	frame->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
   4621 	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
   4622 	frame->i_fc[1] = IEEE80211_FC1_DIR_NODS;
   4623 	IEEE80211_ADDR_COPY(frame->i_addr1, etherbroadcastaddr);
   4624 	memcpy(frame->i_addr2, ta, ETHER_ADDR_LEN);
   4625 	IEEE80211_ADDR_COPY(frame->i_addr3, etherbroadcastaddr);
   4626 
   4627 	len += sizeof(*frame);
   4628 	CTASSERT(sizeof(*frame) == 24);
   4629 
   4630 	/* for passive scans, no need to fill anything */
   4631 	if (n_ssids == 0)
   4632 		return (uint16_t)len;
   4633 
   4634 	/* points to the payload of the request */
   4635 	pos = (uint8_t *)frame + sizeof(*frame);
   4636 
   4637 	/* fill in our SSID IE */
   4638 	left -= ssid_len + 2;
   4639 	if (left < 0)
   4640 		return 0;
   4641 	*pos++ = IEEE80211_ELEMID_SSID;
   4642 	*pos++ = ssid_len;
   4643 	if (ssid && ssid_len) { /* ssid_len may be == 0 even if ssid is valid */
   4644 		memcpy(pos, ssid, ssid_len);
   4645 		pos += ssid_len;
   4646 	}
   4647 
   4648 	len += ssid_len + 2;
   4649 
   4650 	if (left < ie_len)
   4651 		return len;
   4652 
   4653 	if (ie && ie_len) {
   4654 		memcpy(pos, ie, ie_len);
   4655 		len += ie_len;
   4656 	}
   4657 
   4658 	return (uint16_t)len;
   4659 }
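
/*
 * Illustrative sketch (not driver code): the layout iwm_mvm_fill_probe_req()
 * produces for a hypothetical directed scan for the SSID "example" with no
 * extra scan IEs: a 24-byte broadcast probe request header, then an SSID
 * element (2 + 7 bytes), for a total of 33 bytes returned to the caller.
 */
#if 0
static uint16_t
iwm_probe_req_len_example(void)
{
	const char ssid[] = "example";		/* hypothetical SSID */
	int ssid_len = sizeof(ssid) - 1;	/* 7 */

	return sizeof(struct ieee80211_frame)	/* 24 */
	    + 2 + ssid_len			/* SSID IE */
	    + 0;				/* no extra scan IEs */
}
#endif
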
   4660 
   4661 static int
   4662 iwm_mvm_scan_request(struct iwm_softc *sc, int flags,
   4663 	int n_ssids, uint8_t *ssid, int ssid_len)
   4664 {
   4665 	struct ieee80211com *ic = &sc->sc_ic;
   4666 	struct iwm_host_cmd hcmd = {
   4667 		.id = IWM_SCAN_REQUEST_CMD,
   4668 		.len = { 0, },
   4669 		.data = { sc->sc_scan_cmd, },
   4670 		.flags = IWM_CMD_SYNC,
   4671 		.dataflags = { IWM_HCMD_DFL_NOCOPY, },
   4672 	};
   4673 	struct iwm_scan_cmd *cmd = sc->sc_scan_cmd;
   4674 	int is_assoc = 0;
   4675 	int ret;
   4676 	uint32_t status;
   4677 	int basic_ssid = !(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_NO_BASIC_SSID);
   4678 
   4679 	//lockdep_assert_held(&mvm->mutex);
   4680 
   4681 	sc->sc_scanband = flags & (IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_5GHZ);
   4682 
   4683 	DPRINTF(("Handling ieee80211 scan request\n"));
   4684 	memset(cmd, 0, sc->sc_scan_cmd_len);
   4685 
   4686 	cmd->quiet_time = htole16(IWM_ACTIVE_QUIET_TIME);
   4687 	cmd->quiet_plcp_th = htole16(IWM_PLCP_QUIET_THRESH);
   4688 	cmd->rxchain_sel_flags = iwm_mvm_scan_rx_chain(sc);
   4689 	cmd->max_out_time = iwm_mvm_scan_max_out_time(sc, 0, is_assoc);
   4690 	cmd->suspend_time = iwm_mvm_scan_suspend_time(sc, is_assoc);
   4691 	cmd->rxon_flags = iwm_mvm_scan_rxon_flags(sc, flags);
   4692 	cmd->filter_flags = htole32(IWM_MAC_FILTER_ACCEPT_GRP |
   4693 	    IWM_MAC_FILTER_IN_BEACON);
   4694 
   4695 	cmd->type = htole32(IWM_SCAN_TYPE_FORCED);
   4696 	cmd->repeats = htole32(1);
   4697 
   4698 	/*
   4699 	 * If the user asked for passive scan, don't change to active scan if
   4700 	 * you see any activity on the channel - remain passive.
   4701 	 */
   4702 	if (n_ssids > 0) {
   4703 		cmd->passive2active = htole16(1);
   4704 		cmd->scan_flags |= IWM_SCAN_FLAGS_PASSIVE2ACTIVE;
   4705 #if 0
   4706 		if (basic_ssid) {
   4707 			ssid = req->ssids[0].ssid;
   4708 			ssid_len = req->ssids[0].ssid_len;
   4709 		}
   4710 #endif
   4711 	} else {
   4712 		cmd->passive2active = 0;
   4713 		cmd->scan_flags &= ~IWM_SCAN_FLAGS_PASSIVE2ACTIVE;
   4714 	}
   4715 
   4716 	cmd->tx_cmd.tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
   4717 	    IWM_TX_CMD_FLG_BT_DIS);
   4718 	cmd->tx_cmd.sta_id = sc->sc_aux_sta.sta_id;
   4719 	cmd->tx_cmd.life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
   4720 	cmd->tx_cmd.rate_n_flags = iwm_mvm_scan_rate_n_flags(sc, flags, 1/*XXX*/);
   4721 
   4722 	cmd->tx_cmd.len = htole16(iwm_mvm_fill_probe_req(sc,
   4723 			    (struct ieee80211_frame *)cmd->data,
   4724 			    ic->ic_myaddr, n_ssids, ssid, ssid_len,
   4725 			    NULL, 0, sc->sc_capa_max_probe_len));
   4726 
   4727 	cmd->channel_count
   4728 	    = iwm_mvm_scan_fill_channels(sc, cmd, flags, n_ssids, basic_ssid);
   4729 
   4730 	cmd->len = htole16(sizeof(struct iwm_scan_cmd) +
   4731 		le16toh(cmd->tx_cmd.len) +
   4732 		(cmd->channel_count * sizeof(struct iwm_scan_channel)));
   4733 	hcmd.len[0] = le16toh(cmd->len);
   4734 
   4735 	status = IWM_SCAN_RESPONSE_OK;
   4736 	ret = iwm_mvm_send_cmd_status(sc, &hcmd, &status);
   4737 	if (!ret && status == IWM_SCAN_RESPONSE_OK) {
   4738 		DPRINTF(("Scan request was sent successfully\n"));
   4739 	} else {
   4740 		/*
   4741 		 * If the scan failed, it usually means that the FW was unable
   4742 		 * to allocate the time events. Warn on it, but maybe we
   4743 		 * should try to send the command again with different params.
   4744 		 */
   4745 		sc->sc_scanband = 0;
   4746 		ret = EIO;
   4747 	}
   4748 	return ret;
   4749 }
   4750 
   4751 /*
   4752  * END mvm/scan.c
   4753  */
   4754 
   4755 /*
   4756  * BEGIN mvm/mac-ctxt.c
   4757  */
   4758 
   4759 static void
   4760 iwm_mvm_ack_rates(struct iwm_softc *sc, struct iwm_node *in,
   4761 	int *cck_rates, int *ofdm_rates)
   4762 {
   4763 	int lowest_present_ofdm = 100;
   4764 	int lowest_present_cck = 100;
   4765 	uint8_t cck = 0;
   4766 	uint8_t ofdm = 0;
   4767 	int i;
   4768 
   4769 	for (i = 0; i <= IWM_LAST_CCK_RATE; i++) {
   4770 		cck |= (1 << i);
   4771 		if (lowest_present_cck > i)
   4772 			lowest_present_cck = i;
   4773 	}
   4774 	for (i = IWM_FIRST_OFDM_RATE; i <= IWM_LAST_NON_HT_RATE; i++) {
   4775 		int adj = i - IWM_FIRST_OFDM_RATE;
   4776 		ofdm |= (1 << adj);
    4777 		if (lowest_present_ofdm > i)
    4778 			lowest_present_ofdm = i;
   4779 	}
   4780 
   4781 	/*
   4782 	 * Now we've got the basic rates as bitmaps in the ofdm and cck
   4783 	 * variables. This isn't sufficient though, as there might not
   4784 	 * be all the right rates in the bitmap. E.g. if the only basic
   4785 	 * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
   4786 	 * and 6 Mbps because the 802.11-2007 standard says in 9.6:
   4787 	 *
   4788 	 *    [...] a STA responding to a received frame shall transmit
   4789 	 *    its Control Response frame [...] at the highest rate in the
   4790 	 *    BSSBasicRateSet parameter that is less than or equal to the
   4791 	 *    rate of the immediately previous frame in the frame exchange
   4792 	 *    sequence ([...]) and that is of the same modulation class
   4793 	 *    ([...]) as the received frame. If no rate contained in the
   4794 	 *    BSSBasicRateSet parameter meets these conditions, then the
   4795 	 *    control frame sent in response to a received frame shall be
   4796 	 *    transmitted at the highest mandatory rate of the PHY that is
   4797 	 *    less than or equal to the rate of the received frame, and
   4798 	 *    that is of the same modulation class as the received frame.
   4799 	 *
   4800 	 * As a consequence, we need to add all mandatory rates that are
   4801 	 * lower than all of the basic rates to these bitmaps.
   4802 	 */
   4803 
   4804 	if (IWM_RATE_24M_INDEX < lowest_present_ofdm)
   4805 		ofdm |= IWM_RATE_BIT_MSK(24) >> IWM_FIRST_OFDM_RATE;
   4806 	if (IWM_RATE_12M_INDEX < lowest_present_ofdm)
   4807 		ofdm |= IWM_RATE_BIT_MSK(12) >> IWM_FIRST_OFDM_RATE;
   4808 	/* 6M already there or needed so always add */
   4809 	ofdm |= IWM_RATE_BIT_MSK(6) >> IWM_FIRST_OFDM_RATE;
   4810 
   4811 	/*
   4812 	 * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
   4813 	 * Note, however:
   4814 	 *  - if no CCK rates are basic, it must be ERP since there must
   4815 	 *    be some basic rates at all, so they're OFDM => ERP PHY
   4816 	 *    (or we're in 5 GHz, and the cck bitmap will never be used)
   4817 	 *  - if 11M is a basic rate, it must be ERP as well, so add 5.5M
   4818 	 *  - if 5.5M is basic, 1M and 2M are mandatory
   4819 	 *  - if 2M is basic, 1M is mandatory
   4820 	 *  - if 1M is basic, that's the only valid ACK rate.
   4821 	 * As a consequence, it's not as complicated as it sounds, just add
   4822 	 * any lower rates to the ACK rate bitmap.
   4823 	 */
   4824 	if (IWM_RATE_11M_INDEX < lowest_present_cck)
   4825 		cck |= IWM_RATE_BIT_MSK(11) >> IWM_FIRST_CCK_RATE;
   4826 	if (IWM_RATE_5M_INDEX < lowest_present_cck)
   4827 		cck |= IWM_RATE_BIT_MSK(5) >> IWM_FIRST_CCK_RATE;
   4828 	if (IWM_RATE_2M_INDEX < lowest_present_cck)
   4829 		cck |= IWM_RATE_BIT_MSK(2) >> IWM_FIRST_CCK_RATE;
   4830 	/* 1M already there or needed so always add */
   4831 	cck |= IWM_RATE_BIT_MSK(1) >> IWM_FIRST_CCK_RATE;
   4832 
   4833 	*cck_rates = cck;
   4834 	*ofdm_rates = ofdm;
   4835 }
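/*
 * Illustrative note added in editing (not from the original source): since
 * the loops above mark every CCK and OFDM rate as present, the "lowest
 * present" tracking is effectively moot and the function always returns
 * cck = 0x0f (1/2/5.5/11 Mbit/s) and ofdm = 0xff (6/9/12/18/24/36/48/54
 * Mbit/s), assuming the usual rate table layout of four CCK rates followed
 * by eight non-HT OFDM rates.
 */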
   4836 
   4837 static void
   4838 iwm_mvm_mac_ctxt_cmd_common(struct iwm_softc *sc, struct iwm_node *in,
   4839 	struct iwm_mac_ctx_cmd *cmd, uint32_t action)
   4840 {
   4841 	struct ieee80211com *ic = &sc->sc_ic;
   4842 	struct ieee80211_node *ni = ic->ic_bss;
   4843 	int cck_ack_rates, ofdm_ack_rates;
   4844 	int i;
   4845 
   4846 	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
   4847 	    in->in_color));
   4848 	cmd->action = htole32(action);
   4849 
   4850 	cmd->mac_type = htole32(IWM_FW_MAC_TYPE_BSS_STA);
   4851 	cmd->tsf_id = htole32(in->in_tsfid);
   4852 
   4853 	IEEE80211_ADDR_COPY(cmd->node_addr, ic->ic_myaddr);
   4854 	if (in->in_assoc) {
   4855 		IEEE80211_ADDR_COPY(cmd->bssid_addr, ni->ni_bssid);
   4856 	} else {
   4857 		memset(cmd->bssid_addr, 0, sizeof(cmd->bssid_addr));
   4858 	}
   4859 	iwm_mvm_ack_rates(sc, in, &cck_ack_rates, &ofdm_ack_rates);
   4860 	cmd->cck_rates = htole32(cck_ack_rates);
   4861 	cmd->ofdm_rates = htole32(ofdm_ack_rates);
   4862 
   4863 	cmd->cck_short_preamble
   4864 	    = htole32((ic->ic_flags & IEEE80211_F_SHPREAMBLE)
   4865 	      ? IWM_MAC_FLG_SHORT_PREAMBLE : 0);
   4866 	cmd->short_slot
   4867 	    = htole32((ic->ic_flags & IEEE80211_F_SHSLOT)
   4868 	      ? IWM_MAC_FLG_SHORT_SLOT : 0);
   4869 
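	/*
	 * Program identical default EDCA parameters for every firmware TX
	 * FIFO: CWmin 15, CWmax 63, AIFSN 1 and no TXOP limit, matching the
	 * values written by the loop below.
	 */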
   4870 	for (i = 0; i < IWM_AC_NUM+1; i++) {
   4871 		int txf = i;
   4872 
   4873 		cmd->ac[txf].cw_min = htole16(0x0f);
   4874 		cmd->ac[txf].cw_max = htole16(0x3f);
   4875 		cmd->ac[txf].aifsn = 1;
   4876 		cmd->ac[txf].fifos_mask = (1 << txf);
   4877 		cmd->ac[txf].edca_txop = 0;
   4878 	}
   4879 
   4880 	cmd->protection_flags |= htole32(IWM_MAC_PROT_FLG_TGG_PROTECT);
   4881 	cmd->protection_flags |= htole32(IWM_MAC_PROT_FLG_SELF_CTS_EN);
   4882 
   4883 	cmd->filter_flags = htole32(IWM_MAC_FILTER_ACCEPT_GRP);
   4884 }
   4885 
   4886 static int
   4887 iwm_mvm_mac_ctxt_send_cmd(struct iwm_softc *sc, struct iwm_mac_ctx_cmd *cmd)
   4888 {
   4889 	int ret = iwm_mvm_send_cmd_pdu(sc, IWM_MAC_CONTEXT_CMD, IWM_CMD_SYNC,
   4890 				       sizeof(*cmd), cmd);
   4891 	if (ret)
   4892 		DPRINTF(("%s: Failed to send MAC context (action:%d): %d\n",
   4893 		    DEVNAME(sc), le32toh(cmd->action), ret));
   4894 	return ret;
   4895 }
   4896 
   4897 /*
   4898  * Fill the specific data for mac context of type station or p2p client
   4899  */
   4900 static void
   4901 iwm_mvm_mac_ctxt_cmd_fill_sta(struct iwm_softc *sc, struct iwm_node *in,
   4902 	struct iwm_mac_data_sta *ctxt_sta, int force_assoc_off)
   4903 {
   4904 	struct ieee80211_node *ni = &in->in_ni;
   4905 	unsigned dtim_period, dtim_count;
   4906 
   4907 	dtim_period = ni->ni_dtim_period;
   4908 	dtim_count = ni->ni_dtim_count;
   4909 
   4910 	/* We need the dtim_period to set the MAC as associated */
   4911 	if (in->in_assoc && dtim_period && !force_assoc_off) {
   4912 		uint64_t tsf;
   4913 		uint32_t dtim_offs;
   4914 
   4915 		/*
   4916 		 * The DTIM count counts down, so when it is N that means N
   4917 		 * more beacon intervals happen until the DTIM TBTT. Therefore
   4918 		 * add this to the current time. If that ends up being in the
   4919 		 * future, the firmware will handle it.
   4920 		 *
   4921 		 * Also note that the system_timestamp (which we get here as
   4922 		 * "sync_device_ts") and TSF timestamp aren't at exactly the
   4923 		 * same offset in the frame -- the TSF is at the first symbol
   4924 		 * of the TSF, the system timestamp is at signal acquisition
   4925 		 * time. This means there's an offset between them of at most
   4926 		 * a few hundred microseconds (24 * 8 bits + PLCP time gives
   4927 		 * 384us in the longest case); this is currently not relevant
   4928 		 * as the firmware wakes up around 2ms before the TBTT.
   4929 		 */
   4930 		dtim_offs = dtim_count * ni->ni_intval;
   4931 		/* convert TU to usecs */
   4932 		dtim_offs *= 1024;
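		/*
		 * Worked example (editor's illustration): with dtim_count = 2
		 * and ni_intval = 100 TU, dtim_offs becomes
		 * 2 * 100 * 1024 = 204800 usecs (~205 ms).
		 */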
   4933 
   4934 		tsf = ni->ni_tstamp.tsf;
   4935 
   4936 		ctxt_sta->dtim_tsf = htole64(tsf + dtim_offs);
   4937 		ctxt_sta->dtim_time = htole64(ni->ni_rstamp + dtim_offs);
   4938 
   4939 		DPRINTF(("DTIM TBTT is 0x%llx/0x%x, offset %d\n",
   4940 		    (long long)le64toh(ctxt_sta->dtim_tsf),
   4941 		    le32toh(ctxt_sta->dtim_time), dtim_offs));
   4942 
   4943 		ctxt_sta->is_assoc = htole32(1);
   4944 	} else {
   4945 		ctxt_sta->is_assoc = htole32(0);
   4946 	}
   4947 
   4948 	ctxt_sta->bi = htole32(ni->ni_intval);
   4949 	ctxt_sta->bi_reciprocal = htole32(iwm_mvm_reciprocal(ni->ni_intval));
   4950 	ctxt_sta->dtim_interval = htole32(ni->ni_intval * dtim_period);
   4951 	ctxt_sta->dtim_reciprocal =
   4952 	    htole32(iwm_mvm_reciprocal(ni->ni_intval * dtim_period));
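	/*
	 * Editor's note: iwm_mvm_reciprocal() is assumed to return
	 * 0xffffffff / v (as iwl_mvm_reciprocal() does in the Linux driver),
	 * i.e. a fixed-point reciprocal so the firmware can avoid divisions.
	 */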
   4953 
   4954 	/* 10 = CONN_MAX_LISTEN_INTERVAL */
   4955 	ctxt_sta->listen_interval = htole32(10);
   4956 	ctxt_sta->assoc_id = htole32(ni->ni_associd);
   4957 }
   4958 
   4959 static int
   4960 iwm_mvm_mac_ctxt_cmd_station(struct iwm_softc *sc, struct iwm_node *in,
   4961 	uint32_t action)
   4962 {
   4963 	struct iwm_mac_ctx_cmd cmd;
   4964 
   4965 	memset(&cmd, 0, sizeof(cmd));
   4966 
   4967 	/* Fill the common data for all mac context types */
   4968 	iwm_mvm_mac_ctxt_cmd_common(sc, in, &cmd, action);
   4969 
   4970 	if (in->in_assoc)
   4971 		cmd.filter_flags |= htole32(IWM_MAC_FILTER_IN_BEACON);
   4972 	else
   4973 		cmd.filter_flags &= ~htole32(IWM_MAC_FILTER_IN_BEACON);
   4974 
   4975 	/* Fill the data specific for station mode */
   4976 	iwm_mvm_mac_ctxt_cmd_fill_sta(sc, in,
   4977 	    &cmd.sta, action == IWM_FW_CTXT_ACTION_ADD);
   4978 
   4979 	return iwm_mvm_mac_ctxt_send_cmd(sc, &cmd);
   4980 }
   4981 
   4982 static int
   4983 iwm_mvm_mac_ctx_send(struct iwm_softc *sc, struct iwm_node *in, uint32_t action)
   4984 {
   4985 	return iwm_mvm_mac_ctxt_cmd_station(sc, in, action);
   4986 }
   4987 
   4988 static int
   4989 iwm_mvm_mac_ctxt_add(struct iwm_softc *sc, struct iwm_node *in)
   4990 {
   4991 	int ret;
   4992 
   4993 	ret = iwm_mvm_mac_ctx_send(sc, in, IWM_FW_CTXT_ACTION_ADD);
   4994 	if (ret)
   4995 		return ret;
   4996 
   4997 	return 0;
   4998 }
   4999 
   5000 static int
   5001 iwm_mvm_mac_ctxt_changed(struct iwm_softc *sc, struct iwm_node *in)
   5002 {
   5003 	return iwm_mvm_mac_ctx_send(sc, in, IWM_FW_CTXT_ACTION_MODIFY);
   5004 }
   5005 
   5006 #if 0
   5007 static int
   5008 iwm_mvm_mac_ctxt_remove(struct iwm_softc *sc, struct iwm_node *in)
   5009 {
   5010 	struct iwm_mac_ctx_cmd cmd;
   5011 	int ret;
   5012 
   5013 	if (!in->in_uploaded) {
   5014 		printf("%s: attempt to remove !uploaded node %p\n", DEVNAME(sc), in);
   5015 		return EIO;
   5016 	}
   5017 
   5018 	memset(&cmd, 0, sizeof(cmd));
   5019 
   5020 	cmd.id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
   5021 	    in->in_color));
   5022 	cmd.action = htole32(IWM_FW_CTXT_ACTION_REMOVE);
   5023 
   5024 	ret = iwm_mvm_send_cmd_pdu(sc,
   5025 	    IWM_MAC_CONTEXT_CMD, IWM_CMD_SYNC, sizeof(cmd), &cmd);
   5026 	if (ret) {
   5027 		aprint_error_dev(sc->sc_dev,
   5028 		    "Failed to remove MAC context: %d\n", ret);
   5029 		return ret;
   5030 	}
   5031 	in->in_uploaded = 0;
   5032 
   5033 	return 0;
   5034 }
   5035 #endif
   5036 
   5037 #define IWM_MVM_MISSED_BEACONS_THRESHOLD 8
   5038 
   5039 static void
   5040 iwm_mvm_rx_missed_beacons_notif(struct iwm_softc *sc,
   5041 	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
   5042 {
   5043 	struct iwm_missed_beacons_notif *mb = (void *)pkt->data;
   5044 
   5045 	DPRINTF(("missed bcn mac_id=%u, consecutive=%u (%u, %u, %u)\n",
   5046 	    le32toh(mb->mac_id),
   5047 	    le32toh(mb->consec_missed_beacons),
   5048 	    le32toh(mb->consec_missed_beacons_since_last_rx),
   5049 	    le32toh(mb->num_recvd_beacons),
   5050 	    le32toh(mb->num_expected_beacons)));
   5051 
   5052 	/*
   5053 	 * TODO: the threshold should be adjusted based on latency conditions,
   5054 	 * and/or in case of a CS flow on one of the other AP vifs.
   5055 	 */
   5056 	if (le32toh(mb->consec_missed_beacons_since_last_rx) >
   5057 	    IWM_MVM_MISSED_BEACONS_THRESHOLD)
   5058 		ieee80211_beacon_miss(&sc->sc_ic);
   5059 }
   5060 
   5061 /*
   5062  * END mvm/mac-ctxt.c
   5063  */
   5064 
   5065 /*
   5066  * BEGIN mvm/quota.c
   5067  */
   5068 
   5069 static int
   5070 iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_node *in)
   5071 {
   5072 	struct iwm_time_quota_cmd cmd;
   5073 	int i, idx, ret, num_active_macs, quota, quota_rem;
   5074 	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
   5075 	int n_ifs[IWM_MAX_BINDINGS] = {0, };
   5076 	uint16_t id;
   5077 
   5078 	memset(&cmd, 0, sizeof(cmd));
   5079 
   5080 	/* currently, PHY ID == binding ID */
   5081 	if (in) {
   5082 		id = in->in_phyctxt->id;
   5083 		KASSERT(id < IWM_MAX_BINDINGS);
   5084 		colors[id] = in->in_phyctxt->color;
   5085 
   5086 		if (1)
   5087 			n_ifs[id] = 1;
   5088 	}
   5089 
   5090 	/*
   5091 	 * The FW's scheduling session consists of
   5092 	 * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
   5093 	 * equally between all the bindings that require quota
   5094 	 */
   5095 	num_active_macs = 0;
   5096 	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
   5097 		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
   5098 		num_active_macs += n_ifs[i];
   5099 	}
   5100 
   5101 	quota = 0;
   5102 	quota_rem = 0;
   5103 	if (num_active_macs) {
   5104 		quota = IWM_MVM_MAX_QUOTA / num_active_macs;
   5105 		quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
   5106 	}
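	/*
	 * Worked example (editor's illustration, assuming IWM_MVM_MAX_QUOTA
	 * is 128 as in the Linux driver): with three active MACs this yields
	 * quota = 42 and quota_rem = 2; the remainder is given to the first
	 * binding below.
	 */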
   5107 
   5108 	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
   5109 		if (colors[i] < 0)
   5110 			continue;
   5111 
   5112 		cmd.quotas[idx].id_and_color =
   5113 			htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
   5114 
   5115 		if (n_ifs[i] <= 0) {
   5116 			cmd.quotas[idx].quota = htole32(0);
   5117 			cmd.quotas[idx].max_duration = htole32(0);
   5118 		} else {
   5119 			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
   5120 			cmd.quotas[idx].max_duration = htole32(0);
   5121 		}
   5122 		idx++;
   5123 	}
   5124 
   5125 	/* Give the remainder of the session to the first binding */
   5126 	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
   5127 
   5128 	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
   5129 	    sizeof(cmd), &cmd);
   5130 	if (ret)
   5131 		DPRINTF(("%s: Failed to send quota: %d\n", DEVNAME(sc), ret));
   5132 	return ret;
   5133 }
   5134 
   5135 /*
   5136  * END mvm/quota.c
   5137  */
   5138 
   5139 /*
   5140  * ieee80211 routines
   5141  */
   5142 
   5143 /*
   5144  * Change to AUTH state in 80211 state machine.  Roughly matches what
   5145  * Linux does in bss_info_changed().
   5146  */
   5147 static int
   5148 iwm_auth(struct iwm_softc *sc)
   5149 {
   5150 	struct ieee80211com *ic = &sc->sc_ic;
   5151 	struct iwm_node *in = (void *)ic->ic_bss;
   5152 	uint32_t duration;
   5153 	uint32_t min_duration;
   5154 	int error;
   5155 
   5156 	in->in_assoc = 0;
   5157 	if ((error = iwm_mvm_mac_ctxt_add(sc, in)) != 0) {
   5158 		DPRINTF(("%s: failed to add MAC\n", DEVNAME(sc)));
   5159 		return error;
   5160 	}
   5161 
   5162 	if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
   5163 	    in->in_ni.ni_chan, 1, 1)) != 0) {
   5164 		DPRINTF(("%s: failed add phy ctxt\n", DEVNAME(sc)));
   5165 		return error;
   5166 	}
   5167 	in->in_phyctxt = &sc->sc_phyctxt[0];
   5168 
   5169 	if ((error = iwm_mvm_binding_add_vif(sc, in)) != 0) {
   5170 		DPRINTF(("%s: binding cmd\n", DEVNAME(sc)));
   5171 		return error;
   5172 	}
   5173 
   5174 	if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
   5175 		DPRINTF(("%s: failed to add sta\n", DEVNAME(sc)));
   5176 		return error;
   5177 	}
   5178 
   5179 	/* a bit superfluous? */
   5180 	while (sc->sc_auth_prot)
   5181 		tsleep(&sc->sc_auth_prot, 0, "iwmauth", 0);
   5182 	sc->sc_auth_prot = 1;
   5183 
   5184 	duration = min(IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS,
   5185 	    200 + in->in_ni.ni_intval);
   5186 	min_duration = min(IWM_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS,
   5187 	    100 + in->in_ni.ni_intval);
   5188 	iwm_mvm_protect_session(sc, in, duration, min_duration, 500);
   5189 
   5190 	while (sc->sc_auth_prot != 2) {
   5191 		/*
   5192 		 * well, meh, but if the kernel is sleeping for half a
   5193 		 * second, we have bigger problems
   5194 		 */
   5195 		if (sc->sc_auth_prot == 0) {
   5196 			DPRINTF(("%s: missed auth window!\n", DEVNAME(sc)));
   5197 			return ETIMEDOUT;
   5198 		} else if (sc->sc_auth_prot == -1) {
   5199 			DPRINTF(("%s: no time event, denied!\n", DEVNAME(sc)));
   5200 			sc->sc_auth_prot = 0;
   5201 			return EAUTH;
   5202 		}
   5203 		tsleep(&sc->sc_auth_prot, 0, "iwmau2", 0);
   5204 	}
   5205 
   5206 	return 0;
   5207 }
   5208 
   5209 static int
   5210 iwm_assoc(struct iwm_softc *sc)
   5211 {
   5212 	struct ieee80211com *ic = &sc->sc_ic;
   5213 	struct iwm_node *in = (void *)ic->ic_bss;
   5214 	int error;
   5215 
   5216 	if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
   5217 		DPRINTF(("%s: failed to update STA\n", DEVNAME(sc)));
   5218 		return error;
   5219 	}
   5220 
   5221 	in->in_assoc = 1;
   5222 	if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
   5223 		DPRINTF(("%s: failed to update MAC\n", DEVNAME(sc)));
   5224 		return error;
   5225 	}
   5226 
   5227 	return 0;
   5228 }
   5229 
   5230 static int
   5231 iwm_release(struct iwm_softc *sc, struct iwm_node *in)
   5232 {
   5233 	/*
   5234 	 * Ok, so *technically* the proper set of calls for going
   5235 	 * from RUN back to SCAN is:
   5236 	 *
   5237 	 * iwm_mvm_power_mac_disable(sc, in);
   5238 	 * iwm_mvm_mac_ctxt_changed(sc, in);
   5239 	 * iwm_mvm_rm_sta(sc, in);
   5240 	 * iwm_mvm_update_quotas(sc, NULL);
   5241 	 * iwm_mvm_mac_ctxt_changed(sc, in);
   5242 	 * iwm_mvm_binding_remove_vif(sc, in);
   5243 	 * iwm_mvm_mac_ctxt_remove(sc, in);
   5244 	 *
   5245 	 * However, that freezes the device no matter which permutations
   5246 	 * and modifications are attempted.  Obviously, this driver is missing
   5247 	 * something since it works in the Linux driver, but figuring out what
   5248 	 * is missing is a little more complicated.  Now, since we're going
   5249 	 * back to nothing anyway, we'll just do a complete device reset.
   5250 	 * Up yours, device!
   5251 	 */
   5252 	//iwm_mvm_flush_tx_path(sc, 0xf, 1);
   5253 	iwm_stop_device(sc);
   5254 	iwm_init_hw(sc);
   5255 	if (in)
   5256 		in->in_assoc = 0;
   5257 	return 0;
   5258 
   5259 #if 0
   5260 	int error;
   5261 
   5262 	iwm_mvm_power_mac_disable(sc, in);
   5263 
   5264 	if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
   5265 		aprint_error_dev(sc->sc_dev, "mac ctxt change fail 1 %d\n",
   5266 		    error);
   5267 		return error;
   5268 	}
   5269 
   5270 	if ((error = iwm_mvm_rm_sta(sc, in)) != 0) {
   5271 		aprint_error_dev(sc->sc_dev, "sta remove fail %d\n", error);
   5272 		return error;
   5273 	}
   5274 	error = iwm_mvm_rm_sta(sc, in);
   5275 	in->in_assoc = 0;
   5276 	iwm_mvm_update_quotas(sc, NULL);
   5277 	if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
   5278 		aprint_error_dev(sc->sc_dev, "mac ctxt change fail 2 %d\n",
   5279 		    error);
   5280 		return error;
   5281 	}
   5282 	iwm_mvm_binding_remove_vif(sc, in);
   5283 
   5284 	iwm_mvm_mac_ctxt_remove(sc, in);
   5285 
   5286 	return error;
   5287 #endif
   5288 }
   5289 
   5290 
   5291 static struct ieee80211_node *
   5292 iwm_node_alloc(struct ieee80211_node_table *nt)
   5293 {
   5294 	return malloc(sizeof(struct iwm_node), M_80211_NODE, M_NOWAIT | M_ZERO);
   5295 }
   5296 
   5297 static void
   5298 iwm_calib_timeout(void *arg)
   5299 {
   5300 	struct iwm_softc *sc = arg;
   5301 	struct ieee80211com *ic = &sc->sc_ic;
   5302 	int s;
   5303 
   5304 	s = splnet();
   5305 	if (ic->ic_fixed_rate == -1
   5306 	    && ic->ic_opmode == IEEE80211_M_STA
   5307 	    && ic->ic_bss) {
   5308 		struct iwm_node *in = (void *)ic->ic_bss;
   5309 		ieee80211_amrr_choose(&sc->sc_amrr, &in->in_ni, &in->in_amn);
   5310 	}
   5311 	splx(s);
   5312 
   5313 	callout_schedule(&sc->sc_calib_to, hz/2);
   5314 }
   5315 
   5316 static void
   5317 iwm_setrates(struct iwm_node *in)
   5318 {
   5319 	struct ieee80211_node *ni = &in->in_ni;
   5320 	struct ieee80211com *ic = ni->ni_ic;
   5321 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
   5322 	struct iwm_lq_cmd *lq = &in->in_lq;
   5323 	int nrates = ni->ni_rates.rs_nrates;
   5324 	int i, ridx, tab = 0;
   5325 	int txant = 0;
   5326 
   5327 	if (nrates > __arraycount(lq->rs_table)) {
   5328 		DPRINTF(("%s: node supports %d rates, driver handles only "
   5329 		    "%zu\n", DEVNAME(sc), nrates, __arraycount(lq->rs_table)));
   5330 		return;
   5331 	}
   5332 
   5333 	/* first figure out which rates we should support */
   5334 	memset(&in->in_ridx, -1, sizeof(in->in_ridx));
   5335 	for (i = 0; i < nrates; i++) {
   5336 		int rate = ni->ni_rates.rs_rates[i] & IEEE80211_RATE_VAL;
   5337 
   5338 		/* Map 802.11 rate to HW rate index. */
   5339 		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
   5340 			if (iwm_rates[ridx].rate == rate)
   5341 				break;
   5342 		if (ridx > IWM_RIDX_MAX)
   5343 			DPRINTF(("%s: WARNING: device rate for %d not found!\n",
   5344 			    DEVNAME(sc), rate));
   5345 		else
   5346 			in->in_ridx[i] = ridx;
   5347 	}
   5348 
   5349 	/* then construct a lq_cmd based on those */
   5350 	memset(lq, 0, sizeof(*lq));
   5351 	lq->sta_id = IWM_STATION_ID;
   5352 
   5353 	/*
   5354 	 * are these used? (we don't do SISO or MIMO)
   5355 	 * need to set them to non-zero, though, or we get an error.
   5356 	 */
   5357 	lq->single_stream_ant_msk = 1;
   5358 	lq->dual_stream_ant_msk = 1;
   5359 
   5360 	/*
   5361 	 * Build the actual rate selection table.
   5362 	 * The lowest bits are the rates.  Additionally,
   5363 	 * CCK needs bit 9 to be set.  The rest of the bits
   5364 	 * we add to the table select the tx antenna.
   5365 	 * Note that we add the rates with the highest rate first
   5366 	 * (opposite of ni_rates).
   5367 	 */
   5368 	for (i = 0; i < nrates; i++) {
   5369 		int nextant;
   5370 
   5371 		if (txant == 0)
   5372 			txant = IWM_FW_VALID_TX_ANT(sc);
   5373 		nextant = 1<<(ffs(txant)-1);
   5374 		txant &= ~nextant;
   5375 
   5376 		ridx = in->in_ridx[(nrates-1)-i];
   5377 		tab = iwm_rates[ridx].plcp;
   5378 		tab |= nextant << IWM_RATE_MCS_ANT_POS;
   5379 		if (IWM_RIDX_IS_CCK(ridx))
   5380 			tab |= IWM_RATE_MCS_CCK_MSK;
   5381 		DPRINTFN(2, ("station rate %d %x\n", i, tab));
   5382 		lq->rs_table[i] = htole32(tab);
   5383 	}
   5384 	/* then fill the rest with the lowest possible rate */
   5385 	for (i = nrates; i < __arraycount(lq->rs_table); i++) {
   5386 		KASSERT(tab != 0);
   5387 		lq->rs_table[i] = htole32(tab);
   5388 	}
   5389 
   5390 	/* init amrr */
   5391 	ieee80211_amrr_node_init(&sc->sc_amrr, &in->in_amn);
   5392 	ni->ni_txrate = nrates-1;
   5393 }
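/*
 * Editor's illustration (not from the original source): for a typical 11g
 * node with 12 negotiated rates, the loop above puts the 54 Mbit/s PLCP
 * value in rs_table[0] and works down to 1 Mbit/s (with
 * IWM_RATE_MCS_CCK_MSK set on the CCK entries) in rs_table[11]; the
 * remaining slots simply repeat that lowest entry.
 */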
   5394 
   5395 static int
   5396 iwm_media_change(struct ifnet *ifp)
   5397 {
   5398 	struct iwm_softc *sc = ifp->if_softc;
   5399 	struct ieee80211com *ic = &sc->sc_ic;
   5400 	uint8_t rate, ridx;
   5401 	int error;
   5402 
   5403 	error = ieee80211_media_change(ifp);
   5404 	if (error != ENETRESET)
   5405 		return error;
   5406 
   5407 	if (ic->ic_fixed_rate != -1) {
   5408 		rate = ic->ic_sup_rates[ic->ic_curmode].
   5409 		    rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL;
   5410 		/* Map 802.11 rate to HW rate index. */
   5411 		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
   5412 			if (iwm_rates[ridx].rate == rate)
   5413 				break;
   5414 		sc->sc_fixed_ridx = ridx;
   5415 	}
   5416 
   5417 	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
   5418 	    (IFF_UP | IFF_RUNNING)) {
   5419 		iwm_stop(ifp, 0);
   5420 		error = iwm_init(ifp);
   5421 	}
   5422 	return error;
   5423 }
   5424 
   5425 static void
   5426 iwm_newstate_cb(struct work *wk, void *v)
   5427 {
   5428 	struct iwm_softc *sc = v;
   5429 	struct ieee80211com *ic = &sc->sc_ic;
   5430 	struct iwm_newstate_state *iwmns = (void *)wk;
   5431 	enum ieee80211_state nstate = iwmns->ns_nstate;
   5432 	int generation = iwmns->ns_generation;
   5433 	struct iwm_node *in;
   5434 	int arg = iwmns->ns_arg;
   5435 	int error;
   5436 
   5437 	kmem_free(iwmns, sizeof(*iwmns));
   5438 
   5439 	DPRINTF(("Prepare to switch state %d->%d\n", ic->ic_state, nstate));
   5440 	if (sc->sc_generation != generation) {
   5441 		DPRINTF(("newstate_cb: someone pulled the plug meanwhile\n"));
   5442 		if (nstate == IEEE80211_S_INIT) {
   5443 			DPRINTF(("newstate_cb: nstate == IEEE80211_S_INIT: calling sc_newstate()\n"));
   5444 			sc->sc_newstate(ic, nstate, arg);
   5445 		}
   5446 		return;
   5447 	}
   5448 
   5449 	DPRINTF(("switching state %d->%d\n", ic->ic_state, nstate));
   5450 
   5451 	/* disable beacon filtering if we're hopping out of RUN */
   5452 	if (ic->ic_state == IEEE80211_S_RUN && nstate != ic->ic_state) {
   5453 		iwm_mvm_disable_beacon_filter(sc, (void *)ic->ic_bss);
   5454 
   5455 		if (((in = (void *)ic->ic_bss) != NULL))
   5456 			in->in_assoc = 0;
   5457 		iwm_release(sc, NULL);
   5458 
   5459 		/*
   5460 		 * It's impossible to directly go RUN->SCAN. Since iwm_release()
   5461 		 * above completely reinitializes the card, the driver must
   5462 		 * do everything necessary to bring the card
   5463 		 * from INIT to SCAN.
   5464 		 *
   5465 		 * Additionally, upon receiving a deauth frame from the AP, the
   5466 		 * OpenBSD 802.11 stack puts the driver in IEEE80211_S_AUTH
   5467 		 * state. This will also fail with this driver, so bring the FSM
   5468 		 * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
   5469 		 */
   5470 		if (nstate == IEEE80211_S_SCAN ||
   5471 		    nstate == IEEE80211_S_AUTH ||
   5472 		    nstate == IEEE80211_S_ASSOC) {
   5473 			DPRINTF(("Force transition to INIT; MGT=%d\n", arg));
   5474 			sc->sc_newstate(ic, IEEE80211_S_INIT, arg);
   5475 			DPRINTF(("Going INIT->SCAN\n"));
   5476 			nstate = IEEE80211_S_SCAN;
   5477 		}
   5478 	}
   5479 
   5480 	switch (nstate) {
   5481 	case IEEE80211_S_INIT:
   5482 		sc->sc_scanband = 0;
   5483 		break;
   5484 
   5485 	case IEEE80211_S_SCAN:
   5486 		if (sc->sc_scanband)
   5487 			break;
   5488 
   5489 		if ((error = iwm_mvm_scan_request(sc, IEEE80211_CHAN_2GHZ,
   5490 		    ic->ic_des_esslen != 0,
   5491 		    ic->ic_des_essid, ic->ic_des_esslen)) != 0) {
   5492 			DPRINTF(("%s: could not initiate scan\n", DEVNAME(sc)));
   5493 			return;
   5494 		}
   5495 		ic->ic_state = nstate;
   5496 		return;
   5497 
   5498 	case IEEE80211_S_AUTH:
   5499 		if ((error = iwm_auth(sc)) != 0) {
   5500 			DPRINTF(("%s: could not move to auth state: %d\n",
   5501 			    DEVNAME(sc), error));
   5502 			return;
   5503 		}
   5504 
   5505 		break;
   5506 
   5507 	case IEEE80211_S_ASSOC:
   5508 		if ((error = iwm_assoc(sc)) != 0) {
   5509 			DPRINTF(("%s: failed to associate: %d\n", DEVNAME(sc),
   5510 			    error));
   5511 			return;
   5512 		}
   5513 		break;
   5514 
   5515 	case IEEE80211_S_RUN: {
   5516 		struct iwm_host_cmd cmd = {
   5517 			.id = IWM_LQ_CMD,
   5518 			.len = { sizeof(in->in_lq), },
   5519 			.flags = IWM_CMD_SYNC,
   5520 		};
   5521 
   5522 		in = (struct iwm_node *)ic->ic_bss;
   5523 		iwm_mvm_power_mac_update_mode(sc, in);
   5524 		iwm_mvm_enable_beacon_filter(sc, in);
   5525 		iwm_mvm_update_quotas(sc, in);
   5526 		iwm_setrates(in);
   5527 
   5528 		cmd.data[0] = &in->in_lq;
   5529 		if ((error = iwm_send_cmd(sc, &cmd)) != 0) {
   5530 			DPRINTF(("%s: IWM_LQ_CMD failed\n", DEVNAME(sc)));
   5531 		}
   5532 
   5533 		callout_schedule(&sc->sc_calib_to, hz/2);
   5534 
   5535 		break; }
   5536 
   5537 	default:
   5538 		DPRINTF(("%s: unsupported state %d\n", DEVNAME(sc), nstate));
   5539 		break;
   5540 	}
   5541 
   5542 	sc->sc_newstate(ic, nstate, arg);
   5543 }
   5544 
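/*
 * net80211 state changes are deferred to the sc_nswq workqueue:
 * iwm_newstate() below only queues the request, and iwm_newstate_cb()
 * above does the actual work, since handlers such as iwm_auth() sleep
 * while waiting for firmware responses.
 */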
   5545 static int
   5546 iwm_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
   5547 {
   5548 	struct iwm_newstate_state *iwmns;
   5549 	struct ifnet *ifp = IC2IFP(ic);
   5550 	struct iwm_softc *sc = ifp->if_softc;
   5551 
   5552 	callout_stop(&sc->sc_calib_to);
   5553 
   5554 	iwmns = kmem_intr_alloc(sizeof(*iwmns), KM_NOSLEEP);
   5555 	if (!iwmns) {
   5556 		DPRINTF(("%s: allocating state cb mem failed\n", DEVNAME(sc)));
   5557 		return ENOMEM;
   5558 	}
   5559 
   5560 	iwmns->ns_nstate = nstate;
   5561 	iwmns->ns_arg = arg;
   5562 	iwmns->ns_generation = sc->sc_generation;
   5563 
   5564 	workqueue_enqueue(sc->sc_nswq, &iwmns->ns_wk, NULL);
   5565 
   5566 	return 0;
   5567 }
   5568 
   5569 static void
   5570 iwm_endscan_cb(struct work *work __unused, void *arg)
   5571 {
   5572 	struct iwm_softc *sc = arg;
   5573 	struct ieee80211com *ic = &sc->sc_ic;
   5574 	int done;
   5575 
   5576 	DPRINTF(("scan ended\n"));
   5577 
   5578 	if (sc->sc_scanband == IEEE80211_CHAN_2GHZ) {
   5579 #ifndef IWM_NO_5GHZ
   5580 		int error;
   5581 		done = 0;
   5582 		if ((error = iwm_mvm_scan_request(sc,
   5583 		    IEEE80211_CHAN_5GHZ, ic->ic_des_esslen != 0,
   5584 		    ic->ic_des_essid, ic->ic_des_esslen)) != 0) {
   5585 			DPRINTF(("%s: could not initiate scan\n", DEVNAME(sc)));
   5586 			done = 1;
   5587 		}
   5588 #else
   5589 		done = 1;
   5590 #endif
   5591 	} else {
   5592 		done = 1;
   5593 	}
   5594 
   5595 	if (done) {
   5596 		if (!sc->sc_scanband) {
   5597 			ieee80211_cancel_scan(ic);
   5598 		} else {
   5599 			ieee80211_end_scan(ic);
   5600 		}
   5601 		sc->sc_scanband = 0;
   5602 	}
   5603 }
   5604 
   5605 static int
   5606 iwm_init_hw(struct iwm_softc *sc)
   5607 {
   5608 	struct ieee80211com *ic = &sc->sc_ic;
   5609 	int error, i, qid;
   5610 
   5611 	if ((error = iwm_preinit(sc)) != 0)
   5612 		return error;
   5613 
   5614 	if ((error = iwm_start_hw(sc)) != 0)
   5615 		return error;
   5616 
   5617 	if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
   5618 		return error;
   5619 	}
   5620 
   5621 	/*
   5622 	 * Stop and restart the HW since the INIT
   5623 	 * image has just been loaded.
   5624 	 */
   5625 	iwm_stop_device(sc);
   5626 	if ((error = iwm_start_hw(sc)) != 0) {
   5627 		aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
   5628 		return error;
   5629 	}
   5630 
   5631 	/* restart, this time with the regular firmware */
   5632 	error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_REGULAR);
   5633 	if (error) {
   5634 		aprint_error_dev(sc->sc_dev, "could not load firmware\n");
   5635 		goto error;
   5636 	}
   5637 
   5638 	if ((error = iwm_send_tx_ant_cfg(sc, IWM_FW_VALID_TX_ANT(sc))) != 0)
   5639 		goto error;
   5640 
   5641 	/* Send phy db control command and then phy db calibration */
   5642 	if ((error = iwm_send_phy_db_data(sc)) != 0)
   5643 		goto error;
   5644 
   5645 	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0)
   5646 		goto error;
   5647 
   5648 	/* Add auxiliary station for scanning */
   5649 	if ((error = iwm_mvm_add_aux_sta(sc)) != 0)
   5650 		goto error;
   5651 
   5652 	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
   5653 		/*
   5654 		 * The channel used here isn't relevant as it's
   5655 		 * going to be overwritten in the other flows.
   5656 		 * For now use the first channel we have.
   5657 		 */
   5658 		if ((error = iwm_mvm_phy_ctxt_add(sc,
   5659 		    &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
   5660 			goto error;
   5661 	}
   5662 
   5663 	error = iwm_mvm_power_update_device(sc);
   5664 	if (error)
   5665 		goto error;
   5666 
   5667 	/* Mark TX rings as active. */
   5668 	for (qid = 0; qid < 4; qid++) {
   5669 		iwm_enable_txq(sc, qid, qid);
   5670 	}
   5671 
   5672 	return 0;
   5673 
   5674  error:
   5675 	iwm_stop_device(sc);
   5676 	return error;
   5677 }
   5678 
   5679 /*
   5680  * ifnet interfaces
   5681  */
   5682 
   5683 static int
   5684 iwm_init(struct ifnet *ifp)
   5685 {
   5686 	struct iwm_softc *sc = ifp->if_softc;
   5687 	int error;
   5688 
   5689 	if (sc->sc_flags & IWM_FLAG_HW_INITED) {
   5690 		return 0;
   5691 	}
   5692 	sc->sc_generation++;
   5693 	sc->sc_flags &= ~IWM_FLAG_STOPPED;
   5694 
   5695 	if ((error = iwm_init_hw(sc)) != 0) {
   5696 		iwm_stop(ifp, 1);
   5697 		return error;
   5698 	}
   5699 
   5700 	/*
   5701  	 * Ok, firmware loaded and we are jogging
   5702 	 */
   5703 
   5704 	ifp->if_flags &= ~IFF_OACTIVE;
   5705 	ifp->if_flags |= IFF_RUNNING;
   5706 
   5707 	ieee80211_begin_scan(&sc->sc_ic, 0);
   5708 	sc->sc_flags |= IWM_FLAG_HW_INITED;
   5709 
   5710 	return 0;
   5711 }
   5712 
   5713 /*
   5714  * Dequeue packets from sendq and call send.
   5715  * mostly from iwn
   5716  */
   5717 static void
   5718 iwm_start(struct ifnet *ifp)
   5719 {
   5720 	struct iwm_softc *sc = ifp->if_softc;
   5721 	struct ieee80211com *ic = &sc->sc_ic;
   5722 	struct ieee80211_node *ni;
   5723 	struct ether_header *eh;
   5724 	struct mbuf *m;
   5725 	int ac;
   5726 
   5727 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
   5728 		return;
   5729 
   5730 	for (;;) {
   5731 		/* why isn't this done per-queue? */
   5732 		if (sc->qfullmsk != 0) {
   5733 			ifp->if_flags |= IFF_OACTIVE;
   5734 			break;
   5735 		}
   5736 
   5737 		/* need to send management frames even if we're not RUNning */
   5738 		IF_DEQUEUE(&ic->ic_mgtq, m);
   5739 		if (m) {
   5740 			ni = (void *)m->m_pkthdr.rcvif;
   5741 			ac = 0;
   5742 			goto sendit;
   5743 		}
   5744 		if (ic->ic_state != IEEE80211_S_RUN) {
   5745 			break;
   5746 		}
   5747 
   5748 		IFQ_DEQUEUE(&ifp->if_snd, m);
   5749 		if (!m)
   5750 			break;
   5751 		if (m->m_len < sizeof (*eh) &&
   5752 		   (m = m_pullup(m, sizeof (*eh))) == NULL) {
   5753 			ifp->if_oerrors++;
   5754 			continue;
   5755 		}
   5756 		if (ifp->if_bpf != NULL)
   5757 			bpf_mtap(ifp, m);
   5758 
   5759 		eh = mtod(m, struct ether_header *);
   5760 		ni = ieee80211_find_txnode(ic, eh->ether_dhost);
   5761 		if (ni == NULL) {
   5762 			m_freem(m);
   5763 			ifp->if_oerrors++;
   5764 			continue;
   5765 		}
   5766 		/* classify mbuf so we can find which tx ring to use */
   5767 		if (ieee80211_classify(ic, m, ni) != 0) {
   5768 			m_freem(m);
   5769 			ieee80211_free_node(ni);
   5770 			ifp->if_oerrors++;
   5771 			continue;
   5772 		}
   5773 
   5774 		/* No QoS encapsulation for EAPOL frames. */
   5775 		ac = (eh->ether_type != htons(ETHERTYPE_PAE)) ?
   5776 		    M_WME_GETAC(m) : WME_AC_BE;
   5777 
   5778 		if ((m = ieee80211_encap(ic, m, ni)) == NULL) {
   5779 			ieee80211_free_node(ni);
   5780 			ifp->if_oerrors++;
   5781 			continue;
   5782 		}
   5783 
   5784  sendit:
   5785 		if (ic->ic_rawbpf != NULL)
   5786 			bpf_mtap3(ic->ic_rawbpf, m);
   5787 		if (iwm_tx(sc, m, ni, ac) != 0) {
   5788 			ieee80211_free_node(ni);
   5789 			ifp->if_oerrors++;
   5790 			continue;
   5791 		}
   5792 
   5793 		if (ifp->if_flags & IFF_UP) {
   5794 			sc->sc_tx_timer = 15;
   5795 			ifp->if_timer = 1;
   5796 		}
   5797 	}
   5798 
   5799 	return;
   5800 }
   5801 
   5802 static void
   5803 iwm_stop(struct ifnet *ifp, int disable)
   5804 {
   5805 	struct iwm_softc *sc = ifp->if_softc;
   5806 	struct ieee80211com *ic = &sc->sc_ic;
   5807 
   5808 	sc->sc_flags &= ~IWM_FLAG_HW_INITED;
   5809 	sc->sc_flags |= IWM_FLAG_STOPPED;
   5810 	sc->sc_generation++;
   5811 	sc->sc_scanband = 0;
   5812 	sc->sc_auth_prot = 0;
   5813 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   5814 
   5815 	if (ic->ic_state != IEEE80211_S_INIT)
   5816 		ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
   5817 
   5818 	ifp->if_timer = sc->sc_tx_timer = 0;
   5819 	iwm_stop_device(sc);
   5820 }
   5821 
   5822 static void
   5823 iwm_watchdog(struct ifnet *ifp)
   5824 {
   5825 	struct iwm_softc *sc = ifp->if_softc;
   5826 
   5827 	ifp->if_timer = 0;
   5828 	if (sc->sc_tx_timer > 0) {
   5829 		if (--sc->sc_tx_timer == 0) {
   5830 			aprint_error_dev(sc->sc_dev, "device timeout\n");
   5831 #ifdef IWM_DEBUG
   5832 			iwm_nic_error(sc);
   5833 #endif
   5834 			ifp->if_flags &= ~IFF_UP;
   5835 			iwm_stop(ifp, 1);
   5836 			ifp->if_oerrors++;
   5837 			return;
   5838 		}
   5839 		ifp->if_timer = 1;
   5840 	}
   5841 
   5842 	ieee80211_watchdog(&sc->sc_ic);
   5843 }
   5844 
   5845 static int
   5846 iwm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   5847 {
   5848 	struct iwm_softc *sc = ifp->if_softc;
   5849 	struct ieee80211com *ic = &sc->sc_ic;
   5850 	const struct sockaddr *sa;
   5851 	int s, error = 0;
   5852 
   5853 	s = splnet();
   5854 
   5855 	switch (cmd) {
   5856 	case SIOCSIFADDR:
   5857 		ifp->if_flags |= IFF_UP;
   5858 		/* FALLTHROUGH */
   5859 	case SIOCSIFFLAGS:
   5860 		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
   5861 			break;
   5862 		if (ifp->if_flags & IFF_UP) {
   5863 			if (!(ifp->if_flags & IFF_RUNNING)) {
   5864 				if ((error = iwm_init(ifp)) != 0)
   5865 					ifp->if_flags &= ~IFF_UP;
   5866 			}
   5867 		} else {
   5868 			if (ifp->if_flags & IFF_RUNNING)
   5869 				iwm_stop(ifp, 1);
   5870 		}
   5871 		break;
   5872 
   5873 	case SIOCADDMULTI:
   5874 	case SIOCDELMULTI:
   5875 		sa = ifreq_getaddr(SIOCADDMULTI, (struct ifreq *)data);
   5876 		error = (cmd == SIOCADDMULTI) ?
   5877 		    ether_addmulti(sa, &sc->sc_ec) :
   5878 		    ether_delmulti(sa, &sc->sc_ec);
   5879 
   5880 		if (error == ENETRESET)
   5881 			error = 0;
   5882 		break;
   5883 
   5884 	default:
   5885 		error = ieee80211_ioctl(ic, cmd, data);
   5886 	}
   5887 
   5888 	if (error == ENETRESET) {
   5889 		error = 0;
   5890 		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
   5891 		    (IFF_UP | IFF_RUNNING)) {
   5892 			iwm_stop(ifp, 0);
   5893 			error = iwm_init(ifp);
   5894 		}
   5895 	}
   5896 
   5897 	splx(s);
   5898 	return error;
   5899 }
   5900 
   5901 /*
   5902  * The interrupt side of things
   5903  */
   5904 
   5905 /*
   5906  * error dumping routines are from iwlwifi/mvm/utils.c
   5907  */
   5908 
   5909 /*
   5910  * Note: This structure is read from the device with IO accesses,
   5911  * and the reading already does the endian conversion. As it is
   5912  * read with uint32_t-sized accesses, any members with a different size
   5913  * need to be ordered correctly though!
   5914  */
   5915 struct iwm_error_event_table {
   5916 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
   5917 	uint32_t error_id;		/* type of error */
   5918 	uint32_t pc;			/* program counter */
   5919 	uint32_t blink1;		/* branch link */
   5920 	uint32_t blink2;		/* branch link */
   5921 	uint32_t ilink1;		/* interrupt link */
   5922 	uint32_t ilink2;		/* interrupt link */
   5923 	uint32_t data1;		/* error-specific data */
   5924 	uint32_t data2;		/* error-specific data */
   5925 	uint32_t data3;		/* error-specific data */
   5926 	uint32_t bcon_time;		/* beacon timer */
   5927 	uint32_t tsf_low;		/* network timestamp function timer */
   5928 	uint32_t tsf_hi;		/* network timestamp function timer */
   5929 	uint32_t gp1;		/* GP1 timer register */
   5930 	uint32_t gp2;		/* GP2 timer register */
   5931 	uint32_t gp3;		/* GP3 timer register */
   5932 	uint32_t ucode_ver;		/* uCode version */
   5933 	uint32_t hw_ver;		/* HW Silicon version */
   5934 	uint32_t brd_ver;		/* HW board version */
   5935 	uint32_t log_pc;		/* log program counter */
   5936 	uint32_t frame_ptr;		/* frame pointer */
   5937 	uint32_t stack_ptr;		/* stack pointer */
   5938 	uint32_t hcmd;		/* last host command header */
   5939 	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
   5940 				 * rxtx_flag */
   5941 	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
   5942 				 * host_flag */
   5943 	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
   5944 				 * enc_flag */
   5945 	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
   5946 				 * time_flag */
   5947 	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
   5948 				 * wico interrupt */
   5949 	uint32_t isr_pref;		/* isr status register LMPM_NIC_PREF_STAT */
   5950 	uint32_t wait_event;		/* wait event() caller address */
   5951 	uint32_t l2p_control;	/* L2pControlField */
   5952 	uint32_t l2p_duration;	/* L2pDurationField */
   5953 	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
   5954 	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
   5955 	uint32_t lmpm_pmg_sel;	/* indicates which clocks are turned on
   5956 				 * (LMPM_PMG_SEL) */
   5957 	uint32_t u_timestamp;	/* date and time of the uCode
   5958 				 * compilation */
   5959 	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
   5960 } __packed;
   5961 
   5962 #define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
   5963 #define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
   5964 
   5965 #ifdef IWM_DEBUG
   5966 static const struct {
   5967 	const char *name;
   5968 	uint8_t num;
   5969 } advanced_lookup[] = {
   5970 	{ "NMI_INTERRUPT_WDG", 0x34 },
   5971 	{ "SYSASSERT", 0x35 },
   5972 	{ "UCODE_VERSION_MISMATCH", 0x37 },
   5973 	{ "BAD_COMMAND", 0x38 },
   5974 	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
   5975 	{ "FATAL_ERROR", 0x3D },
   5976 	{ "NMI_TRM_HW_ERR", 0x46 },
   5977 	{ "NMI_INTERRUPT_TRM", 0x4C },
   5978 	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
   5979 	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
   5980 	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
   5981 	{ "NMI_INTERRUPT_HOST", 0x66 },
   5982 	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
   5983 	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
   5984 	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
   5985 	{ "ADVANCED_SYSASSERT", 0 },
   5986 };
   5987 
   5988 static const char *
   5989 iwm_desc_lookup(uint32_t num)
   5990 {
   5991 	int i;
   5992 
   5993 	for (i = 0; i < __arraycount(advanced_lookup) - 1; i++)
   5994 		if (advanced_lookup[i].num == num)
   5995 			return advanced_lookup[i].name;
   5996 
   5997 	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
   5998 	return advanced_lookup[i].name;
   5999 }
   6000 
   6001 /*
   6002  * Support for dumping the error log seemed like a good idea ...
   6003  * but it's mostly hex junk and the only sensible thing is the
   6004  * hw/ucode revision (which we know anyway).  Since it's here,
   6005  * I'll just leave it in, just in case e.g. the Intel guys want to
   6006  * help us decipher some "ADVANCED_SYSASSERT" later.
   6007  */
   6008 static void
   6009 iwm_nic_error(struct iwm_softc *sc)
   6010 {
   6011 	struct iwm_error_event_table table;
   6012 	uint32_t base;
   6013 
   6014 	aprint_error_dev(sc->sc_dev, "dumping device error log\n");
   6015 	base = sc->sc_uc.uc_error_event_table;
   6016 	if (base < 0x800000 || base >= 0x80C000) {
   6017 		aprint_error_dev(sc->sc_dev,
   6018 		    "Not valid error log pointer 0x%08x\n", base);
   6019 		return;
   6020 	}
   6021 
   6022 	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t)) != 0) {
   6023 		aprint_error_dev(sc->sc_dev, "reading errlog failed\n");
   6024 		return;
   6025 	}
   6026 
   6027 	if (!table.valid) {
   6028 		aprint_error_dev(sc->sc_dev, "errlog not found, skipping\n");
   6029 		return;
   6030 	}
   6031 
   6032 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
   6033 		aprint_error_dev(sc->sc_dev, "Start IWL Error Log Dump:\n");
   6034 		aprint_error_dev(sc->sc_dev, "Status: 0x%x, count: %d\n",
   6035 		    sc->sc_flags, table.valid);
   6036 	}
   6037 
   6038 	aprint_error_dev(sc->sc_dev, "%08X | %-28s\n", table.error_id,
   6039 		iwm_desc_lookup(table.error_id));
   6040 	aprint_error_dev(sc->sc_dev, "%08X | uPc\n", table.pc);
   6041 	aprint_error_dev(sc->sc_dev, "%08X | branchlink1\n", table.blink1);
   6042 	aprint_error_dev(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
   6043 	aprint_error_dev(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
   6044 	aprint_error_dev(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
   6045 	aprint_error_dev(sc->sc_dev, "%08X | data1\n", table.data1);
   6046 	aprint_error_dev(sc->sc_dev, "%08X | data2\n", table.data2);
   6047 	aprint_error_dev(sc->sc_dev, "%08X | data3\n", table.data3);
   6048 	aprint_error_dev(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
   6049 	aprint_error_dev(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
   6050 	aprint_error_dev(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
   6051 	aprint_error_dev(sc->sc_dev, "%08X | time gp1\n", table.gp1);
   6052 	aprint_error_dev(sc->sc_dev, "%08X | time gp2\n", table.gp2);
   6053 	aprint_error_dev(sc->sc_dev, "%08X | time gp3\n", table.gp3);
   6054 	aprint_error_dev(sc->sc_dev, "%08X | uCode version\n", table.ucode_ver);
   6055 	aprint_error_dev(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
   6056 	aprint_error_dev(sc->sc_dev, "%08X | board version\n", table.brd_ver);
   6057 	aprint_error_dev(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
   6058 	aprint_error_dev(sc->sc_dev, "%08X | isr0\n", table.isr0);
   6059 	aprint_error_dev(sc->sc_dev, "%08X | isr1\n", table.isr1);
   6060 	aprint_error_dev(sc->sc_dev, "%08X | isr2\n", table.isr2);
   6061 	aprint_error_dev(sc->sc_dev, "%08X | isr3\n", table.isr3);
   6062 	aprint_error_dev(sc->sc_dev, "%08X | isr4\n", table.isr4);
   6063 	aprint_error_dev(sc->sc_dev, "%08X | isr_pref\n", table.isr_pref);
   6064 	aprint_error_dev(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
   6065 	aprint_error_dev(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
   6066 	aprint_error_dev(sc->sc_dev, "%08X | l2p_duration\n",
   6067 	    table.l2p_duration);
   6068 	aprint_error_dev(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
   6069 	aprint_error_dev(sc->sc_dev, "%08X | l2p_addr_match\n",
   6070 	    table.l2p_addr_match);
   6071 	aprint_error_dev(sc->sc_dev, "%08X | lmpm_pmg_sel\n",
   6072 	    table.lmpm_pmg_sel);
   6073 	aprint_error_dev(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
   6074 	aprint_error_dev(sc->sc_dev, "%08X | flow_handler\n",
   6075 	    table.flow_handler);
   6076 }
   6077 #endif
   6078 
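/*
 * Sync the part of the RX buffer that follows the packet header and return
 * a pointer to that payload; used below when interpreting command responses
 * and notifications.
 */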
   6079 #define SYNC_RESP_STRUCT(_var_, _pkt_)					\
   6080 do {									\
   6081 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
   6082 	    sizeof(*(_var_)), BUS_DMASYNC_POSTREAD);			\
   6083 	_var_ = (void *)((_pkt_)+1);					\
   6084 } while (/*CONSTCOND*/0)
   6085 
   6086 #define SYNC_RESP_PTR(_ptr_, _len_, _pkt_)				\
   6087 do {									\
   6088 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
   6089 	    (_len_), BUS_DMASYNC_POSTREAD);				\
   6090 	_ptr_ = (void *)((_pkt_)+1);					\
   6091 } while (/*CONSTCOND*/0)
   6092 
   6093 #define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % IWM_RX_RING_COUNT);
   6094 
   6095 /*
   6096  * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
   6097  * Basic structure from if_iwn
   6098  */
   6099 static void
   6100 iwm_notif_intr(struct iwm_softc *sc)
   6101 {
   6102 	uint16_t hw;
   6103 
   6104 	bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
   6105 	    0, sc->rxq.stat_dma.size, BUS_DMASYNC_POSTREAD);
   6106 
   6107 	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
   6108 	while (sc->rxq.cur != hw) {
   6109 		struct iwm_rx_data *data = &sc->rxq.data[sc->rxq.cur];
   6110 		struct iwm_rx_packet *pkt, tmppkt;
   6111 		struct iwm_cmd_response *cresp;
   6112 		int qid, idx;
   6113 
   6114 		bus_dmamap_sync(sc->sc_dmat, data->map, 0, sizeof(*pkt),
   6115 		    BUS_DMASYNC_POSTREAD);
   6116 		pkt = mtod(data->m, struct iwm_rx_packet *);
   6117 
   6118 		qid = pkt->hdr.qid & ~0x80;
   6119 		idx = pkt->hdr.idx;
   6120 
   6121 		DPRINTFN(12, ("rx packet qid=%d idx=%d flags=%x type=%x %d %d\n",
   6122 		    pkt->hdr.qid & ~0x80, pkt->hdr.idx, pkt->hdr.flags,
   6123 		    pkt->hdr.code, sc->rxq.cur, hw));
   6124 
   6125 		/*
   6126 		 * randomly get these from the firmware, no idea why.
   6127 		 * they at least seem harmless, so just ignore them for now
   6128 		 */
   6129 		if (__predict_false((pkt->hdr.code == 0 && qid == 0 && idx == 0)
   6130 		    || pkt->len_n_flags == htole32(0x55550000))) {
   6131 			ADVANCE_RXQ(sc);
   6132 			continue;
   6133 		}
   6134 
   6135 		switch (pkt->hdr.code) {
   6136 		case IWM_REPLY_RX_PHY_CMD:
   6137 			iwm_mvm_rx_rx_phy_cmd(sc, pkt, data);
   6138 			break;
   6139 
   6140 		case IWM_REPLY_RX_MPDU_CMD:
   6141 			tmppkt = *pkt; // XXX m is freed by ieee80211_input()
   6142 			iwm_mvm_rx_rx_mpdu(sc, pkt, data);
   6143 			pkt = &tmppkt;
   6144 			break;
   6145 
   6146 		case IWM_TX_CMD:
   6147 			iwm_mvm_rx_tx_cmd(sc, pkt, data);
   6148 			break;
   6149 
   6150 		case IWM_MISSED_BEACONS_NOTIFICATION:
   6151 			iwm_mvm_rx_missed_beacons_notif(sc, pkt, data);
   6152 			break;
   6153 
   6154 		case IWM_MVM_ALIVE: {
   6155 			struct iwm_mvm_alive_resp *resp;
   6156 			SYNC_RESP_STRUCT(resp, pkt);
   6157 
   6158 			sc->sc_uc.uc_error_event_table
   6159 			    = le32toh(resp->error_event_table_ptr);
   6160 			sc->sc_uc.uc_log_event_table
   6161 			    = le32toh(resp->log_event_table_ptr);
   6162 			sc->sched_base = le32toh(resp->scd_base_ptr);
   6163 			sc->sc_uc.uc_ok = resp->status == IWM_ALIVE_STATUS_OK;
   6164 
   6165 			sc->sc_uc.uc_intr = 1;
   6166 			wakeup(&sc->sc_uc);
   6167 			break; }
   6168 
   6169 		case IWM_CALIB_RES_NOTIF_PHY_DB: {
   6170 			struct iwm_calib_res_notif_phy_db *phy_db_notif;
   6171 			SYNC_RESP_STRUCT(phy_db_notif, pkt);
   6172 
   6173 			uint16_t size = le16toh(phy_db_notif->length);
   6174 			bus_dmamap_sync(sc->sc_dmat, data->map,
   6175 			    sizeof(*pkt) + sizeof(*phy_db_notif),
   6176 			    size, BUS_DMASYNC_POSTREAD);
   6177 			iwm_phy_db_set_section(sc, phy_db_notif, size);
   6178 
   6179 			break; }
   6180 
   6181 		case IWM_STATISTICS_NOTIFICATION: {
   6182 			struct iwm_notif_statistics *stats;
   6183 			SYNC_RESP_STRUCT(stats, pkt);
   6184 			memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
   6185 			sc->sc_noise = iwm_get_noise(&stats->rx.general);
   6186 			break; }
   6187 
   6188 		case IWM_NVM_ACCESS_CMD:
   6189 			if (sc->sc_wantresp == ((qid << 16) | idx)) {
   6190 				bus_dmamap_sync(sc->sc_dmat, data->map, 0,
   6191 				    sizeof(sc->sc_cmd_resp),
   6192 				    BUS_DMASYNC_POSTREAD);
   6193 				memcpy(sc->sc_cmd_resp,
   6194 				    pkt, sizeof(sc->sc_cmd_resp));
   6195 			}
   6196 			break;
   6197 
   6198 		case IWM_PHY_CONFIGURATION_CMD:
   6199 		case IWM_TX_ANT_CONFIGURATION_CMD:
   6200 		case IWM_ADD_STA:
   6201 		case IWM_MAC_CONTEXT_CMD:
   6202 		case IWM_REPLY_SF_CFG_CMD:
   6203 		case IWM_POWER_TABLE_CMD:
   6204 		case IWM_PHY_CONTEXT_CMD:
   6205 		case IWM_BINDING_CONTEXT_CMD:
   6206 		case IWM_TIME_EVENT_CMD:
   6207 		case IWM_SCAN_REQUEST_CMD:
   6208 		case IWM_REPLY_BEACON_FILTERING_CMD:
   6209 		case IWM_MAC_PM_POWER_TABLE:
   6210 		case IWM_TIME_QUOTA_CMD:
   6211 		case IWM_REMOVE_STA:
   6212 		case IWM_TXPATH_FLUSH:
   6213 		case IWM_LQ_CMD:
   6214 			SYNC_RESP_STRUCT(cresp, pkt);
   6215 			if (sc->sc_wantresp == ((qid << 16) | idx)) {
   6216 				memcpy(sc->sc_cmd_resp,
   6217 				    pkt, sizeof(*pkt)+sizeof(*cresp));
   6218 			}
   6219 			break;
   6220 
   6221 		/* ignore */
   6222 		case 0x6c: /* IWM_PHY_DB_CMD, no idea why it's not in fw-api.h */
   6223 			break;
   6224 
   6225 		case IWM_INIT_COMPLETE_NOTIF:
   6226 			sc->sc_init_complete = 1;
   6227 			wakeup(&sc->sc_init_complete);
   6228 			break;
   6229 
   6230 		case IWM_SCAN_COMPLETE_NOTIFICATION: {
   6231 			struct iwm_scan_complete_notif *notif;
   6232 			SYNC_RESP_STRUCT(notif, pkt);
   6233 
   6234 			workqueue_enqueue(sc->sc_eswq, &sc->sc_eswk, NULL);
   6235 			break; }
   6236 
   6237 		case IWM_REPLY_ERROR: {
   6238 			struct iwm_error_resp *resp;
   6239 			SYNC_RESP_STRUCT(resp, pkt);
   6240 
   6241 			aprint_error_dev(sc->sc_dev,
   6242 			    "firmware error 0x%x, cmd 0x%x\n",
   6243 			    le32toh(resp->error_type), resp->cmd_id);
   6244 			break; }
   6245 
   6246 		case IWM_TIME_EVENT_NOTIFICATION: {
   6247 			struct iwm_time_event_notif *notif;
   6248 			SYNC_RESP_STRUCT(notif, pkt);
   6249 
   6250 			if (notif->status) {
   6251 				if (le32toh(notif->action) &
   6252 				    IWM_TE_V2_NOTIF_HOST_EVENT_START)
   6253 					sc->sc_auth_prot = 2;
   6254 				else
   6255 					sc->sc_auth_prot = 0;
   6256 			} else {
   6257 				sc->sc_auth_prot = -1;
   6258 			}
   6259 			wakeup(&sc->sc_auth_prot);
   6260 			break; }
   6261 
   6262 		default:
   6263 			aprint_error_dev(sc->sc_dev,
   6264 			    "frame %d/%d %x UNHANDLED (this should "
   6265 			    "not happen)\n", qid, idx, pkt->len_n_flags);
   6266 			break;
   6267 		}
   6268 
   6269 		/*
   6270 		 * Why test bit 0x80?  The Linux driver:
   6271 		 *
   6272 		 * There is one exception:  uCode sets bit 15 when it
   6273 		 * originates the response/notification, i.e. when the
   6274 		 * response/notification is not a direct response to a
   6275 		 * command sent by the driver.  For example, uCode issues
   6276 		 * IWM_REPLY_RX when it sends a received frame to the driver;
   6277 		 * it is not a direct response to any driver command.
   6278 		 *
   6279 		 * Ok, so since when is 7 == 15?  Well, the Linux driver
   6280 		 * uses a slightly different format for pkt->hdr, and "qid"
   6281 		 * is actually the upper byte of a two-byte field.
   6282 		 */
   6283 		if (!(pkt->hdr.qid & (1 << 7))) {
   6284 			iwm_cmd_done(sc, pkt);
   6285 		}
   6286 
   6287 		ADVANCE_RXQ(sc);
   6288 	}
   6289 
   6290 	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
   6291 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
   6292 
   6293 	/*
   6294 	 * Tell the firmware what we have processed.
   6295 	 * Seems like the hardware gets upset unless we align
   6296 	 * the write by 8??
   6297 	 */
   6298 	hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
   6299 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
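	/*
	 * Editor's illustration: if the closed_rb_num read above was 37, hw
	 * is first decremented to 36 and then rounded down to 32 before
	 * being written back as the new write pointer.
	 */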
   6300 }
   6301 
   6302 static int
   6303 iwm_intr(void *arg)
   6304 {
   6305 	struct iwm_softc *sc = arg;
   6306 	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
   6307 	int handled = 0;
   6308 	int r1, r2, rv = 0;
   6309 	int isperiodic = 0;
   6310 
   6311 	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
   6312 
   6313 	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
   6314 		uint32_t *ict = sc->ict_dma.vaddr;
   6315 		int tmp;
   6316 
   6317 		tmp = htole32(ict[sc->ict_cur]);
   6318 		if (!tmp)
   6319 			goto out_ena;
   6320 
   6321 		/*
   6322 		 * ok, there was something.  keep plowing until we have all.
   6323 		 */
   6324 		r1 = r2 = 0;
   6325 		while (tmp) {
   6326 			r1 |= tmp;
   6327 			ict[sc->ict_cur] = 0;
   6328 			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
   6329 			tmp = htole32(ict[sc->ict_cur]);
   6330 		}
   6331 
   6332 		/* this is where the fun begins.  don't ask */
   6333 		if (r1 == 0xffffffff)
   6334 			r1 = 0;
   6335 
   6336 		/* i am not expected to understand this */
   6337 		if (r1 & 0xc0000)
   6338 			r1 |= 0x8000;
   6339 		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
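		/*
		 * Editor's note (an interpretation of the arithmetic above,
		 * not from the original source): the ICT entry appears to
		 * pack the interrupt cause into 16 bits; the low byte is
		 * kept in place and the high byte is shifted up to bits
		 * 24-31 to recreate the CSR_INT register layout used below.
		 */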
   6340 	} else {
   6341 		r1 = IWM_READ(sc, IWM_CSR_INT);
   6342 		/* "hardware gone" (where, fishing?) */
   6343 		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
   6344 			goto out;
   6345 		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
   6346 	}
   6347 	if (r1 == 0 && r2 == 0) {
   6348 		goto out_ena;
   6349 	}
   6350 
   6351 	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
   6352 
   6353 	/* ignored */
   6354 	handled |= (r1 & (IWM_CSR_INT_BIT_ALIVE /*| IWM_CSR_INT_BIT_SCD*/));
   6355 
   6356 	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
   6357 #ifdef IWM_DEBUG
   6358 		int i;
   6359 
   6360 		iwm_nic_error(sc);
   6361 
   6362 		/* Dump driver status (TX and RX rings) while we're here. */
   6363 		DPRINTF(("driver status:\n"));
   6364 		for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
   6365 			struct iwm_tx_ring *ring = &sc->txq[i];
   6366 			DPRINTF(("  tx ring %2d: qid=%-2d cur=%-3d "
   6367 			    "queued=%-3d\n",
   6368 			    i, ring->qid, ring->cur, ring->queued));
   6369 		}
   6370 		DPRINTF(("  rx ring: cur=%d\n", sc->rxq.cur));
   6371 		DPRINTF(("  802.11 state %d\n", sc->sc_ic.ic_state));
   6372 #endif
   6373 
   6374 		aprint_error_dev(sc->sc_dev, "fatal firmware error\n");
   6375 		ifp->if_flags &= ~IFF_UP;
   6376 		iwm_stop(ifp, 1);
   6377 		rv = 1;
   6378 		goto out;
   6379 
   6380 	}
   6381 
   6382 	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
   6383 		handled |= IWM_CSR_INT_BIT_HW_ERR;
   6384 		aprint_error_dev(sc->sc_dev,
   6385 		    "hardware error, stopping device\n");
   6386 		ifp->if_flags &= ~IFF_UP;
   6387 		iwm_stop(ifp, 1);
   6388 		rv = 1;
   6389 		goto out;
   6390 	}
   6391 
   6392 	/* firmware chunk loaded */
   6393 	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
   6394 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
   6395 		handled |= IWM_CSR_INT_BIT_FH_TX;
   6396 
   6397 		sc->sc_fw_chunk_done = 1;
   6398 		wakeup(&sc->sc_fw);
   6399 	}
   6400 
   6401 	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
   6402 		handled |= IWM_CSR_INT_BIT_RF_KILL;
   6403 		if (iwm_check_rfkill(sc) && (ifp->if_flags & IFF_UP)) {
   6404 			DPRINTF(("%s: rfkill switch, disabling interface\n",
   6405 			    DEVNAME(sc)));
   6406 			ifp->if_flags &= ~IFF_UP;
   6407 			iwm_stop(ifp, 1);
   6408 		}
   6409 	}
   6410 
   6411 	/*
   6412 	 * The Linux driver uses periodic interrupts to avoid races.
   6413 	 * We cargo-cult like it's going out of fashion.
   6414 	 */
   6415 	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
   6416 		handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
   6417 		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
   6418 		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
   6419 			IWM_WRITE_1(sc,
   6420 			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
   6421 		isperiodic = 1;
   6422 	}
   6423 
   6424 	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
   6425 		handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
   6426 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
   6427 
   6428 		iwm_notif_intr(sc);
   6429 
   6430 		/* enable periodic interrupt, see above */
		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) &&
		    !isperiodic)
   6432 			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
   6433 			    IWM_CSR_INT_PERIODIC_ENA);
   6434 	}
   6435 
   6436 	if (__predict_false(r1 & ~handled))
   6437 		DPRINTF(("%s: unhandled interrupts: %x\n", DEVNAME(sc), r1));
   6438 	rv = 1;
   6439 
   6440  out_ena:
   6441 	iwm_restore_interrupts(sc);
   6442  out:
   6443 	return rv;
   6444 }
   6445 
/*
 * Autoconf glue
 */
   6449 
   6450 static const pci_product_id_t iwm_devices[] = {
   6451 	PCI_PRODUCT_INTEL_WIFI_LINK_7260_1,
   6452 	PCI_PRODUCT_INTEL_WIFI_LINK_7260_2,
   6453 	PCI_PRODUCT_INTEL_WIFI_LINK_3160_1,
   6454 	PCI_PRODUCT_INTEL_WIFI_LINK_3160_2,
   6455 	PCI_PRODUCT_INTEL_WIFI_LINK_7265_1,
   6456 	PCI_PRODUCT_INTEL_WIFI_LINK_7265_2,
   6457 };
   6458 
   6459 static int
   6460 iwm_match(device_t parent, cfdata_t match __unused, void *aux)
   6461 {
   6462 	struct pci_attach_args *pa = aux;
   6463 
   6464 	if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_INTEL)
   6465 		return 0;
   6466 
   6467 	for (size_t i = 0; i < __arraycount(iwm_devices); i++)
   6468 		if (PCI_PRODUCT(pa->pa_id) == iwm_devices[i])
   6469 			return 1;
   6470 
   6471 	return 0;
   6472 }
   6473 
   6474 static int
   6475 iwm_preinit(struct iwm_softc *sc)
   6476 {
   6477 	int error;
   6478 
   6479 	if ((error = iwm_prepare_card_hw(sc)) != 0) {
   6480 		aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
   6481 		return error;
   6482 	}
   6483 
   6484 	if (sc->sc_flags & IWM_FLAG_ATTACHED)
   6485 		return 0;
   6486 
   6487 	if ((error = iwm_start_hw(sc)) != 0) {
   6488 		aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
   6489 		return error;
   6490 	}
   6491 
   6492 	error = iwm_run_init_mvm_ucode(sc, 1);
   6493 	iwm_stop_device(sc);
   6494 	return error;
   6495 }
   6496 
   6497 static void
   6498 iwm_attach_hook(device_t dev)
   6499 {
   6500 	struct iwm_softc *sc = device_private(dev);
   6501 	struct ieee80211com *ic = &sc->sc_ic;
   6502 	struct ifnet *ifp = &sc->sc_ec.ec_if;
   6503 
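	/*
	 * Called via config_mountroot(), so the root file system (and the
	 * firmware image on it) is available by now.
	 */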
   6504 	KASSERT(!cold);
   6505 
   6506 	if (iwm_preinit(sc) != 0)
   6507 		return;
   6508 
   6509 	sc->sc_flags |= IWM_FLAG_ATTACHED;
   6510 
   6511 	aprint_normal_dev(sc->sc_dev,
   6512 	    "hw rev: 0x%x, fw ver %d.%d (API ver %d), address %s\n",
   6513 	    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
   6514 	    IWM_UCODE_MAJOR(sc->sc_fwver),
   6515 	    IWM_UCODE_MINOR(sc->sc_fwver),
   6516 	    IWM_UCODE_API(sc->sc_fwver),
   6517 	    ether_sprintf(sc->sc_nvm.hw_addr));
   6518 
   6519 	ic->ic_ifp = ifp;
	ic->ic_phytype = IEEE80211_T_OFDM;	/* not the only PHY type, but unused */
   6521 	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
   6522 	ic->ic_state = IEEE80211_S_INIT;
   6523 
   6524 	/* Set device capabilities. */
   6525 	ic->ic_caps =
   6526 	    IEEE80211_C_WEP |		/* WEP */
   6527 	    IEEE80211_C_WPA |		/* 802.11i */
   6528 	    IEEE80211_C_SHSLOT |	/* short slot time supported */
   6529 	    IEEE80211_C_SHPREAMBLE;	/* short preamble supported */
   6530 
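	/* Supported rate sets; 11a (5 GHz) only unless IWM_NO_5GHZ is defined. */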
   6531 #ifndef IWM_NO_5GHZ
   6532 	ic->ic_sup_rates[IEEE80211_MODE_11A] = ieee80211_std_rateset_11a;
   6533 #endif
   6534 	ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b;
   6535 	ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g;
   6536 
   6537 	for (int i = 0; i < __arraycount(sc->sc_phyctxt); i++) {
   6538 		sc->sc_phyctxt[i].id = i;
   6539 	}
   6540 
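	/* AMRR (adaptive multi-rate retry) tuning parameters. */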
   6541 	sc->sc_amrr.amrr_min_success_threshold =  1;
   6542 	sc->sc_amrr.amrr_max_success_threshold = 15;
   6543 
	/* IBSS channel undefined for now; default to channel 1 as a placeholder. */
   6545 	ic->ic_ibss_chan = &ic->ic_channels[1];
   6546 
   6547 #if 0
   6548 	/* Max RSSI */
   6549 	ic->ic_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
   6550 #endif
   6551 
   6552 	ifp->if_softc = sc;
   6553 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   6554 	ifp->if_init = iwm_init;
   6555 	ifp->if_stop = iwm_stop;
   6556 	ifp->if_ioctl = iwm_ioctl;
   6557 	ifp->if_start = iwm_start;
   6558 	ifp->if_watchdog = iwm_watchdog;
   6559 	IFQ_SET_READY(&ifp->if_snd);
   6560 	memcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
   6561 
   6562 	if_initialize(ifp);
   6563 	ieee80211_ifattach(ic);
   6564 	if_register(ifp);
   6565 
   6566 	ic->ic_node_alloc = iwm_node_alloc;
   6567 
   6568 	/* Override 802.11 state transition machine. */
   6569 	sc->sc_newstate = ic->ic_newstate;
   6570 	ic->ic_newstate = iwm_newstate;
   6571 	ieee80211_media_init(ic, iwm_media_change, ieee80211_media_status);
   6572 	ieee80211_announce(ic);
   6573 
   6574 	iwm_radiotap_attach(sc);
   6575 	callout_init(&sc->sc_calib_to, 0);
   6576 	callout_setfunc(&sc->sc_calib_to, iwm_calib_timeout, sc);
   6577 
	/* task_set(&sc->init_task, iwm_init_task, sc); */
   6579 }
   6580 
   6581 static void
   6582 iwm_attach(device_t parent, device_t self, void *aux)
   6583 {
   6584 	struct iwm_softc *sc = device_private(self);
   6585 	struct pci_attach_args *pa = aux;
   6586 	pci_intr_handle_t ih;
   6587 	pcireg_t reg, memtype;
   6588 	const char *intrstr;
   6589 	int error;
   6590 	int txq_i;
   6591 
   6592 	sc->sc_dev = self;
   6593 	sc->sc_pct = pa->pa_pc;
   6594 	sc->sc_pcitag = pa->pa_tag;
   6595 	sc->sc_dmat = pa->pa_dmat;
   6596 	sc->sc_pciid = pa->pa_id;
   6597 
   6598 	pci_aprint_devinfo(pa, NULL);
   6599 
	/*
	 * Get the offset of the PCI Express Capability Structure in PCI
	 * Configuration Space.  pci_get_capability() returns non-zero on
	 * success, so zero means the capability was not found.
	 */
   6604 	error = pci_get_capability(sc->sc_pct, sc->sc_pcitag,
   6605 	    PCI_CAP_PCIEXPRESS, &sc->sc_cap_off, NULL);
   6606 	if (error == 0) {
   6607 		aprint_error_dev(self,
   6608 		    "PCIe capability structure not found!\n");
   6609 		return;
   6610 	}
   6611 
   6612 	/* Clear device-specific "PCI retry timeout" register (41h). */
   6613 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
   6614 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
   6615 
   6616 	/* Enable bus-mastering and hardware bug workaround. */
   6617 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
   6618 	reg |= PCI_COMMAND_MASTER_ENABLE;
	/* We use legacy INTx interrupts; make sure they are not disabled. */
   6620 	if (reg & PCI_COMMAND_INTERRUPT_DISABLE) {
   6621 		reg &= ~PCI_COMMAND_INTERRUPT_DISABLE;
   6622 	}
   6623 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, reg);
   6624 
   6625 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START);
   6626 	error = pci_mapreg_map(pa, PCI_MAPREG_START, memtype, 0,
   6627 	    &sc->sc_st, &sc->sc_sh, NULL, &sc->sc_sz);
   6628 	if (error != 0) {
   6629 		aprint_error_dev(self, "can't map mem space\n");
   6630 		return;
   6631 	}
   6632 
   6633 	/* Install interrupt handler. */
   6634 	if (pci_intr_map(pa, &ih)) {
   6635 		aprint_error_dev(self, "can't map interrupt\n");
   6636 		return;
   6637 	}
   6638 
   6639 	char intrbuf[PCI_INTRSTR_LEN];
   6640 	intrstr = pci_intr_string(sc->sc_pct, ih, intrbuf, sizeof(intrbuf));
   6641 	sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET, iwm_intr, sc);
   6642 	if (sc->sc_ih == NULL) {
   6643 		aprint_error_dev(self, "can't establish interrupt");
   6644 		if (intrstr != NULL)
   6645 			aprint_error(" at %s", intrstr);
   6646 		aprint_error("\n");
   6647 		return;
   6648 	}
   6649 	aprint_normal_dev(self, "interrupting at %s\n", intrstr);
   6650 
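	/* No synchronous command response is awaited yet. */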
   6651 	sc->sc_wantresp = -1;
   6652 
   6653 	switch (PCI_PRODUCT(sc->sc_pciid)) {
   6654 	case PCI_PRODUCT_INTEL_WIFI_LINK_7260_1:
   6655 	case PCI_PRODUCT_INTEL_WIFI_LINK_7260_2:
   6656 		sc->sc_fwname = "iwlwifi-7260-9.ucode";
   6657 		break;
   6658 	case PCI_PRODUCT_INTEL_WIFI_LINK_3160_1:
   6659 	case PCI_PRODUCT_INTEL_WIFI_LINK_3160_2:
   6660 		sc->sc_fwname = "iwlwifi-3160-9.ucode";
   6661 		break;
   6662 	case PCI_PRODUCT_INTEL_WIFI_LINK_7265_1:
   6663 	case PCI_PRODUCT_INTEL_WIFI_LINK_7265_2:
   6664 		sc->sc_fwname = "iwlwifi-7265-9.ucode";
   6665 		break;
   6666 	default:
		aprint_error_dev(self, "unknown product %#x\n",
		    PCI_PRODUCT(sc->sc_pciid));
   6669 		return;
   6670 	}
   6671 	DPRINTF(("%s: firmware=%s\n", DEVNAME(sc), sc->sc_fwname));
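	/* Size of the DMA buffer used for firmware chunk transfers. */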
   6672 	sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
   6673 
   6674 	/*
   6675 	 * We now start fiddling with the hardware
   6676 	 */
   6677 
   6678 	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
   6679 	if (iwm_prepare_card_hw(sc) != 0) {
   6680 		aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
   6681 		return;
   6682 	}
   6683 
   6684 	/* Allocate DMA memory for firmware transfers. */
   6685 	if ((error = iwm_alloc_fwmem(sc)) != 0) {
   6686 		aprint_error_dev(sc->sc_dev,
   6687 		    "could not allocate memory for firmware\n");
   6688 		return;
   6689 	}
   6690 
   6691 	/* Allocate "Keep Warm" page. */
   6692 	if ((error = iwm_alloc_kw(sc)) != 0) {
   6693 		aprint_error_dev(sc->sc_dev,
   6694 		    "could not allocate keep warm page\n");
   6695 		goto fail1;
   6696 	}
   6697 
   6698 	/* We use ICT interrupts */
   6699 	if ((error = iwm_alloc_ict(sc)) != 0) {
   6700 		aprint_error_dev(sc->sc_dev, "could not allocate ICT table\n");
   6701 		goto fail2;
   6702 	}
   6703 
   6704 	/* Allocate TX scheduler "rings". */
   6705 	if ((error = iwm_alloc_sched(sc)) != 0) {
   6706 		aprint_error_dev(sc->sc_dev,
   6707 		    "could not allocate TX scheduler rings\n");
   6708 		goto fail3;
   6709 	}
   6710 
   6711 	/* Allocate TX rings */
   6712 	for (txq_i = 0; txq_i < __arraycount(sc->txq); txq_i++) {
   6713 		if ((error = iwm_alloc_tx_ring(sc,
   6714 		    &sc->txq[txq_i], txq_i)) != 0) {
   6715 			aprint_error_dev(sc->sc_dev,
   6716 			    "could not allocate TX ring %d\n", txq_i);
   6717 			goto fail4;
   6718 		}
   6719 	}
   6720 
   6721 	/* Allocate RX ring. */
   6722 	if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
   6723 		aprint_error_dev(sc->sc_dev, "could not allocate RX ring\n");
   6724 		goto fail4;
   6725 	}
   6726 
   6727 	workqueue_create(&sc->sc_eswq, "iwmes",
   6728 	    iwm_endscan_cb, sc, PRI_NONE, IPL_NET, 0);
   6729 	workqueue_create(&sc->sc_nswq, "iwmns",
   6730 	    iwm_newstate_cb, sc, PRI_NONE, IPL_NET, 0);
   6731 
   6732 	/* Clear pending interrupts. */
   6733 	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
   6734 
	/*
	 * We cannot complete the attach before the file system is mounted,
	 * because the MAC address cannot be read without loading the
	 * firmware from disk.  So we postpone the rest until mountroot has
	 * run.  Note that if the firmware is not present when the hook
	 * runs, a full driver unload/load cycle (or a reboot) is required.
	 */
   6743 	config_mountroot(self, iwm_attach_hook);
   6744 
   6745 	return;
   6746 
   6747 	/* Free allocated memory if something failed during attachment. */
   6748 fail4:	while (--txq_i >= 0)
   6749 		iwm_free_tx_ring(sc, &sc->txq[txq_i]);
   6750 	iwm_free_sched(sc);
   6751 fail3:	if (sc->ict_dma.vaddr != NULL)
   6752 		iwm_free_ict(sc);
   6753 fail2:	iwm_free_kw(sc);
   6754 fail1:	iwm_free_fwmem(sc);
   6755 }
   6756 
   6757 /*
   6758  * Attach the interface to 802.11 radiotap.
   6759  */
   6760 void
   6761 iwm_radiotap_attach(struct iwm_softc *sc)
   6762 {
   6763 	struct ifnet *ifp = sc->sc_ic.ic_ifp;
   6764 
   6765 	bpf_attach2(ifp, DLT_IEEE802_11_RADIO,
   6766 	    sizeof (struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN,
   6767 	    &sc->sc_drvbpf);
   6768 
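	/* Pre-fill the static fields of the RX and TX radiotap headers. */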
   6769 	sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
   6770 	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
   6771 	sc->sc_rxtap.wr_ihdr.it_present = htole32(IWM_RX_RADIOTAP_PRESENT);
   6772 
   6773 	sc->sc_txtap_len = sizeof sc->sc_txtapu;
   6774 	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
   6775 	sc->sc_txtap.wt_ihdr.it_present = htole32(IWM_TX_RADIOTAP_PRESENT);
   6776 }
   6777 
   6778 #if 0
   6779 static void
   6780 iwm_init_task(void *arg1)
   6781 {
   6782 	struct iwm_softc *sc = arg1;
   6783 	struct ifnet *ifp = &sc->sc_ic.ic_if;
   6784 	int s;
   6785 
   6786 	s = splnet();
   6787 	while (sc->sc_flags & IWM_FLAG_BUSY)
   6788 		tsleep(&sc->sc_flags, 0, "iwmpwr", 0);
   6789 	sc->sc_flags |= IWM_FLAG_BUSY;
   6790 
   6791 	iwm_stop(ifp, 0);
   6792 	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP)
   6793 		iwm_init(ifp);
   6794 
   6795 	sc->sc_flags &= ~IWM_FLAG_BUSY;
   6796 	wakeup(&sc->sc_flags);
   6797 	splx(s);
   6798 }
   6799 
   6800 static void
   6801 iwm_wakeup(struct iwm_softc *sc)
   6802 {
   6803 	pcireg_t reg;
   6804 
   6805 	/* Clear device-specific "PCI retry timeout" register (41h). */
   6806 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
   6807 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
   6808 
   6809 	iwm_init_task(sc);
   6810 }
   6811 
   6812 static int
   6813 iwm_activate(device_t self, enum devact act)
   6814 {
   6815 	struct iwm_softc *sc = device_private(self);
   6816 	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
   6817 
   6818 	switch (act) {
   6819 	case DVACT_DEACTIVATE:
   6820 		if (ifp->if_flags & IFF_RUNNING)
   6821 			iwm_stop(ifp, 0);
   6822 		return 0;
   6823 	default:
   6824 		return EOPNOTSUPP;
   6825 	}
   6826 }
   6827 #endif
   6828 
   6829 CFATTACH_DECL_NEW(iwm, sizeof(struct iwm_softc), iwm_match, iwm_attach,
   6830 	NULL, NULL);
   6831