      1 /*	$NetBSD: if_iwm.c,v 1.5 2015/02/13 18:57:47 nonaka Exp $	*/
      2 /*	OpenBSD: if_iwm.c,v 1.18 2015/02/11 01:12:42 brad Exp	*/
      3 
      4 /*
      5  * Copyright (c) 2014 genua mbh <info@genua.de>
      6  * Copyright (c) 2014 Fixup Software Ltd.
      7  *
      8  * Permission to use, copy, modify, and distribute this software for any
      9  * purpose with or without fee is hereby granted, provided that the above
     10  * copyright notice and this permission notice appear in all copies.
     11  *
     12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
     13  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
     14  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
     15  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
     16  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
     17  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
     18  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
     19  */
     20 
     21 /*-
     22  * Based on BSD-licensed source modules in the Linux iwlwifi driver,
     23  * which were used as the reference documentation for this implementation.
     24  *
     25  * Driver version we are currently based off of is
     26  * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
     27  *
     28  ***********************************************************************
     29  *
     30  * This file is provided under a dual BSD/GPLv2 license.  When using or
     31  * redistributing this file, you may do so under either license.
     32  *
     33  * GPL LICENSE SUMMARY
     34  *
     35  * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
     36  *
     37  * This program is free software; you can redistribute it and/or modify
     38  * it under the terms of version 2 of the GNU General Public License as
     39  * published by the Free Software Foundation.
     40  *
     41  * This program is distributed in the hope that it will be useful, but
     42  * WITHOUT ANY WARRANTY; without even the implied warranty of
     43  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
     44  * General Public License for more details.
     45  *
     46  * You should have received a copy of the GNU General Public License
     47  * along with this program; if not, write to the Free Software
     48  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
     49  * USA
     50  *
     51  * The full GNU General Public License is included in this distribution
     52  * in the file called COPYING.
     53  *
     54  * Contact Information:
     55  *  Intel Linux Wireless <ilw@linux.intel.com>
     56  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
     57  *
     58  *
     59  * BSD LICENSE
     60  *
     61  * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
     62  * All rights reserved.
     63  *
     64  * Redistribution and use in source and binary forms, with or without
     65  * modification, are permitted provided that the following conditions
     66  * are met:
     67  *
     68  *  * Redistributions of source code must retain the above copyright
     69  *    notice, this list of conditions and the following disclaimer.
     70  *  * Redistributions in binary form must reproduce the above copyright
     71  *    notice, this list of conditions and the following disclaimer in
     72  *    the documentation and/or other materials provided with the
     73  *    distribution.
     74  *  * Neither the name Intel Corporation nor the names of its
     75  *    contributors may be used to endorse or promote products derived
     76  *    from this software without specific prior written permission.
     77  *
     78  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
     79  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
     80  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
     81  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
     82  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
     83  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
     84  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     85  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     86  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     87  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
     88  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     89  */
     90 
     91 /*-
     92  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
     93  *
     94  * Permission to use, copy, modify, and distribute this software for any
     95  * purpose with or without fee is hereby granted, provided that the above
     96  * copyright notice and this permission notice appear in all copies.
     97  *
     98  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
     99  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    100  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    101  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    102  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    103  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    104  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
    105  */
    106 
    107 #include <sys/cdefs.h>
    108 __KERNEL_RCSID(0, "$NetBSD: if_iwm.c,v 1.5 2015/02/13 18:57:47 nonaka Exp $");
    109 
    110 #include <sys/param.h>
    111 #include <sys/conf.h>
    112 #include <sys/kernel.h>
    113 #include <sys/kmem.h>
    114 #include <sys/mbuf.h>
    115 #include <sys/mutex.h>
    116 #include <sys/proc.h>
    117 #include <sys/socket.h>
    118 #include <sys/sockio.h>
    119 #include <sys/systm.h>
    120 
    121 #include <sys/cpu.h>
    122 #include <sys/bus.h>
    123 #include <sys/workqueue.h>
    124 #include <machine/endian.h>
    125 #include <machine/intr.h>
    126 
    127 #include <dev/pci/pcireg.h>
    128 #include <dev/pci/pcivar.h>
    129 #include <dev/pci/pcidevs.h>
    130 #include <dev/firmload.h>
    131 
    132 #include <net/bpf.h>
    133 #include <net/if.h>
    134 #include <net/if_arp.h>
    135 #include <net/if_dl.h>
    136 #include <net/if_media.h>
    137 #include <net/if_types.h>
    138 #include <net/if_ether.h>
    139 
    140 #include <netinet/in.h>
    141 #include <netinet/in_systm.h>
    142 #include <netinet/ip.h>
    143 
    144 #include <net80211/ieee80211_var.h>
    145 #include <net80211/ieee80211_amrr.h>
    146 #include <net80211/ieee80211_radiotap.h>
    147 
    148 #define DEVNAME(_s)	device_xname((_s)->sc_dev)
    149 #define IC2IFP(_ic_)	((_ic_)->ic_ifp)
    150 
    151 #define le16_to_cpup(_a_) (le16toh(*(const uint16_t *)(_a_)))
    152 #define le32_to_cpup(_a_) (le32toh(*(const uint32_t *)(_a_)))
    153 
    154 #ifdef IWM_DEBUG
    155 #define DPRINTF(x)	do { if (iwm_debug > 0) printf x; } while (0)
    156 #define DPRINTFN(n, x)	do { if (iwm_debug >= (n)) printf x; } while (0)
    157 int iwm_debug = 1;
    158 #else
    159 #define DPRINTF(x)	do { ; } while (0)
    160 #define DPRINTFN(n, x)	do { ; } while (0)
    161 #endif
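
/*
 * Note: DPRINTF takes its whole argument list as one parenthesized
 * expression, so calls use double parentheses, e.g. (hypothetical message):
 *	DPRINTF(("%s: something happened\n", DEVNAME(sc)));
 * DPRINTFN(n, ...) only prints when iwm_debug >= n.
 */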
    162 
    163 #include <dev/pci/if_iwmreg.h>
    164 #include <dev/pci/if_iwmvar.h>
    165 
    166 static const uint8_t iwm_nvm_channels[] = {
    167 	/* 2.4 GHz */
    168 	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
    169 	/* 5 GHz */
     170 	36, 40, 44, 48, 52, 56, 60, 64,
    171 	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
    172 	149, 153, 157, 161, 165
    173 };
    174 #define IWM_NUM_2GHZ_CHANNELS	14
    175 
    176 /* It looks like 11a TX is broken, unfortunately. */
    177 #define IWM_NO_5GHZ		1
    178 
    179 static const struct iwm_rate {
    180 	uint8_t rate;
    181 	uint8_t plcp;
    182 } iwm_rates[] = {
    183 	{   2,	IWM_RATE_1M_PLCP  },
    184 	{   4,	IWM_RATE_2M_PLCP  },
    185 	{  11,	IWM_RATE_5M_PLCP  },
    186 	{  22,	IWM_RATE_11M_PLCP },
    187 	{  12,	IWM_RATE_6M_PLCP  },
    188 	{  18,	IWM_RATE_9M_PLCP  },
    189 	{  24,	IWM_RATE_12M_PLCP },
    190 	{  36,	IWM_RATE_18M_PLCP },
    191 	{  48,	IWM_RATE_24M_PLCP },
    192 	{  72,	IWM_RATE_36M_PLCP },
    193 	{  96,	IWM_RATE_48M_PLCP },
    194 	{ 108,	IWM_RATE_54M_PLCP },
    195 };
    196 #define IWM_RIDX_CCK	0
    197 #define IWM_RIDX_OFDM	4
    198 #define IWM_RIDX_MAX	(__arraycount(iwm_rates)-1)
    199 #define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
    200 #define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
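
/*
 * Note: the 'rate' field above is in 500 kb/s units, following the usual
 * net80211 rate-set convention (2 = 1 Mb/s CCK, ..., 108 = 54 Mb/s OFDM).
 * The first IWM_RIDX_OFDM (4) entries are the CCK rates, which is what
 * IWM_RIDX_IS_CCK() and IWM_RIDX_IS_OFDM() rely on.
 */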
    201 
    202 struct iwm_newstate_state {
    203 	struct work ns_wk;
    204 	enum ieee80211_state ns_nstate;
    205 	int ns_arg;
    206 	int ns_generation;
    207 };
    208 
    209 static int	iwm_store_cscheme(struct iwm_softc *, uint8_t *, size_t);
    210 static int	iwm_firmware_store_section(struct iwm_softc *,
    211 		    enum iwm_ucode_type, uint8_t *, size_t);
    212 static int	iwm_set_default_calib(struct iwm_softc *, const void *);
    213 static int	iwm_read_firmware(struct iwm_softc *);
    214 static uint32_t iwm_read_prph(struct iwm_softc *, uint32_t);
    215 static void	iwm_write_prph(struct iwm_softc *, uint32_t, uint32_t);
    216 #ifdef IWM_DEBUG
    217 static int	iwm_read_mem(struct iwm_softc *, uint32_t, void *, int);
    218 #endif
    219 static int	iwm_write_mem(struct iwm_softc *, uint32_t, const void *, int);
    220 static int	iwm_write_mem32(struct iwm_softc *, uint32_t, uint32_t);
    221 static int	iwm_poll_bit(struct iwm_softc *, int, uint32_t, uint32_t, int);
    222 static int	iwm_nic_lock(struct iwm_softc *);
    223 static void	iwm_nic_unlock(struct iwm_softc *);
    224 static void	iwm_set_bits_mask_prph(struct iwm_softc *, uint32_t, uint32_t,
    225 		    uint32_t);
    226 static void	iwm_set_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
    227 static void	iwm_clear_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
    228 static int	iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *,
    229 		    bus_size_t, bus_size_t);
    230 static void	iwm_dma_contig_free(struct iwm_dma_info *);
    231 static int	iwm_alloc_fwmem(struct iwm_softc *);
    232 static void	iwm_free_fwmem(struct iwm_softc *);
    233 static int	iwm_alloc_sched(struct iwm_softc *);
    234 static void	iwm_free_sched(struct iwm_softc *);
    235 static int	iwm_alloc_kw(struct iwm_softc *);
    236 static void	iwm_free_kw(struct iwm_softc *);
    237 static int	iwm_alloc_ict(struct iwm_softc *);
    238 static void	iwm_free_ict(struct iwm_softc *);
    239 static int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
    240 static void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
    241 static void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
    242 static int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
    243 		    int);
    244 static void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
    245 static void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
    246 static void	iwm_enable_rfkill_int(struct iwm_softc *);
    247 static int	iwm_check_rfkill(struct iwm_softc *);
    248 static void	iwm_enable_interrupts(struct iwm_softc *);
    249 static void	iwm_restore_interrupts(struct iwm_softc *);
    250 static void	iwm_disable_interrupts(struct iwm_softc *);
    251 static void	iwm_ict_reset(struct iwm_softc *);
    252 static int	iwm_set_hw_ready(struct iwm_softc *);
    253 static int	iwm_prepare_card_hw(struct iwm_softc *);
    254 static void	iwm_apm_config(struct iwm_softc *);
    255 static int	iwm_apm_init(struct iwm_softc *);
    256 static void	iwm_apm_stop(struct iwm_softc *);
    257 static int	iwm_start_hw(struct iwm_softc *);
    258 static void	iwm_stop_device(struct iwm_softc *);
    259 static void	iwm_set_pwr(struct iwm_softc *);
    260 static void	iwm_mvm_nic_config(struct iwm_softc *);
    261 static int	iwm_nic_rx_init(struct iwm_softc *);
    262 static int	iwm_nic_tx_init(struct iwm_softc *);
    263 static int	iwm_nic_init(struct iwm_softc *);
    264 static void	iwm_enable_txq(struct iwm_softc *, int, int);
    265 static int	iwm_post_alive(struct iwm_softc *);
    266 static int	iwm_is_valid_channel(uint16_t);
    267 static uint8_t	iwm_ch_id_to_ch_index(uint16_t);
    268 static uint16_t iwm_channel_id_to_papd(uint16_t);
    269 static uint16_t iwm_channel_id_to_txp(struct iwm_softc *, uint16_t);
    270 static int	iwm_phy_db_get_section_data(struct iwm_softc *, uint32_t,
    271 		    uint8_t **, uint16_t *, uint16_t);
    272 static int	iwm_send_phy_db_cmd(struct iwm_softc *, uint16_t, uint16_t,
    273 		    void *);
     274 static int	iwm_send_phy_db_data(struct iwm_softc *);
    276 static void	iwm_mvm_te_v2_to_v1(const struct iwm_time_event_cmd_v2 *,
    277 		    struct iwm_time_event_cmd_v1 *);
    278 static int	iwm_mvm_send_time_event_cmd(struct iwm_softc *,
    279 		    const struct iwm_time_event_cmd_v2 *);
    280 static int	iwm_mvm_time_event_send_add(struct iwm_softc *,
    281 		    struct iwm_node *, void *, struct iwm_time_event_cmd_v2 *);
    282 static void	iwm_mvm_protect_session(struct iwm_softc *, struct iwm_node *,
    283 		    uint32_t, uint32_t, uint32_t);
    284 static int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
    285 		    uint16_t, uint8_t *, uint16_t *);
    286 static int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
    287 		    uint16_t *);
    288 static void	iwm_init_channel_map(struct iwm_softc *,
    289 		    const uint16_t * const);
    290 static int	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
    291 		    const uint16_t *, const uint16_t *, uint8_t, uint8_t);
    292 static int	iwm_nvm_init(struct iwm_softc *);
    293 static int	iwm_firmware_load_chunk(struct iwm_softc *, uint32_t,
    294 		    const uint8_t *, uint32_t);
    295 static int	iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
    296 static int	iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
    297 static int	iwm_fw_alive(struct iwm_softc *, uint32_t);
    298 static int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
    299 static int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
    300 static int	iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
    301 		    enum iwm_ucode_type);
    302 static int	iwm_run_init_mvm_ucode(struct iwm_softc *, int);
    303 static int	iwm_rx_addbuf(struct iwm_softc *, int, int);
    304 static int	iwm_mvm_calc_rssi(struct iwm_softc *, struct iwm_rx_phy_info *);
    305 static int	iwm_mvm_get_signal_strength(struct iwm_softc *,
    306 		    struct iwm_rx_phy_info *);
    307 static void	iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
    308 		    struct iwm_rx_packet *, struct iwm_rx_data *);
    309 static int	iwm_get_noise(const struct iwm_mvm_statistics_rx_non_phy *);
    310 static void	iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct iwm_rx_packet *,
    311 		    struct iwm_rx_data *);
    312 static void	iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
    313 		    struct iwm_rx_packet *, struct iwm_node *);
    314 static void	iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
    315 		    struct iwm_rx_data *);
    316 static int	iwm_mvm_binding_cmd(struct iwm_softc *, struct iwm_node *,
    317 		    uint32_t);
    318 static int	iwm_mvm_binding_update(struct iwm_softc *, struct iwm_node *,
    319 		    int);
    320 static int	iwm_mvm_binding_add_vif(struct iwm_softc *, struct iwm_node *);
    321 static void	iwm_mvm_phy_ctxt_cmd_hdr(struct iwm_softc *,
    322 		    struct iwm_mvm_phy_ctxt *, struct iwm_phy_context_cmd *,
    323 		    uint32_t, uint32_t);
    324 static void	iwm_mvm_phy_ctxt_cmd_data(struct iwm_softc *,
    325 		    struct iwm_phy_context_cmd *, struct ieee80211_channel *,
    326 		    uint8_t, uint8_t);
    327 static int	iwm_mvm_phy_ctxt_apply(struct iwm_softc *,
    328 		    struct iwm_mvm_phy_ctxt *, uint8_t, uint8_t, uint32_t,
    329 		    uint32_t);
    330 static int	iwm_mvm_phy_ctxt_add(struct iwm_softc *,
    331 		    struct iwm_mvm_phy_ctxt *, struct ieee80211_channel *,
    332 		    uint8_t, uint8_t);
    333 static int	iwm_mvm_phy_ctxt_changed(struct iwm_softc *,
    334 		    struct iwm_mvm_phy_ctxt *, struct ieee80211_channel *,
    335 		    uint8_t, uint8_t);
    336 static int	iwm_send_cmd(struct iwm_softc *, struct iwm_host_cmd *);
    337 static int	iwm_mvm_send_cmd_pdu(struct iwm_softc *, uint8_t, uint32_t,
    338 		    uint16_t, const void *);
    339 static int	iwm_mvm_send_cmd_status(struct iwm_softc *,
    340 		    struct iwm_host_cmd *, uint32_t *);
    341 static int	iwm_mvm_send_cmd_pdu_status(struct iwm_softc *, uint8_t,
    342 		    uint16_t, const void *, uint32_t *);
    343 static void	iwm_free_resp(struct iwm_softc *, struct iwm_host_cmd *);
    344 static void	iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
    345 #if 0
    346 static void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
    347 		    uint16_t);
    348 #endif
    349 static const struct iwm_rate *iwm_tx_fill_cmd(struct iwm_softc *,
    350 		    struct iwm_node *, struct ieee80211_frame *,
    351 		    struct iwm_tx_cmd *);
    352 static int	iwm_tx(struct iwm_softc *, struct mbuf *,
    353 		    struct ieee80211_node *, int);
    354 static int	iwm_mvm_beacon_filter_send_cmd(struct iwm_softc *,
    355 		    struct iwm_beacon_filter_cmd *);
    356 static void	iwm_mvm_beacon_filter_set_cqm_params(struct iwm_softc *,
    357 		    struct iwm_node *, struct iwm_beacon_filter_cmd *);
    358 static int	iwm_mvm_update_beacon_abort(struct iwm_softc *,
    359 		    struct iwm_node *, int);
    360 static void	iwm_mvm_power_log(struct iwm_softc *,
    361 		    struct iwm_mac_power_cmd *);
    362 static void	iwm_mvm_power_build_cmd(struct iwm_softc *, struct iwm_node *,
    363 		    struct iwm_mac_power_cmd *);
    364 static int	iwm_mvm_power_mac_update_mode(struct iwm_softc *,
    365 		    struct iwm_node *);
    366 static int	iwm_mvm_power_update_device(struct iwm_softc *);
    367 static int	iwm_mvm_enable_beacon_filter(struct iwm_softc *,
    368 		    struct iwm_node *);
    369 static int	iwm_mvm_disable_beacon_filter(struct iwm_softc *,
    370 		    struct iwm_node *);
    371 static void	iwm_mvm_add_sta_cmd_v6_to_v5(struct iwm_mvm_add_sta_cmd_v6 *,
    372 		    struct iwm_mvm_add_sta_cmd_v5 *);
    373 static int	iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *,
    374 		    struct iwm_mvm_add_sta_cmd_v6 *, int *);
    375 static int	iwm_mvm_sta_send_to_fw(struct iwm_softc *, struct iwm_node *,
    376 		    int);
    377 static int	iwm_mvm_add_sta(struct iwm_softc *, struct iwm_node *);
    378 static int	iwm_mvm_update_sta(struct iwm_softc *, struct iwm_node *);
    379 static int	iwm_mvm_add_int_sta_common(struct iwm_softc *,
    380 		    struct iwm_int_sta *, const uint8_t *, uint16_t, uint16_t);
    381 static int	iwm_mvm_add_aux_sta(struct iwm_softc *);
    382 static uint16_t iwm_mvm_scan_rx_chain(struct iwm_softc *);
    383 static uint32_t iwm_mvm_scan_max_out_time(struct iwm_softc *, uint32_t, int);
    384 static uint32_t iwm_mvm_scan_suspend_time(struct iwm_softc *, int);
    385 static uint32_t iwm_mvm_scan_rxon_flags(struct iwm_softc *, int);
    386 static uint32_t iwm_mvm_scan_rate_n_flags(struct iwm_softc *, int, int);
    387 static uint16_t iwm_mvm_get_active_dwell(struct iwm_softc *, int, int);
    388 static uint16_t iwm_mvm_get_passive_dwell(struct iwm_softc *, int);
    389 static int	iwm_mvm_scan_fill_channels(struct iwm_softc *,
    390 		    struct iwm_scan_cmd *, int, int, int);
    391 static uint16_t iwm_mvm_fill_probe_req(struct iwm_softc *,
    392 		    struct ieee80211_frame *, const uint8_t *, int,
    393 		    const uint8_t *, int, const uint8_t *, int, int);
    394 static int	iwm_mvm_scan_request(struct iwm_softc *, int, int, uint8_t *,
    395 		    int);
    396 static void	iwm_mvm_ack_rates(struct iwm_softc *, struct iwm_node *, int *,
    397 		    int *);
    398 static void	iwm_mvm_mac_ctxt_cmd_common(struct iwm_softc *,
    399 		    struct iwm_node *, struct iwm_mac_ctx_cmd *, uint32_t);
    400 static int	iwm_mvm_mac_ctxt_send_cmd(struct iwm_softc *,
    401 		    struct iwm_mac_ctx_cmd *);
    402 static void	iwm_mvm_mac_ctxt_cmd_fill_sta(struct iwm_softc *,
    403 		    struct iwm_node *, struct iwm_mac_data_sta *, int);
    404 static int	iwm_mvm_mac_ctxt_cmd_station(struct iwm_softc *,
    405 		    struct iwm_node *, uint32_t);
    406 static int	iwm_mvm_mac_ctx_send(struct iwm_softc *, struct iwm_node *,
    407 		    uint32_t);
    408 static int	iwm_mvm_mac_ctxt_add(struct iwm_softc *, struct iwm_node *);
    409 static int	iwm_mvm_mac_ctxt_changed(struct iwm_softc *, struct iwm_node *);
    410 static int	iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_node *);
    411 static int	iwm_auth(struct iwm_softc *);
    412 static int	iwm_assoc(struct iwm_softc *);
    413 static int	iwm_release(struct iwm_softc *, struct iwm_node *);
    414 static void	iwm_calib_timeout(void *);
    415 static void	iwm_setrates(struct iwm_node *);
    416 static int	iwm_media_change(struct ifnet *);
    417 static void	iwm_newstate_cb(struct work *, void *);
    418 static int	iwm_newstate(struct ieee80211com *, enum ieee80211_state, int);
    419 static void	iwm_endscan_cb(struct work *, void *);
    420 static int	iwm_init_hw(struct iwm_softc *);
    421 static int	iwm_init(struct ifnet *);
    422 static void	iwm_start(struct ifnet *);
    423 static void	iwm_stop(struct ifnet *, int);
    424 static void	iwm_watchdog(struct ifnet *);
    425 static int	iwm_ioctl(struct ifnet *, u_long, void *);
    426 #ifdef IWM_DEBUG
    427 static const char *iwm_desc_lookup(uint32_t);
    428 static void	iwm_nic_error(struct iwm_softc *);
    429 #endif
    430 static void	iwm_notif_intr(struct iwm_softc *);
    431 static int	iwm_intr(void *);
    432 static int	iwm_preinit(struct iwm_softc *);
    433 static void	iwm_attach_hook(device_t);
    434 static void	iwm_attach(device_t, device_t, void *);
    435 #if 0
    436 static void	iwm_init_task(void *);
    437 static int	iwm_activate(device_t, enum devact);
    438 static void	iwm_wakeup(struct iwm_softc *);
    439 #endif
    440 static void	iwm_radiotap_attach(struct iwm_softc *);
    441 
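/*
 * Read the firmware image sc->sc_fwname via firmload(9) into
 * fw->fw_rawdata / fw->fw_rawsize.  On failure any partially allocated
 * buffer is left for the caller to release (see the "out" label below).
 */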
    442 static int
    443 iwm_firmload(struct iwm_softc *sc)
    444 {
    445 	struct iwm_fw_info *fw = &sc->sc_fw;
    446 	firmware_handle_t fwh;
    447 	int error;
    448 
    449 	/* Open firmware image. */
    450 	if ((error = firmware_open("if_iwm", sc->sc_fwname, &fwh)) != 0) {
    451 		aprint_error_dev(sc->sc_dev,
    452 		    "could not get firmware handle %s\n", sc->sc_fwname);
    453 		return error;
    454 	}
    455 
    456 	fw->fw_rawsize = firmware_get_size(fwh);
    457 	/*
    458 	 * Well, this is how the Linux driver checks it ....
    459 	 */
    460 	if (fw->fw_rawsize < sizeof(uint32_t)) {
    461 		aprint_error_dev(sc->sc_dev,
    462 		    "firmware too short: %zd bytes\n", fw->fw_rawsize);
    463 		error = EINVAL;
    464 		goto out;
    465 	}
    466 
    467 	/* some sanity */
    468 	if (fw->fw_rawsize > IWM_FWMAXSIZE) {
    469 		aprint_error_dev(sc->sc_dev,
    470 		    "firmware size is ridiculous: %zd bytes\n",
     471 		    fw->fw_rawsize);
    472 		error = EINVAL;
    473 		goto out;
    474 	}
    475 
    476 	/* Read the firmware. */
    477 	fw->fw_rawdata = kmem_alloc(fw->fw_rawsize, KM_SLEEP);
    478 	if (fw->fw_rawdata == NULL) {
    479 		aprint_error_dev(sc->sc_dev,
    480 		    "not enough memory to stock firmware %s\n", sc->sc_fwname);
    481 		error = ENOMEM;
    482 		goto out;
    483 	}
    484 	error = firmware_read(fwh, 0, fw->fw_rawdata, fw->fw_rawsize);
    485 	if (error) {
    486 		aprint_error_dev(sc->sc_dev,
    487 		    "could not read firmware %s\n", sc->sc_fwname);
    488 		goto out;
    489 	}
    490 
    491  out:
    492 	/* caller will release memory, if necessary */
    493 
    494 	firmware_close(fwh);
    495 	return error;
    496 }
    497 
     498 /*
     499  * Set ic_curchan from the DS Params IE; just maintaining status quo.
     500  */
    501 static void
    502 iwm_fix_channel(struct ieee80211com *ic, struct mbuf *m)
    503 {
    504 	struct ieee80211_frame *wh;
    505 	uint8_t subtype;
    506 	uint8_t *frm, *efrm;
    507 
    508 	wh = mtod(m, struct ieee80211_frame *);
    509 
    510 	if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_MGT)
    511 		return;
    512 
    513 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
    514 
    515 	if (subtype != IEEE80211_FC0_SUBTYPE_BEACON &&
    516 	    subtype != IEEE80211_FC0_SUBTYPE_PROBE_RESP)
    517 		return;
    518 
    519 	frm = (uint8_t *)(wh + 1);
    520 	efrm = mtod(m, uint8_t *) + m->m_len;
    521 
    522 	frm += 12;      /* skip tstamp, bintval and capinfo fields */
    523 	while (frm < efrm) {
    524 		if (*frm == IEEE80211_ELEMID_DSPARMS) {
    525 #if IEEE80211_CHAN_MAX < 255
    526 			if (frm[2] <= IEEE80211_CHAN_MAX)
    527 #endif
    528 				ic->ic_curchan = &ic->ic_channels[frm[2]];
    529 		}
    530 		frm += frm[1] + 2;
    531 	}
    532 }
    533 
    534 /*
    535  * Firmware parser.
    536  */
    537 
    538 static int
    539 iwm_store_cscheme(struct iwm_softc *sc, uint8_t *data, size_t dlen)
    540 {
    541 	struct iwm_fw_cscheme_list *l = (void *)data;
    542 
    543 	if (dlen < sizeof(*l) ||
    544 	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
    545 		return EINVAL;
    546 
    547 	/* we don't actually store anything for now, always use s/w crypto */
    548 
    549 	return 0;
    550 }
    551 
    552 static int
    553 iwm_firmware_store_section(struct iwm_softc *sc,
    554 	enum iwm_ucode_type type, uint8_t *data, size_t dlen)
    555 {
    556 	struct iwm_fw_sects *fws;
    557 	struct iwm_fw_onesect *fwone;
    558 
    559 	if (type >= IWM_UCODE_TYPE_MAX)
    560 		return EINVAL;
    561 	if (dlen < sizeof(uint32_t))
    562 		return EINVAL;
    563 
    564 	fws = &sc->sc_fw.fw_sects[type];
    565 	if (fws->fw_count >= IWM_UCODE_SECT_MAX)
    566 		return EINVAL;
    567 
    568 	fwone = &fws->fw_sect[fws->fw_count];
    569 
    570 	/* first 32bit are device load offset */
    571 	memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));
    572 
    573 	/* rest is data */
    574 	fwone->fws_data = data + sizeof(uint32_t);
    575 	fwone->fws_len = dlen - sizeof(uint32_t);
    576 
    577 	/* for freeing the buffer during driver unload */
    578 	fwone->fws_alloc = data;
    579 	fwone->fws_allocsize = dlen;
    580 
    581 	fws->fw_count++;
    582 	fws->fw_totlen += fwone->fws_len;
    583 
    584 	return 0;
    585 }
    586 
    587 /* iwlwifi: iwl-drv.c */
    588 struct iwm_tlv_calib_data {
    589 	uint32_t ucode_type;
    590 	struct iwm_tlv_calib_ctrl calib;
    591 } __packed;
    592 
    593 static int
    594 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
    595 {
    596 	const struct iwm_tlv_calib_data *def_calib = data;
    597 	uint32_t ucode_type = le32toh(def_calib->ucode_type);
    598 
    599 	if (ucode_type >= IWM_UCODE_TYPE_MAX) {
    600 		DPRINTF(("%s: Wrong ucode_type %u for default "
    601 		    "calibration.\n", DEVNAME(sc), ucode_type));
    602 		return EINVAL;
    603 	}
    604 
    605 	sc->sc_default_calib[ucode_type].flow_trigger =
    606 	    def_calib->calib.flow_trigger;
    607 	sc->sc_default_calib[ucode_type].event_trigger =
    608 	    def_calib->calib.event_trigger;
    609 
    610 	return 0;
    611 }
    612 
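/*
 * Load the firmware image and parse it into sc->sc_fw.  Roughly, the image
 * starts with a struct iwm_tlv_ucode_header (leading zero word,
 * IWM_TLV_UCODE_MAGIC, ucode version, among other fields), followed by a
 * sequence of TLVs: each is a struct iwm_ucode_tlv (type, length) followed
 * by 'length' bytes of payload, with the next TLV on a 4-byte boundary.
 */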
    613 static int
    614 iwm_read_firmware(struct iwm_softc *sc)
    615 {
    616 	struct iwm_fw_info *fw = &sc->sc_fw;
    617         struct iwm_tlv_ucode_header *uhdr;
    618         struct iwm_ucode_tlv tlv;
    619 	enum iwm_ucode_tlv_type tlv_type;
    620 	uint8_t *data;
    621 	int error, status;
    622 	size_t len;
    623 
    624 	if (fw->fw_status == IWM_FW_STATUS_NONE) {
    625 		fw->fw_status = IWM_FW_STATUS_INPROGRESS;
    626 	} else {
    627 		while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
    628 			tsleep(&sc->sc_fw, 0, "iwmfwp", 0);
    629 	}
    630 	status = fw->fw_status;
    631 
    632 	if (status == IWM_FW_STATUS_DONE)
    633 		return 0;
    634 
    635 	/*
    636 	 * Load firmware into driver memory.
    637 	 * fw_rawdata and fw_rawsize will be set.
    638 	 */
    639 	error = iwm_firmload(sc);
    640 	if (error != 0) {
    641 		aprint_error_dev(sc->sc_dev,
    642 		    "could not read firmware %s (error %d)\n",
    643 		    sc->sc_fwname, error);
    644 		goto out;
    645 	}
    646 
    647 	/*
    648 	 * Parse firmware contents
    649 	 */
    650 
    651 	uhdr = (void *)fw->fw_rawdata;
    652 	if (*(uint32_t *)fw->fw_rawdata != 0
    653 	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
    654 		aprint_error_dev(sc->sc_dev, "invalid firmware %s\n",
    655 		    sc->sc_fwname);
    656 		error = EINVAL;
    657 		goto out;
    658 	}
    659 
    660 	sc->sc_fwver = le32toh(uhdr->ver);
    661 	data = uhdr->data;
    662 	len = fw->fw_rawsize - sizeof(*uhdr);
    663 
    664 	while (len >= sizeof(tlv)) {
    665 		size_t tlv_len;
    666 		void *tlv_data;
    667 
    668 		memcpy(&tlv, data, sizeof(tlv));
    669 		tlv_len = le32toh(tlv.length);
    670 		tlv_type = le32toh(tlv.type);
    671 
    672 		len -= sizeof(tlv);
    673 		data += sizeof(tlv);
    674 		tlv_data = data;
    675 
    676 		if (len < tlv_len) {
    677 			aprint_error_dev(sc->sc_dev,
    678 			    "firmware too short: %zu bytes\n", len);
    679 			error = EINVAL;
    680 			goto parse_out;
    681 		}
    682 
    683 		switch ((int)tlv_type) {
    684 		case IWM_UCODE_TLV_PROBE_MAX_LEN:
    685 			if (tlv_len < sizeof(uint32_t)) {
    686 				error = EINVAL;
    687 				goto parse_out;
    688 			}
    689 			sc->sc_capa_max_probe_len
    690 			    = le32toh(*(uint32_t *)tlv_data);
    691 			/* limit it to something sensible */
    692 			if (sc->sc_capa_max_probe_len > (1<<16)) {
    693 				DPRINTF(("%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
    694 				    "ridiculous\n", DEVNAME(sc)));
    695 				error = EINVAL;
    696 				goto parse_out;
    697 			}
    698 			break;
    699 		case IWM_UCODE_TLV_PAN:
    700 			if (tlv_len) {
    701 				error = EINVAL;
    702 				goto parse_out;
    703 			}
    704 			sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
    705 			break;
    706 		case IWM_UCODE_TLV_FLAGS:
    707 			if (tlv_len < sizeof(uint32_t)) {
    708 				error = EINVAL;
    709 				goto parse_out;
    710 			}
    711 			/*
    712 			 * Apparently there can be many flags, but Linux driver
    713 			 * parses only the first one, and so do we.
    714 			 *
    715 			 * XXX: why does this override IWM_UCODE_TLV_PAN?
    716 			 * Intentional or a bug?  Observations from
    717 			 * current firmware file:
    718 			 *  1) TLV_PAN is parsed first
    719 			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
    720 			 * ==> this resets TLV_PAN to itself... hnnnk
    721 			 */
    722 			sc->sc_capaflags = le32toh(*(uint32_t *)tlv_data);
    723 			break;
    724 		case IWM_UCODE_TLV_CSCHEME:
    725 			if ((error = iwm_store_cscheme(sc,
    726 			    tlv_data, tlv_len)) != 0)
    727 				goto parse_out;
    728 			break;
    729 		case IWM_UCODE_TLV_NUM_OF_CPU:
    730 			if (tlv_len != sizeof(uint32_t)) {
    731 				error = EINVAL;
    732 				goto parse_out;
    733 			}
    734 			if (le32toh(*(uint32_t*)tlv_data) != 1) {
    735 				DPRINTF(("%s: driver supports "
     736 				    "only TLV_NUM_OF_CPU == 1\n", DEVNAME(sc)));
    737 				error = EINVAL;
    738 				goto parse_out;
    739 			}
    740 			break;
    741 		case IWM_UCODE_TLV_SEC_RT:
    742 			if ((error = iwm_firmware_store_section(sc,
    743 			    IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len)) != 0)
    744 				goto parse_out;
    745 			break;
    746 		case IWM_UCODE_TLV_SEC_INIT:
    747 			if ((error = iwm_firmware_store_section(sc,
    748 			    IWM_UCODE_TYPE_INIT, tlv_data, tlv_len)) != 0)
    749 				goto parse_out;
    750 			break;
    751 		case IWM_UCODE_TLV_SEC_WOWLAN:
    752 			if ((error = iwm_firmware_store_section(sc,
    753 			    IWM_UCODE_TYPE_WOW, tlv_data, tlv_len)) != 0)
    754 				goto parse_out;
    755 			break;
    756 		case IWM_UCODE_TLV_DEF_CALIB:
    757 			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
    758 				error = EINVAL;
    759 				goto parse_out;
    760 			}
    761 			if ((error = iwm_set_default_calib(sc, tlv_data)) != 0)
    762 				goto parse_out;
    763 			break;
    764 		case IWM_UCODE_TLV_PHY_SKU:
    765 			if (tlv_len != sizeof(uint32_t)) {
    766 				error = EINVAL;
    767 				goto parse_out;
    768 			}
    769 			sc->sc_fw_phy_config = le32toh(*(uint32_t *)tlv_data);
    770 			break;
    771 
    772 		case IWM_UCODE_TLV_API_CHANGES_SET:
    773 		case IWM_UCODE_TLV_ENABLED_CAPABILITIES:
    774 			/* ignore, not used by current driver */
    775 			break;
    776 
    777 		default:
    778 			DPRINTF(("%s: unknown firmware section %d, abort\n",
    779 			    DEVNAME(sc), tlv_type));
    780 			error = EINVAL;
    781 			goto parse_out;
    782 		}
    783 
    784 		len -= roundup(tlv_len, 4);
    785 		data += roundup(tlv_len, 4);
    786 	}
    787 
    788 	KASSERT(error == 0);
    789 
    790  parse_out:
    791 	if (error) {
    792 		aprint_error_dev(sc->sc_dev,
    793 		    "firmware parse error, section type %d\n", tlv_type);
    794 	}
    795 
    796 	if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
    797 		aprint_error_dev(sc->sc_dev,
    798 		    "device uses unsupported power ops\n");
    799 		error = ENOTSUP;
    800 	}
    801 
    802  out:
    803 	if (error)
    804 		fw->fw_status = IWM_FW_STATUS_NONE;
    805 	else
    806 		fw->fw_status = IWM_FW_STATUS_DONE;
    807 	wakeup(&sc->sc_fw);
    808 
    809 	if (error) {
    810 		kmem_free(fw->fw_rawdata, fw->fw_rawsize);
    811 		fw->fw_rawdata = NULL;
    812 	}
    813 	return error;
    814 }
    815 
    816 /*
    817  * basic device access
    818  */
    819 
    820 static uint32_t
    821 iwm_read_prph(struct iwm_softc *sc, uint32_t addr)
    822 {
    823 	IWM_WRITE(sc,
    824 	    IWM_HBUS_TARG_PRPH_RADDR, ((addr & 0x000fffff) | (3 << 24)));
    825 	IWM_BARRIER_READ_WRITE(sc);
    826 	return IWM_READ(sc, IWM_HBUS_TARG_PRPH_RDAT);
    827 }
    828 
    829 static void
    830 iwm_write_prph(struct iwm_softc *sc, uint32_t addr, uint32_t val)
    831 {
    832 	IWM_WRITE(sc,
    833 	    IWM_HBUS_TARG_PRPH_WADDR, ((addr & 0x000fffff) | (3 << 24)));
    834 	IWM_BARRIER_WRITE(sc);
    835 	IWM_WRITE(sc, IWM_HBUS_TARG_PRPH_WDAT, val);
    836 }
    837 
    838 #ifdef IWM_DEBUG
    839 /* iwlwifi: pcie/trans.c */
    840 static int
    841 iwm_read_mem(struct iwm_softc *sc, uint32_t addr, void *buf, int dwords)
    842 {
    843 	int offs, ret = 0;
    844 	uint32_t *vals = buf;
    845 
    846 	if (iwm_nic_lock(sc)) {
    847 		IWM_WRITE(sc, IWM_HBUS_TARG_MEM_RADDR, addr);
    848 		for (offs = 0; offs < dwords; offs++)
    849 			vals[offs] = IWM_READ(sc, IWM_HBUS_TARG_MEM_RDAT);
    850 		iwm_nic_unlock(sc);
    851 	} else {
    852 		ret = EBUSY;
    853 	}
    854 	return ret;
    855 }
    856 #endif
    857 
    858 /* iwlwifi: pcie/trans.c */
    859 static int
    860 iwm_write_mem(struct iwm_softc *sc, uint32_t addr, const void *buf, int dwords)
    861 {
    862 	int offs;
    863 	const uint32_t *vals = buf;
    864 
    865 	if (iwm_nic_lock(sc)) {
    866 		IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WADDR, addr);
    867 		/* WADDR auto-increments */
    868 		for (offs = 0; offs < dwords; offs++) {
    869 			uint32_t val = vals ? vals[offs] : 0;
    870 			IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WDAT, val);
    871 		}
    872 		iwm_nic_unlock(sc);
    873 	} else {
    874 		DPRINTF(("%s: write_mem failed\n", DEVNAME(sc)));
    875 		return EBUSY;
    876 	}
    877 	return 0;
    878 }
    879 
    880 static int
    881 iwm_write_mem32(struct iwm_softc *sc, uint32_t addr, uint32_t val)
    882 {
    883 	return iwm_write_mem(sc, addr, &val, 1);
    884 }
    885 
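/*
 * Poll 'reg' until (value & mask) == (bits & mask), checking every 10us
 * for at most 'timo' microseconds.  Returns 1 on success, 0 on timeout.
 */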
    886 static int
    887 iwm_poll_bit(struct iwm_softc *sc, int reg,
    888 	uint32_t bits, uint32_t mask, int timo)
    889 {
    890 	for (;;) {
    891 		if ((IWM_READ(sc, reg) & mask) == (bits & mask)) {
    892 			return 1;
    893 		}
    894 		if (timo < 10) {
    895 			return 0;
    896 		}
    897 		timo -= 10;
    898 		DELAY(10);
    899 	}
    900 }
    901 
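/*
 * Request MAC access and wait for the device clocks to come up before
 * touching prph/mem registers.  Returns 1 on success.  Callers pair it
 * with iwm_nic_unlock(), typically:
 *	if (iwm_nic_lock(sc)) {
 *		... prph/mem accesses ...
 *		iwm_nic_unlock(sc);
 *	}
 */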
    902 static int
    903 iwm_nic_lock(struct iwm_softc *sc)
    904 {
    905 	int rv = 0;
    906 
    907 	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
    908 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
    909 
    910 	if (iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
    911 	    IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
    912 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
    913 	     | IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP, 15000)) {
    914 	    	rv = 1;
    915 	} else {
    916 		/* jolt */
    917 		IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_FORCE_NMI);
    918 	}
    919 
    920 	return rv;
    921 }
    922 
    923 static void
    924 iwm_nic_unlock(struct iwm_softc *sc)
    925 {
    926 	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
    927 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
    928 }
    929 
    930 static void
    931 iwm_set_bits_mask_prph(struct iwm_softc *sc,
    932 	uint32_t reg, uint32_t bits, uint32_t mask)
    933 {
    934 	uint32_t val;
    935 
    936 	/* XXX: no error path? */
    937 	if (iwm_nic_lock(sc)) {
    938 		val = iwm_read_prph(sc, reg) & mask;
    939 		val |= bits;
    940 		iwm_write_prph(sc, reg, val);
    941 		iwm_nic_unlock(sc);
    942 	}
    943 }
    944 
    945 static void
    946 iwm_set_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
    947 {
    948 	iwm_set_bits_mask_prph(sc, reg, bits, ~0);
    949 }
    950 
    951 static void
    952 iwm_clear_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
    953 {
    954 	iwm_set_bits_mask_prph(sc, reg, 0, ~bits);
    955 }
    956 
    957 /*
    958  * DMA resource routines
    959  */
    960 
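/*
 * Allocate 'size' bytes of DMA-safe memory as a single physically
 * contiguous segment with the given alignment, map it into kernel virtual
 * space and load it into dma->map.  On success dma->vaddr and dma->paddr
 * are valid; on failure everything is torn down again.
 */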
    961 static int
    962 iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
    963     bus_size_t size, bus_size_t alignment)
    964 {
    965 	int nsegs, error;
    966 	void *va;
    967 
    968 	dma->tag = tag;
    969 	dma->size = size;
    970 
    971 	error = bus_dmamap_create(tag, size, 1, size, 0, BUS_DMA_NOWAIT,
    972 	    &dma->map);
    973 	if (error != 0)
    974 		goto fail;
    975 
    976 	error = bus_dmamem_alloc(tag, size, alignment, 0, &dma->seg, 1, &nsegs,
    977 	    BUS_DMA_NOWAIT);
    978 	if (error != 0)
    979 		goto fail;
    980 
    981 	error = bus_dmamem_map(tag, &dma->seg, 1, size, &va,
    982 	    BUS_DMA_NOWAIT);
    983 	if (error != 0)
    984 		goto fail;
    985 	dma->vaddr = va;
    986 
    987 	error = bus_dmamap_load(tag, dma->map, dma->vaddr, size, NULL,
    988 	    BUS_DMA_NOWAIT);
    989 	if (error != 0)
    990 		goto fail;
    991 
    992 	memset(dma->vaddr, 0, size);
    993 	bus_dmamap_sync(tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE);
    994 	dma->paddr = dma->map->dm_segs[0].ds_addr;
    995 
    996 	return 0;
    997 
    998 fail:	iwm_dma_contig_free(dma);
    999 	return error;
   1000 }
   1001 
   1002 static void
   1003 iwm_dma_contig_free(struct iwm_dma_info *dma)
   1004 {
   1005 	if (dma->map != NULL) {
   1006 		if (dma->vaddr != NULL) {
   1007 			bus_dmamap_sync(dma->tag, dma->map, 0, dma->size,
   1008 			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   1009 			bus_dmamap_unload(dma->tag, dma->map);
   1010 			bus_dmamem_unmap(dma->tag, dma->vaddr, dma->size);
   1011 			bus_dmamem_free(dma->tag, &dma->seg, 1);
   1012 			dma->vaddr = NULL;
   1013 		}
   1014 		bus_dmamap_destroy(dma->tag, dma->map);
   1015 		dma->map = NULL;
   1016 	}
   1017 }
   1018 
   1019 /* fwmem is used to load firmware onto the card */
   1020 static int
   1021 iwm_alloc_fwmem(struct iwm_softc *sc)
   1022 {
   1023 	/* Must be aligned on a 16-byte boundary. */
   1024 	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
   1025 	    sc->sc_fwdmasegsz, 16);
   1026 }
   1027 
   1028 static void
   1029 iwm_free_fwmem(struct iwm_softc *sc)
   1030 {
   1031 	iwm_dma_contig_free(&sc->fw_dma);
   1032 }
   1033 
   1034 /* tx scheduler rings.  not used? */
   1035 static int
   1036 iwm_alloc_sched(struct iwm_softc *sc)
   1037 {
   1038 	int rv;
   1039 
   1040 	/* TX scheduler rings must be aligned on a 1KB boundary. */
   1041 	rv = iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
   1042 	    __arraycount(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
   1043 	return rv;
   1044 }
   1045 
   1046 static void
   1047 iwm_free_sched(struct iwm_softc *sc)
   1048 {
   1049 	iwm_dma_contig_free(&sc->sched_dma);
   1050 }
   1051 
   1052 /* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
   1053 static int
   1054 iwm_alloc_kw(struct iwm_softc *sc)
   1055 {
   1056 	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
   1057 }
   1058 
   1059 static void
   1060 iwm_free_kw(struct iwm_softc *sc)
   1061 {
   1062 	iwm_dma_contig_free(&sc->kw_dma);
   1063 }
   1064 
    1065 /* interrupt cause table; the device DMAs interrupt causes here */
   1066 static int
   1067 iwm_alloc_ict(struct iwm_softc *sc)
   1068 {
   1069 	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
   1070 	    IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
   1071 }
   1072 
   1073 static void
   1074 iwm_free_ict(struct iwm_softc *sc)
   1075 {
   1076 	iwm_dma_contig_free(&sc->ict_dma);
   1077 }
   1078 
   1079 static int
   1080 iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
   1081 {
   1082 	bus_size_t size;
   1083 	int i, error;
   1084 
   1085 	ring->cur = 0;
   1086 
   1087 	/* Allocate RX descriptors (256-byte aligned). */
   1088 	size = IWM_RX_RING_COUNT * sizeof(uint32_t);
   1089 	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
   1090 	if (error != 0) {
   1091 		aprint_error_dev(sc->sc_dev,
   1092 		    "could not allocate RX ring DMA memory\n");
   1093 		goto fail;
   1094 	}
   1095 	ring->desc = ring->desc_dma.vaddr;
   1096 
   1097 	/* Allocate RX status area (16-byte aligned). */
   1098 	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
   1099 	    sizeof(*ring->stat), 16);
   1100 	if (error != 0) {
   1101 		aprint_error_dev(sc->sc_dev,
   1102 		    "could not allocate RX status DMA memory\n");
   1103 		goto fail;
   1104 	}
   1105 	ring->stat = ring->stat_dma.vaddr;
   1106 
   1107 	/*
   1108 	 * Allocate and map RX buffers.
   1109 	 */
   1110 	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
   1111 		struct iwm_rx_data *data = &ring->data[i];
   1112 
   1113 		memset(data, 0, sizeof(*data));
   1114 		error = bus_dmamap_create(sc->sc_dmat, IWM_RBUF_SIZE, 1,
   1115 		    IWM_RBUF_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
   1116 		    &data->map);
   1117 		if (error != 0) {
   1118 			aprint_error_dev(sc->sc_dev,
   1119 			    "could not create RX buf DMA map\n");
   1120 			goto fail;
   1121 		}
   1122 
   1123 		if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
   1124 			goto fail;
   1125 		}
   1126 	}
   1127 	return 0;
   1128 
   1129 fail:	iwm_free_rx_ring(sc, ring);
   1130 	return error;
   1131 }
   1132 
   1133 static void
   1134 iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
   1135 {
   1136 	int ntries;
   1137 
   1138 	if (iwm_nic_lock(sc)) {
   1139 		IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
   1140 		for (ntries = 0; ntries < 1000; ntries++) {
   1141 			if (IWM_READ(sc, IWM_FH_MEM_RSSR_RX_STATUS_REG) &
   1142 			    IWM_FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE)
   1143 				break;
   1144 			DELAY(10);
   1145 		}
   1146 		iwm_nic_unlock(sc);
   1147 	}
   1148 	ring->cur = 0;
   1149 }
   1150 
   1151 static void
   1152 iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
   1153 {
   1154 	int i;
   1155 
   1156 	iwm_dma_contig_free(&ring->desc_dma);
   1157 	iwm_dma_contig_free(&ring->stat_dma);
   1158 
   1159 	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
   1160 		struct iwm_rx_data *data = &ring->data[i];
   1161 
   1162 		if (data->m != NULL) {
   1163 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
   1164 			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
   1165 			bus_dmamap_unload(sc->sc_dmat, data->map);
   1166 			m_freem(data->m);
   1167 		}
   1168 		if (data->map != NULL)
   1169 			bus_dmamap_destroy(sc->sc_dmat, data->map);
   1170 	}
   1171 }
   1172 
   1173 static int
   1174 iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
   1175 {
   1176 	bus_addr_t paddr;
   1177 	bus_size_t size;
   1178 	int i, error;
   1179 
   1180 	ring->qid = qid;
   1181 	ring->queued = 0;
   1182 	ring->cur = 0;
   1183 
   1184 	/* Allocate TX descriptors (256-byte aligned). */
   1185 	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
   1186 	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
   1187 	if (error != 0) {
   1188 		aprint_error_dev(sc->sc_dev,
   1189 		    "could not allocate TX ring DMA memory\n");
   1190 		goto fail;
   1191 	}
   1192 	ring->desc = ring->desc_dma.vaddr;
   1193 
   1194 	/*
   1195 	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
    1196 	 * to allocate command space for other rings.
   1197 	 */
   1198 	if (qid > IWM_MVM_CMD_QUEUE)
   1199 		return 0;
   1200 
   1201 	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
   1202 	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
   1203 	if (error != 0) {
   1204 		aprint_error_dev(sc->sc_dev,
   1205 		    "could not allocate TX cmd DMA memory\n");
   1206 		goto fail;
   1207 	}
   1208 	ring->cmd = ring->cmd_dma.vaddr;
   1209 
   1210 	paddr = ring->cmd_dma.paddr;
   1211 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
   1212 		struct iwm_tx_data *data = &ring->data[i];
   1213 
   1214 		data->cmd_paddr = paddr;
   1215 		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
   1216 		    + offsetof(struct iwm_tx_cmd, scratch);
   1217 		paddr += sizeof(struct iwm_device_cmd);
   1218 
   1219 		error = bus_dmamap_create(sc->sc_dmat, IWM_RBUF_SIZE,
   1220 		    IWM_NUM_OF_TBS, IWM_RBUF_SIZE, 0, BUS_DMA_NOWAIT,
   1221 		    &data->map);
   1222 		if (error != 0) {
   1223 			aprint_error_dev(sc->sc_dev,
   1224 			    "could not create TX buf DMA map\n");
   1225 			goto fail;
   1226 		}
   1227 	}
   1228 	KASSERT(paddr == ring->cmd_dma.paddr + size);
   1229 	return 0;
   1230 
   1231 fail:	iwm_free_tx_ring(sc, ring);
   1232 	return error;
   1233 }
   1234 
   1235 static void
   1236 iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
   1237 {
   1238 	int i;
   1239 
   1240 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
   1241 		struct iwm_tx_data *data = &ring->data[i];
   1242 
   1243 		if (data->m != NULL) {
   1244 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
   1245 			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   1246 			bus_dmamap_unload(sc->sc_dmat, data->map);
   1247 			m_freem(data->m);
   1248 			data->m = NULL;
   1249 		}
   1250 	}
   1251 	/* Clear TX descriptors. */
   1252 	memset(ring->desc, 0, ring->desc_dma.size);
   1253 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0,
   1254 	    ring->desc_dma.size, BUS_DMASYNC_PREWRITE);
   1255 	sc->qfullmsk &= ~(1 << ring->qid);
   1256 	ring->queued = 0;
   1257 	ring->cur = 0;
   1258 }
   1259 
   1260 static void
   1261 iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
   1262 {
   1263 	int i;
   1264 
   1265 	iwm_dma_contig_free(&ring->desc_dma);
   1266 	iwm_dma_contig_free(&ring->cmd_dma);
   1267 
   1268 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
   1269 		struct iwm_tx_data *data = &ring->data[i];
   1270 
   1271 		if (data->m != NULL) {
   1272 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
   1273 			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   1274 			bus_dmamap_unload(sc->sc_dmat, data->map);
   1275 			m_freem(data->m);
   1276 		}
   1277 		if (data->map != NULL)
   1278 			bus_dmamap_destroy(sc->sc_dmat, data->map);
   1279 	}
   1280 }
   1281 
   1282 /*
   1283  * High-level hardware frobbing routines
   1284  */
   1285 
   1286 static void
   1287 iwm_enable_rfkill_int(struct iwm_softc *sc)
   1288 {
   1289 	sc->sc_intmask = IWM_CSR_INT_BIT_RF_KILL;
   1290 	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
   1291 }
   1292 
   1293 static int
   1294 iwm_check_rfkill(struct iwm_softc *sc)
   1295 {
   1296 	uint32_t v;
   1297 	int s;
   1298 	int rv;
   1299 
   1300 	s = splnet();
   1301 
   1302 	/*
   1303 	 * "documentation" is not really helpful here:
   1304 	 *  27:	HW_RF_KILL_SW
   1305 	 *	Indicates state of (platform's) hardware RF-Kill switch
   1306 	 *
   1307 	 * But apparently when it's off, it's on ...
   1308 	 */
   1309 	v = IWM_READ(sc, IWM_CSR_GP_CNTRL);
   1310 	rv = (v & IWM_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) == 0;
   1311 	if (rv) {
   1312 		sc->sc_flags |= IWM_FLAG_RFKILL;
   1313 	} else {
   1314 		sc->sc_flags &= ~IWM_FLAG_RFKILL;
   1315 	}
   1316 
   1317 	splx(s);
   1318 	return rv;
   1319 }
   1320 
   1321 static void
   1322 iwm_enable_interrupts(struct iwm_softc *sc)
   1323 {
   1324 	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
   1325 	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
   1326 }
   1327 
   1328 static void
   1329 iwm_restore_interrupts(struct iwm_softc *sc)
   1330 {
   1331 	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
   1332 }
   1333 
   1334 static void
   1335 iwm_disable_interrupts(struct iwm_softc *sc)
   1336 {
   1337 	int s = splnet();
   1338 
   1339 	/* disable interrupts */
   1340 	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
   1341 
   1342 	/* acknowledge all interrupts */
   1343 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
   1344 	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
   1345 
   1346 	splx(s);
   1347 }
   1348 
   1349 static void
   1350 iwm_ict_reset(struct iwm_softc *sc)
   1351 {
   1352 	iwm_disable_interrupts(sc);
   1353 
   1354 	/* Reset ICT table. */
   1355 	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
   1356 	sc->ict_cur = 0;
   1357 
   1358 	/* Set physical address of ICT table (4KB aligned). */
   1359 	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
   1360 	    IWM_CSR_DRAM_INT_TBL_ENABLE
   1361 	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
   1362 	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);
   1363 
   1364 	/* Switch to ICT interrupt mode in driver. */
   1365 	sc->sc_flags |= IWM_FLAG_USE_ICT;
   1366 
   1367 	/* Re-enable interrupts. */
   1368 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
   1369 	iwm_enable_interrupts(sc);
   1370 }
   1371 
   1372 #define IWM_HW_READY_TIMEOUT 50
   1373 static int
   1374 iwm_set_hw_ready(struct iwm_softc *sc)
   1375 {
   1376 	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
   1377 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
   1378 
   1379         return iwm_poll_bit(sc, IWM_CSR_HW_IF_CONFIG_REG,
   1380 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
   1381 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
   1382 	    IWM_HW_READY_TIMEOUT);
   1383 }
   1384 #undef IWM_HW_READY_TIMEOUT
   1385 
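/*
 * Check for the NIC-ready bit; if it is not set yet, assert PREPARE and
 * keep polling for up to ~150ms.  Returns 0 once the hardware is ready,
 * ETIMEDOUT otherwise.
 */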
   1386 static int
   1387 iwm_prepare_card_hw(struct iwm_softc *sc)
   1388 {
   1389 	int rv = 0;
   1390 	int t = 0;
   1391 
    1392 	if (iwm_set_hw_ready(sc))
   1393 		goto out;
   1394 
   1395 	/* If HW is not ready, prepare the conditions to check again */
   1396 	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
   1397 	    IWM_CSR_HW_IF_CONFIG_REG_PREPARE);
   1398 
   1399 	do {
   1400 		if (iwm_set_hw_ready(sc))
   1401 			goto out;
   1402 		DELAY(200);
   1403 		t += 200;
   1404 	} while (t < 150000);
   1405 
   1406 	rv = ETIMEDOUT;
   1407 
   1408  out:
   1409 	return rv;
   1410 }
   1411 
   1412 static void
   1413 iwm_apm_config(struct iwm_softc *sc)
   1414 {
   1415 	pcireg_t reg;
   1416 
   1417 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
   1418 	    sc->sc_cap_off + PCIE_LCSR);
   1419 	if (reg & PCIE_LCSR_ASPM_L1) {
    1420 		/* Um, the Linux driver prints "Disabling L0S" for this one ... */
   1421 		IWM_SETBITS(sc, IWM_CSR_GIO_REG,
   1422 		    IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
   1423 	} else {
   1424 		/* ... and "Enabling" here */
   1425 		IWM_CLRBITS(sc, IWM_CSR_GIO_REG,
   1426 		    IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
   1427 	}
   1428 }
   1429 
   1430 /*
   1431  * Start up NIC's basic functionality after it has been reset
   1432  * (e.g. after platform boot, or shutdown via iwm_pcie_apm_stop())
   1433  * NOTE:  This does not load uCode nor start the embedded processor
   1434  */
   1435 static int
   1436 iwm_apm_init(struct iwm_softc *sc)
   1437 {
   1438 	int error = 0;
   1439 
   1440 	DPRINTF(("iwm apm start\n"));
   1441 
   1442 	/* Disable L0S exit timer (platform NMI Work/Around) */
   1443 	IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
   1444 	    IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
   1445 
   1446 	/*
   1447 	 * Disable L0s without affecting L1;
   1448 	 *  don't wait for ICH L0s (ICH bug W/A)
   1449 	 */
   1450 	IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
   1451 	    IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
   1452 
   1453 	/* Set FH wait threshold to maximum (HW error during stress W/A) */
   1454 	IWM_SETBITS(sc, IWM_CSR_DBG_HPET_MEM_REG, IWM_CSR_DBG_HPET_MEM_REG_VAL);
   1455 
   1456 	/*
   1457 	 * Enable HAP INTA (interrupt from management bus) to
   1458 	 * wake device's PCI Express link L1a -> L0s
   1459 	 */
   1460 	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
   1461 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
   1462 
   1463 	iwm_apm_config(sc);
   1464 
   1465 #if 0 /* not for 7k */
   1466 	/* Configure analog phase-lock-loop before activating to D0A */
   1467 	if (trans->cfg->base_params->pll_cfg_val)
   1468 		IWM_SETBITS(trans, IWM_CSR_ANA_PLL_CFG,
   1469 		    trans->cfg->base_params->pll_cfg_val);
   1470 #endif
   1471 
   1472 	/*
   1473 	 * Set "initialization complete" bit to move adapter from
   1474 	 * D0U* --> D0A* (powered-up active) state.
   1475 	 */
   1476 	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL, IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
   1477 
   1478 	/*
   1479 	 * Wait for clock stabilization; once stabilized, access to
   1480 	 * device-internal resources is supported, e.g. iwm_write_prph()
   1481 	 * and accesses to uCode SRAM.
   1482 	 */
   1483 	if (!iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
   1484 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
   1485 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
   1486 		aprint_error_dev(sc->sc_dev,
   1487 		    "timeout waiting for clock stabilization\n");
   1488 		goto out;
   1489 	}
   1490 
   1491 	/*
    1492 	 * This is a bit of an abuse: this is needed for 7260 / 3160 only,
    1493 	 * and the Linux driver gates it on host_interrupt_operation_mode
    1494 	 * even though it is not directly related to that setting.
   1495 	 *
   1496 	 * Enable the oscillator to count wake up time for L1 exit. This
   1497 	 * consumes slightly more power (100uA) - but allows to be sure
   1498 	 * that we wake up from L1 on time.
   1499 	 *
   1500 	 * This looks weird: read twice the same register, discard the
   1501 	 * value, set a bit, and yet again, read that same register
   1502 	 * just to discard the value. But that's the way the hardware
   1503 	 * seems to like it.
   1504 	 */
   1505 	iwm_read_prph(sc, IWM_OSC_CLK);
   1506 	iwm_read_prph(sc, IWM_OSC_CLK);
   1507 	iwm_set_bits_prph(sc, IWM_OSC_CLK, IWM_OSC_CLK_FORCE_CONTROL);
   1508 	iwm_read_prph(sc, IWM_OSC_CLK);
   1509 	iwm_read_prph(sc, IWM_OSC_CLK);
   1510 
   1511 	/*
   1512 	 * Enable DMA clock and wait for it to stabilize.
   1513 	 *
   1514 	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
   1515 	 * do not disable clocks.  This preserves any hardware bits already
   1516 	 * set by default in "CLK_CTRL_REG" after reset.
   1517 	 */
   1518 	iwm_write_prph(sc, IWM_APMG_CLK_EN_REG, IWM_APMG_CLK_VAL_DMA_CLK_RQT);
   1519 	//kpause("iwmapm", 0, mstohz(20), NULL);
   1520 	DELAY(20);
   1521 
   1522 	/* Disable L1-Active */
   1523 	iwm_set_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
   1524 	    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
   1525 
   1526 	/* Clear the interrupt in APMG if the NIC is in RFKILL */
   1527 	iwm_write_prph(sc, IWM_APMG_RTC_INT_STT_REG,
   1528 	    IWM_APMG_RTC_INT_STT_RFKILL);
   1529 
   1530  out:
   1531 	if (error)
   1532 		aprint_error_dev(sc->sc_dev, "apm init error %d\n", error);
   1533 	return error;
   1534 }
   1535 
   1536 /* iwlwifi/pcie/trans.c */
   1537 static void
   1538 iwm_apm_stop(struct iwm_softc *sc)
   1539 {
   1540 	/* stop device's busmaster DMA activity */
   1541 	IWM_SETBITS(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_STOP_MASTER);
   1542 
   1543 	if (!iwm_poll_bit(sc, IWM_CSR_RESET,
   1544 	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED,
   1545 	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100))
   1546 		aprint_error_dev(sc->sc_dev, "timeout waiting for master\n");
   1547 	DPRINTF(("iwm apm stop\n"));
   1548 }
   1549 
   1550 /* iwlwifi pcie/trans.c */
   1551 static int
   1552 iwm_start_hw(struct iwm_softc *sc)
   1553 {
   1554 	int error;
   1555 
   1556 	if ((error = iwm_prepare_card_hw(sc)) != 0)
   1557 		return error;
   1558 
    1559 	/* Reset the entire device */
   1560 	IWM_WRITE(sc, IWM_CSR_RESET,
   1561 	    IWM_CSR_RESET_REG_FLAG_SW_RESET |
   1562 	    IWM_CSR_RESET_REG_FLAG_NEVO_RESET);
   1563 	DELAY(10);
   1564 
   1565 	if ((error = iwm_apm_init(sc)) != 0)
   1566 		return error;
   1567 
   1568 	iwm_enable_rfkill_int(sc);
   1569 	iwm_check_rfkill(sc);
   1570 
   1571 	return 0;
   1572 }
   1573 
   1574 /* iwlwifi pcie/trans.c */
   1575 
   1576 static void
   1577 iwm_stop_device(struct iwm_softc *sc)
   1578 {
   1579 	int chnl, ntries;
   1580 	int qid;
   1581 
   1582 	/* tell the device to stop sending interrupts */
   1583 	iwm_disable_interrupts(sc);
   1584 
   1585 	/* device going down, Stop using ICT table */
   1586 	sc->sc_flags &= ~IWM_FLAG_USE_ICT;
   1587 
   1588 	/* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */
   1589 
   1590 	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
   1591 
   1592 	/* Stop all DMA channels. */
   1593 	if (iwm_nic_lock(sc)) {
   1594 		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
   1595 			IWM_WRITE(sc,
   1596 			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
   1597 			for (ntries = 0; ntries < 200; ntries++) {
   1598 				uint32_t r;
   1599 
   1600 				r = IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG);
   1601 				if (r & IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(
   1602 				    chnl))
   1603 					break;
   1604 				DELAY(20);
   1605 			}
   1606 		}
   1607 		iwm_nic_unlock(sc);
   1608 	}
   1609 
   1610 	/* Stop RX ring. */
   1611 	iwm_reset_rx_ring(sc, &sc->rxq);
   1612 
   1613 	/* Reset all TX rings. */
   1614 	for (qid = 0; qid < __arraycount(sc->txq); qid++)
   1615 		iwm_reset_tx_ring(sc, &sc->txq[qid]);
   1616 
   1617 	/*
   1618 	 * Power-down device's busmaster DMA clocks
   1619 	 */
   1620 	iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG, IWM_APMG_CLK_VAL_DMA_CLK_RQT);
   1621 	DELAY(5);
   1622 
   1623 	/* Make sure (redundant) we've released our request to stay awake */
   1624 	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
   1625 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
   1626 
   1627 	/* Stop the device, and put it in low power state */
   1628 	iwm_apm_stop(sc);
   1629 
    1630 	/*
    1631 	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
    1632 	 * Clear that interrupt again here.
         	 */
   1633 	iwm_disable_interrupts(sc);
   1634 	/* stop and reset the on-board processor */
   1635 	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_NEVO_RESET);
   1636 
   1637 	/*
   1638 	 * Even if we stop the HW, we still want the RF kill
   1639 	 * interrupt
   1640 	 */
   1641 	iwm_enable_rfkill_int(sc);
   1642 	iwm_check_rfkill(sc);
   1643 }
   1644 
   1645 /* iwlwifi pcie/trans.c (always main power) */
   1646 static void
   1647 iwm_set_pwr(struct iwm_softc *sc)
   1648 {
   1649 	iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
   1650 	    IWM_APMG_PS_CTRL_VAL_PWR_SRC_VMAIN, ~IWM_APMG_PS_CTRL_MSK_PWR_SRC);
   1651 }
   1652 
   1653 /* iwlwifi: mvm/ops.c */
   1654 static void
   1655 iwm_mvm_nic_config(struct iwm_softc *sc)
   1656 {
   1657 	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
   1658 	uint32_t reg_val = 0;
   1659 
   1660 	radio_cfg_type = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
   1661 	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
   1662 	radio_cfg_step = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
   1663 	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
   1664 	radio_cfg_dash = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
   1665 	    IWM_FW_PHY_CFG_RADIO_DASH_POS;
   1666 
   1667 	/* SKU control */
   1668 	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
   1669 	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
   1670 	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
   1671 	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
   1672 
   1673 	/* radio configuration */
   1674 	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
   1675 	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
   1676 	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
   1677 
   1678 	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);
   1679 
   1680         DPRINTF(("Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
   1681                        radio_cfg_step, radio_cfg_dash));
   1682 
   1683 	/*
   1684 	 * W/A : NIC is stuck in a reset state after Early PCIe power off
   1685 	 * (PCIe power is lost before PERST# is asserted), causing ME FW
   1686 	 * to lose ownership and not being able to obtain it back.
   1687 	 */
   1688 	iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
   1689 	    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
   1690 	    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
   1691 }
   1692 
   1693 static int
   1694 iwm_nic_rx_init(struct iwm_softc *sc)
   1695 {
   1696 	if (!iwm_nic_lock(sc))
   1697 		return EBUSY;
   1698 
   1699 	/*
   1700 	 * Initialize RX ring.  This is from the iwn driver.
   1701 	 */
   1702 	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
   1703 
   1704 	/* stop DMA */
   1705 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
   1706 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
   1707 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
   1708 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
   1709 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
   1710 
   1711 	/* Set physical address of RX ring (256-byte aligned). */
   1712 	IWM_WRITE(sc,
   1713 	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);
   1714 
   1715 	/* Set physical address of RX status (16-byte aligned). */
   1716 	IWM_WRITE(sc,
   1717 	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);
   1718 
   1719 	/* Enable RX. */
   1720 	/*
   1721 	 * Note: Linux driver also sets this:
   1722 	 *  (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
   1723 	 *
   1724 	 * It causes weird behavior.  YMMV.
   1725 	 */
   1726 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
   1727 	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL		|
   1728 	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY		|  /* HW bug */
   1729 	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL	|
   1730 	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K		|
   1731 	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);
   1732 
   1733 	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
   1734 	IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);
   1735 
   1736 	/*
   1737 	 * Thus sayeth el jefe (iwlwifi) via a comment:
   1738 	 *
   1739 	 * This value should initially be 0 (before preparing any
   1740  	 * RBs), should be 8 after preparing the first 8 RBs (for example)
   1741 	 */
   1742 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);
   1743 
   1744 	iwm_nic_unlock(sc);
   1745 
   1746 	return 0;
   1747 }
   1748 
   1749 static int
   1750 iwm_nic_tx_init(struct iwm_softc *sc)
   1751 {
   1752 	int qid;
   1753 
   1754 	if (!iwm_nic_lock(sc))
   1755 		return EBUSY;
   1756 
   1757 	/* Deactivate TX scheduler. */
   1758 	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
   1759 
   1760 	/* Set physical address of "keep warm" page (16-byte aligned). */
   1761 	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);
   1762 
   1763 	/* Initialize TX rings. */
   1764 	for (qid = 0; qid < __arraycount(sc->txq); qid++) {
   1765 		struct iwm_tx_ring *txq = &sc->txq[qid];
   1766 
   1767 		/* Set physical address of TX ring (256-byte aligned). */
   1768 		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
   1769 		    txq->desc_dma.paddr >> 8);
   1770 		DPRINTF(("loading ring %d descriptors (%p) at %lx\n",
   1771 		    qid, txq->desc, txq->desc_dma.paddr >> 8));
   1772 	}
   1773 	iwm_nic_unlock(sc);
   1774 
   1775 	return 0;
   1776 }
   1777 
   1778 static int
   1779 iwm_nic_init(struct iwm_softc *sc)
   1780 {
   1781 	int error;
   1782 
   1783 	iwm_apm_init(sc);
   1784 	iwm_set_pwr(sc);
   1785 
   1786 	iwm_mvm_nic_config(sc);
   1787 
   1788 	if ((error = iwm_nic_rx_init(sc)) != 0)
   1789 		return error;
   1790 
   1791 	/*
   1792 	 * Ditto for TX, from iwn
   1793 	 */
   1794 	if ((error = iwm_nic_tx_init(sc)) != 0)
   1795 		return error;
   1796 
   1797 	DPRINTF(("shadow registers enabled\n"));
   1798 	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
   1799 
    1800 	return 0;
   1801 }
   1802 
   1803 enum iwm_mvm_tx_fifo {
   1804 	IWM_MVM_TX_FIFO_BK = 0,
   1805 	IWM_MVM_TX_FIFO_BE,
   1806 	IWM_MVM_TX_FIFO_VI,
   1807 	IWM_MVM_TX_FIFO_VO,
   1808 	IWM_MVM_TX_FIFO_MCAST = 5,
   1809 };
   1810 
   1811 static const uint8_t iwm_mvm_ac_to_tx_fifo[] = {
   1812         IWM_MVM_TX_FIFO_VO,
   1813         IWM_MVM_TX_FIFO_VI,
   1814         IWM_MVM_TX_FIFO_BE,
   1815         IWM_MVM_TX_FIFO_BK,
   1816 };
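
         /*
          * The table above maps an EDCA access-category index to the firmware
          * TX FIFO that should carry its traffic; note the VO-first ordering
          * (the iwlwifi convention), so e.g. index 0 selects IWM_MVM_TX_FIFO_VO.
          */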
   1817 
   1818 static void
   1819 iwm_enable_txq(struct iwm_softc *sc, int qid, int fifo)
   1820 {
   1821 	if (!iwm_nic_lock(sc)) {
   1822 		DPRINTF(("%s: cannot enable txq %d\n", DEVNAME(sc), qid));
   1823 		return; /* XXX return EBUSY */
   1824 	}
   1825 
    1826 	/* Deactivate the queue before configuring it. */
   1827 	iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
   1828 	    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
   1829 	    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
   1830 
   1831 	if (qid != IWM_MVM_CMD_QUEUE) {
   1832 		iwm_set_bits_prph(sc, IWM_SCD_QUEUECHAIN_SEL, (1 << qid));
   1833 	}
   1834 
   1835 	iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));
   1836 
   1837 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
   1838 	iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
   1839 
   1840 	iwm_write_mem32(sc, sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
   1841 	/* Set scheduler window size and frame limit. */
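         	/*
         	 * Per the _POS/_MSK definitions used below, REG2 carries the TX
         	 * window size in its low half and the frame limit in its high
         	 * half; both are simply programmed to IWM_FRAME_LIMIT here.
         	 */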
   1842 	iwm_write_mem32(sc,
   1843 	    sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
   1844 	    sizeof(uint32_t),
   1845 	    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
   1846 	    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
   1847 	    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
   1848 	    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
   1849 
   1850 	iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
   1851 	    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
   1852 	    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
   1853 	    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
   1854 	    IWM_SCD_QUEUE_STTS_REG_MSK);
   1855 
   1856 	iwm_nic_unlock(sc);
   1857 
   1858 	DPRINTF(("enabled txq %d FIFO %d\n", qid, fifo));
   1859 }
   1860 
   1861 static int
   1862 iwm_post_alive(struct iwm_softc *sc)
   1863 {
   1864 	int nwords;
   1865 	int error, chnl;
   1866 
   1867 	if (!iwm_nic_lock(sc))
   1868 		return EBUSY;
   1869 
   1870 	if (sc->sched_base != iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR)) {
   1871 		DPRINTF(("%s: sched addr mismatch\n", DEVNAME(sc)));
   1872 		error = EINVAL;
   1873 		goto out;
   1874 	}
   1875 
   1876 	iwm_ict_reset(sc);
   1877 
   1878 	/* Clear TX scheduler state in SRAM. */
   1879 	nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
   1880 	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND)
   1881 	    / sizeof(uint32_t);
   1882 	error = iwm_write_mem(sc,
   1883 	    sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
   1884 	    NULL, nwords);
   1885 	if (error)
   1886 		goto out;
   1887 
   1888 	/* Set physical address of TX scheduler rings (1KB aligned). */
   1889 	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);
   1890 
   1891 	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);
   1892 
   1893 	/* enable command channel */
   1894 	iwm_enable_txq(sc, IWM_MVM_CMD_QUEUE, 7);
   1895 
   1896 	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
   1897 
   1898 	/* Enable DMA channels. */
   1899 	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
   1900 		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
   1901 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
   1902 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
   1903 	}
   1904 
   1905 	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
   1906 	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
   1907 
   1908         /* Enable L1-Active */
   1909 	iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
   1910 	    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
   1911 
   1912  out:
   1913  	iwm_nic_unlock(sc);
   1914 	return error;
   1915 }
   1916 
   1917 /*
   1918  * PHY db
   1919  * iwlwifi/iwl-phy-db.c
   1920  */
   1921 
   1922 /*
   1923  * BEGIN iwl-phy-db.c
   1924  */
   1925 
   1926 enum iwm_phy_db_section_type {
   1927 	IWM_PHY_DB_CFG = 1,
   1928 	IWM_PHY_DB_CALIB_NCH,
   1929 	IWM_PHY_DB_UNUSED,
   1930 	IWM_PHY_DB_CALIB_CHG_PAPD,
   1931 	IWM_PHY_DB_CALIB_CHG_TXP,
   1932 	IWM_PHY_DB_MAX
   1933 };
   1934 
    1935 #define IWM_PHY_DB_CMD 0x6c /* TEMP API - the actual command ID is 0x8c */
   1936 
   1937 /*
   1938  * phy db - configure operational ucode
   1939  */
   1940 struct iwm_phy_db_cmd {
   1941 	uint16_t type;
   1942 	uint16_t length;
   1943 	uint8_t data[];
   1944 } __packed;
   1945 
   1946 /* for parsing of tx power channel group data that comes from the firmware*/
   1947 struct iwm_phy_db_chg_txp {
   1948 	uint32_t space;
   1949 	uint16_t max_channel_idx;
   1950 } __packed;
   1951 
   1952 /*
   1953  * phy db - Receive phy db chunk after calibrations
   1954  */
   1955 struct iwm_calib_res_notif_phy_db {
   1956 	uint16_t type;
   1957 	uint16_t length;
   1958 	uint8_t data[];
   1959 } __packed;
   1960 
   1961 /*
   1962  * get phy db section: returns a pointer to a phy db section specified by
   1963  * type and channel group id.
   1964  */
   1965 static struct iwm_phy_db_entry *
   1966 iwm_phy_db_get_section(struct iwm_softc *sc,
   1967 	enum iwm_phy_db_section_type type, uint16_t chg_id)
   1968 {
   1969 	struct iwm_phy_db *phy_db = &sc->sc_phy_db;
   1970 
   1971 	if (type >= IWM_PHY_DB_MAX)
   1972 		return NULL;
   1973 
   1974 	switch (type) {
   1975 	case IWM_PHY_DB_CFG:
   1976 		return &phy_db->cfg;
   1977 	case IWM_PHY_DB_CALIB_NCH:
   1978 		return &phy_db->calib_nch;
   1979 	case IWM_PHY_DB_CALIB_CHG_PAPD:
   1980 		if (chg_id >= IWM_NUM_PAPD_CH_GROUPS)
   1981 			return NULL;
   1982 		return &phy_db->calib_ch_group_papd[chg_id];
   1983 	case IWM_PHY_DB_CALIB_CHG_TXP:
   1984 		if (chg_id >= IWM_NUM_TXP_CH_GROUPS)
   1985 			return NULL;
   1986 		return &phy_db->calib_ch_group_txp[chg_id];
   1987 	default:
   1988 		return NULL;
   1989 	}
   1990 	return NULL;
   1991 }
   1992 
   1993 static int
   1994 iwm_phy_db_set_section(struct iwm_softc *sc,
   1995     struct iwm_calib_res_notif_phy_db *phy_db_notif, uint16_t size)
   1996 {
   1997 	enum iwm_phy_db_section_type type = le16toh(phy_db_notif->type);
   1998 	struct iwm_phy_db_entry *entry;
   1999 	uint16_t chg_id = 0;
   2000 
   2001 	if (type == IWM_PHY_DB_CALIB_CHG_PAPD ||
   2002 	    type == IWM_PHY_DB_CALIB_CHG_TXP)
   2003 		chg_id = le16toh(*(uint16_t *)phy_db_notif->data);
   2004 
   2005 	entry = iwm_phy_db_get_section(sc, type, chg_id);
   2006 	if (!entry)
   2007 		return EINVAL;
   2008 
   2009 	if (entry->data)
   2010 		kmem_intr_free(entry->data, entry->size);
   2011 	entry->data = kmem_intr_alloc(size, KM_NOSLEEP);
   2012 	if (!entry->data) {
   2013 		entry->size = 0;
   2014 		return ENOMEM;
   2015 	}
   2016 	memcpy(entry->data, phy_db_notif->data, size);
   2017 	entry->size = size;
   2018 
   2019 	DPRINTFN(10, ("%s(%d): [PHYDB]SET: Type %d, Size: %d, data: %p\n",
   2020 	    __func__, __LINE__, type, size, entry->data));
   2021 
   2022 	return 0;
   2023 }
   2024 
   2025 static int
   2026 iwm_is_valid_channel(uint16_t ch_id)
   2027 {
   2028 	if (ch_id <= 14 ||
   2029 	    (36 <= ch_id && ch_id <= 64 && ch_id % 4 == 0) ||
   2030 	    (100 <= ch_id && ch_id <= 140 && ch_id % 4 == 0) ||
   2031 	    (145 <= ch_id && ch_id <= 165 && ch_id % 4 == 1))
   2032 		return 1;
   2033 	return 0;
   2034 }
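
         /*
          * I.e. the check above accepts the 2 GHz channels (1-14) plus the
          * 5 GHz channels 36-64 and 100-140 in steps of four, and 145-165
          * where the channel number is 1 modulo 4 (145, 149, ..., 165).
          */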
   2035 
   2036 static uint8_t
   2037 iwm_ch_id_to_ch_index(uint16_t ch_id)
   2038 {
   2039 	if (!iwm_is_valid_channel(ch_id))
   2040 		return 0xff;
   2041 
   2042 	if (ch_id <= 14)
   2043 		return ch_id - 1;
   2044 	if (ch_id <= 64)
   2045 		return (ch_id + 20) / 4;
   2046 	if (ch_id <= 140)
   2047 		return (ch_id - 12) / 4;
   2048 	return (ch_id - 13) / 4;
   2049 }
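
         /*
          * Worked examples of the mapping above: channel 1 -> index 0,
          * channel 14 -> index 13, channel 36 -> (36 + 20) / 4 = 14,
          * channel 100 -> (100 - 12) / 4 = 22, channel 165 -> (165 - 13) / 4 = 38.
          */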
   2050 
   2051 
   2052 static uint16_t
   2053 iwm_channel_id_to_papd(uint16_t ch_id)
   2054 {
   2055 	if (!iwm_is_valid_channel(ch_id))
   2056 		return 0xff;
   2057 
   2058 	if (1 <= ch_id && ch_id <= 14)
   2059 		return 0;
   2060 	if (36 <= ch_id && ch_id <= 64)
   2061 		return 1;
   2062 	if (100 <= ch_id && ch_id <= 140)
   2063 		return 2;
   2064 	return 3;
   2065 }
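
         /*
          * I.e. PAPD calibration group 0 covers the 2 GHz channels, and groups
          * 1-3 cover the 5 GHz ranges 36-64, 100-140 and 145-165 respectively.
          */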
   2066 
   2067 static uint16_t
   2068 iwm_channel_id_to_txp(struct iwm_softc *sc, uint16_t ch_id)
   2069 {
   2070 	struct iwm_phy_db *phy_db = &sc->sc_phy_db;
   2071 	struct iwm_phy_db_chg_txp *txp_chg;
   2072 	int i;
   2073 	uint8_t ch_index = iwm_ch_id_to_ch_index(ch_id);
   2074 
   2075 	if (ch_index == 0xff)
   2076 		return 0xff;
   2077 
   2078 	for (i = 0; i < IWM_NUM_TXP_CH_GROUPS; i++) {
   2079 		txp_chg = (void *)phy_db->calib_ch_group_txp[i].data;
   2080 		if (!txp_chg)
   2081 			return 0xff;
   2082 		/*
    2083 		 * Look for the first channel group whose max channel index
    2084 		 * is at least the wanted channel's index.
   2085 		 */
   2086 		if (le16toh(txp_chg->max_channel_idx) >= ch_index)
   2087 			return i;
   2088 	}
   2089 	return 0xff;
   2090 }
   2091 
   2092 static int
   2093 iwm_phy_db_get_section_data(struct iwm_softc *sc,
   2094 	uint32_t type, uint8_t **data, uint16_t *size, uint16_t ch_id)
   2095 {
   2096 	struct iwm_phy_db_entry *entry;
   2097 	uint16_t ch_group_id = 0;
   2098 
   2099 	/* find wanted channel group */
   2100 	if (type == IWM_PHY_DB_CALIB_CHG_PAPD)
   2101 		ch_group_id = iwm_channel_id_to_papd(ch_id);
   2102 	else if (type == IWM_PHY_DB_CALIB_CHG_TXP)
   2103 		ch_group_id = iwm_channel_id_to_txp(sc, ch_id);
   2104 
   2105 	entry = iwm_phy_db_get_section(sc, type, ch_group_id);
   2106 	if (!entry)
   2107 		return EINVAL;
   2108 
   2109 	*data = entry->data;
   2110 	*size = entry->size;
   2111 
   2112 	DPRINTFN(10, ("%s(%d): [PHYDB] GET: Type %d , Size: %d\n",
   2113 		       __func__, __LINE__, type, *size));
   2114 
   2115 	return 0;
   2116 }
   2117 
   2118 static int
   2119 iwm_send_phy_db_cmd(struct iwm_softc *sc, uint16_t type,
   2120 	uint16_t length, void *data)
   2121 {
   2122 	struct iwm_phy_db_cmd phy_db_cmd;
   2123 	struct iwm_host_cmd cmd = {
   2124 		.id = IWM_PHY_DB_CMD,
   2125 		.flags = IWM_CMD_SYNC,
   2126 	};
   2127 
   2128 	DPRINTFN(10, ("Sending PHY-DB hcmd of type %d, of length %d\n",
   2129 	    type, length));
   2130 
   2131 	/* Set phy db cmd variables */
    2132 	phy_db_cmd.type = htole16(type);
    2133 	phy_db_cmd.length = htole16(length);
   2134 
   2135 	/* Set hcmd variables */
   2136 	cmd.data[0] = &phy_db_cmd;
   2137 	cmd.len[0] = sizeof(struct iwm_phy_db_cmd);
   2138 	cmd.data[1] = data;
   2139 	cmd.len[1] = length;
   2140 	cmd.dataflags[1] = IWM_HCMD_DFL_NOCOPY;
   2141 
   2142 	return iwm_send_cmd(sc, &cmd);
   2143 }
   2144 
   2145 static int
   2146 iwm_phy_db_send_all_channel_groups(struct iwm_softc *sc,
   2147 	enum iwm_phy_db_section_type type, uint8_t max_ch_groups)
   2148 {
   2149 	uint16_t i;
   2150 	int err;
   2151 	struct iwm_phy_db_entry *entry;
   2152 
   2153 	/* Send all the channel-specific groups to operational fw */
   2154 	for (i = 0; i < max_ch_groups; i++) {
   2155 		entry = iwm_phy_db_get_section(sc, type, i);
   2156 		if (!entry)
   2157 			return EINVAL;
   2158 
   2159 		if (!entry->size)
   2160 			continue;
   2161 
   2162 		/* Send the requested PHY DB section */
   2163 		err = iwm_send_phy_db_cmd(sc, type, entry->size, entry->data);
   2164 		if (err) {
   2165 			DPRINTF(("%s: Can't SEND phy_db section %d (%d), "
   2166 			    "err %d\n", DEVNAME(sc), type, i, err));
   2167 			return err;
   2168 		}
   2169 
   2170 		DPRINTFN(10, ("Sent PHY_DB HCMD, type = %d num = %d\n", type, i));
   2171 	}
   2172 
   2173 	return 0;
   2174 }
   2175 
   2176 static int
   2177 iwm_send_phy_db_data(struct iwm_softc *sc)
   2178 {
   2179 	uint8_t *data = NULL;
   2180 	uint16_t size = 0;
   2181 	int err;
   2182 
   2183 	DPRINTF(("Sending phy db data and configuration to runtime image\n"));
   2184 
   2185 	/* Send PHY DB CFG section */
   2186 	err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CFG, &data, &size, 0);
   2187 	if (err) {
   2188 		DPRINTF(("%s: Cannot get Phy DB cfg section, %d\n",
   2189 		    DEVNAME(sc), err));
   2190 		return err;
   2191 	}
   2192 
   2193 	err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CFG, size, data);
   2194 	if (err) {
   2195 		DPRINTF(("%s: Cannot send HCMD of Phy DB cfg section, %d\n",
   2196 		    DEVNAME(sc), err));
   2197 		return err;
   2198 	}
   2199 
   2200 	err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CALIB_NCH,
   2201 	    &data, &size, 0);
   2202 	if (err) {
   2203 		DPRINTF(("%s: Cannot get Phy DB non specific channel section, "
   2204 		    "%d\n", DEVNAME(sc), err));
   2205 		return err;
   2206 	}
   2207 
   2208 	err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CALIB_NCH, size, data);
   2209 	if (err) {
   2210 		DPRINTF(("%s: Cannot send HCMD of Phy DB non specific channel "
   2211 		    "sect, %d\n", DEVNAME(sc), err));
   2212 		return err;
   2213 	}
   2214 
   2215 	/* Send all the TXP channel specific data */
   2216 	err = iwm_phy_db_send_all_channel_groups(sc,
   2217 	    IWM_PHY_DB_CALIB_CHG_PAPD, IWM_NUM_PAPD_CH_GROUPS);
   2218 	if (err) {
   2219 		DPRINTF(("%s: Cannot send channel specific PAPD groups, %d\n",
   2220 		    DEVNAME(sc), err));
   2221 		return err;
   2222 	}
   2223 
   2224 	/* Send all the TXP channel specific data */
   2225 	err = iwm_phy_db_send_all_channel_groups(sc,
   2226 	    IWM_PHY_DB_CALIB_CHG_TXP, IWM_NUM_TXP_CH_GROUPS);
   2227 	if (err) {
   2228 		DPRINTF(("%s: Cannot send channel specific TX power groups, "
   2229 		    "%d\n", DEVNAME(sc), err));
   2230 		return err;
   2231 	}
   2232 
   2233 	DPRINTF(("Finished sending phy db non channel data\n"));
   2234 	return 0;
   2235 }
   2236 
   2237 /*
   2238  * END iwl-phy-db.c
   2239  */
   2240 
   2241 /*
   2242  * BEGIN iwlwifi/mvm/time-event.c
   2243  */
   2244 
   2245 /*
   2246  * For the high priority TE use a time event type that has similar priority to
   2247  * the FW's action scan priority.
   2248  */
   2249 #define IWM_MVM_ROC_TE_TYPE_NORMAL IWM_TE_P2P_DEVICE_DISCOVERABLE
   2250 #define IWM_MVM_ROC_TE_TYPE_MGMT_TX IWM_TE_P2P_CLIENT_ASSOC
   2251 
   2252 /* used to convert from time event API v2 to v1 */
   2253 #define IWM_TE_V2_DEP_POLICY_MSK (IWM_TE_V2_DEP_OTHER | IWM_TE_V2_DEP_TSF |\
   2254 			     IWM_TE_V2_EVENT_SOCIOPATHIC)
   2255 static inline uint16_t
   2256 iwm_te_v2_get_notify(uint16_t policy)
   2257 {
   2258 	return le16toh(policy) & IWM_TE_V2_NOTIF_MSK;
   2259 }
   2260 
   2261 static inline uint16_t
   2262 iwm_te_v2_get_dep_policy(uint16_t policy)
   2263 {
   2264 	return (le16toh(policy) & IWM_TE_V2_DEP_POLICY_MSK) >>
   2265 		IWM_TE_V2_PLACEMENT_POS;
   2266 }
   2267 
   2268 static inline uint16_t
   2269 iwm_te_v2_get_absence(uint16_t policy)
   2270 {
   2271 	return (le16toh(policy) & IWM_TE_V2_ABSENCE) >> IWM_TE_V2_ABSENCE_POS;
   2272 }
   2273 
   2274 static void
   2275 iwm_mvm_te_v2_to_v1(const struct iwm_time_event_cmd_v2 *cmd_v2,
   2276 	struct iwm_time_event_cmd_v1 *cmd_v1)
   2277 {
   2278 	cmd_v1->id_and_color = cmd_v2->id_and_color;
   2279 	cmd_v1->action = cmd_v2->action;
   2280 	cmd_v1->id = cmd_v2->id;
   2281 	cmd_v1->apply_time = cmd_v2->apply_time;
   2282 	cmd_v1->max_delay = cmd_v2->max_delay;
   2283 	cmd_v1->depends_on = cmd_v2->depends_on;
   2284 	cmd_v1->interval = cmd_v2->interval;
   2285 	cmd_v1->duration = cmd_v2->duration;
   2286 	if (cmd_v2->repeat == IWM_TE_V2_REPEAT_ENDLESS)
   2287 		cmd_v1->repeat = htole32(IWM_TE_V1_REPEAT_ENDLESS);
   2288 	else
   2289 		cmd_v1->repeat = htole32(cmd_v2->repeat);
   2290 	cmd_v1->max_frags = htole32(cmd_v2->max_frags);
   2291 	cmd_v1->interval_reciprocal = 0; /* unused */
   2292 
   2293 	cmd_v1->dep_policy = htole32(iwm_te_v2_get_dep_policy(cmd_v2->policy));
   2294 	cmd_v1->is_present = htole32(!iwm_te_v2_get_absence(cmd_v2->policy));
   2295 	cmd_v1->notify = htole32(iwm_te_v2_get_notify(cmd_v2->policy));
   2296 }
   2297 
   2298 static int
   2299 iwm_mvm_send_time_event_cmd(struct iwm_softc *sc,
   2300 	const struct iwm_time_event_cmd_v2 *cmd)
   2301 {
   2302 	struct iwm_time_event_cmd_v1 cmd_v1;
   2303 
   2304 	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_TIME_EVENT_API_V2)
   2305 		return iwm_mvm_send_cmd_pdu(sc, IWM_TIME_EVENT_CMD,
   2306 		    IWM_CMD_SYNC, sizeof(*cmd), cmd);
   2307 
   2308 	iwm_mvm_te_v2_to_v1(cmd, &cmd_v1);
   2309 	return iwm_mvm_send_cmd_pdu(sc, IWM_TIME_EVENT_CMD, IWM_CMD_SYNC,
   2310 	    sizeof(cmd_v1), &cmd_v1);
   2311 }
   2312 
   2313 static int
   2314 iwm_mvm_time_event_send_add(struct iwm_softc *sc, struct iwm_node *in,
   2315 	void *te_data, struct iwm_time_event_cmd_v2 *te_cmd)
   2316 {
   2317 	int ret;
   2318 
   2319 	DPRINTF(("Add new TE, duration %d TU\n", le32toh(te_cmd->duration)));
   2320 
   2321 	ret = iwm_mvm_send_time_event_cmd(sc, te_cmd);
   2322 	if (ret) {
   2323 		DPRINTF(("%s: Couldn't send IWM_TIME_EVENT_CMD: %d\n",
   2324 		    DEVNAME(sc), ret));
   2325 	}
   2326 
   2327 	return ret;
   2328 }
   2329 
   2330 static void
   2331 iwm_mvm_protect_session(struct iwm_softc *sc, struct iwm_node *in,
   2332 	uint32_t duration, uint32_t min_duration, uint32_t max_delay)
   2333 {
   2334 	struct iwm_time_event_cmd_v2 time_cmd;
   2335 
   2336 	memset(&time_cmd, 0, sizeof(time_cmd));
   2337 
   2338 	time_cmd.action = htole32(IWM_FW_CTXT_ACTION_ADD);
   2339 	time_cmd.id_and_color =
   2340 	    htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
   2341 	time_cmd.id = htole32(IWM_TE_BSS_STA_AGGRESSIVE_ASSOC);
   2342 
   2343 	time_cmd.apply_time = htole32(iwm_read_prph(sc,
   2344 	    IWM_DEVICE_SYSTEM_TIME_REG));
   2345 
   2346 	time_cmd.max_frags = IWM_TE_V2_FRAG_NONE;
   2347 	time_cmd.max_delay = htole32(max_delay);
    2348 	/* TODO: why do we need interval = bi (beacon interval) if it is not periodic? */
   2349 	time_cmd.interval = htole32(1);
   2350 	time_cmd.duration = htole32(duration);
   2351 	time_cmd.repeat = 1;
   2352 	time_cmd.policy
   2353 	    = htole32(IWM_TE_V2_NOTIF_HOST_EVENT_START |
   2354 	        IWM_TE_V2_NOTIF_HOST_EVENT_END);
   2355 
   2356 	iwm_mvm_time_event_send_add(sc, in, /*te_data*/NULL, &time_cmd);
   2357 }
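
         /*
          * A note on iwm_mvm_protect_session() above: duration and max_delay
          * are given in TU (1024 usec) per the time event API, and min_duration
          * is currently unused here.  A hypothetical call such as
          *	iwm_mvm_protect_session(sc, in, 500, 100, 500);
          * asks the firmware to keep us on the current channel for roughly half
          * a second, e.g. while association completes.
          */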
   2358 
   2359 /*
   2360  * END iwlwifi/mvm/time-event.c
   2361  */
   2362 
   2363 /*
   2364  * NVM read access and content parsing.  We do not support
   2365  * external NVM or writing NVM.
   2366  * iwlwifi/mvm/nvm.c
   2367  */
   2368 
   2369 /* list of NVM sections we are allowed/need to read */
   2370 static const int nvm_to_read[] = {
   2371 	IWM_NVM_SECTION_TYPE_HW,
   2372 	IWM_NVM_SECTION_TYPE_SW,
   2373 	IWM_NVM_SECTION_TYPE_CALIBRATION,
   2374 	IWM_NVM_SECTION_TYPE_PRODUCTION,
   2375 };
   2376 
   2377 /* Default NVM size to read */
   2378 #define IWM_NVM_DEFAULT_CHUNK_SIZE (2*1024)
   2379 #define IWM_MAX_NVM_SECTION_SIZE 7000
   2380 
   2381 #define IWM_NVM_WRITE_OPCODE 1
   2382 #define IWM_NVM_READ_OPCODE 0
   2383 
   2384 static int
   2385 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
   2386 	uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
   2387 {
   2388 	offset = 0;
   2389 	struct iwm_nvm_access_cmd nvm_access_cmd = {
   2390 		.offset = htole16(offset),
   2391 		.length = htole16(length),
   2392 		.type = htole16(section),
   2393 		.op_code = IWM_NVM_READ_OPCODE,
   2394 	};
   2395 	struct iwm_nvm_access_resp *nvm_resp;
   2396 	struct iwm_rx_packet *pkt;
   2397 	struct iwm_host_cmd cmd = {
   2398 		.id = IWM_NVM_ACCESS_CMD,
   2399 		.flags = IWM_CMD_SYNC | IWM_CMD_WANT_SKB |
   2400 		    IWM_CMD_SEND_IN_RFKILL,
   2401 		.data = { &nvm_access_cmd, },
   2402 	};
   2403 	int ret, bytes_read, offset_read;
   2404 	uint8_t *resp_data;
   2405 
   2406 	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
   2407 
   2408 	ret = iwm_send_cmd(sc, &cmd);
   2409 	if (ret)
   2410 		return ret;
   2411 
   2412 	pkt = cmd.resp_pkt;
   2413 	if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
   2414 		DPRINTF(("%s: Bad return from IWM_NVM_ACCES_COMMAND (0x%08X)\n",
   2415 		    DEVNAME(sc), pkt->hdr.flags));
   2416 		ret = EIO;
   2417 		goto exit;
   2418 	}
   2419 
   2420 	/* Extract NVM response */
   2421 	nvm_resp = (void *)pkt->data;
   2422 
   2423 	ret = le16toh(nvm_resp->status);
   2424 	bytes_read = le16toh(nvm_resp->length);
   2425 	offset_read = le16toh(nvm_resp->offset);
   2426 	resp_data = nvm_resp->data;
   2427 	if (ret) {
   2428 		DPRINTF(("%s: NVM access command failed with status %d\n",
   2429 		    DEVNAME(sc), ret));
   2430 		ret = EINVAL;
   2431 		goto exit;
   2432 	}
   2433 
   2434 	if (offset_read != offset) {
   2435 		DPRINTF(("%s: NVM ACCESS response with invalid offset %d\n",
   2436 		    DEVNAME(sc), offset_read));
   2437 		ret = EINVAL;
   2438 		goto exit;
   2439 	}
   2440 
   2441 	memcpy(data + offset, resp_data, bytes_read);
   2442 	*len = bytes_read;
   2443 
   2444  exit:
   2445 	iwm_free_resp(sc, &cmd);
   2446 	return ret;
   2447 }
   2448 
   2449 /*
   2450  * Reads an NVM section completely.
    2451  * NICs prior to the 7000 family don't have a real NVM; they just read
    2452  * section 0, which is the EEPROM.  Because EEPROM reads are not bounded
    2453  * by the uCode, in that case we must manually check that we don't
    2454  * overflow and try to read more than the EEPROM size.
    2455  * For 7000-family NICs we supply the maximum size we can read, and
    2456  * the uCode fills the response with as much data as fits,
    2457  * without overflowing, so no check is needed.
   2458  */
   2459 static int
   2460 iwm_nvm_read_section(struct iwm_softc *sc,
   2461 	uint16_t section, uint8_t *data, uint16_t *len)
   2462 {
   2463 	uint16_t length, seglen;
   2464 	int error;
   2465 
   2466 	/* Set nvm section read length */
   2467 	length = seglen = IWM_NVM_DEFAULT_CHUNK_SIZE;
   2468 	*len = 0;
   2469 
   2470 	/* Read the NVM until exhausted (reading less than requested) */
   2471 	while (seglen == length) {
   2472 		error = iwm_nvm_read_chunk(sc,
   2473 		    section, *len, length, data, &seglen);
   2474 		if (error) {
   2475 			aprint_error_dev(sc->sc_dev,
   2476 			    "Cannot read NVM from section %d offset %d, "
   2477 			    "length %d\n", section, *len, length);
   2478 			return error;
   2479 		}
   2480 		*len += seglen;
   2481 	}
   2482 
   2483 	DPRINTFN(4, ("NVM section %d read completed\n", section));
   2484 	return 0;
   2485 }
   2486 
   2487 /*
   2488  * BEGIN IWM_NVM_PARSE
   2489  */
   2490 
   2491 /* iwlwifi/iwl-nvm-parse.c */
   2492 
   2493 /* NVM offsets (in words) definitions */
   2494 enum wkp_nvm_offsets {
   2495 	/* NVM HW-Section offset (in words) definitions */
   2496 	IWM_HW_ADDR = 0x15,
   2497 
   2498 /* NVM SW-Section offset (in words) definitions */
   2499 	IWM_NVM_SW_SECTION = 0x1C0,
   2500 	IWM_NVM_VERSION = 0,
   2501 	IWM_RADIO_CFG = 1,
   2502 	IWM_SKU = 2,
   2503 	IWM_N_HW_ADDRS = 3,
   2504 	IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,
   2505 
   2506 /* NVM calibration section offset (in words) definitions */
   2507 	IWM_NVM_CALIB_SECTION = 0x2B8,
   2508 	IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
   2509 };
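
         /*
          * The relative offsets above work out to IWM_NVM_CHANNELS =
          * 0x1E0 - 0x1C0 = 0x20 words into the SW section and IWM_XTAL_CALIB =
          * 0x316 - 0x2B8 = 0x5E words into the calibration section.
          */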
   2510 
   2511 /* SKU Capabilities (actual values from NVM definition) */
   2512 enum nvm_sku_bits {
   2513 	IWM_NVM_SKU_CAP_BAND_24GHZ	= (1 << 0),
   2514 	IWM_NVM_SKU_CAP_BAND_52GHZ	= (1 << 1),
   2515 	IWM_NVM_SKU_CAP_11N_ENABLE	= (1 << 2),
   2516 	IWM_NVM_SKU_CAP_11AC_ENABLE	= (1 << 3),
   2517 };
   2518 
   2519 /* radio config bits (actual values from NVM definition) */
   2520 #define IWM_NVM_RF_CFG_DASH_MSK(x)   (x & 0x3)         /* bits 0-1   */
   2521 #define IWM_NVM_RF_CFG_STEP_MSK(x)   ((x >> 2)  & 0x3) /* bits 2-3   */
   2522 #define IWM_NVM_RF_CFG_TYPE_MSK(x)   ((x >> 4)  & 0x3) /* bits 4-5   */
   2523 #define IWM_NVM_RF_CFG_PNUM_MSK(x)   ((x >> 6)  & 0x3) /* bits 6-7   */
   2524 #define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8)  & 0xF) /* bits 8-11  */
   2525 #define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
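
         /*
          * Example decode of the macros above (with a made-up value):
          * radio_cfg = 0x1234 yields dash 0, step 1, type 3, pnum 0,
          * valid_tx_ant 0x2 and valid_rx_ant 0x1.
          */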
   2526 
   2527 #define DEFAULT_MAX_TX_POWER 16
   2528 
   2529 /**
   2530  * enum iwm_nvm_channel_flags - channel flags in NVM
   2531  * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
   2532  * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
   2533  * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
   2534  * @IWM_NVM_CHANNEL_RADAR: radar detection required
   2535  * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
   2536  * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
   2537  * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
   2538  * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
   2539  * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
   2540  */
   2541 enum iwm_nvm_channel_flags {
   2542 	IWM_NVM_CHANNEL_VALID = (1 << 0),
   2543 	IWM_NVM_CHANNEL_IBSS = (1 << 1),
   2544 	IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
   2545 	IWM_NVM_CHANNEL_RADAR = (1 << 4),
   2546 	IWM_NVM_CHANNEL_DFS = (1 << 7),
   2547 	IWM_NVM_CHANNEL_WIDE = (1 << 8),
   2548 	IWM_NVM_CHANNEL_40MHZ = (1 << 9),
   2549 	IWM_NVM_CHANNEL_80MHZ = (1 << 10),
   2550 	IWM_NVM_CHANNEL_160MHZ = (1 << 11),
   2551 };
   2552 
   2553 static void
   2554 iwm_init_channel_map(struct iwm_softc *sc, const uint16_t * const nvm_ch_flags)
   2555 {
   2556 	struct ieee80211com *ic = &sc->sc_ic;
   2557 	struct iwm_nvm_data *data = &sc->sc_nvm;
   2558 	int ch_idx;
   2559 	struct ieee80211_channel *channel;
   2560 	uint16_t ch_flags;
   2561 	int is_5ghz;
   2562 	int flags, hw_value;
   2563 
   2564 	for (ch_idx = 0; ch_idx < __arraycount(iwm_nvm_channels); ch_idx++) {
   2565 		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
   2566 
   2567 		if (ch_idx >= IWM_NUM_2GHZ_CHANNELS &&
   2568 		    !data->sku_cap_band_52GHz_enable)
   2569 			ch_flags &= ~IWM_NVM_CHANNEL_VALID;
   2570 
   2571 		if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
   2572 			DPRINTF(("Ch. %d Flags %x [%sGHz] - No traffic\n",
   2573 			    iwm_nvm_channels[ch_idx],
   2574 			    ch_flags,
   2575 			    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
   2576 			    "5.2" : "2.4"));
   2577 			continue;
   2578 		}
   2579 
   2580 		hw_value = iwm_nvm_channels[ch_idx];
   2581 		channel = &ic->ic_channels[hw_value];
   2582 
   2583 		is_5ghz = ch_idx >= IWM_NUM_2GHZ_CHANNELS;
   2584 		if (!is_5ghz) {
   2585 			flags = IEEE80211_CHAN_2GHZ;
   2586 			channel->ic_flags
   2587 			    = IEEE80211_CHAN_CCK
   2588 			    | IEEE80211_CHAN_OFDM
   2589 			    | IEEE80211_CHAN_DYN
   2590 			    | IEEE80211_CHAN_2GHZ;
   2591 		} else {
   2592 			flags = IEEE80211_CHAN_5GHZ;
   2593 			channel->ic_flags =
   2594 			    IEEE80211_CHAN_A;
   2595 		}
   2596 		channel->ic_freq = ieee80211_ieee2mhz(hw_value, flags);
   2597 
   2598 		if (!(ch_flags & IWM_NVM_CHANNEL_ACTIVE))
   2599 			channel->ic_flags |= IEEE80211_CHAN_PASSIVE;
   2600 	}
   2601 }
   2602 
   2603 static int
   2604 iwm_parse_nvm_data(struct iwm_softc *sc,
   2605 	const uint16_t *nvm_hw, const uint16_t *nvm_sw,
   2606 	const uint16_t *nvm_calib, uint8_t tx_chains, uint8_t rx_chains)
   2607 {
   2608 	struct iwm_nvm_data *data = &sc->sc_nvm;
   2609 	uint8_t hw_addr[ETHER_ADDR_LEN];
   2610 	uint16_t radio_cfg, sku;
   2611 
   2612 	data->nvm_version = le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
   2613 
   2614 	radio_cfg = le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
   2615 	data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
   2616 	data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
   2617 	data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
   2618 	data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
   2619 	data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK(radio_cfg);
   2620 	data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK(radio_cfg);
   2621 
   2622 	sku = le16_to_cpup(nvm_sw + IWM_SKU);
   2623 	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
   2624 #ifndef IWM_NO_5GHZ
   2625 	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
   2626 #else
   2627 	data->sku_cap_band_52GHz_enable = 0;
   2628 #endif
   2629 	data->sku_cap_11n_enable = 0;
   2630 
   2631 	if (!data->valid_tx_ant || !data->valid_rx_ant) {
   2632 		DPRINTF(("%s: invalid antennas (0x%x, 0x%x)\n", DEVNAME(sc),
   2633 		    data->valid_tx_ant, data->valid_rx_ant));
   2634 		return EINVAL;
   2635 	}
   2636 
   2637 	data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
   2638 
   2639 	data->xtal_calib[0] = *(nvm_calib + IWM_XTAL_CALIB);
   2640 	data->xtal_calib[1] = *(nvm_calib + IWM_XTAL_CALIB + 1);
   2641 
   2642 	/* The byte order is little endian 16 bit, meaning 214365 */
   2643 	memcpy(hw_addr, nvm_hw + IWM_HW_ADDR, ETHER_ADDR_LEN);
   2644 	data->hw_addr[0] = hw_addr[1];
   2645 	data->hw_addr[1] = hw_addr[0];
   2646 	data->hw_addr[2] = hw_addr[3];
   2647 	data->hw_addr[3] = hw_addr[2];
   2648 	data->hw_addr[4] = hw_addr[5];
   2649 	data->hw_addr[5] = hw_addr[4];
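         	/*
         	 * For example, if the six bytes read from the HW section are
         	 * { 0xBB, 0xAA, 0xDD, 0xCC, 0xFF, 0xEE }, the resulting MAC
         	 * address is aa:bb:cc:dd:ee:ff.
         	 */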
   2650 
   2651 	iwm_init_channel_map(sc, &nvm_sw[IWM_NVM_CHANNELS]);
   2652 	data->calib_version = 255;   /* TODO:
   2653 					this value will prevent some checks from
   2654 					failing, we need to check if this
   2655 					field is still needed, and if it does,
   2656 					where is it in the NVM */
   2657 
   2658 	return 0;
   2659 }
   2660 
   2661 /*
   2662  * END NVM PARSE
   2663  */
   2664 
   2665 struct iwm_nvm_section {
   2666         uint16_t length;
   2667         const uint8_t *data;
   2668 };
   2669 
   2670 #define IWM_FW_VALID_TX_ANT(sc) \
   2671     ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_TX_CHAIN) \
   2672     >> IWM_FW_PHY_CFG_TX_CHAIN_POS)
   2673 #define IWM_FW_VALID_RX_ANT(sc) \
   2674     ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RX_CHAIN) \
   2675     >> IWM_FW_PHY_CFG_RX_CHAIN_POS)
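
         /*
          * These extract the valid TX/RX antenna (chain) bitmasks the firmware
          * advertises in its PHY configuration; e.g. a mask of 0x3 would mean
          * chains A and B are usable.
          */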
   2676 
   2677 static int
   2678 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
   2679 {
   2680 	const uint16_t *hw, *sw, *calib;
   2681 
   2682 	/* Checking for required sections */
   2683 	if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
   2684 	    !sections[IWM_NVM_SECTION_TYPE_HW].data) {
   2685 		DPRINTF(("%s: Can't parse empty NVM sections\n", DEVNAME(sc)));
   2686 		return ENOENT;
   2687 	}
   2688 
   2689 	hw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_HW].data;
   2690 	sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
   2691 	calib = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
   2692 	return iwm_parse_nvm_data(sc, hw, sw, calib,
   2693 	    IWM_FW_VALID_TX_ANT(sc), IWM_FW_VALID_RX_ANT(sc));
   2694 }
   2695 
   2696 static int
   2697 iwm_nvm_init(struct iwm_softc *sc)
   2698 {
   2699 	struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
   2700 	int i, section, error;
   2701 	uint16_t len;
   2702 	uint8_t *nvm_buffer, *temp;
   2703 
   2704 	/* Read From FW NVM */
   2705 	DPRINTF(("Read NVM\n"));
   2706 
   2707 	/* TODO: find correct NVM max size for a section */
   2708 	nvm_buffer = kmem_alloc(IWM_OTP_LOW_IMAGE_SIZE, KM_SLEEP);
   2709 	for (i = 0; i < __arraycount(nvm_to_read); i++) {
   2710 		section = nvm_to_read[i];
    2711 		KASSERT(section < __arraycount(nvm_sections));
   2712 
   2713 		error = iwm_nvm_read_section(sc, section, nvm_buffer, &len);
   2714 		if (error)
   2715 			break;
   2716 
   2717 		temp = kmem_alloc(len, KM_SLEEP);
   2718 		memcpy(temp, nvm_buffer, len);
   2719 		nvm_sections[section].data = temp;
   2720 		nvm_sections[section].length = len;
   2721 	}
   2722 	kmem_free(nvm_buffer, IWM_OTP_LOW_IMAGE_SIZE);
   2723 	if (error)
   2724 		return error;
   2725 
   2726 	return iwm_parse_nvm_sections(sc, nvm_sections);
   2727 }
   2728 
   2729 /*
   2730  * Firmware loading gunk.  This is kind of a weird hybrid between the
   2731  * iwn driver and the Linux iwlwifi driver.
   2732  */
   2733 
   2734 static int
   2735 iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
   2736 	const uint8_t *section, uint32_t byte_cnt)
   2737 {
   2738 	struct iwm_dma_info *dma = &sc->fw_dma;
   2739 	int error;
   2740 
   2741 	/* Copy firmware section into pre-allocated DMA-safe memory. */
   2742 	memcpy(dma->vaddr, section, byte_cnt);
   2743 	bus_dmamap_sync(sc->sc_dmat,
   2744 	    dma->map, 0, byte_cnt, BUS_DMASYNC_PREWRITE);
   2745 
   2746 	if (!iwm_nic_lock(sc))
   2747 		return EBUSY;
   2748 
   2749 	sc->sc_fw_chunk_done = 0;
   2750 
   2751 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
   2752 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
   2753 	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
   2754 	    dst_addr);
   2755 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
   2756 	    dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
   2757 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
   2758 	    (iwm_get_dma_hi_addr(dma->paddr)
   2759 	      << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
   2760 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
   2761 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
   2762 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
   2763 	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
   2764 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
   2765 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
   2766 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
   2767 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
   2768 
   2769 	iwm_nic_unlock(sc);
   2770 
   2771 	/* wait 1s for this segment to load */
   2772 	while (!sc->sc_fw_chunk_done)
   2773 		if ((error = tsleep(&sc->sc_fw, 0, "iwmfw", hz)) != 0)
   2774 			break;
   2775 
   2776         return error;
   2777 }
   2778 
   2779 static int
   2780 iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
   2781 {
   2782 	struct iwm_fw_sects *fws;
   2783 	int error, i, w;
   2784 	void *data;
   2785 	uint32_t dlen;
   2786 	uint32_t offset;
   2787 
   2788 	sc->sc_uc.uc_intr = 0;
   2789 
   2790 	fws = &sc->sc_fw.fw_sects[ucode_type];
   2791 	for (i = 0; i < fws->fw_count; i++) {
   2792 		data = fws->fw_sect[i].fws_data;
   2793 		dlen = fws->fw_sect[i].fws_len;
   2794 		offset = fws->fw_sect[i].fws_devoff;
   2795 		DPRINTF(("LOAD FIRMWARE type %d offset %u len %d\n",
   2796 		    ucode_type, offset, dlen));
   2797 		error = iwm_firmware_load_chunk(sc, offset, data, dlen);
   2798 		if (error) {
   2799 			DPRINTF(("iwm_firmware_load_chunk() chunk %u of %u "
   2800 			    "returned error %02d\n", i, fws->fw_count, error));
   2801 			return error;
   2802 		}
   2803 	}
   2804 
   2805 	/* wait for the firmware to load */
   2806 	IWM_WRITE(sc, IWM_CSR_RESET, 0);
   2807 
   2808 	for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++) {
   2809 		error = tsleep(&sc->sc_uc, 0, "iwmuc", hz/10);
   2810 	}
   2811 
   2812 	return error;
   2813 }
   2814 
   2815 /* iwlwifi: pcie/trans.c */
   2816 static int
   2817 iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
   2818 {
   2819 	int error;
   2820 
   2821 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
   2822 
   2823 	if ((error = iwm_nic_init(sc)) != 0) {
   2824 		aprint_error_dev(sc->sc_dev, "Unable to init nic\n");
   2825 		return error;
   2826 	}
   2827 
   2828 	/* make sure rfkill handshake bits are cleared */
   2829 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
   2830 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
   2831 	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
   2832 
   2833 	/* clear (again), then enable host interrupts */
   2834 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
   2835 	iwm_enable_interrupts(sc);
   2836 
   2837 	/* really make sure rfkill handshake bits are cleared */
   2838 	/* maybe we should write a few times more?  just to make sure */
   2839 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
   2840 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
   2841 
   2842 	/* Load the given image to the HW */
   2843 	return iwm_load_firmware(sc, ucode_type);
   2844 }
   2845 
   2846 static int
   2847 iwm_fw_alive(struct iwm_softc *sc, uint32_t sched_base)
   2848 {
   2849 	return iwm_post_alive(sc);
   2850 }
   2851 
   2852 static int
   2853 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
   2854 {
   2855 	struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
   2856 		.valid = htole32(valid_tx_ant),
   2857 	};
   2858 
   2859 	return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
   2860 	    IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
   2861 }
   2862 
   2863 /* iwlwifi: mvm/fw.c */
   2864 static int
   2865 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
   2866 {
   2867 	struct iwm_phy_cfg_cmd phy_cfg_cmd;
   2868 	enum iwm_ucode_type ucode_type = sc->sc_uc_current;
   2869 
   2870 	/* Set parameters */
   2871 	phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
   2872 	phy_cfg_cmd.calib_control.event_trigger =
   2873 	    sc->sc_default_calib[ucode_type].event_trigger;
   2874 	phy_cfg_cmd.calib_control.flow_trigger =
   2875 	    sc->sc_default_calib[ucode_type].flow_trigger;
   2876 
   2877 	DPRINTFN(10, ("Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg));
   2878 	return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
   2879 	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
   2880 }
   2881 
   2882 static int
   2883 iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
   2884 	enum iwm_ucode_type ucode_type)
   2885 {
   2886 	enum iwm_ucode_type old_type = sc->sc_uc_current;
   2887 	int error;
   2888 
   2889 	if ((error = iwm_read_firmware(sc)) != 0)
   2890 		return error;
   2891 
   2892 	sc->sc_uc_current = ucode_type;
   2893         error = iwm_start_fw(sc, ucode_type);
   2894 	if (error) {
   2895 		sc->sc_uc_current = old_type;
   2896 		return error;
   2897 	}
   2898 
   2899 	return iwm_fw_alive(sc, sc->sched_base);
   2900 }
   2901 
   2902 /*
   2903  * mvm misc bits
   2904  */
   2905 
   2906 /*
   2907  * follows iwlwifi/fw.c
   2908  */
   2909 static int
   2910 iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
   2911 {
   2912 	int error;
   2913 
   2914 	/* do not operate with rfkill switch turned on */
   2915 	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
   2916 		aprint_error_dev(sc->sc_dev,
   2917 		    "radio is disabled by hardware switch\n");
   2918 		return EPERM;
   2919 	}
   2920 
   2921 	sc->sc_init_complete = 0;
   2922         if ((error = iwm_mvm_load_ucode_wait_alive(sc,
   2923 	    IWM_UCODE_TYPE_INIT)) != 0)
   2924 		return error;
   2925 
   2926 	if (justnvm) {
   2927 		if ((error = iwm_nvm_init(sc)) != 0) {
   2928 			aprint_error_dev(sc->sc_dev, "failed to read nvm\n");
   2929 			return error;
   2930 		}
   2931 		memcpy(&sc->sc_ic.ic_myaddr,
   2932 		    &sc->sc_nvm.hw_addr, ETHER_ADDR_LEN);
   2933 
   2934 		sc->sc_scan_cmd_len = sizeof(struct iwm_scan_cmd)
   2935 		    + sc->sc_capa_max_probe_len
   2936 		    + IWM_MAX_NUM_SCAN_CHANNELS
   2937 		    * sizeof(struct iwm_scan_channel);
   2938 		sc->sc_scan_cmd = kmem_alloc(sc->sc_scan_cmd_len, KM_SLEEP);
   2939 
   2940 		return 0;
   2941 	}
   2942 
   2943 	/* Send TX valid antennas before triggering calibrations */
   2944 	if ((error = iwm_send_tx_ant_cfg(sc, IWM_FW_VALID_TX_ANT(sc))) != 0)
   2945 		return error;
   2946 
   2947 	/*
   2948 	* Send phy configurations command to init uCode
   2949 	* to start the 16.0 uCode init image internal calibrations.
   2950 	*/
   2951 	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0 ) {
   2952 		DPRINTF(("%s: failed to run internal calibration: %d\n",
   2953 		    DEVNAME(sc), error));
   2954 		return error;
   2955 	}
   2956 
   2957 	/*
   2958 	 * Nothing to do but wait for the init complete notification
   2959 	 * from the firmware
   2960 	 */
   2961 	while (!sc->sc_init_complete)
   2962 		if ((error = tsleep(&sc->sc_init_complete,
   2963 		    0, "iwminit", 2*hz)) != 0)
   2964 			break;
   2965 
   2966 	return error;
   2967 }
   2968 
   2969 /*
   2970  * receive side
   2971  */
   2972 
   2973 /* (re)stock rx ring, called at init-time and at runtime */
   2974 static int
   2975 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
   2976 {
   2977 	struct iwm_rx_ring *ring = &sc->rxq;
   2978 	struct iwm_rx_data *data = &ring->data[idx];
   2979 	struct mbuf *m;
   2980 	int error;
   2981 	int fatal = 0;
   2982 
   2983 	m = m_gethdr(M_DONTWAIT, MT_DATA);
   2984 	if (m == NULL)
   2985 		return ENOBUFS;
   2986 
   2987 	if (size <= MCLBYTES) {
   2988 		MCLGET(m, M_DONTWAIT);
   2989 	} else {
   2990 		MEXTMALLOC(m, IWM_RBUF_SIZE, M_DONTWAIT);
   2991 	}
   2992 	if ((m->m_flags & M_EXT) == 0) {
   2993 		m_freem(m);
   2994 		return ENOBUFS;
   2995 	}
   2996 
   2997 	if (data->m != NULL) {
   2998 		bus_dmamap_unload(sc->sc_dmat, data->map);
   2999 		fatal = 1;
   3000 	}
   3001 
   3002 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   3003 	if ((error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
   3004 	    BUS_DMA_READ|BUS_DMA_NOWAIT)) != 0) {
   3005 		/* XXX */
   3006 		if (fatal)
   3007 			panic("iwm: could not load RX mbuf");
   3008 		m_freem(m);
   3009 		return error;
   3010 	}
   3011 	data->m = m;
   3012 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, size, BUS_DMASYNC_PREREAD);
   3013 
    3014 	/* Update RX descriptor. */
   3015 	ring->desc[idx] = htole32(data->map->dm_segs[0].ds_addr >> 8);
   3016 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
   3017 	    idx * sizeof(uint32_t), sizeof(uint32_t), BUS_DMASYNC_PREWRITE);
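         	/*
         	 * The hardware interprets each RX ring entry as the buffer's DMA
         	 * address in 256-byte units, hence the >> 8 above.
         	 */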
   3018 
   3019 	return 0;
   3020 }
   3021 
   3022 /* iwlwifi: mvm/rx.c */
   3023 #define IWM_RSSI_OFFSET 50
   3024 static int
   3025 iwm_mvm_calc_rssi(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
   3026 {
   3027 	int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
   3028 	uint32_t agc_a, agc_b;
   3029 	uint32_t val;
   3030 
   3031 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_AGC_IDX]);
   3032 	agc_a = (val & IWM_OFDM_AGC_A_MSK) >> IWM_OFDM_AGC_A_POS;
   3033 	agc_b = (val & IWM_OFDM_AGC_B_MSK) >> IWM_OFDM_AGC_B_POS;
   3034 
   3035 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_RSSI_AB_IDX]);
   3036 	rssi_a = (val & IWM_OFDM_RSSI_INBAND_A_MSK) >> IWM_OFDM_RSSI_A_POS;
   3037 	rssi_b = (val & IWM_OFDM_RSSI_INBAND_B_MSK) >> IWM_OFDM_RSSI_B_POS;
   3038 
   3039 	/*
   3040 	 * dBm = rssi dB - agc dB - constant.
   3041 	 * Higher AGC (higher radio gain) means lower signal.
   3042 	 */
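         	/*
         	 * E.g. (made-up numbers) rssi_a = 45 with agc_a = 60 gives
         	 * 45 - 50 - 60 = -65 dBm, IWM_RSSI_OFFSET being the constant 50.
         	 */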
   3043 	rssi_a_dbm = rssi_a - IWM_RSSI_OFFSET - agc_a;
   3044 	rssi_b_dbm = rssi_b - IWM_RSSI_OFFSET - agc_b;
   3045 	max_rssi_dbm = MAX(rssi_a_dbm, rssi_b_dbm);
   3046 
   3047 	DPRINTF(("Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
   3048 	    rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b));
   3049 
   3050 	return max_rssi_dbm;
   3051 }
   3052 
   3053 /* iwlwifi: mvm/rx.c */
   3054 /*
   3055  * iwm_mvm_get_signal_strength - use new rx PHY INFO API
    3056  * values are reported by the fw as positive values - they must be
    3057  * negated to obtain dBm.  Account for missing antennas by replacing 0
    3058  * values with -256 dBm: practically zero power and an infeasible 8-bit value.
   3059  */
   3060 static int
   3061 iwm_mvm_get_signal_strength(struct iwm_softc *sc,
   3062     struct iwm_rx_phy_info *phy_info)
   3063 {
   3064 	int energy_a, energy_b, energy_c, max_energy;
   3065 	uint32_t val;
   3066 
   3067 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
   3068 	energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
   3069 	    IWM_RX_INFO_ENERGY_ANT_A_POS;
   3070 	energy_a = energy_a ? -energy_a : -256;
   3071 	energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
   3072 	    IWM_RX_INFO_ENERGY_ANT_B_POS;
   3073 	energy_b = energy_b ? -energy_b : -256;
   3074 	energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
   3075 	    IWM_RX_INFO_ENERGY_ANT_C_POS;
   3076 	energy_c = energy_c ? -energy_c : -256;
   3077 	max_energy = MAX(energy_a, energy_b);
   3078 	max_energy = MAX(max_energy, energy_c);
   3079 
   3080 	DPRINTFN(12, ("energy In A %d B %d C %d, and max %d\n",
   3081 	    energy_a, energy_b, energy_c, max_energy));
   3082 
   3083 	return max_energy;
   3084 }
   3085 
   3086 static void
   3087 iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc,
   3088 	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
   3089 {
   3090 	struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
   3091 
   3092 	DPRINTFN(20, ("received PHY stats\n"));
   3093 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
   3094 	    sizeof(*phy_info), BUS_DMASYNC_POSTREAD);
   3095 
   3096 	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
   3097 }
   3098 
   3099 /*
   3100  * Retrieve the average noise (in dBm) among receivers.
   3101  */
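         /*
          * For example (made-up numbers): if two of the three antennas report
          * a beacon silence RSSI of 45 and 47 and the third reports 0, the
          * noise estimate is (45 + 47) / 2 - 107 = -61 dBm.
          */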
   3102 static int
   3103 iwm_get_noise(const struct iwm_mvm_statistics_rx_non_phy *stats)
   3104 {
   3105 	int i, total, nbant, noise;
   3106 
   3107 	total = nbant = noise = 0;
   3108 	for (i = 0; i < 3; i++) {
   3109 		noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
   3110 		if (noise) {
   3111 			total += noise;
   3112 			nbant++;
   3113 		}
   3114 	}
   3115 
   3116 	/* There should be at least one antenna but check anyway. */
   3117 	return (nbant == 0) ? -127 : (total / nbant) - 107;
   3118 }
   3119 
   3120 /*
   3121  * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
   3122  *
   3123  * Handles the actual data of the Rx packet from the fw
   3124  */
   3125 static void
   3126 iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc,
   3127 	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
   3128 {
   3129 	struct ieee80211com *ic = &sc->sc_ic;
   3130 	struct ieee80211_frame *wh;
   3131 	struct ieee80211_node *ni;
   3132 	struct ieee80211_channel *c = NULL;
   3133 	struct mbuf *m;
   3134 	struct iwm_rx_phy_info *phy_info;
   3135 	struct iwm_rx_mpdu_res_start *rx_res;
   3136 	int device_timestamp;
   3137 	uint32_t len;
   3138 	uint32_t rx_pkt_status;
   3139 	int rssi;
   3140 
   3141 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
   3142 	    BUS_DMASYNC_POSTREAD);
   3143 
   3144 	phy_info = &sc->sc_last_phy_info;
   3145 	rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
   3146 	wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
   3147 	len = le16toh(rx_res->byte_count);
   3148 	rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
   3149 
   3150 	m = data->m;
   3151 	m->m_data = pkt->data + sizeof(*rx_res);
   3152 	m->m_pkthdr.len = m->m_len = len;
   3153 
   3154 	if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
   3155 		DPRINTF(("dsp size out of range [0,20]: %d\n",
   3156 		    phy_info->cfg_phy_cnt));
   3157 		return;
   3158 	}
   3159 
   3160 	if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
   3161 	    !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
   3162 		DPRINTF(("Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status));
   3163 		return; /* drop */
   3164 	}
   3165 
   3166 	device_timestamp = le32toh(phy_info->system_timestamp);
   3167 
   3168 	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_RX_ENERGY_API) {
   3169 		rssi = iwm_mvm_get_signal_strength(sc, phy_info);
   3170 	} else {
   3171 		rssi = iwm_mvm_calc_rssi(sc, phy_info);
   3172 	}
   3173 	rssi = -rssi;
   3174 
   3175 	if (ic->ic_state == IEEE80211_S_SCAN)
   3176 		iwm_fix_channel(ic, m);
   3177 
   3178 	/* replenish ring for the buffer we're going to feed to the sharks */
   3179 	if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0)
   3180 		return;
   3181 
   3182 	m->m_pkthdr.rcvif = IC2IFP(ic);
   3183 
   3184 	if (sc->sc_scanband == IEEE80211_CHAN_5GHZ) {
   3185 		if (le32toh(phy_info->channel) < __arraycount(ic->ic_channels))
   3186 			c = &ic->ic_channels[le32toh(phy_info->channel)];
   3187 	}
   3188 
   3189 	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
   3190 	if (c)
   3191 		ni->ni_chan = c;
   3192 
   3193 	if (sc->sc_drvbpf != NULL) {
   3194 		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
   3195 
   3196 		tap->wr_flags = 0;
   3197 		if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
   3198 			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
    3199 		tap->wr_chan_freq =
    3200 		    htole16(ic->ic_channels[le32toh(phy_info->channel)].ic_freq);
    3201 		tap->wr_chan_flags =
    3202 		    htole16(ic->ic_channels[le32toh(phy_info->channel)].ic_flags);
   3203 		tap->wr_dbm_antsignal = (int8_t)rssi;
   3204 		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
   3205 		tap->wr_tsft = phy_info->system_timestamp;
   3206 		switch (phy_info->rate) {
   3207 		/* CCK rates. */
   3208 		case  10: tap->wr_rate =   2; break;
   3209 		case  20: tap->wr_rate =   4; break;
   3210 		case  55: tap->wr_rate =  11; break;
   3211 		case 110: tap->wr_rate =  22; break;
   3212 		/* OFDM rates. */
   3213 		case 0xd: tap->wr_rate =  12; break;
   3214 		case 0xf: tap->wr_rate =  18; break;
   3215 		case 0x5: tap->wr_rate =  24; break;
   3216 		case 0x7: tap->wr_rate =  36; break;
   3217 		case 0x9: tap->wr_rate =  48; break;
   3218 		case 0xb: tap->wr_rate =  72; break;
   3219 		case 0x1: tap->wr_rate =  96; break;
   3220 		case 0x3: tap->wr_rate = 108; break;
   3221 		/* Unknown rate: should not happen. */
   3222 		default:  tap->wr_rate =   0;
   3223 		}
   3224 
   3225 		bpf_mtap2(sc->sc_drvbpf, tap, sc->sc_rxtap_len, m);
   3226 	}
   3227 	ieee80211_input(ic, m, ni, rssi, device_timestamp);
   3228 	ieee80211_free_node(ni);
   3229 }
   3230 
   3231 static void
   3232 iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
   3233 	struct iwm_node *in)
   3234 {
   3235 	struct ieee80211com *ic = &sc->sc_ic;
   3236 	struct ifnet *ifp = IC2IFP(ic);
   3237 	struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
   3238 	int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
   3239 	int failack = tx_resp->failure_frame;
   3240 
   3241 	KASSERT(tx_resp->frame_count == 1);
   3242 
   3243 	/* Update rate control statistics. */
   3244 	in->in_amn.amn_txcnt++;
   3245 	if (failack > 0) {
   3246 		in->in_amn.amn_retrycnt++;
   3247 	}
   3248 
   3249 	if (status != IWM_TX_STATUS_SUCCESS &&
   3250 	    status != IWM_TX_STATUS_DIRECT_DONE)
   3251 		ifp->if_oerrors++;
   3252 	else
   3253 		ifp->if_opackets++;
   3254 }
   3255 
   3256 static void
   3257 iwm_mvm_rx_tx_cmd(struct iwm_softc *sc,
   3258 	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
   3259 {
   3260 	struct ieee80211com *ic = &sc->sc_ic;
   3261 	struct ifnet *ifp = IC2IFP(ic);
   3262 	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
   3263 	int idx = cmd_hdr->idx;
   3264 	int qid = cmd_hdr->qid;
   3265 	struct iwm_tx_ring *ring = &sc->txq[qid];
   3266 	struct iwm_tx_data *txd = &ring->data[idx];
   3267 	struct iwm_node *in = txd->in;
   3268 
   3269 	if (txd->done) {
   3270 		DPRINTF(("%s: got tx interrupt that's already been handled!\n",
   3271 		    DEVNAME(sc)));
   3272 		return;
   3273 	}
   3274 
   3275 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
   3276 	    BUS_DMASYNC_POSTREAD);
   3277 
   3278 	sc->sc_tx_timer = 0;
   3279 
   3280 	iwm_mvm_rx_tx_cmd_single(sc, pkt, in);
   3281 
   3282 	/* Unmap and free mbuf. */
   3283 	bus_dmamap_sync(sc->sc_dmat, txd->map, 0, txd->map->dm_mapsize,
   3284 	    BUS_DMASYNC_POSTWRITE);
   3285 	bus_dmamap_unload(sc->sc_dmat, txd->map);
   3286 	m_freem(txd->m);
   3287 
   3288 	DPRINTFN(8, ("free txd %p, in %p\n", txd, txd->in));
   3289 	KASSERT(txd->done == 0);
   3290 	txd->done = 1;
   3291 	KASSERT(txd->in);
   3292 
   3293 	txd->m = NULL;
   3294 	txd->in = NULL;
   3295 	ieee80211_free_node(&in->in_ni);
   3296 
   3297 	if (--ring->queued < IWM_TX_RING_LOMARK) {
   3298 		sc->qfullmsk &= ~(1 << ring->qid);
   3299 		if (sc->qfullmsk == 0 && (ifp->if_flags & IFF_OACTIVE)) {
   3300 			ifp->if_flags &= ~IFF_OACTIVE;
   3301 			/*
   3302 			 * Well, we're in interrupt context, but then again
   3303 			 * I guess net80211 does all sorts of stunts in
   3304 			 * interrupt context, so maybe this is no biggie.
   3305 			 */
   3306 			(*ifp->if_start)(ifp);
   3307 		}
   3308 	}
   3309 }
   3310 
   3311 /*
   3312  * BEGIN iwlwifi/mvm/binding.c
   3313  */
   3314 
   3315 static int
   3316 iwm_mvm_binding_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action)
   3317 {
   3318 	struct iwm_binding_cmd cmd;
   3319 	struct iwm_mvm_phy_ctxt *phyctxt = in->in_phyctxt;
   3320 	int i, ret;
   3321 	uint32_t status;
   3322 
   3323 	memset(&cmd, 0, sizeof(cmd));
   3324 
   3325 	cmd.id_and_color
   3326 	    = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
   3327 	cmd.action = htole32(action);
   3328 	cmd.phy = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
   3329 
   3330 	cmd.macs[0] = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
   3331 	for (i = 1; i < IWM_MAX_MACS_IN_BINDING; i++)
   3332 		cmd.macs[i] = htole32(IWM_FW_CTXT_INVALID);
   3333 
   3334 	status = 0;
   3335 	ret = iwm_mvm_send_cmd_pdu_status(sc, IWM_BINDING_CONTEXT_CMD,
   3336 	    sizeof(cmd), &cmd, &status);
   3337 	if (ret) {
   3338 		DPRINTF(("%s: Failed to send binding (action:%d): %d\n",
   3339 		    DEVNAME(sc), action, ret));
   3340 		return ret;
   3341 	}
   3342 
   3343 	if (status) {
   3344 		DPRINTF(("%s: Binding command failed: %u\n", DEVNAME(sc),
   3345 		    status));
   3346 		ret = EIO;
   3347 	}
   3348 
   3349 	return ret;
   3350 }
   3351 
   3352 static int
   3353 iwm_mvm_binding_update(struct iwm_softc *sc, struct iwm_node *in, int add)
   3354 {
    3355 	return iwm_mvm_binding_cmd(sc, in,
         	    add ? IWM_FW_CTXT_ACTION_ADD : IWM_FW_CTXT_ACTION_REMOVE);
   3356 }
   3357 
   3358 static int
   3359 iwm_mvm_binding_add_vif(struct iwm_softc *sc, struct iwm_node *in)
   3360 {
   3361 	return iwm_mvm_binding_update(sc, in, IWM_FW_CTXT_ACTION_ADD);
   3362 }
   3363 
   3364 /*
   3365  * END iwlwifi/mvm/binding.c
   3366  */
   3367 
   3368 /*
   3369  * BEGIN iwlwifi/mvm/phy-ctxt.c
   3370  */
   3371 
   3372 /*
   3373  * Construct the generic fields of the PHY context command
   3374  */
   3375 static void
   3376 iwm_mvm_phy_ctxt_cmd_hdr(struct iwm_softc *sc, struct iwm_mvm_phy_ctxt *ctxt,
   3377 	struct iwm_phy_context_cmd *cmd, uint32_t action, uint32_t apply_time)
   3378 {
   3379 	memset(cmd, 0, sizeof(struct iwm_phy_context_cmd));
   3380 
   3381 	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(ctxt->id,
   3382 	    ctxt->color));
   3383 	cmd->action = htole32(action);
   3384 	cmd->apply_time = htole32(apply_time);
   3385 }
   3386 
   3387 /*
   3388  * Add the phy configuration to the PHY context command
   3389  */
   3390 static void
   3391 iwm_mvm_phy_ctxt_cmd_data(struct iwm_softc *sc,
   3392 	struct iwm_phy_context_cmd *cmd, struct ieee80211_channel *chan,
   3393 	uint8_t chains_static, uint8_t chains_dynamic)
   3394 {
   3395 	struct ieee80211com *ic = &sc->sc_ic;
   3396 	uint8_t active_cnt, idle_cnt;
   3397 
   3398 	cmd->ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
   3399 	    IWM_PHY_BAND_24 : IWM_PHY_BAND_5;
   3400 
   3401 	cmd->ci.channel = ieee80211_chan2ieee(ic, chan);
   3402 	cmd->ci.width = IWM_PHY_VHT_CHANNEL_MODE20;
   3403 	cmd->ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
   3404 
    3405 	/* Set the RX chains. */
   3406 	idle_cnt = chains_static;
   3407 	active_cnt = chains_dynamic;
   3408 
   3409 	cmd->rxchain_info = htole32(IWM_FW_VALID_RX_ANT(sc) <<
   3410 					IWM_PHY_RX_CHAIN_VALID_POS);
   3411 	cmd->rxchain_info |= htole32(idle_cnt << IWM_PHY_RX_CHAIN_CNT_POS);
   3412 	cmd->rxchain_info |= htole32(active_cnt <<
   3413 	    IWM_PHY_RX_CHAIN_MIMO_CNT_POS);
   3414 
   3415 	cmd->txchain_info = htole32(IWM_FW_VALID_TX_ANT(sc));
   3416 }
   3417 
   3418 /*
    3419  * Send a PHY context command only if something in the configuration
    3420  * changed: either this is the first time the PHY configuration is
    3421  * applied, or the configuration has changed since the previous apply.
   3423  */
   3424 static int
   3425 iwm_mvm_phy_ctxt_apply(struct iwm_softc *sc,
   3426 	struct iwm_mvm_phy_ctxt *ctxt,
   3427 	uint8_t chains_static, uint8_t chains_dynamic,
   3428 	uint32_t action, uint32_t apply_time)
   3429 {
   3430 	struct iwm_phy_context_cmd cmd;
   3431 	int ret;
   3432 
   3433 	/* Set the command header fields */
   3434 	iwm_mvm_phy_ctxt_cmd_hdr(sc, ctxt, &cmd, action, apply_time);
   3435 
   3436 	/* Set the command data */
   3437 	iwm_mvm_phy_ctxt_cmd_data(sc, &cmd, ctxt->channel,
   3438 	    chains_static, chains_dynamic);
   3439 
   3440 	ret = iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONTEXT_CMD, IWM_CMD_SYNC,
   3441 	    sizeof(struct iwm_phy_context_cmd), &cmd);
   3442 	if (ret) {
   3443 		DPRINTF(("PHY ctxt cmd error. ret=%d\n", ret));
   3444 	}
   3445 	return ret;
   3446 }
   3447 
   3448 /*
   3449  * Send a command to add a PHY context based on the current HW configuration.
   3450  */
   3451 static int
   3452 iwm_mvm_phy_ctxt_add(struct iwm_softc *sc, struct iwm_mvm_phy_ctxt *ctxt,
   3453 	struct ieee80211_channel *chan,
   3454 	uint8_t chains_static, uint8_t chains_dynamic)
   3455 {
   3456 	ctxt->channel = chan;
   3457 	return iwm_mvm_phy_ctxt_apply(sc, ctxt,
   3458 	    chains_static, chains_dynamic, IWM_FW_CTXT_ACTION_ADD, 0);
   3459 }
   3460 
   3461 /*
   3462  * Send a command to modify the PHY context based on the current HW
   3463  * configuration. Note that the function does not check that the configuration
   3464  * changed.
   3465  */
   3466 static int
   3467 iwm_mvm_phy_ctxt_changed(struct iwm_softc *sc,
   3468 	struct iwm_mvm_phy_ctxt *ctxt, struct ieee80211_channel *chan,
   3469 	uint8_t chains_static, uint8_t chains_dynamic)
   3470 {
   3471 	ctxt->channel = chan;
   3472 	return iwm_mvm_phy_ctxt_apply(sc, ctxt,
   3473 	    chains_static, chains_dynamic, IWM_FW_CTXT_ACTION_MODIFY, 0);
   3474 }
   3475 
   3476 /*
   3477  * END iwlwifi/mvm/phy-ctxt.c
   3478  */
   3479 
   3480 /*
   3481  * transmit side
   3482  */
   3483 
   3484 /*
   3485  * Send a command to the firmware.  We try to implement the Linux
    3486  * driver interface for this routine; the code is mostly taken from
    3487  * if_iwn (iwn_cmd()).
   3488  *
   3489  * For now, we always copy the first part and map the second one (if it exists).
   3490  */
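         /*
          * Concretely: commands whose payload fits in the iwm_device_cmd slot
          * of the command ring are written there directly, while larger ones
          * are copied into a freshly allocated mbuf cluster and DMA-mapped
          * separately (the paylen > sizeof(cmd->data) branch below).
          */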
   3491 static int
   3492 iwm_send_cmd(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
   3493 {
   3494 	struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
   3495 	struct iwm_tfd *desc;
   3496 	struct iwm_tx_data *data;
   3497 	struct iwm_device_cmd *cmd;
   3498 	struct mbuf *m;
   3499 	bus_addr_t paddr;
   3500 	uint32_t addr_lo;
   3501 	int error, i, paylen, off, s;
   3502 	int code;
   3503 	int async, wantresp;
   3504 
   3505 	code = hcmd->id;
   3506 	async = hcmd->flags & IWM_CMD_ASYNC;
   3507 	wantresp = hcmd->flags & IWM_CMD_WANT_SKB;
   3508 
   3509 	for (i = 0, paylen = 0; i < __arraycount(hcmd->len); i++) {
   3510 		paylen += hcmd->len[i];
   3511 	}
   3512 
   3513 	/* if the command wants an answer, busy sc_cmd_resp */
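         	/*
         	 * Only one response-carrying command may be outstanding at a time:
         	 * sc_wantresp encodes the queue id and ring slot of that command,
         	 * and iwm_free_resp() resets it to -1 and wakes up the next waiter.
         	 */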
   3514 	if (wantresp) {
   3515 		KASSERT(!async);
   3516 		while (sc->sc_wantresp != -1)
   3517 			tsleep(&sc->sc_wantresp, 0, "iwmcmdsl", 0);
   3518 		sc->sc_wantresp = ring->qid << 16 | ring->cur;
   3519 		DPRINTFN(12, ("wantresp is %x\n", sc->sc_wantresp));
   3520 	}
   3521 
   3522 	/*
   3523 	 * Is the hardware still available?  (after e.g. above wait).
   3524 	 */
   3525 	s = splnet();
   3526 	if (sc->sc_flags & IWM_FLAG_STOPPED) {
   3527 		error = ENXIO;
   3528 		goto out;
   3529 	}
   3530 
   3531 	desc = &ring->desc[ring->cur];
   3532 	data = &ring->data[ring->cur];
   3533 
   3534 	if (paylen > sizeof(cmd->data)) {
   3535 		/* Command is too large */
   3536 		if (sizeof(cmd->hdr) + paylen > IWM_RBUF_SIZE) {
   3537 			error = EINVAL;
   3538 			goto out;
   3539 		}
   3540 		m = m_gethdr(M_DONTWAIT, MT_DATA);
   3541 		if (m == NULL) {
   3542 			error = ENOMEM;
   3543 			goto out;
   3544 		}
   3545 		MEXTMALLOC(m, IWM_RBUF_SIZE, M_DONTWAIT);
   3546 		if (!(m->m_flags & M_EXT)) {
   3547 			m_freem(m);
   3548 			error = ENOMEM;
   3549 			goto out;
   3550 		}
   3551 		cmd = mtod(m, struct iwm_device_cmd *);
   3552 		error = bus_dmamap_load(sc->sc_dmat, data->map, cmd,
   3553 		    IWM_RBUF_SIZE, NULL, BUS_DMA_NOWAIT | BUS_DMA_WRITE);
   3554 		if (error != 0) {
   3555 			m_freem(m);
   3556 			goto out;
   3557 		}
   3558 		data->m = m;
   3559 		paddr = data->map->dm_segs[0].ds_addr;
   3560 	} else {
   3561 		cmd = &ring->cmd[ring->cur];
   3562 		paddr = data->cmd_paddr;
   3563 	}
   3564 
   3565 	cmd->hdr.code = code;
   3566 	cmd->hdr.flags = 0;
   3567 	cmd->hdr.qid = ring->qid;
   3568 	cmd->hdr.idx = ring->cur;
   3569 
   3570 	for (i = 0, off = 0; i < __arraycount(hcmd->data); i++) {
   3571 		if (hcmd->len[i] == 0)
   3572 			continue;
   3573 		memcpy(cmd->data + off, hcmd->data[i], hcmd->len[i]);
   3574 		off += hcmd->len[i];
   3575 	}
   3576 	KASSERT(off == paylen);
   3577 
   3578 	/* lo field is not aligned */
   3579 	addr_lo = htole32((uint32_t)paddr);
   3580 	memcpy(&desc->tbs[0].lo, &addr_lo, sizeof(uint32_t));
   3581 	desc->tbs[0].hi_n_len  = htole16(iwm_get_dma_hi_addr(paddr)
   3582 	    | ((sizeof(cmd->hdr) + paylen) << 4));
   3583 	desc->num_tbs = 1;
   3584 
   3585 	DPRINTFN(8, ("iwm_send_cmd 0x%x size=%lu %s\n",
   3586 	    code, sizeof(cmd->hdr) + paylen, async ? " (async)" : ""));
   3587 
   3588 	if (paylen > sizeof(cmd->data)) {
   3589 		bus_dmamap_sync(sc->sc_dmat, data->map, 0,
   3590 		    sizeof(cmd->hdr) + paylen, BUS_DMASYNC_PREWRITE);
   3591 	} else {
   3592 		bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
   3593 		    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
   3594 		    sizeof(cmd->hdr) + paylen, BUS_DMASYNC_PREWRITE);
   3595 	}
   3596 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
   3597 	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
   3598 	    sizeof (*desc), BUS_DMASYNC_PREWRITE);
   3599 
   3600 	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
   3601 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
   3602 	if (!iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
   3603 	    IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
   3604 	    (IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
   3605 	     IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000)) {
   3606 		DPRINTF(("%s: acquiring device failed\n", DEVNAME(sc)));
   3607 		error = EBUSY;
   3608 		goto out;
   3609 	}
   3610 
   3611 #if 0
   3612 	iwm_update_sched(sc, ring->qid, ring->cur, 0, 0);
   3613 #endif
   3614 	DPRINTF(("sending command 0x%x qid %d, idx %d\n",
   3615 	    code, ring->qid, ring->cur));
   3616 
   3617 	/* Kick command ring. */
   3618 	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
   3619 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
   3620 
   3621 	if (!async) {
   3622 		/* m..m-mmyy-mmyyyy-mym-ym m-my generation */
   3623 		int generation = sc->sc_generation;
   3624 		error = tsleep(desc, PCATCH, "iwmcmd", hz);
   3625 		if (error == 0) {
   3626 			/* if hardware is no longer up, return error */
   3627 			if (generation != sc->sc_generation) {
   3628 				error = ENXIO;
   3629 			} else {
   3630 				hcmd->resp_pkt = (void *)sc->sc_cmd_resp;
   3631 			}
   3632 		}
   3633 	}
   3634  out:
   3635 	if (wantresp && error != 0) {
   3636 		iwm_free_resp(sc, hcmd);
   3637 	}
   3638 	splx(s);
   3639 
   3640 	return error;
   3641 }
   3642 
   3643 /* iwlwifi: mvm/utils.c */
   3644 static int
   3645 iwm_mvm_send_cmd_pdu(struct iwm_softc *sc, uint8_t id,
   3646 	uint32_t flags, uint16_t len, const void *data)
   3647 {
   3648 	struct iwm_host_cmd cmd = {
   3649 		.id = id,
   3650 		.len = { len, },
   3651 		.data = { data, },
   3652 		.flags = flags,
   3653 	};
   3654 
   3655 	return iwm_send_cmd(sc, &cmd);
   3656 }
   3657 
   3658 /* iwlwifi: mvm/utils.c */
   3659 static int
   3660 iwm_mvm_send_cmd_status(struct iwm_softc *sc,
   3661 	struct iwm_host_cmd *cmd, uint32_t *status)
   3662 {
   3663 	struct iwm_rx_packet *pkt;
   3664 	struct iwm_cmd_response *resp;
   3665 	int error, resp_len;
   3666 
   3667 	//lockdep_assert_held(&mvm->mutex);
   3668 
   3669 	KASSERT((cmd->flags & IWM_CMD_WANT_SKB) == 0);
   3670 	cmd->flags |= IWM_CMD_SYNC | IWM_CMD_WANT_SKB;
   3671 
   3672 	if ((error = iwm_send_cmd(sc, cmd)) != 0)
   3673 		return error;
   3674 	pkt = cmd->resp_pkt;
   3675 
   3676 	/* Can happen if RFKILL is asserted */
   3677 	if (!pkt) {
   3678 		error = 0;
   3679 		goto out_free_resp;
   3680 	}
   3681 
   3682 	if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
   3683 		error = EIO;
   3684 		goto out_free_resp;
   3685 	}
   3686 
   3687 	resp_len = iwm_rx_packet_payload_len(pkt);
   3688 	if (resp_len != sizeof(*resp)) {
   3689 		error = EIO;
   3690 		goto out_free_resp;
   3691 	}
   3692 
   3693 	resp = (void *)pkt->data;
   3694 	*status = le32toh(resp->status);
   3695  out_free_resp:
   3696 	iwm_free_resp(sc, cmd);
   3697 	return error;
   3698 }
   3699 
   3700 /* iwlwifi/mvm/utils.c */
   3701 static int
   3702 iwm_mvm_send_cmd_pdu_status(struct iwm_softc *sc, uint8_t id,
   3703 	uint16_t len, const void *data, uint32_t *status)
   3704 {
   3705 	struct iwm_host_cmd cmd = {
   3706 		.id = id,
   3707 		.len = { len, },
   3708 		.data = { data, },
   3709 	};
   3710 
   3711 	return iwm_mvm_send_cmd_status(sc, &cmd, status);
   3712 }
   3713 
   3714 static void
   3715 iwm_free_resp(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
   3716 {
   3717 	KASSERT(sc->sc_wantresp != -1);
   3718 	KASSERT((hcmd->flags & (IWM_CMD_WANT_SKB|IWM_CMD_SYNC))
   3719 	    == (IWM_CMD_WANT_SKB|IWM_CMD_SYNC));
   3720 	sc->sc_wantresp = -1;
   3721 	wakeup(&sc->sc_wantresp);
   3722 }
   3723 
   3724 /*
    3725  * Process a "command done" firmware notification.  This is where we wake up
    3726  * processes waiting for a synchronous command completion.
    3727  * Adapted from if_iwn.
   3728  */
   3729 static void
   3730 iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
   3731 {
   3732 	struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
   3733 	struct iwm_tx_data *data;
   3734 
   3735 	if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
   3736 		return;	/* Not a command ack. */
   3737 	}
   3738 
   3739 	data = &ring->data[pkt->hdr.idx];
   3740 
   3741 	/* If the command was mapped in an mbuf, free it. */
   3742 	if (data->m != NULL) {
   3743 		bus_dmamap_sync(sc->sc_dmat, data->map, 0,
   3744 		    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   3745 		bus_dmamap_unload(sc->sc_dmat, data->map);
   3746 		m_freem(data->m);
   3747 		data->m = NULL;
   3748 	}
   3749 	wakeup(&ring->desc[pkt->hdr.idx]);
   3750 }
   3751 
   3752 #if 0
   3753 /*
   3754  * necessary only for block ack mode
   3755  */
   3756 void
   3757 iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
   3758 	uint16_t len)
   3759 {
   3760 	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
   3761 	uint16_t w_val;
   3762 
   3763 	scd_bc_tbl = sc->sched_dma.vaddr;
   3764 
   3765 	len += 8; /* magic numbers came naturally from paris */
   3766 	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
   3767 		len = roundup(len, 4) / 4;
   3768 
   3769 	w_val = htole16(sta_id << 12 | len);
   3770 
   3771 	/* Update TX scheduler. */
   3772 	scd_bc_tbl[qid].tfd_offset[idx] = w_val;
   3773 	bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
    3774 	    (char *)(void *)&scd_bc_tbl[qid].tfd_offset[idx] - (char *)(void *)sc->sched_dma.vaddr,
   3775 	    sizeof(uint16_t), BUS_DMASYNC_PREWRITE);
   3776 
   3777 	/* I really wonder what this is ?!? */
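         	/*
         	 * Presumably the first IWM_TFD_QUEUE_SIZE_BC_DUP entries are
         	 * mirrored past IWM_TFD_QUEUE_SIZE_MAX so that the firmware can
         	 * always read a contiguous window of byte counts, even across the
         	 * ring wrap-around.
         	 */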
   3778 	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
   3779 		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
   3780 		bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
    3781 		    (char *)(void *)&scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] -
   3782 		    (char *)(void *)sc->sched_dma.vaddr,
   3783 		    sizeof (uint16_t), BUS_DMASYNC_PREWRITE);
   3784 	}
   3785 }
   3786 #endif
   3787 
   3788 /*
    3789  * Fill in various bits for management frames, and leave them
    3790  * unfilled for data frames (the firmware takes care of those).
   3791  * Return the selected TX rate.
   3792  */
   3793 static const struct iwm_rate *
   3794 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
   3795 	struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
   3796 {
   3797 	const struct iwm_rate *rinfo;
   3798 	int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
   3799 	int ridx, rate_flags;
   3800 	int nrates = in->in_ni.ni_rates.rs_nrates;
   3801 
   3802 	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
   3803 	tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
   3804 
   3805 	/* for data frames, use RS table */
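         	/*
         	 * ni_txrate counts up from the slowest rate in the node's rate
         	 * set, while the link-quality table this driver programs for the
         	 * firmware appears to be ordered from the fastest rate down;
         	 * hence the (nrates - 1) - ni_txrate inversion below.
         	 */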
   3806 	if (type == IEEE80211_FC0_TYPE_DATA) {
   3807 		if (sc->sc_fixed_ridx != -1) {
   3808 			tx->initial_rate_index = sc->sc_fixed_ridx;
   3809 		} else {
   3810 			tx->initial_rate_index = (nrates-1) - in->in_ni.ni_txrate;
   3811 		}
    3812 		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
   3813 		DPRINTFN(12, ("start with txrate %d\n", tx->initial_rate_index));
   3814 		return &iwm_rates[tx->initial_rate_index];
   3815 	}
   3816 
   3817 	/* for non-data, use the lowest supported rate */
   3818 	ridx = in->in_ridx[0];
   3819 	rinfo = &iwm_rates[ridx];
   3820 
   3821 	rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
   3822 	if (IWM_RIDX_IS_CCK(ridx))
   3823 		rate_flags |= IWM_RATE_MCS_CCK_MSK;
   3824 	tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
   3825 
   3826 	return rinfo;
   3827 }
   3828 
   3829 #define TB0_SIZE 16
   3830 static int
   3831 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
   3832 {
   3833 	struct ieee80211com *ic = &sc->sc_ic;
   3834 	struct iwm_node *in = (void *)ni;
   3835 	struct iwm_tx_ring *ring;
   3836 	struct iwm_tx_data *data;
   3837 	struct iwm_tfd *desc;
   3838 	struct iwm_device_cmd *cmd;
   3839 	struct iwm_tx_cmd *tx;
   3840 	struct ieee80211_frame *wh;
   3841 	struct ieee80211_key *k = NULL;
   3842 	struct mbuf *m1;
   3843 	const struct iwm_rate *rinfo;
   3844 	uint32_t flags;
   3845 	u_int hdrlen;
   3846 	bus_dma_segment_t *seg;
   3847 	uint8_t tid, type;
   3848 	int i, totlen, error, pad;
   3849 	int hdrlen2;
   3850 
   3851 	wh = mtod(m, struct ieee80211_frame *);
   3852 	hdrlen = ieee80211_anyhdrsize(wh);
   3853 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
   3854 
   3855 	hdrlen2 = (ieee80211_has_qos(wh)) ?
   3856 	    sizeof (struct ieee80211_qosframe) :
   3857 	    sizeof (struct ieee80211_frame);
   3858 
   3859 	if (hdrlen != hdrlen2)
   3860 		DPRINTF(("%s: hdrlen error (%d != %d)\n",
   3861 		    DEVNAME(sc), hdrlen, hdrlen2));
   3862 
   3863 	tid = 0;
   3864 
   3865 	ring = &sc->txq[ac];
   3866 	desc = &ring->desc[ring->cur];
   3867 	memset(desc, 0, sizeof(*desc));
   3868 	data = &ring->data[ring->cur];
   3869 
   3870 	/* Fill out iwm_tx_cmd to send to the firmware */
   3871 	cmd = &ring->cmd[ring->cur];
   3872 	cmd->hdr.code = IWM_TX_CMD;
   3873 	cmd->hdr.flags = 0;
   3874 	cmd->hdr.qid = ring->qid;
   3875 	cmd->hdr.idx = ring->cur;
   3876 
   3877 	tx = (void *)cmd->data;
   3878 	memset(tx, 0, sizeof(*tx));
   3879 
   3880 	rinfo = iwm_tx_fill_cmd(sc, in, wh, tx);
   3881 
   3882 	if (sc->sc_drvbpf != NULL) {
   3883 		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
   3884 
   3885 		tap->wt_flags = 0;
   3886 		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
   3887 		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
   3888 		tap->wt_rate = rinfo->rate;
   3889 		tap->wt_hwqueue = ac;
   3890 		if (wh->i_fc[1] & IEEE80211_FC1_WEP)
   3891 			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
   3892 
   3893 		bpf_mtap2(sc->sc_drvbpf, tap, sc->sc_txtap_len, m);
   3894 	}
   3895 
   3896 	/* Encrypt the frame if need be. */
   3897 	if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
   3898 		k = ieee80211_crypto_encap(ic, ni, m);
   3899 		if (k == NULL) {
   3900 			m_freem(m);
   3901 			return ENOBUFS;
   3902 		}
   3903 		/* Packet header may have moved, reset our local pointer. */
   3904 		wh = mtod(m, struct ieee80211_frame *);
   3905 	}
   3906 	totlen = m->m_pkthdr.len;
   3907 
   3908 	flags = 0;
   3909 	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
   3910 		flags |= IWM_TX_CMD_FLG_ACK;
   3911 	}
   3912 
   3913 	if (type != IEEE80211_FC0_TYPE_DATA
   3914 	    && (totlen + IEEE80211_CRC_LEN > ic->ic_rtsthreshold)
   3915 	    && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
   3916 		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
   3917 	}
   3918 
   3919 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
   3920 	    type != IEEE80211_FC0_TYPE_DATA)
   3921 		tx->sta_id = sc->sc_aux_sta.sta_id;
   3922 	else
   3923 		tx->sta_id = IWM_STATION_ID;
   3924 
   3925 	if (type == IEEE80211_FC0_TYPE_MGT) {
   3926 		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
   3927 
   3928 		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
   3929 		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
   3930 			tx->pm_frame_timeout = htole16(3);
   3931 		else
   3932 			tx->pm_frame_timeout = htole16(2);
   3933 	} else {
   3934 		tx->pm_frame_timeout = htole16(0);
   3935 	}
   3936 
    3937 	if (hdrlen & 3) {
    3938 		/* First segment length must be a multiple of 4. */
    3939 		flags |= IWM_TX_CMD_FLG_MH_PAD;
    3940 		pad = 4 - (hdrlen & 3);
    3941 	} else
    3942 		pad = 0;
   3943 
   3944 	tx->driver_txop = 0;
   3945 	tx->next_frame_len = 0;
   3946 
   3947 	tx->len = htole16(totlen);
   3948 	tx->tid_tspec = tid;
   3949 	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
   3950 
   3951 	/* Set physical address of "scratch area". */
   3952 	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
   3953 	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
   3954 
   3955 	/* Copy 802.11 header in TX command. */
   3956 	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);
   3957 
   3958 	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;
   3959 
   3960 	tx->sec_ctl = 0;
   3961 	tx->tx_flags |= htole32(flags);
   3962 
   3963 	/* Trim 802.11 header. */
   3964 	m_adj(m, hdrlen);
   3965 
   3966 	error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
   3967 	    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
   3968 	if (error != 0) {
   3969 		if (error != EFBIG) {
   3970 			aprint_error_dev(sc->sc_dev,
   3971 			    "can't map mbuf (error %d)\n", error);
   3972 			m_freem(m);
   3973 			return error;
   3974 		}
   3975 		/* Too many DMA segments, linearize mbuf. */
   3976 		MGETHDR(m1, M_DONTWAIT, MT_DATA);
   3977 		if (m1 == NULL) {
   3978 			m_freem(m);
   3979 			return ENOBUFS;
   3980 		}
   3981 		if (m->m_pkthdr.len > MHLEN) {
   3982 			MCLGET(m1, M_DONTWAIT);
   3983 			if (!(m1->m_flags & M_EXT)) {
   3984 				m_freem(m);
   3985 				m_freem(m1);
   3986 				return ENOBUFS;
   3987 			}
   3988 		}
   3989 		m_copydata(m, 0, m->m_pkthdr.len, mtod(m1, void *));
   3990 		m1->m_pkthdr.len = m1->m_len = m->m_pkthdr.len;
   3991 		m_freem(m);
   3992 		m = m1;
   3993 
   3994 		error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
   3995 		    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
   3996 		if (error != 0) {
   3997 			aprint_error_dev(sc->sc_dev,
   3998 			    "can't map mbuf (error %d)\n", error);
   3999 			m_freem(m);
   4000 			return error;
   4001 		}
   4002 	}
   4003 	data->m = m;
   4004 	data->in = in;
   4005 	data->done = 0;
   4006 
   4007 	DPRINTFN(8, ("sending txd %p, in %p\n", data, data->in));
   4008 	KASSERT(data->in != NULL);
   4009 
   4010 	DPRINTFN(8, ("sending data: qid=%d idx=%d len=%d nsegs=%d\n",
   4011 	    ring->qid, ring->cur, totlen, data->map->dm_nsegs));
   4012 
   4013 	/* Fill TX descriptor. */
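         	/*
         	 * Each TB entry packs the high bits of the DMA address into the
         	 * low nibble of hi_n_len and the segment length into the upper 12
         	 * bits.  TB0 covers the first 16 bytes of the command, TB1 the
         	 * rest of the TX command plus the 802.11 header, and the
         	 * remaining entries map the payload segments.
         	 */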
   4014 	desc->num_tbs = 2 + data->map->dm_nsegs;
   4015 
   4016 	desc->tbs[0].lo = htole32(data->cmd_paddr);
    4017 	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr) |
    4018 	    (TB0_SIZE << 4));
   4019 	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
    4020 	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr) |
    4021 	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
    4022 	      + hdrlen + pad - TB0_SIZE) << 4));
   4023 
   4024 	/* Other DMA segments are for data payload. */
   4025 	seg = data->map->dm_segs;
   4026 	for (i = 0; i < data->map->dm_nsegs; i++, seg++) {
   4027 		desc->tbs[i+2].lo = htole32(seg->ds_addr);
    4028 		desc->tbs[i+2].hi_n_len =
    4029 		    htole16(iwm_get_dma_hi_addr(seg->ds_addr)
    4030 		    | ((seg->ds_len) << 4));
   4031 	}
   4032 
   4033 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize,
   4034 	    BUS_DMASYNC_PREWRITE);
   4035 	bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
   4036 	    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
   4037 	    sizeof (*cmd), BUS_DMASYNC_PREWRITE);
   4038 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
   4039 	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
   4040 	    sizeof (*desc), BUS_DMASYNC_PREWRITE);
   4041 
   4042 #if 0
   4043 	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
   4044 #endif
   4045 
   4046 	/* Kick TX ring. */
   4047 	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
   4048 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
   4049 
   4050 	/* Mark TX ring as full if we reach a certain threshold. */
   4051 	if (++ring->queued > IWM_TX_RING_HIMARK) {
   4052 		sc->qfullmsk |= 1 << ring->qid;
   4053 	}
   4054 
   4055 	return 0;
   4056 }
   4057 
   4058 #if 0
   4059 /* not necessary? */
   4060 static int
   4061 iwm_mvm_flush_tx_path(struct iwm_softc *sc, int tfd_msk, int sync)
   4062 {
   4063 	struct iwm_tx_path_flush_cmd flush_cmd = {
   4064 		.queues_ctl = htole32(tfd_msk),
   4065 		.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
   4066 	};
   4067 	int ret;
   4068 
   4069 	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH,
   4070 	    sync ? IWM_CMD_SYNC : IWM_CMD_ASYNC,
   4071 	    sizeof(flush_cmd), &flush_cmd);
   4072 	if (ret)
   4073 		aprint_error_dev(sc->sc_dev, "Flushing tx queue failed: %d\n",
   4074 		    ret);
   4075 	return ret;
   4076 }
   4077 #endif
   4078 
   4079 
   4080 /*
   4081  * BEGIN mvm/power.c
   4082  */
   4083 
   4084 #define IWM_POWER_KEEP_ALIVE_PERIOD_SEC    25
   4085 
   4086 static int
   4087 iwm_mvm_beacon_filter_send_cmd(struct iwm_softc *sc,
   4088 	struct iwm_beacon_filter_cmd *cmd)
   4089 {
   4090 	int ret;
   4091 
   4092 	ret = iwm_mvm_send_cmd_pdu(sc, IWM_REPLY_BEACON_FILTERING_CMD,
   4093 	    IWM_CMD_SYNC, sizeof(struct iwm_beacon_filter_cmd), cmd);
   4094 
   4095 	if (!ret) {
   4096 		DPRINTF(("ba_enable_beacon_abort is: %d\n",
   4097 		    le32toh(cmd->ba_enable_beacon_abort)));
   4098 		DPRINTF(("ba_escape_timer is: %d\n",
   4099 		    le32toh(cmd->ba_escape_timer)));
   4100 		DPRINTF(("bf_debug_flag is: %d\n",
   4101 		    le32toh(cmd->bf_debug_flag)));
   4102 		DPRINTF(("bf_enable_beacon_filter is: %d\n",
   4103 		    le32toh(cmd->bf_enable_beacon_filter)));
   4104 		DPRINTF(("bf_energy_delta is: %d\n",
   4105 		    le32toh(cmd->bf_energy_delta)));
   4106 		DPRINTF(("bf_escape_timer is: %d\n",
   4107 		    le32toh(cmd->bf_escape_timer)));
   4108 		DPRINTF(("bf_roaming_energy_delta is: %d\n",
   4109 		    le32toh(cmd->bf_roaming_energy_delta)));
   4110 		DPRINTF(("bf_roaming_state is: %d\n",
   4111 		    le32toh(cmd->bf_roaming_state)));
   4112 		DPRINTF(("bf_temp_threshold is: %d\n",
   4113 		    le32toh(cmd->bf_temp_threshold)));
   4114 		DPRINTF(("bf_temp_fast_filter is: %d\n",
   4115 		    le32toh(cmd->bf_temp_fast_filter)));
   4116 		DPRINTF(("bf_temp_slow_filter is: %d\n",
   4117 		    le32toh(cmd->bf_temp_slow_filter)));
   4118 	}
   4119 	return ret;
   4120 }
   4121 
   4122 static void
   4123 iwm_mvm_beacon_filter_set_cqm_params(struct iwm_softc *sc,
   4124 	struct iwm_node *in, struct iwm_beacon_filter_cmd *cmd)
   4125 {
   4126 	cmd->ba_enable_beacon_abort = htole32(sc->sc_bf.ba_enabled);
   4127 }
   4128 
   4129 static int
   4130 iwm_mvm_update_beacon_abort(struct iwm_softc *sc, struct iwm_node *in,
   4131 	int enable)
   4132 {
   4133 	struct iwm_beacon_filter_cmd cmd = {
   4134 		IWM_BF_CMD_CONFIG_DEFAULTS,
   4135 		.bf_enable_beacon_filter = htole32(1),
   4136 		.ba_enable_beacon_abort = htole32(enable),
   4137 	};
   4138 
   4139 	if (!sc->sc_bf.bf_enabled)
   4140 		return 0;
   4141 
   4142 	sc->sc_bf.ba_enabled = enable;
   4143 	iwm_mvm_beacon_filter_set_cqm_params(sc, in, &cmd);
   4144 	return iwm_mvm_beacon_filter_send_cmd(sc, &cmd);
   4145 }
   4146 
   4147 static void
   4148 iwm_mvm_power_log(struct iwm_softc *sc, struct iwm_mac_power_cmd *cmd)
   4149 {
   4150 	DPRINTF(("Sending power table command on mac id 0x%X for "
   4151 	    "power level %d, flags = 0x%X\n",
   4152 	    cmd->id_and_color, IWM_POWER_SCHEME_CAM, le16toh(cmd->flags)));
   4153 	DPRINTF(("Keep alive = %u sec\n", le16toh(cmd->keep_alive_seconds)));
   4154 
   4155 	if (!(cmd->flags & htole16(IWM_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK))) {
   4156 		DPRINTF(("Disable power management\n"));
   4157 		return;
   4158 	}
   4159 	KASSERT(0);
   4160 
   4161 #if 0
   4162 	DPRINTF(mvm, "Rx timeout = %u usec\n",
   4163 			le32_to_cpu(cmd->rx_data_timeout));
   4164 	DPRINTF(mvm, "Tx timeout = %u usec\n",
   4165 			le32_to_cpu(cmd->tx_data_timeout));
   4166 	if (cmd->flags & cpu_to_le16(IWM_POWER_FLAGS_SKIP_OVER_DTIM_MSK))
   4167 		DPRINTF(mvm, "DTIM periods to skip = %u\n",
   4168 				cmd->skip_dtim_periods);
   4169 	if (cmd->flags & cpu_to_le16(IWM_POWER_FLAGS_LPRX_ENA_MSK))
   4170 		DPRINTF(mvm, "LP RX RSSI threshold = %u\n",
   4171 				cmd->lprx_rssi_threshold);
   4172 	if (cmd->flags & cpu_to_le16(IWM_POWER_FLAGS_ADVANCE_PM_ENA_MSK)) {
   4173 		DPRINTF(mvm, "uAPSD enabled\n");
   4174 		DPRINTF(mvm, "Rx timeout (uAPSD) = %u usec\n",
   4175 				le32_to_cpu(cmd->rx_data_timeout_uapsd));
   4176 		DPRINTF(mvm, "Tx timeout (uAPSD) = %u usec\n",
   4177 				le32_to_cpu(cmd->tx_data_timeout_uapsd));
   4178 		DPRINTF(mvm, "QNDP TID = %d\n", cmd->qndp_tid);
   4179 		DPRINTF(mvm, "ACs flags = 0x%x\n", cmd->uapsd_ac_flags);
   4180 		DPRINTF(mvm, "Max SP = %d\n", cmd->uapsd_max_sp);
   4181 	}
   4182 #endif
   4183 }
   4184 
   4185 static void
   4186 iwm_mvm_power_build_cmd(struct iwm_softc *sc, struct iwm_node *in,
   4187 	struct iwm_mac_power_cmd *cmd)
   4188 {
   4189 	struct ieee80211com *ic = &sc->sc_ic;
   4190 	struct ieee80211_node *ni = &in->in_ni;
   4191 	int dtimper, dtimper_msec;
   4192 	int keep_alive;
   4193 
   4194 	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
   4195 	    in->in_color));
   4196 	dtimper = ic->ic_dtim_period ?: 1;
   4197 
   4198 	/*
   4199 	 * Regardless of power management state the driver must set
   4200 	 * keep alive period. FW will use it for sending keep alive NDPs
   4201 	 * immediately after association. Check that keep alive period
   4202 	 * is at least 3 * DTIM
   4203 	 */
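         	/*
         	 * For example, with a DTIM period of 3 and a beacon interval of
         	 * 100 (treated as milliseconds here), dtimper_msec is 300, so the
         	 * 25 second keep-alive minimum wins and keep_alive ends up as 25.
         	 */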
   4204 	dtimper_msec = dtimper * ni->ni_intval;
   4205 	keep_alive
   4206 	    = MAX(3 * dtimper_msec, 1000 * IWM_POWER_KEEP_ALIVE_PERIOD_SEC);
   4207 	keep_alive = roundup(keep_alive, 1000) / 1000;
   4208 	cmd->keep_alive_seconds = htole16(keep_alive);
   4209 }
   4210 
   4211 static int
   4212 iwm_mvm_power_mac_update_mode(struct iwm_softc *sc, struct iwm_node *in)
   4213 {
   4214 	int ret;
   4215 	int ba_enable;
   4216 	struct iwm_mac_power_cmd cmd;
   4217 
   4218 	memset(&cmd, 0, sizeof(cmd));
   4219 
   4220 	iwm_mvm_power_build_cmd(sc, in, &cmd);
   4221 	iwm_mvm_power_log(sc, &cmd);
   4222 
   4223 	if ((ret = iwm_mvm_send_cmd_pdu(sc, IWM_MAC_PM_POWER_TABLE,
   4224 	    IWM_CMD_SYNC, sizeof(cmd), &cmd)) != 0)
   4225 		return ret;
   4226 
   4227 	ba_enable = !!(cmd.flags &
   4228 	    htole16(IWM_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
   4229 	return iwm_mvm_update_beacon_abort(sc, in, ba_enable);
   4230 }
   4231 
   4232 static int
   4233 iwm_mvm_power_update_device(struct iwm_softc *sc)
   4234 {
   4235 	struct iwm_device_power_cmd cmd = {
   4236 		.flags = htole16(IWM_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK),
   4237 	};
   4238 
   4239 	if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
   4240 		return 0;
   4241 
   4242 	cmd.flags |= htole16(IWM_DEVICE_POWER_FLAGS_CAM_MSK);
   4243 	DPRINTF(("Sending device power command with flags = 0x%X\n", cmd.flags));
   4244 
   4245 	return iwm_mvm_send_cmd_pdu(sc,
   4246 	    IWM_POWER_TABLE_CMD, IWM_CMD_SYNC, sizeof(cmd), &cmd);
   4247 }
   4248 
   4249 static int
   4250 iwm_mvm_enable_beacon_filter(struct iwm_softc *sc, struct iwm_node *in)
   4251 {
   4252 	struct iwm_beacon_filter_cmd cmd = {
   4253 		IWM_BF_CMD_CONFIG_DEFAULTS,
   4254 		.bf_enable_beacon_filter = htole32(1),
   4255 	};
   4256 	int ret;
   4257 
   4258 	iwm_mvm_beacon_filter_set_cqm_params(sc, in, &cmd);
   4259 	ret = iwm_mvm_beacon_filter_send_cmd(sc, &cmd);
   4260 
   4261 	if (ret == 0)
   4262 		sc->sc_bf.bf_enabled = 1;
   4263 
   4264 	return ret;
   4265 }
   4266 
   4267 static int
   4268 iwm_mvm_disable_beacon_filter(struct iwm_softc *sc, struct iwm_node *in)
   4269 {
   4270 	struct iwm_beacon_filter_cmd cmd;
   4271 	int ret;
   4272 
   4273 	memset(&cmd, 0, sizeof(cmd));
   4274 	if ((sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_BF_UPDATED) == 0)
   4275 		return 0;
   4276 
   4277 	ret = iwm_mvm_beacon_filter_send_cmd(sc, &cmd);
   4278 	if (ret == 0)
   4279 		sc->sc_bf.bf_enabled = 0;
   4280 
   4281 	return ret;
   4282 }
   4283 
   4284 #if 0
   4285 static int
   4286 iwm_mvm_update_beacon_filter(struct iwm_softc *sc, struct iwm_node *in)
   4287 {
   4288 	if (!sc->sc_bf.bf_enabled)
   4289 		return 0;
   4290 
   4291 	return iwm_mvm_enable_beacon_filter(sc, in);
   4292 }
   4293 #endif
   4294 
   4295 /*
   4296  * END mvm/power.c
   4297  */
   4298 
   4299 /*
   4300  * BEGIN mvm/sta.c
   4301  */
   4302 
   4303 static void
   4304 iwm_mvm_add_sta_cmd_v6_to_v5(struct iwm_mvm_add_sta_cmd_v6 *cmd_v6,
   4305 	struct iwm_mvm_add_sta_cmd_v5 *cmd_v5)
   4306 {
   4307 	memset(cmd_v5, 0, sizeof(*cmd_v5));
   4308 
   4309 	cmd_v5->add_modify = cmd_v6->add_modify;
   4310 	cmd_v5->tid_disable_tx = cmd_v6->tid_disable_tx;
   4311 	cmd_v5->mac_id_n_color = cmd_v6->mac_id_n_color;
   4312 	memcpy(cmd_v5->addr, cmd_v6->addr, ETHER_ADDR_LEN);
   4313 	cmd_v5->sta_id = cmd_v6->sta_id;
   4314 	cmd_v5->modify_mask = cmd_v6->modify_mask;
   4315 	cmd_v5->station_flags = cmd_v6->station_flags;
   4316 	cmd_v5->station_flags_msk = cmd_v6->station_flags_msk;
   4317 	cmd_v5->add_immediate_ba_tid = cmd_v6->add_immediate_ba_tid;
   4318 	cmd_v5->remove_immediate_ba_tid = cmd_v6->remove_immediate_ba_tid;
   4319 	cmd_v5->add_immediate_ba_ssn = cmd_v6->add_immediate_ba_ssn;
   4320 	cmd_v5->sleep_tx_count = cmd_v6->sleep_tx_count;
   4321 	cmd_v5->sleep_state_flags = cmd_v6->sleep_state_flags;
   4322 	cmd_v5->assoc_id = cmd_v6->assoc_id;
   4323 	cmd_v5->beamform_flags = cmd_v6->beamform_flags;
   4324 	cmd_v5->tfd_queue_msk = cmd_v6->tfd_queue_msk;
   4325 }
   4326 
   4327 static int
   4328 iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *sc,
   4329 	struct iwm_mvm_add_sta_cmd_v6 *cmd, int *status)
   4330 {
   4331 	struct iwm_mvm_add_sta_cmd_v5 cmd_v5;
   4332 
   4333 	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_STA_KEY_CMD) {
   4334 		return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA,
   4335 		    sizeof(*cmd), cmd, status);
   4336 	}
   4337 
   4338 	iwm_mvm_add_sta_cmd_v6_to_v5(cmd, &cmd_v5);
   4339 
   4340 	return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(cmd_v5),
   4341 	    &cmd_v5, status);
   4342 }
   4343 
   4344 /* send station add/update command to firmware */
   4345 static int
   4346 iwm_mvm_sta_send_to_fw(struct iwm_softc *sc, struct iwm_node *in, int update)
   4347 {
   4348 	struct iwm_mvm_add_sta_cmd_v6 add_sta_cmd;
   4349 	int ret;
   4350 	uint32_t status;
   4351 
   4352 	memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
   4353 
   4354 	add_sta_cmd.sta_id = IWM_STATION_ID;
   4355 	add_sta_cmd.mac_id_n_color
   4356 	    = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
   4357 	if (!update) {
   4358 		add_sta_cmd.tfd_queue_msk = htole32(0xf);
   4359 		IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
   4360 	}
   4361 	add_sta_cmd.add_modify = update ? 1 : 0;
   4362 	add_sta_cmd.station_flags_msk
   4363 	    |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
   4364 
   4365 	status = IWM_ADD_STA_SUCCESS;
   4366 	ret = iwm_mvm_send_add_sta_cmd_status(sc, &add_sta_cmd, &status);
   4367 	if (ret)
   4368 		return ret;
   4369 
   4370 	switch (status) {
   4371 	case IWM_ADD_STA_SUCCESS:
   4372 		break;
   4373 	default:
   4374 		ret = EIO;
   4375 		DPRINTF(("IWM_ADD_STA failed\n"));
   4376 		break;
   4377 	}
   4378 
   4379 	return ret;
   4380 }
   4381 
   4382 static int
   4383 iwm_mvm_add_sta(struct iwm_softc *sc, struct iwm_node *in)
   4384 {
   4385 	int ret;
   4386 
   4387 	ret = iwm_mvm_sta_send_to_fw(sc, in, 0);
   4388 	if (ret)
   4389 		return ret;
   4390 
   4391 	return 0;
   4392 }
   4393 
   4394 static int
   4395 iwm_mvm_update_sta(struct iwm_softc *sc, struct iwm_node *in)
   4396 {
   4397 	return iwm_mvm_sta_send_to_fw(sc, in, 1);
   4398 }
   4399 
   4400 static int
   4401 iwm_mvm_add_int_sta_common(struct iwm_softc *sc, struct iwm_int_sta *sta,
   4402 	const uint8_t *addr, uint16_t mac_id, uint16_t color)
   4403 {
   4404 	struct iwm_mvm_add_sta_cmd_v6 cmd;
   4405 	int ret;
   4406 	uint32_t status;
   4407 
   4408 	memset(&cmd, 0, sizeof(cmd));
   4409 	cmd.sta_id = sta->sta_id;
   4410 	cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(mac_id, color));
   4411 
   4412 	cmd.tfd_queue_msk = htole32(sta->tfd_queue_msk);
   4413 
   4414 	if (addr)
   4415 		memcpy(cmd.addr, addr, ETHER_ADDR_LEN);
   4416 
   4417 	ret = iwm_mvm_send_add_sta_cmd_status(sc, &cmd, &status);
   4418 	if (ret)
   4419 		return ret;
   4420 
   4421 	switch (status) {
   4422 	case IWM_ADD_STA_SUCCESS:
   4423 		DPRINTF(("Internal station added.\n"));
   4424 		return 0;
   4425 	default:
   4426 		DPRINTF(("%s: Add internal station failed, status=0x%x\n",
   4427 		    DEVNAME(sc), status));
   4428 		ret = EIO;
   4429 		break;
   4430 	}
   4431 	return ret;
   4432 }
   4433 
   4434 static int
   4435 iwm_mvm_add_aux_sta(struct iwm_softc *sc)
   4436 {
   4437 	int ret;
   4438 
   4439 	sc->sc_aux_sta.sta_id = 3;
   4440 	sc->sc_aux_sta.tfd_queue_msk = 0;
   4441 
   4442 	ret = iwm_mvm_add_int_sta_common(sc,
   4443 	    &sc->sc_aux_sta, NULL, IWM_MAC_INDEX_AUX, 0);
   4444 
   4445 	if (ret)
   4446 		memset(&sc->sc_aux_sta, 0, sizeof(sc->sc_aux_sta));
   4447 	return ret;
   4448 }
   4449 
   4450 /*
   4451  * END mvm/sta.c
   4452  */
   4453 
   4454 /*
   4455  * BEGIN mvm/scan.c
   4456  */
   4457 
   4458 #define IWM_PLCP_QUIET_THRESH 1
   4459 #define IWM_ACTIVE_QUIET_TIME 10
   4460 #define LONG_OUT_TIME_PERIOD 600
   4461 #define SHORT_OUT_TIME_PERIOD 200
   4462 #define SUSPEND_TIME_PERIOD 100
   4463 
   4464 static uint16_t
   4465 iwm_mvm_scan_rx_chain(struct iwm_softc *sc)
   4466 {
   4467 	uint16_t rx_chain;
   4468 	uint8_t rx_ant;
   4469 
   4470 	rx_ant = IWM_FW_VALID_RX_ANT(sc);
   4471 	rx_chain = rx_ant << IWM_PHY_RX_CHAIN_VALID_POS;
   4472 	rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
   4473 	rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_SEL_POS;
   4474 	rx_chain |= 0x1 << IWM_PHY_RX_CHAIN_DRIVER_FORCE_POS;
   4475 	return htole16(rx_chain);
   4476 }
   4477 
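         /* One 802.11 time unit (TU) is 1024 microseconds. */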
   4478 #define ieee80211_tu_to_usec(a) (1024*(a))
   4479 
   4480 static uint32_t
   4481 iwm_mvm_scan_max_out_time(struct iwm_softc *sc, uint32_t flags, int is_assoc)
   4482 {
   4483 	if (!is_assoc)
   4484 		return 0;
   4485 	if (flags & 0x1)
   4486 		return htole32(ieee80211_tu_to_usec(SHORT_OUT_TIME_PERIOD));
   4487 	return htole32(ieee80211_tu_to_usec(LONG_OUT_TIME_PERIOD));
   4488 }
   4489 
   4490 static uint32_t
   4491 iwm_mvm_scan_suspend_time(struct iwm_softc *sc, int is_assoc)
   4492 {
   4493 	if (!is_assoc)
   4494 		return 0;
   4495 	return htole32(ieee80211_tu_to_usec(SUSPEND_TIME_PERIOD));
   4496 }
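
         /*
          * In the iwlwifi code these helpers derive from, max_out_time bounds
          * how long the firmware may stay away from the operating channel
          * while associated, and suspend_time is how long it returns to the
          * operating channel between scan fragments; both are zero when we
          * are not associated, as above.
          */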
   4497 
   4498 static uint32_t
   4499 iwm_mvm_scan_rxon_flags(struct iwm_softc *sc, int flags)
   4500 {
   4501 	if (flags & IEEE80211_CHAN_2GHZ)
   4502 		return htole32(IWM_PHY_BAND_24);
   4503 	else
   4504 		return htole32(IWM_PHY_BAND_5);
   4505 }
   4506 
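         /*
          * Choose the TX rate and antenna for scan probe requests: 1 Mbit/s
          * CCK on 2 GHz (unless CCK is disallowed), otherwise 6 Mbit/s OFDM,
          * rotating through the valid TX antennas from scan to scan.
          */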
   4507 static uint32_t
   4508 iwm_mvm_scan_rate_n_flags(struct iwm_softc *sc, int flags, int no_cck)
   4509 {
   4510 	uint32_t tx_ant;
   4511 	int i, ind;
   4512 
   4513 	for (i = 0, ind = sc->sc_scan_last_antenna;
   4514 	    i < IWM_RATE_MCS_ANT_NUM; i++) {
   4515 		ind = (ind + 1) % IWM_RATE_MCS_ANT_NUM;
   4516 		if (IWM_FW_VALID_TX_ANT(sc) & (1 << ind)) {
   4517 			sc->sc_scan_last_antenna = ind;
   4518 			break;
   4519 		}
   4520 	}
   4521 	tx_ant = (1 << sc->sc_scan_last_antenna) << IWM_RATE_MCS_ANT_POS;
   4522 
   4523 	if ((flags & IEEE80211_CHAN_2GHZ) && !no_cck)
   4524 		return htole32(IWM_RATE_1M_PLCP | IWM_RATE_MCS_CCK_MSK |
   4525 				   tx_ant);
   4526 	else
   4527 		return htole32(IWM_RATE_6M_PLCP | tx_ant);
   4528 }
   4529 
   4530 /*
   4531  * If req->n_ssids > 0, it means we should do an active scan.
   4532  * In case of active scan w/o directed scan, we receive a zero-length SSID
   4533  * just to notify that this scan is active and not passive.
   4534  * In order to notify the FW of the number of SSIDs we wish to scan (including
   4535  * the zero-length one), we need to set the corresponding bits in chan->type,
    4536  * one for each SSID, and set the active bit (first). The first SSID is
   4537  * already included in the probe template, so we need to set only
   4538  * req->n_ssids - 1 bits in addition to the first bit.
   4539  */
   4540 static uint16_t
   4541 iwm_mvm_get_active_dwell(struct iwm_softc *sc, int flags, int n_ssids)
   4542 {
   4543 	if (flags & IEEE80211_CHAN_2GHZ)
   4544 		return 30  + 3 * (n_ssids + 1);
   4545 	return 20  + 2 * (n_ssids + 1);
   4546 }
   4547 
   4548 static uint16_t
   4549 iwm_mvm_get_passive_dwell(struct iwm_softc *sc, int flags)
   4550 {
   4551 	return (flags & IEEE80211_CHAN_2GHZ) ? 100 + 20 : 100 + 10;
   4552 }
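
         /*
          * Example dwell times from the two helpers above: an active 2 GHz
          * scan with a single SSID dwells 30 + 3 * 2 = 36 (nominally TU) per
          * channel and a passive 2 GHz scan dwells 120; the corresponding
          * 5 GHz values are 24 and 110.
          */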
   4553 
   4554 static int
   4555 iwm_mvm_scan_fill_channels(struct iwm_softc *sc, struct iwm_scan_cmd *cmd,
   4556 	int flags, int n_ssids, int basic_ssid)
   4557 {
   4558 	struct ieee80211com *ic = &sc->sc_ic;
   4559 	uint16_t passive_dwell = iwm_mvm_get_passive_dwell(sc, flags);
   4560 	uint16_t active_dwell = iwm_mvm_get_active_dwell(sc, flags, n_ssids);
   4561 	struct iwm_scan_channel *chan = (struct iwm_scan_channel *)
   4562 		(cmd->data + le16toh(cmd->tx_cmd.len));
   4563 	int type = (1 << n_ssids) - 1;
   4564 	struct ieee80211_channel *c;
   4565 	int nchan;
   4566 
   4567 	if (!basic_ssid)
   4568 		type |= (1 << n_ssids);
   4569 
   4570 	for (nchan = 0, c = &ic->ic_channels[1];
   4571 	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX];
   4572 	    c++) {
   4573 		if ((c->ic_flags & flags) != flags)
   4574 			continue;
   4575 
   4576 		chan->channel = htole16(ieee80211_mhz2ieee(c->ic_freq, flags));
   4577 		chan->type = htole32(type);
   4578 		if (c->ic_flags & IEEE80211_CHAN_PASSIVE)
   4579 			chan->type &= htole32(~IWM_SCAN_CHANNEL_TYPE_ACTIVE);
   4580 		chan->active_dwell = htole16(active_dwell);
   4581 		chan->passive_dwell = htole16(passive_dwell);
   4582 		chan->iteration_count = htole16(1);
   4583 		chan++;
   4584 		nchan++;
   4585 	}
   4586 	if (nchan == 0)
   4587 		DPRINTF(("%s: NO CHANNEL!\n", DEVNAME(sc)));
   4588 	return nchan;
   4589 }
   4590 
   4591 /*
   4592  * Fill in probe request with the following parameters:
   4593  * TA is our vif HW address, which mac80211 ensures we have.
    4594  * The packet is broadcast, so this is both SA and DA.
    4595  * The probe request IE is made out of two parts: first comes the most
    4596  * prioritized SSID if a directed scan is requested, then whatever extra
    4597  * information was given to us as the scan request IE.
   4598  */
   4599 static uint16_t
   4600 iwm_mvm_fill_probe_req(struct iwm_softc *sc, struct ieee80211_frame *frame,
   4601 	const uint8_t *ta, int n_ssids, const uint8_t *ssid, int ssid_len,
   4602 	const uint8_t *ie, int ie_len, int left)
   4603 {
   4604 	int len = 0;
   4605 	uint8_t *pos = NULL;
   4606 
   4607 	/* Make sure there is enough space for the probe request,
   4608 	 * two mandatory IEs and the data */
   4609 	left -= sizeof(*frame);
   4610 	if (left < 0)
   4611 		return 0;
   4612 
   4613 	frame->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
   4614 	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
   4615 	frame->i_fc[1] = IEEE80211_FC1_DIR_NODS;
   4616 	IEEE80211_ADDR_COPY(frame->i_addr1, etherbroadcastaddr);
   4617 	memcpy(frame->i_addr2, ta, ETHER_ADDR_LEN);
   4618 	IEEE80211_ADDR_COPY(frame->i_addr3, etherbroadcastaddr);
   4619 
   4620 	len += sizeof(*frame);
   4621 	CTASSERT(sizeof(*frame) == 24);
   4622 
   4623 	/* for passive scans, no need to fill anything */
   4624 	if (n_ssids == 0)
   4625 		return (uint16_t)len;
   4626 
   4627 	/* points to the payload of the request */
   4628 	pos = (uint8_t *)frame + sizeof(*frame);
   4629 
   4630 	/* fill in our SSID IE */
   4631 	left -= ssid_len + 2;
   4632 	if (left < 0)
   4633 		return 0;
   4634 	*pos++ = IEEE80211_ELEMID_SSID;
   4635 	*pos++ = ssid_len;
   4636 	if (ssid && ssid_len) { /* ssid_len may be == 0 even if ssid is valid */
   4637 		memcpy(pos, ssid, ssid_len);
   4638 		pos += ssid_len;
   4639 	}
   4640 
   4641 	len += ssid_len + 2;
   4642 
   4643 	if (left < ie_len)
   4644 		return len;
   4645 
   4646 	if (ie && ie_len) {
   4647 		memcpy(pos, ie, ie_len);
   4648 		len += ie_len;
   4649 	}
   4650 
   4651 	return (uint16_t)len;
   4652 }
   4653 
   4654 static int
   4655 iwm_mvm_scan_request(struct iwm_softc *sc, int flags,
   4656 	int n_ssids, uint8_t *ssid, int ssid_len)
   4657 {
   4658 	struct ieee80211com *ic = &sc->sc_ic;
   4659 	struct iwm_host_cmd hcmd = {
   4660 		.id = IWM_SCAN_REQUEST_CMD,
   4661 		.len = { 0, },
   4662 		.data = { sc->sc_scan_cmd, },
   4663 		.flags = IWM_CMD_SYNC,
   4664 		.dataflags = { IWM_HCMD_DFL_NOCOPY, },
   4665 	};
   4666 	struct iwm_scan_cmd *cmd = sc->sc_scan_cmd;
   4667 	int is_assoc = 0;
   4668 	int ret;
   4669 	uint32_t status;
   4670 	int basic_ssid = !(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_NO_BASIC_SSID);
   4671 
   4672 	//lockdep_assert_held(&mvm->mutex);
   4673 
   4674 	sc->sc_scanband = flags & (IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_5GHZ);
   4675 
   4676 	DPRINTF(("Handling ieee80211 scan request\n"));
   4677 	memset(cmd, 0, sc->sc_scan_cmd_len);
   4678 
   4679 	cmd->quiet_time = htole16(IWM_ACTIVE_QUIET_TIME);
   4680 	cmd->quiet_plcp_th = htole16(IWM_PLCP_QUIET_THRESH);
   4681 	cmd->rxchain_sel_flags = iwm_mvm_scan_rx_chain(sc);
   4682 	cmd->max_out_time = iwm_mvm_scan_max_out_time(sc, 0, is_assoc);
   4683 	cmd->suspend_time = iwm_mvm_scan_suspend_time(sc, is_assoc);
   4684 	cmd->rxon_flags = iwm_mvm_scan_rxon_flags(sc, flags);
   4685 	cmd->filter_flags = htole32(IWM_MAC_FILTER_ACCEPT_GRP |
   4686 	    IWM_MAC_FILTER_IN_BEACON);
   4687 
   4688 	cmd->type = htole32(IWM_SCAN_TYPE_FORCED);
   4689 	cmd->repeats = htole32(1);
   4690 
   4691 	/*
   4692 	 * If the user asked for passive scan, don't change to active scan if
   4693 	 * you see any activity on the channel - remain passive.
   4694 	 */
   4695 	if (n_ssids > 0) {
   4696 		cmd->passive2active = htole16(1);
   4697 		cmd->scan_flags |= IWM_SCAN_FLAGS_PASSIVE2ACTIVE;
   4698 #if 0
   4699 		if (basic_ssid) {
   4700 			ssid = req->ssids[0].ssid;
   4701 			ssid_len = req->ssids[0].ssid_len;
   4702 		}
   4703 #endif
   4704 	} else {
   4705 		cmd->passive2active = 0;
   4706 		cmd->scan_flags &= ~IWM_SCAN_FLAGS_PASSIVE2ACTIVE;
   4707 	}
   4708 
   4709 	cmd->tx_cmd.tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
   4710 	    IWM_TX_CMD_FLG_BT_DIS);
   4711 	cmd->tx_cmd.sta_id = sc->sc_aux_sta.sta_id;
   4712 	cmd->tx_cmd.life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
   4713 	cmd->tx_cmd.rate_n_flags = iwm_mvm_scan_rate_n_flags(sc, flags, 1/*XXX*/);
   4714 
   4715 	cmd->tx_cmd.len = htole16(iwm_mvm_fill_probe_req(sc,
   4716 			    (struct ieee80211_frame *)cmd->data,
   4717 			    ic->ic_myaddr, n_ssids, ssid, ssid_len,
   4718 			    NULL, 0, sc->sc_capa_max_probe_len));
   4719 
   4720 	cmd->channel_count
   4721 	    = iwm_mvm_scan_fill_channels(sc, cmd, flags, n_ssids, basic_ssid);
   4722 
   4723 	cmd->len = htole16(sizeof(struct iwm_scan_cmd) +
   4724 		le16toh(cmd->tx_cmd.len) +
   4725 		(cmd->channel_count * sizeof(struct iwm_scan_channel)));
   4726 	hcmd.len[0] = le16toh(cmd->len);
   4727 
   4728 	status = IWM_SCAN_RESPONSE_OK;
   4729 	ret = iwm_mvm_send_cmd_status(sc, &hcmd, &status);
   4730 	if (!ret && status == IWM_SCAN_RESPONSE_OK) {
   4731 		DPRINTF(("Scan request was sent successfully\n"));
   4732 	} else {
   4733 		/*
   4734 		 * If the scan failed, it usually means that the FW was unable
   4735 		 * to allocate the time events. Warn on it, but maybe we
   4736 		 * should try to send the command again with different params.
   4737 		 */
   4738 		sc->sc_scanband = 0;
   4739 		ret = EIO;
   4740 	}
   4741 	return ret;
   4742 }
   4743 
   4744 /*
   4745  * END mvm/scan.c
   4746  */
   4747 
   4748 /*
   4749  * BEGIN mvm/mac-ctxt.c
   4750  */
   4751 
   4752 static void
   4753 iwm_mvm_ack_rates(struct iwm_softc *sc, struct iwm_node *in,
   4754 	int *cck_rates, int *ofdm_rates)
   4755 {
   4756 	int lowest_present_ofdm = 100;
   4757 	int lowest_present_cck = 100;
   4758 	uint8_t cck = 0;
   4759 	uint8_t ofdm = 0;
   4760 	int i;
   4761 
   4762 	for (i = 0; i <= IWM_LAST_CCK_RATE; i++) {
   4763 		cck |= (1 << i);
   4764 		if (lowest_present_cck > i)
   4765 			lowest_present_cck = i;
   4766 	}
   4767 	for (i = IWM_FIRST_OFDM_RATE; i <= IWM_LAST_NON_HT_RATE; i++) {
   4768 		int adj = i - IWM_FIRST_OFDM_RATE;
   4769 		ofdm |= (1 << adj);
    4770 		if (lowest_present_ofdm > adj)
    4771 			lowest_present_ofdm = adj;
   4772 	}
   4773 
   4774 	/*
   4775 	 * Now we've got the basic rates as bitmaps in the ofdm and cck
   4776 	 * variables. This isn't sufficient though, as there might not
   4777 	 * be all the right rates in the bitmap. E.g. if the only basic
   4778 	 * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
   4779 	 * and 6 Mbps because the 802.11-2007 standard says in 9.6:
   4780 	 *
   4781 	 *    [...] a STA responding to a received frame shall transmit
   4782 	 *    its Control Response frame [...] at the highest rate in the
   4783 	 *    BSSBasicRateSet parameter that is less than or equal to the
   4784 	 *    rate of the immediately previous frame in the frame exchange
   4785 	 *    sequence ([...]) and that is of the same modulation class
   4786 	 *    ([...]) as the received frame. If no rate contained in the
   4787 	 *    BSSBasicRateSet parameter meets these conditions, then the
   4788 	 *    control frame sent in response to a received frame shall be
   4789 	 *    transmitted at the highest mandatory rate of the PHY that is
   4790 	 *    less than or equal to the rate of the received frame, and
   4791 	 *    that is of the same modulation class as the received frame.
   4792 	 *
   4793 	 * As a consequence, we need to add all mandatory rates that are
   4794 	 * lower than all of the basic rates to these bitmaps.
   4795 	 */
   4796 
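         	/*
         	 * Note (added for clarity): IWM_RATE_BIT_MSK() indexes bits over the
         	 * full rate table, so shifting right by IWM_FIRST_OFDM_RATE rebases
         	 * each bit onto the OFDM-relative bitmap built in the loop above,
         	 * where bit 0 is the lowest OFDM rate (6 Mbps).  The CCK fixups
         	 * further below do the analogous thing with IWM_FIRST_CCK_RATE.
         	 */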
   4797 	if (IWM_RATE_24M_INDEX < lowest_present_ofdm)
   4798 		ofdm |= IWM_RATE_BIT_MSK(24) >> IWM_FIRST_OFDM_RATE;
   4799 	if (IWM_RATE_12M_INDEX < lowest_present_ofdm)
   4800 		ofdm |= IWM_RATE_BIT_MSK(12) >> IWM_FIRST_OFDM_RATE;
   4801 	/* 6M already there or needed so always add */
   4802 	ofdm |= IWM_RATE_BIT_MSK(6) >> IWM_FIRST_OFDM_RATE;
   4803 
   4804 	/*
   4805 	 * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
   4806 	 * Note, however:
   4807 	 *  - if no CCK rates are basic, it must be ERP since there must
   4808 	 *    be some basic rates at all, so they're OFDM => ERP PHY
   4809 	 *    (or we're in 5 GHz, and the cck bitmap will never be used)
   4810 	 *  - if 11M is a basic rate, it must be ERP as well, so add 5.5M
   4811 	 *  - if 5.5M is basic, 1M and 2M are mandatory
   4812 	 *  - if 2M is basic, 1M is mandatory
   4813 	 *  - if 1M is basic, that's the only valid ACK rate.
   4814 	 * As a consequence, it's not as complicated as it sounds, just add
   4815 	 * any lower rates to the ACK rate bitmap.
   4816 	 */
   4817 	if (IWM_RATE_11M_INDEX < lowest_present_cck)
   4818 		cck |= IWM_RATE_BIT_MSK(11) >> IWM_FIRST_CCK_RATE;
   4819 	if (IWM_RATE_5M_INDEX < lowest_present_cck)
   4820 		cck |= IWM_RATE_BIT_MSK(5) >> IWM_FIRST_CCK_RATE;
   4821 	if (IWM_RATE_2M_INDEX < lowest_present_cck)
   4822 		cck |= IWM_RATE_BIT_MSK(2) >> IWM_FIRST_CCK_RATE;
   4823 	/* 1M already there or needed so always add */
   4824 	cck |= IWM_RATE_BIT_MSK(1) >> IWM_FIRST_CCK_RATE;
   4825 
   4826 	*cck_rates = cck;
   4827 	*ofdm_rates = ofdm;
   4828 }
   4829 
   4830 static void
   4831 iwm_mvm_mac_ctxt_cmd_common(struct iwm_softc *sc, struct iwm_node *in,
   4832 	struct iwm_mac_ctx_cmd *cmd, uint32_t action)
   4833 {
   4834 	struct ieee80211com *ic = &sc->sc_ic;
   4835 	struct ieee80211_node *ni = ic->ic_bss;
   4836 	int cck_ack_rates, ofdm_ack_rates;
   4837 	int i;
   4838 
   4839 	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
   4840 	    in->in_color));
   4841 	cmd->action = htole32(action);
   4842 
   4843 	cmd->mac_type = htole32(IWM_FW_MAC_TYPE_BSS_STA);
   4844 	cmd->tsf_id = htole32(in->in_tsfid);
   4845 
   4846 	IEEE80211_ADDR_COPY(cmd->node_addr, ic->ic_myaddr);
   4847 	if (in->in_assoc) {
   4848 		IEEE80211_ADDR_COPY(cmd->bssid_addr, ni->ni_bssid);
   4849 	} else {
   4850 		memset(cmd->bssid_addr, 0, sizeof(cmd->bssid_addr));
   4851 	}
   4852 	iwm_mvm_ack_rates(sc, in, &cck_ack_rates, &ofdm_ack_rates);
   4853 	cmd->cck_rates = htole32(cck_ack_rates);
   4854 	cmd->ofdm_rates = htole32(ofdm_ack_rates);
   4855 
   4856 	cmd->cck_short_preamble
   4857 	    = htole32((ic->ic_flags & IEEE80211_F_SHPREAMBLE)
   4858 	      ? IWM_MAC_FLG_SHORT_PREAMBLE : 0);
   4859 	cmd->short_slot
   4860 	    = htole32((ic->ic_flags & IEEE80211_F_SHSLOT)
   4861 	      ? IWM_MAC_FLG_SHORT_SLOT : 0);
   4862 
   4863 	for (i = 0; i < IWM_AC_NUM+1; i++) {
   4864 		int txf = i;
   4865 
   4866 		cmd->ac[txf].cw_min = htole16(0x0f);
   4867 		cmd->ac[txf].cw_max = htole16(0x3f);
   4868 		cmd->ac[txf].aifsn = 1;
   4869 		cmd->ac[txf].fifos_mask = (1 << txf);
   4870 		cmd->ac[txf].edca_txop = 0;
   4871 	}
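         	/*
         	 * Note (added for clarity): this programs the same fixed default EDCA
         	 * parameters (cw_min 15, cw_max 63, AIFSN 1, no TXOP) into every TX
         	 * FIFO, regardless of any per-AC WMM parameters the AP may advertise.
         	 */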
   4872 
   4873 	cmd->protection_flags |= htole32(IWM_MAC_PROT_FLG_TGG_PROTECT);
   4874 	cmd->protection_flags |= htole32(IWM_MAC_PROT_FLG_SELF_CTS_EN);
   4875 
   4876 	cmd->filter_flags = htole32(IWM_MAC_FILTER_ACCEPT_GRP);
   4877 }
   4878 
   4879 static int
   4880 iwm_mvm_mac_ctxt_send_cmd(struct iwm_softc *sc, struct iwm_mac_ctx_cmd *cmd)
   4881 {
   4882 	int ret = iwm_mvm_send_cmd_pdu(sc, IWM_MAC_CONTEXT_CMD, IWM_CMD_SYNC,
   4883 				       sizeof(*cmd), cmd);
   4884 	if (ret)
   4885 		DPRINTF(("%s: Failed to send MAC context (action:%d): %d\n",
   4886 		    DEVNAME(sc), le32toh(cmd->action), ret));
   4887 	return ret;
   4888 }
   4889 
   4890 /*
   4891  * Fill the specific data for mac context of type station or p2p client
   4892  */
   4893 static void
   4894 iwm_mvm_mac_ctxt_cmd_fill_sta(struct iwm_softc *sc, struct iwm_node *in,
   4895 	struct iwm_mac_data_sta *ctxt_sta, int force_assoc_off)
   4896 {
   4897 	struct ieee80211_node *ni = &in->in_ni;
   4898 	unsigned dtim_period, dtim_count;
   4899 
   4900 	dtim_period = ni->ni_dtim_period;
   4901 	dtim_count = ni->ni_dtim_count;
   4902 
   4903 	/* We need the dtim_period to set the MAC as associated */
   4904 	if (in->in_assoc && dtim_period && !force_assoc_off) {
   4905 		uint64_t tsf;
   4906 		uint32_t dtim_offs;
   4907 
   4908 		/*
   4909 		 * The DTIM count counts down, so when it is N that means N
   4910 		 * more beacon intervals happen until the DTIM TBTT. Therefore
   4911 		 * add this to the current time. If that ends up being in the
   4912 		 * future, the firmware will handle it.
   4913 		 *
   4914 		 * Also note that the system_timestamp (which we get here as
   4915 		 * "sync_device_ts") and TSF timestamp aren't at exactly the
   4916 		 * same offset in the frame -- the TSF is at the first symbol
   4917 		 * of the TSF, the system timestamp is at signal acquisition
   4918 		 * time. This means there's an offset between them of at most
   4919 		 * a few hundred microseconds (24 * 8 bits + PLCP time gives
   4920 		 * 384us in the longest case), this is currently not relevant
   4921 		 * as the firmware wakes up around 2ms before the TBTT.
   4922 		 */
   4923 		dtim_offs = dtim_count * ni->ni_intval;
   4924 		/* convert TU to usecs */
   4925 		dtim_offs *= 1024;
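         		/*
         		 * Worked example (added for clarity): with a DTIM count of 2
         		 * and a beacon interval of 100 TU, the next DTIM TBTT is
         		 * 200 TU away, i.e. 200 * 1024 = 204800 usecs after the
         		 * received beacon's timestamp.
         		 */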
   4926 
   4927 		tsf = ni->ni_tstamp.tsf;
   4928 
   4929 		ctxt_sta->dtim_tsf = htole64(tsf + dtim_offs);
   4930 		ctxt_sta->dtim_time = htole64(ni->ni_rstamp + dtim_offs);
   4931 
   4932 		DPRINTF(("DTIM TBTT is 0x%llx/0x%x, offset %d\n",
   4933 		    (long long)le64toh(ctxt_sta->dtim_tsf),
   4934 		    le32toh(ctxt_sta->dtim_time), dtim_offs));
   4935 
   4936 		ctxt_sta->is_assoc = htole32(1);
   4937 	} else {
   4938 		ctxt_sta->is_assoc = htole32(0);
   4939 	}
   4940 
   4941 	ctxt_sta->bi = htole32(ni->ni_intval);
   4942 	ctxt_sta->bi_reciprocal = htole32(iwm_mvm_reciprocal(ni->ni_intval));
   4943 	ctxt_sta->dtim_interval = htole32(ni->ni_intval * dtim_period);
   4944 	ctxt_sta->dtim_reciprocal =
   4945 	    htole32(iwm_mvm_reciprocal(ni->ni_intval * dtim_period));
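         	/*
         	 * Note (added for clarity): the *_reciprocal fields carry roughly
         	 * 2^32 divided by the corresponding interval (see
         	 * iwm_mvm_reciprocal()), presumably so the firmware can replace
         	 * divisions by the interval with multiplications.
         	 */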
   4946 
   4947 	/* 10 = CONN_MAX_LISTEN_INTERVAL */
   4948 	ctxt_sta->listen_interval = htole32(10);
   4949 	ctxt_sta->assoc_id = htole32(ni->ni_associd);
   4950 }
   4951 
   4952 static int
   4953 iwm_mvm_mac_ctxt_cmd_station(struct iwm_softc *sc, struct iwm_node *in,
   4954 	uint32_t action)
   4955 {
   4956 	struct iwm_mac_ctx_cmd cmd;
   4957 
   4958 	memset(&cmd, 0, sizeof(cmd));
   4959 
   4960 	/* Fill the common data for all mac context types */
   4961 	iwm_mvm_mac_ctxt_cmd_common(sc, in, &cmd, action);
   4962 
   4963 	if (in->in_assoc)
   4964 		cmd.filter_flags |= htole32(IWM_MAC_FILTER_IN_BEACON);
   4965 	else
   4966 		cmd.filter_flags &= ~htole32(IWM_MAC_FILTER_IN_BEACON);
   4967 
   4968 	/* Fill the data specific for station mode */
   4969 	iwm_mvm_mac_ctxt_cmd_fill_sta(sc, in,
   4970 	    &cmd.sta, action == IWM_FW_CTXT_ACTION_ADD);
   4971 
   4972 	return iwm_mvm_mac_ctxt_send_cmd(sc, &cmd);
   4973 }
   4974 
   4975 static int
   4976 iwm_mvm_mac_ctx_send(struct iwm_softc *sc, struct iwm_node *in, uint32_t action)
   4977 {
   4978 	return iwm_mvm_mac_ctxt_cmd_station(sc, in, action);
   4979 }
   4980 
   4981 static int
   4982 iwm_mvm_mac_ctxt_add(struct iwm_softc *sc, struct iwm_node *in)
   4983 {
   4984 	int ret;
   4985 
   4986 	ret = iwm_mvm_mac_ctx_send(sc, in, IWM_FW_CTXT_ACTION_ADD);
   4987 	if (ret)
   4988 		return ret;
   4989 
   4990 	return 0;
   4991 }
   4992 
   4993 static int
   4994 iwm_mvm_mac_ctxt_changed(struct iwm_softc *sc, struct iwm_node *in)
   4995 {
   4996 	return iwm_mvm_mac_ctx_send(sc, in, IWM_FW_CTXT_ACTION_MODIFY);
   4997 }
   4998 
   4999 #if 0
   5000 static int
   5001 iwm_mvm_mac_ctxt_remove(struct iwm_softc *sc, struct iwm_node *in)
   5002 {
   5003 	struct iwm_mac_ctx_cmd cmd;
   5004 	int ret;
   5005 
   5006 	if (!in->in_uploaded) {
    5007 		printf("%s: attempt to remove !uploaded node %p", DEVNAME(sc), in);
   5008 		return EIO;
   5009 	}
   5010 
   5011 	memset(&cmd, 0, sizeof(cmd));
   5012 
   5013 	cmd.id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
   5014 	    in->in_color));
   5015 	cmd.action = htole32(IWM_FW_CTXT_ACTION_REMOVE);
   5016 
   5017 	ret = iwm_mvm_send_cmd_pdu(sc,
   5018 	    IWM_MAC_CONTEXT_CMD, IWM_CMD_SYNC, sizeof(cmd), &cmd);
   5019 	if (ret) {
   5020 		aprint_error_dev(sc->sc_dev,
   5021 		    "Failed to remove MAC context: %d\n", ret);
   5022 		return ret;
   5023 	}
   5024 	in->in_uploaded = 0;
   5025 
   5026 	return 0;
   5027 }
   5028 #endif
   5029 
   5030 #define IWM_MVM_MISSED_BEACONS_THRESHOLD 8
   5031 
   5032 static void
   5033 iwm_mvm_rx_missed_beacons_notif(struct iwm_softc *sc,
   5034 	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
   5035 {
   5036 	struct iwm_missed_beacons_notif *mb = (void *)pkt->data;
   5037 
   5038 	DPRINTF(("missed bcn mac_id=%u, consecutive=%u (%u, %u, %u)\n",
   5039 	    le32toh(mb->mac_id),
   5040 	    le32toh(mb->consec_missed_beacons),
   5041 	    le32toh(mb->consec_missed_beacons_since_last_rx),
   5042 	    le32toh(mb->num_recvd_beacons),
   5043 	    le32toh(mb->num_expected_beacons)));
   5044 
   5045 	/*
   5046 	 * TODO: the threshold should be adjusted based on latency conditions,
   5047 	 * and/or in case of a CS flow on one of the other AP vifs.
   5048 	 */
   5049 	if (le32toh(mb->consec_missed_beacons_since_last_rx) >
   5050 	    IWM_MVM_MISSED_BEACONS_THRESHOLD)
   5051 		ieee80211_beacon_miss(&sc->sc_ic);
   5052 }
   5053 
   5054 /*
   5055  * END mvm/mac-ctxt.c
   5056  */
   5057 
   5058 /*
   5059  * BEGIN mvm/quota.c
   5060  */
   5061 
   5062 static int
   5063 iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_node *in)
   5064 {
   5065 	struct iwm_time_quota_cmd cmd;
   5066 	int i, idx, ret, num_active_macs, quota, quota_rem;
   5067 	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
   5068 	int n_ifs[IWM_MAX_BINDINGS] = {0, };
   5069 	uint16_t id;
   5070 
   5071 	memset(&cmd, 0, sizeof(cmd));
   5072 
   5073 	/* currently, PHY ID == binding ID */
   5074 	if (in) {
   5075 		id = in->in_phyctxt->id;
   5076 		KASSERT(id < IWM_MAX_BINDINGS);
   5077 		colors[id] = in->in_phyctxt->color;
   5078 
   5079 		if (1)
   5080 			n_ifs[id] = 1;
   5081 	}
   5082 
   5083 	/*
   5084 	 * The FW's scheduling session consists of
   5085 	 * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
   5086 	 * equally between all the bindings that require quota
   5087 	 */
   5088 	num_active_macs = 0;
   5089 	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
   5090 		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
   5091 		num_active_macs += n_ifs[i];
   5092 	}
   5093 
   5094 	quota = 0;
   5095 	quota_rem = 0;
   5096 	if (num_active_macs) {
   5097 		quota = IWM_MVM_MAX_QUOTA / num_active_macs;
   5098 		quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
   5099 	}
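         	/*
         	 * Worked example (added for clarity, assuming IWM_MVM_MAX_QUOTA is
         	 * 128 fragments as in the Linux driver this is based on): with one
         	 * active MAC its binding gets all 128 fragments; with three active
         	 * MACs each would get 42 and the remaining 2 are handed to the
         	 * first binding below.
         	 */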
   5100 
   5101 	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
   5102 		if (colors[i] < 0)
   5103 			continue;
   5104 
   5105 		cmd.quotas[idx].id_and_color =
   5106 			htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
   5107 
   5108 		if (n_ifs[i] <= 0) {
   5109 			cmd.quotas[idx].quota = htole32(0);
   5110 			cmd.quotas[idx].max_duration = htole32(0);
   5111 		} else {
   5112 			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
   5113 			cmd.quotas[idx].max_duration = htole32(0);
   5114 		}
   5115 		idx++;
   5116 	}
   5117 
   5118 	/* Give the remainder of the session to the first binding */
   5119 	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
   5120 
   5121 	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
   5122 	    sizeof(cmd), &cmd);
   5123 	if (ret)
   5124 		DPRINTF(("%s: Failed to send quota: %d\n", DEVNAME(sc), ret));
   5125 	return ret;
   5126 }
   5127 
   5128 /*
   5129  * END mvm/quota.c
   5130  */
   5131 
   5132 /*
    5133  * ieee80211 routines
   5134  */
   5135 
   5136 /*
   5137  * Change to AUTH state in 80211 state machine.  Roughly matches what
   5138  * Linux does in bss_info_changed().
   5139  */
   5140 static int
   5141 iwm_auth(struct iwm_softc *sc)
   5142 {
   5143 	struct ieee80211com *ic = &sc->sc_ic;
   5144 	struct iwm_node *in = (void *)ic->ic_bss;
   5145 	uint32_t duration;
   5146 	uint32_t min_duration;
   5147 	int error;
   5148 
   5149 	in->in_assoc = 0;
   5150 	if ((error = iwm_mvm_mac_ctxt_add(sc, in)) != 0) {
   5151 		DPRINTF(("%s: failed to add MAC\n", DEVNAME(sc)));
   5152 		return error;
   5153 	}
   5154 
   5155 	if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
   5156 	    in->in_ni.ni_chan, 1, 1)) != 0) {
   5157 		DPRINTF(("%s: failed add phy ctxt\n", DEVNAME(sc)));
   5158 		return error;
   5159 	}
   5160 	in->in_phyctxt = &sc->sc_phyctxt[0];
   5161 
   5162 	if ((error = iwm_mvm_binding_add_vif(sc, in)) != 0) {
   5163 		DPRINTF(("%s: binding cmd\n", DEVNAME(sc)));
   5164 		return error;
   5165 	}
   5166 
   5167 	if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
   5168 		DPRINTF(("%s: failed to add MAC\n", DEVNAME(sc)));
   5169 		return error;
   5170 	}
   5171 
   5172 	/* a bit superfluous? */
   5173 	while (sc->sc_auth_prot)
   5174 		tsleep(&sc->sc_auth_prot, 0, "iwmauth", 0);
   5175 	sc->sc_auth_prot = 1;
   5176 
   5177 	duration = min(IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS,
   5178 	    200 + in->in_ni.ni_intval);
   5179 	min_duration = min(IWM_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS,
   5180 	    100 + in->in_ni.ni_intval);
   5181 	iwm_mvm_protect_session(sc, in, duration, min_duration, 500);
   5182 
   5183 	while (sc->sc_auth_prot != 2) {
   5184 		/*
   5185 		 * well, meh, but if the kernel is sleeping for half a
   5186 		 * second, we have bigger problems
   5187 		 */
   5188 		if (sc->sc_auth_prot == 0) {
   5189 			DPRINTF(("%s: missed auth window!\n", DEVNAME(sc)));
   5190 			return ETIMEDOUT;
   5191 		} else if (sc->sc_auth_prot == -1) {
   5192 			DPRINTF(("%s: no time event, denied!\n", DEVNAME(sc)));
   5193 			sc->sc_auth_prot = 0;
   5194 			return EAUTH;
   5195 		}
   5196 		tsleep(&sc->sc_auth_prot, 0, "iwmau2", 0);
   5197 	}
   5198 
   5199 	return 0;
   5200 }
   5201 
   5202 static int
   5203 iwm_assoc(struct iwm_softc *sc)
   5204 {
   5205 	struct ieee80211com *ic = &sc->sc_ic;
   5206 	struct iwm_node *in = (void *)ic->ic_bss;
   5207 	int error;
   5208 
   5209 	if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
   5210 		DPRINTF(("%s: failed to update STA\n", DEVNAME(sc)));
   5211 		return error;
   5212 	}
   5213 
   5214 	in->in_assoc = 1;
   5215 	if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
   5216 		DPRINTF(("%s: failed to update MAC\n", DEVNAME(sc)));
   5217 		return error;
   5218 	}
   5219 
   5220 	return 0;
   5221 }
   5222 
   5223 static int
   5224 iwm_release(struct iwm_softc *sc, struct iwm_node *in)
   5225 {
   5226 	/*
   5227 	 * Ok, so *technically* the proper set of calls for going
   5228 	 * from RUN back to SCAN is:
   5229 	 *
   5230 	 * iwm_mvm_power_mac_disable(sc, in);
   5231 	 * iwm_mvm_mac_ctxt_changed(sc, in);
   5232 	 * iwm_mvm_rm_sta(sc, in);
   5233 	 * iwm_mvm_update_quotas(sc, NULL);
   5234 	 * iwm_mvm_mac_ctxt_changed(sc, in);
   5235 	 * iwm_mvm_binding_remove_vif(sc, in);
   5236 	 * iwm_mvm_mac_ctxt_remove(sc, in);
   5237 	 *
    5238 	 * However, that freezes the device no matter which permutations
   5239 	 * and modifications are attempted.  Obviously, this driver is missing
   5240 	 * something since it works in the Linux driver, but figuring out what
   5241 	 * is missing is a little more complicated.  Now, since we're going
   5242 	 * back to nothing anyway, we'll just do a complete device reset.
    5243 	 * Up yours, device!
   5244 	 */
   5245 	//iwm_mvm_flush_tx_path(sc, 0xf, 1);
   5246 	iwm_stop_device(sc);
   5247 	iwm_init_hw(sc);
   5248 	if (in)
   5249 		in->in_assoc = 0;
   5250 	return 0;
   5251 
   5252 #if 0
   5253 	int error;
   5254 
   5255 	iwm_mvm_power_mac_disable(sc, in);
   5256 
   5257 	if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
   5258 		aprint_error_dev(sc->sc_dev, "mac ctxt change fail 1 %d\n",
   5259 		    error);
   5260 		return error;
   5261 	}
   5262 
   5263 	if ((error = iwm_mvm_rm_sta(sc, in)) != 0) {
   5264 		aprint_error_dev(sc->sc_dev, "sta remove fail %d\n", error);
   5265 		return error;
   5266 	}
   5267 	error = iwm_mvm_rm_sta(sc, in);
   5268 	in->in_assoc = 0;
   5269 	iwm_mvm_update_quotas(sc, NULL);
   5270 	if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
   5271 		aprint_error_dev(sc->sc_dev, "mac ctxt change fail 2 %d\n",
   5272 		    error);
   5273 		return error;
   5274 	}
   5275 	iwm_mvm_binding_remove_vif(sc, in);
   5276 
   5277 	iwm_mvm_mac_ctxt_remove(sc, in);
   5278 
   5279 	return error;
   5280 #endif
   5281 }
   5282 
   5283 
   5284 static struct ieee80211_node *
   5285 iwm_node_alloc(struct ieee80211_node_table *nt)
   5286 {
   5287 	return malloc(sizeof(struct iwm_node), M_80211_NODE, M_NOWAIT | M_ZERO);
   5288 }
   5289 
   5290 static void
   5291 iwm_calib_timeout(void *arg)
   5292 {
   5293 	struct iwm_softc *sc = arg;
   5294 	struct ieee80211com *ic = &sc->sc_ic;
   5295 	int s;
   5296 
   5297 	s = splnet();
   5298 	if (ic->ic_fixed_rate == -1
   5299 	    && ic->ic_opmode == IEEE80211_M_STA
   5300 	    && ic->ic_bss) {
   5301 		struct iwm_node *in = (void *)ic->ic_bss;
   5302 		ieee80211_amrr_choose(&sc->sc_amrr, &in->in_ni, &in->in_amn);
   5303 	}
   5304 	splx(s);
   5305 
   5306 	callout_schedule(&sc->sc_calib_to, hz/2);
   5307 }
   5308 
   5309 static void
   5310 iwm_setrates(struct iwm_node *in)
   5311 {
   5312 	struct ieee80211_node *ni = &in->in_ni;
   5313 	struct ieee80211com *ic = ni->ni_ic;
   5314 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
   5315 	struct iwm_lq_cmd *lq = &in->in_lq;
   5316 	int nrates = ni->ni_rates.rs_nrates;
   5317 	int i, ridx, tab = 0;
   5318 	int txant = 0;
   5319 
   5320 	if (nrates > __arraycount(lq->rs_table)) {
   5321 		DPRINTF(("%s: node supports %d rates, driver handles only "
   5322 		    "%zu\n", DEVNAME(sc), nrates, __arraycount(lq->rs_table)));
   5323 		return;
   5324 	}
   5325 
   5326 	/* first figure out which rates we should support */
   5327 	memset(&in->in_ridx, -1, sizeof(in->in_ridx));
   5328 	for (i = 0; i < nrates; i++) {
   5329 		int rate = ni->ni_rates.rs_rates[i] & IEEE80211_RATE_VAL;
   5330 
   5331 		/* Map 802.11 rate to HW rate index. */
   5332 		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
   5333 			if (iwm_rates[ridx].rate == rate)
   5334 				break;
   5335 		if (ridx > IWM_RIDX_MAX)
   5336 			DPRINTF(("%s: WARNING: device rate for %d not found!\n",
   5337 			    DEVNAME(sc), rate));
   5338 		else
   5339 			in->in_ridx[i] = ridx;
   5340 	}
   5341 
   5342 	/* then construct a lq_cmd based on those */
   5343 	memset(lq, 0, sizeof(*lq));
   5344 	lq->sta_id = IWM_STATION_ID;
   5345 
   5346 	/*
   5347 	 * are these used? (we don't do SISO or MIMO)
   5348 	 * need to set them to non-zero, though, or we get an error.
   5349 	 */
   5350 	lq->single_stream_ant_msk = 1;
   5351 	lq->dual_stream_ant_msk = 1;
   5352 
   5353 	/*
   5354 	 * Build the actual rate selection table.
   5355 	 * The lowest bits are the rates.  Additionally,
   5356 	 * CCK needs bit 9 to be set.  The rest of the bits
    5357 	 * we add to the table select the tx antenna.
    5358 	 * Note that we add the rates starting with the highest rate
    5359 	 * first (the opposite order of ni_rates).
   5360 	 */
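         	/*
         	 * For example (added for clarity): a CCK 11 Mbps entry using the
         	 * first valid antenna would be iwm_rates[IWM_RATE_11M_INDEX].plcp
         	 * with that antenna's bit shifted to IWM_RATE_MCS_ANT_POS and
         	 * IWM_RATE_MCS_CCK_MSK set.
         	 */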
   5361 	for (i = 0; i < nrates; i++) {
   5362 		int nextant;
   5363 
   5364 		if (txant == 0)
   5365 			txant = IWM_FW_VALID_TX_ANT(sc);
   5366 		nextant = 1<<(ffs(txant)-1);
   5367 		txant &= ~nextant;
   5368 
   5369 		ridx = in->in_ridx[(nrates-1)-i];
   5370 		tab = iwm_rates[ridx].plcp;
   5371 		tab |= nextant << IWM_RATE_MCS_ANT_POS;
   5372 		if (IWM_RIDX_IS_CCK(ridx))
   5373 			tab |= IWM_RATE_MCS_CCK_MSK;
   5374 		DPRINTFN(2, ("station rate %d %x\n", i, tab));
   5375 		lq->rs_table[i] = htole32(tab);
   5376 	}
   5377 	/* then fill the rest with the lowest possible rate */
   5378 	for (i = nrates; i < __arraycount(lq->rs_table); i++) {
   5379 		KASSERT(tab != 0);
   5380 		lq->rs_table[i] = htole32(tab);
   5381 	}
   5382 
   5383 	/* init amrr */
   5384 	ieee80211_amrr_node_init(&sc->sc_amrr, &in->in_amn);
   5385 	ni->ni_txrate = nrates-1;
   5386 }
   5387 
   5388 static int
   5389 iwm_media_change(struct ifnet *ifp)
   5390 {
   5391 	struct iwm_softc *sc = ifp->if_softc;
   5392 	struct ieee80211com *ic = &sc->sc_ic;
   5393 	uint8_t rate, ridx;
   5394 	int error;
   5395 
   5396 	error = ieee80211_media_change(ifp);
   5397 	if (error != ENETRESET)
   5398 		return error;
   5399 
   5400 	if (ic->ic_fixed_rate != -1) {
   5401 		rate = ic->ic_sup_rates[ic->ic_curmode].
   5402 		    rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL;
   5403 		/* Map 802.11 rate to HW rate index. */
   5404 		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
   5405 			if (iwm_rates[ridx].rate == rate)
   5406 				break;
   5407 		sc->sc_fixed_ridx = ridx;
   5408 	}
   5409 
   5410 	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
   5411 	    (IFF_UP | IFF_RUNNING)) {
   5412 		iwm_stop(ifp, 0);
   5413 		error = iwm_init(ifp);
   5414 	}
   5415 	return error;
   5416 }
   5417 
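         /*
          * Note (added for clarity): 802.11 state changes are deferred to the
          * sc_nswq workqueue.  iwm_newstate() below only queues the request;
          * iwm_newstate_cb() performs the actual transition from process
          * context, since it issues synchronous firmware commands (iwm_auth(),
          * iwm_assoc(), scan requests) that may sleep.
          */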
   5418 static void
   5419 iwm_newstate_cb(struct work *wk, void *v)
   5420 {
   5421 	struct iwm_softc *sc = v;
   5422 	struct ieee80211com *ic = &sc->sc_ic;
   5423 	struct iwm_newstate_state *iwmns = (void *)wk;
   5424 	enum ieee80211_state nstate = iwmns->ns_nstate;
   5425 	int generation = iwmns->ns_generation;
   5426 	struct iwm_node *in;
   5427 	int arg = iwmns->ns_arg;
   5428 	int error;
   5429 
   5430 	kmem_free(iwmns, sizeof(*iwmns));
   5431 
   5432 	DPRINTF(("Prepare to switch state %d->%d\n", ic->ic_state, nstate));
   5433 	if (sc->sc_generation != generation) {
   5434 		DPRINTF(("newstate_cb: someone pulled the plug meanwhile\n"));
   5435 		if (nstate == IEEE80211_S_INIT) {
   5436 			DPRINTF(("newstate_cb: nstate == IEEE80211_S_INIT: calling sc_newstate()\n"));
   5437 			sc->sc_newstate(ic, nstate, arg);
   5438 		}
   5439 		return;
   5440 	}
   5441 
   5442 	DPRINTF(("switching state %d->%d\n", ic->ic_state, nstate));
   5443 
   5444 	/* disable beacon filtering if we're hopping out of RUN */
   5445 	if (ic->ic_state == IEEE80211_S_RUN && nstate != ic->ic_state) {
   5446 		iwm_mvm_disable_beacon_filter(sc, (void *)ic->ic_bss);
   5447 
   5448 		if (((in = (void *)ic->ic_bss) != NULL))
   5449 			in->in_assoc = 0;
   5450 		iwm_release(sc, NULL);
   5451 
   5452 		/*
    5453 		 * It's impossible to directly go RUN->SCAN.  Since we called
    5454 		 * iwm_release() above, the card is completely reinitialized,
    5455 		 * so the driver must do everything necessary to bring the card
   5456 		 * from INIT to SCAN.
   5457 		 *
   5458 		 * Additionally, upon receiving deauth frame from AP,
   5459 		 * OpenBSD 802.11 stack puts the driver in IEEE80211_S_AUTH
   5460 		 * state. This will also fail with this driver, so bring the FSM
   5461 		 * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
   5462 		 */
   5463 		if (nstate == IEEE80211_S_SCAN ||
   5464 		    nstate == IEEE80211_S_AUTH ||
   5465 		    nstate == IEEE80211_S_ASSOC) {
   5466 			DPRINTF(("Force transition to INIT; MGT=%d\n", arg));
   5467 			sc->sc_newstate(ic, IEEE80211_S_INIT, arg);
   5468 			DPRINTF(("Going INIT->SCAN\n"));
   5469 			nstate = IEEE80211_S_SCAN;
   5470 		}
   5471 	}
   5472 
   5473 	switch (nstate) {
   5474 	case IEEE80211_S_INIT:
   5475 		sc->sc_scanband = 0;
   5476 		break;
   5477 
   5478 	case IEEE80211_S_SCAN:
   5479 		if (sc->sc_scanband)
   5480 			break;
   5481 
   5482 		if ((error = iwm_mvm_scan_request(sc, IEEE80211_CHAN_2GHZ,
   5483 		    ic->ic_des_esslen != 0,
   5484 		    ic->ic_des_essid, ic->ic_des_esslen)) != 0) {
    5485 			DPRINTF(("%s: could not initiate scan\n", DEVNAME(sc)));
   5486 			return;
   5487 		}
   5488 		ic->ic_state = nstate;
   5489 		return;
   5490 
   5491 	case IEEE80211_S_AUTH:
   5492 		if ((error = iwm_auth(sc)) != 0) {
   5493 			DPRINTF(("%s: could not move to auth state: %d\n",
   5494 			    DEVNAME(sc), error));
   5495 			return;
   5496 		}
   5497 
   5498 		break;
   5499 
   5500 	case IEEE80211_S_ASSOC:
   5501 		if ((error = iwm_assoc(sc)) != 0) {
   5502 			DPRINTF(("%s: failed to associate: %d\n", DEVNAME(sc),
   5503 			    error));
   5504 			return;
   5505 		}
   5506 		break;
   5507 
   5508 	case IEEE80211_S_RUN: {
   5509 		struct iwm_host_cmd cmd = {
   5510 			.id = IWM_LQ_CMD,
   5511 			.len = { sizeof(in->in_lq), },
   5512 			.flags = IWM_CMD_SYNC,
   5513 		};
   5514 
   5515 		in = (struct iwm_node *)ic->ic_bss;
   5516 		iwm_mvm_power_mac_update_mode(sc, in);
   5517 		iwm_mvm_enable_beacon_filter(sc, in);
   5518 		iwm_mvm_update_quotas(sc, in);
   5519 		iwm_setrates(in);
   5520 
   5521 		cmd.data[0] = &in->in_lq;
   5522 		if ((error = iwm_send_cmd(sc, &cmd)) != 0) {
   5523 			DPRINTF(("%s: IWM_LQ_CMD failed\n", DEVNAME(sc)));
   5524 		}
   5525 
   5526 		callout_schedule(&sc->sc_calib_to, hz/2);
   5527 
   5528 		break; }
   5529 
   5530 	default:
   5531 		DPRINTF(("%s: unsupported state %d\n", DEVNAME(sc), nstate));
   5532 		break;
   5533 	}
   5534 
   5535 	sc->sc_newstate(ic, nstate, arg);
   5536 }
   5537 
   5538 static int
   5539 iwm_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
   5540 {
   5541 	struct iwm_newstate_state *iwmns;
   5542 	struct ifnet *ifp = IC2IFP(ic);
   5543 	struct iwm_softc *sc = ifp->if_softc;
   5544 
   5545 	callout_stop(&sc->sc_calib_to);
   5546 
   5547 	iwmns = kmem_intr_alloc(sizeof(*iwmns), KM_NOSLEEP);
   5548 	if (!iwmns) {
   5549 		DPRINTF(("%s: allocating state cb mem failed\n", DEVNAME(sc)));
   5550 		return ENOMEM;
   5551 	}
   5552 
   5553 	iwmns->ns_nstate = nstate;
   5554 	iwmns->ns_arg = arg;
   5555 	iwmns->ns_generation = sc->sc_generation;
   5556 
   5557 	workqueue_enqueue(sc->sc_nswq, &iwmns->ns_wk, NULL);
   5558 
   5559 	return 0;
   5560 }
   5561 
   5562 static void
   5563 iwm_endscan_cb(struct work *work __unused, void *arg)
   5564 {
   5565 	struct iwm_softc *sc = arg;
   5566 	struct ieee80211com *ic = &sc->sc_ic;
   5567 	int done;
   5568 
   5569 	DPRINTF(("scan ended\n"));
   5570 
   5571 	if (sc->sc_scanband == IEEE80211_CHAN_2GHZ) {
   5572 #ifndef IWM_NO_5GHZ
   5573 		int error;
   5574 		done = 0;
   5575 		if ((error = iwm_mvm_scan_request(sc,
   5576 		    IEEE80211_CHAN_5GHZ, ic->ic_des_esslen != 0,
   5577 		    ic->ic_des_essid, ic->ic_des_esslen)) != 0) {
   5578 			DPRINTF(("%s: could not initiate scan\n", DEVNAME(sc)));
   5579 			done = 1;
   5580 		}
   5581 #else
   5582 		done = 1;
   5583 #endif
   5584 	} else {
   5585 		done = 1;
   5586 	}
   5587 
   5588 	if (done) {
   5589 		if (!sc->sc_scanband) {
   5590 			ieee80211_cancel_scan(ic);
   5591 		} else {
   5592 			ieee80211_end_scan(ic);
   5593 		}
   5594 		sc->sc_scanband = 0;
   5595 	}
   5596 }
   5597 
   5598 static int
   5599 iwm_init_hw(struct iwm_softc *sc)
   5600 {
   5601 	struct ieee80211com *ic = &sc->sc_ic;
   5602 	int error, i, qid;
   5603 
   5604 	if ((error = iwm_preinit(sc)) != 0)
   5605 		return error;
   5606 
   5607 	if ((error = iwm_start_hw(sc)) != 0)
   5608 		return error;
   5609 
   5610 	if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
   5611 		return error;
   5612 	}
   5613 
   5614 	/*
   5615 	 * should stop and start HW since that INIT
   5616 	 * image just loaded
   5617 	 */
   5618 	iwm_stop_device(sc);
   5619 	if ((error = iwm_start_hw(sc)) != 0) {
   5620 		aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
   5621 		return error;
   5622 	}
   5623 
    5624 	/* restart, this time with the regular firmware */
   5625 	error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_REGULAR);
   5626 	if (error) {
   5627 		aprint_error_dev(sc->sc_dev, "could not load firmware\n");
   5628 		goto error;
   5629 	}
   5630 
    5631 	if ((error = iwm_send_tx_ant_cfg(sc, IWM_FW_VALID_TX_ANT(sc))) != 0)
    5632 		goto error;
    5633 
    5634 	/* Send phy db control command and then phy db calibration */
    5635 	if ((error = iwm_send_phy_db_data(sc)) != 0)
    5636 		goto error;
    5637 
    5638 	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0)
    5639 		goto error;
   5640 
   5641 	/* Add auxiliary station for scanning */
   5642 	if ((error = iwm_mvm_add_aux_sta(sc)) != 0)
   5643 		goto error;
   5644 
   5645 	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
   5646 		/*
   5647 		 * The channel used here isn't relevant as it's
   5648 		 * going to be overwritten in the other flows.
   5649 		 * For now use the first channel we have.
   5650 		 */
   5651 		if ((error = iwm_mvm_phy_ctxt_add(sc,
   5652 		    &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
   5653 			goto error;
   5654 	}
   5655 
    5656 	error = iwm_mvm_power_update_device(sc);
    5657 	if (error)
    5658 		goto error;
   5659 
   5660 	/* Mark TX rings as active. */
   5661 	for (qid = 0; qid < 4; qid++) {
   5662 		iwm_enable_txq(sc, qid, qid);
   5663 	}
   5664 
   5665 	return 0;
   5666 
   5667  error:
   5668 	iwm_stop_device(sc);
   5669 	return error;
   5670 }
   5671 
   5672 /*
   5673  * ifnet interfaces
   5674  */
   5675 
   5676 static int
   5677 iwm_init(struct ifnet *ifp)
   5678 {
   5679 	struct iwm_softc *sc = ifp->if_softc;
   5680 	int error;
   5681 
   5682 	if (sc->sc_flags & IWM_FLAG_HW_INITED) {
   5683 		return 0;
   5684 	}
   5685 	sc->sc_generation++;
   5686 	sc->sc_flags &= ~IWM_FLAG_STOPPED;
   5687 
   5688 	if ((error = iwm_init_hw(sc)) != 0) {
   5689 		iwm_stop(ifp, 1);
   5690 		return error;
   5691 	}
   5692 
   5693 	/*
   5694  	 * Ok, firmware loaded and we are jogging
   5695 	 */
   5696 
   5697 	ifp->if_flags &= ~IFF_OACTIVE;
   5698 	ifp->if_flags |= IFF_RUNNING;
   5699 
   5700 	ieee80211_begin_scan(&sc->sc_ic, 0);
   5701 	sc->sc_flags |= IWM_FLAG_HW_INITED;
   5702 
   5703 	return 0;
   5704 }
   5705 
   5706 /*
   5707  * Dequeue packets from sendq and call send.
   5708  * mostly from iwn
   5709  */
   5710 static void
   5711 iwm_start(struct ifnet *ifp)
   5712 {
   5713 	struct iwm_softc *sc = ifp->if_softc;
   5714 	struct ieee80211com *ic = &sc->sc_ic;
   5715 	struct ieee80211_node *ni;
    5716 	struct ether_header *eh;
   5717 	struct mbuf *m;
   5718 	int ac;
   5719 
   5720 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
   5721 		return;
   5722 
   5723 	for (;;) {
   5724 		/* why isn't this done per-queue? */
   5725 		if (sc->qfullmsk != 0) {
   5726 			ifp->if_flags |= IFF_OACTIVE;
   5727 			break;
   5728 		}
   5729 
   5730 		/* need to send management frames even if we're not RUNning */
   5731 		IF_DEQUEUE(&ic->ic_mgtq, m);
   5732 		if (m) {
   5733 			ni = (void *)m->m_pkthdr.rcvif;
   5734 			ac = 0;
   5735 			goto sendit;
   5736 		}
   5737 		if (ic->ic_state != IEEE80211_S_RUN) {
   5738 			break;
   5739 		}
   5740 
   5741 		IFQ_DEQUEUE(&ifp->if_snd, m);
   5742 		if (!m)
   5743 			break;
    5744 		if (m->m_len < sizeof (*eh) &&
    5745 		    (m = m_pullup(m, sizeof (*eh))) == NULL) {
    5746 			ifp->if_oerrors++;
    5747 			continue;
    5748 		}
   5749 		if (ifp->if_bpf != NULL)
   5750 			bpf_mtap(ifp, m);
   5751 
   5752 		eh = mtod(m, struct ether_header *);
   5753 		ni = ieee80211_find_txnode(ic, eh->ether_dhost);
   5754 		if (ni == NULL) {
   5755 			m_freem(m);
   5756 			ifp->if_oerrors++;
   5757 			continue;
   5758 		}
   5759 		/* classify mbuf so we can find which tx ring to use */
   5760 		if (ieee80211_classify(ic, m, ni) != 0) {
   5761 			m_freem(m);
   5762 			ieee80211_free_node(ni);
   5763 			ifp->if_oerrors++;
   5764 			continue;
   5765 		}
   5766 
   5767 		/* No QoS encapsulation for EAPOL frames. */
   5768 		ac = (eh->ether_type != htons(ETHERTYPE_PAE)) ?
   5769 		    M_WME_GETAC(m) : WME_AC_BE;
   5770 
   5771 		if ((m = ieee80211_encap(ic, m, ni)) == NULL) {
   5772 			ieee80211_free_node(ni);
   5773 			ifp->if_oerrors++;
   5774 			continue;
   5775 		}
   5776 
   5777  sendit:
   5778 		if (ic->ic_rawbpf != NULL)
   5779 			bpf_mtap3(ic->ic_rawbpf, m);
   5780 		if (iwm_tx(sc, m, ni, ac) != 0) {
   5781 			ieee80211_free_node(ni);
   5782 			ifp->if_oerrors++;
   5783 			continue;
   5784 		}
   5785 
   5786 		if (ifp->if_flags & IFF_UP) {
   5787 			sc->sc_tx_timer = 15;
   5788 			ifp->if_timer = 1;
   5789 		}
   5790 	}
   5791 
   5792 	return;
   5793 }
   5794 
   5795 static void
   5796 iwm_stop(struct ifnet *ifp, int disable)
   5797 {
   5798 	struct iwm_softc *sc = ifp->if_softc;
   5799 	struct ieee80211com *ic = &sc->sc_ic;
   5800 
   5801 	sc->sc_flags &= ~IWM_FLAG_HW_INITED;
   5802 	sc->sc_flags |= IWM_FLAG_STOPPED;
   5803 	sc->sc_generation++;
   5804 	sc->sc_scanband = 0;
   5805 	sc->sc_auth_prot = 0;
   5806 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   5807 
   5808 	if (ic->ic_state != IEEE80211_S_INIT)
   5809 		ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
   5810 
   5811 	ifp->if_timer = sc->sc_tx_timer = 0;
   5812 	iwm_stop_device(sc);
   5813 }
   5814 
   5815 static void
   5816 iwm_watchdog(struct ifnet *ifp)
   5817 {
   5818 	struct iwm_softc *sc = ifp->if_softc;
   5819 
   5820 	ifp->if_timer = 0;
   5821 	if (sc->sc_tx_timer > 0) {
   5822 		if (--sc->sc_tx_timer == 0) {
   5823 			aprint_error_dev(sc->sc_dev, "device timeout\n");
   5824 #ifdef IWM_DEBUG
   5825 			iwm_nic_error(sc);
   5826 #endif
   5827 			ifp->if_flags &= ~IFF_UP;
   5828 			iwm_stop(ifp, 1);
   5829 			ifp->if_oerrors++;
   5830 			return;
   5831 		}
   5832 		ifp->if_timer = 1;
   5833 	}
   5834 
   5835 	ieee80211_watchdog(&sc->sc_ic);
   5836 }
   5837 
   5838 static int
   5839 iwm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   5840 {
   5841 	struct iwm_softc *sc = ifp->if_softc;
   5842 	struct ieee80211com *ic = &sc->sc_ic;
   5843 	const struct sockaddr *sa;
   5844 	int s, error = 0;
   5845 
   5846 	s = splnet();
   5847 
   5848 	switch (cmd) {
   5849 	case SIOCSIFADDR:
   5850 		ifp->if_flags |= IFF_UP;
   5851 		/* FALLTHROUGH */
   5852 	case SIOCSIFFLAGS:
   5853 		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
   5854 			break;
   5855 		if (ifp->if_flags & IFF_UP) {
   5856 			if (!(ifp->if_flags & IFF_RUNNING)) {
   5857 				if ((error = iwm_init(ifp)) != 0)
   5858 					ifp->if_flags &= ~IFF_UP;
   5859 			}
   5860 		} else {
   5861 			if (ifp->if_flags & IFF_RUNNING)
   5862 				iwm_stop(ifp, 1);
   5863 		}
   5864 		break;
   5865 
   5866 	case SIOCADDMULTI:
   5867 	case SIOCDELMULTI:
   5868 		sa = ifreq_getaddr(SIOCADDMULTI, (struct ifreq *)data);
   5869 		error = (cmd == SIOCADDMULTI) ?
   5870 		    ether_addmulti(sa, &sc->sc_ec) :
   5871 		    ether_delmulti(sa, &sc->sc_ec);
   5872 
   5873 		if (error == ENETRESET)
   5874 			error = 0;
   5875 		break;
   5876 
   5877 	default:
   5878 		error = ieee80211_ioctl(ic, cmd, data);
   5879 	}
   5880 
   5881 	if (error == ENETRESET) {
   5882 		error = 0;
   5883 		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
   5884 		    (IFF_UP | IFF_RUNNING)) {
   5885 			iwm_stop(ifp, 0);
   5886 			error = iwm_init(ifp);
   5887 		}
   5888 	}
   5889 
   5890 	splx(s);
   5891 	return error;
   5892 }
   5893 
   5894 /*
   5895  * The interrupt side of things
   5896  */
   5897 
   5898 /*
   5899  * error dumping routines are from iwlwifi/mvm/utils.c
   5900  */
   5901 
   5902 /*
   5903  * Note: This structure is read from the device with IO accesses,
   5904  * and the reading already does the endian conversion. As it is
   5905  * read with uint32_t-sized accesses, any members with a different size
   5906  * need to be ordered correctly though!
   5907  */
   5908 struct iwm_error_event_table {
   5909 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
   5910 	uint32_t error_id;		/* type of error */
   5911 	uint32_t pc;			/* program counter */
   5912 	uint32_t blink1;		/* branch link */
   5913 	uint32_t blink2;		/* branch link */
   5914 	uint32_t ilink1;		/* interrupt link */
   5915 	uint32_t ilink2;		/* interrupt link */
   5916 	uint32_t data1;		/* error-specific data */
   5917 	uint32_t data2;		/* error-specific data */
   5918 	uint32_t data3;		/* error-specific data */
   5919 	uint32_t bcon_time;		/* beacon timer */
   5920 	uint32_t tsf_low;		/* network timestamp function timer */
   5921 	uint32_t tsf_hi;		/* network timestamp function timer */
   5922 	uint32_t gp1;		/* GP1 timer register */
   5923 	uint32_t gp2;		/* GP2 timer register */
   5924 	uint32_t gp3;		/* GP3 timer register */
   5925 	uint32_t ucode_ver;		/* uCode version */
   5926 	uint32_t hw_ver;		/* HW Silicon version */
   5927 	uint32_t brd_ver;		/* HW board version */
   5928 	uint32_t log_pc;		/* log program counter */
   5929 	uint32_t frame_ptr;		/* frame pointer */
   5930 	uint32_t stack_ptr;		/* stack pointer */
   5931 	uint32_t hcmd;		/* last host command header */
   5932 	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
   5933 				 * rxtx_flag */
   5934 	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
   5935 				 * host_flag */
   5936 	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
   5937 				 * enc_flag */
   5938 	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
   5939 				 * time_flag */
   5940 	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
   5941 				 * wico interrupt */
   5942 	uint32_t isr_pref;		/* isr status register LMPM_NIC_PREF_STAT */
   5943 	uint32_t wait_event;		/* wait event() caller address */
   5944 	uint32_t l2p_control;	/* L2pControlField */
   5945 	uint32_t l2p_duration;	/* L2pDurationField */
   5946 	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
   5947 	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
   5948 	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
   5949 				 * (LMPM_PMG_SEL) */
   5950 	uint32_t u_timestamp;	/* indicate when the date and time of the
   5951 				 * compilation */
   5952 	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
   5953 } __packed;
   5954 
   5955 #define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
   5956 #define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
   5957 
   5958 #ifdef IWM_DEBUG
   5959 static const struct {
   5960 	const char *name;
   5961 	uint8_t num;
   5962 } advanced_lookup[] = {
   5963 	{ "NMI_INTERRUPT_WDG", 0x34 },
   5964 	{ "SYSASSERT", 0x35 },
   5965 	{ "UCODE_VERSION_MISMATCH", 0x37 },
   5966 	{ "BAD_COMMAND", 0x38 },
   5967 	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
   5968 	{ "FATAL_ERROR", 0x3D },
   5969 	{ "NMI_TRM_HW_ERR", 0x46 },
   5970 	{ "NMI_INTERRUPT_TRM", 0x4C },
   5971 	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
   5972 	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
   5973 	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
   5974 	{ "NMI_INTERRUPT_HOST", 0x66 },
   5975 	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
   5976 	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
   5977 	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
   5978 	{ "ADVANCED_SYSASSERT", 0 },
   5979 };
   5980 
   5981 static const char *
   5982 iwm_desc_lookup(uint32_t num)
   5983 {
   5984 	int i;
   5985 
   5986 	for (i = 0; i < __arraycount(advanced_lookup) - 1; i++)
   5987 		if (advanced_lookup[i].num == num)
   5988 			return advanced_lookup[i].name;
   5989 
   5990 	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
   5991 	return advanced_lookup[i].name;
   5992 }
   5993 
   5994 /*
   5995  * Support for dumping the error log seemed like a good idea ...
   5996  * but it's mostly hex junk and the only sensible thing is the
   5997  * hw/ucode revision (which we know anyway).  Since it's here,
   5998  * I'll just leave it in, just in case e.g. the Intel guys want to
   5999  * help us decipher some "ADVANCED_SYSASSERT" later.
   6000  */
   6001 static void
   6002 iwm_nic_error(struct iwm_softc *sc)
   6003 {
   6004 	struct iwm_error_event_table table;
   6005 	uint32_t base;
   6006 
   6007 	aprint_error_dev(sc->sc_dev, "dumping device error log\n");
   6008 	base = sc->sc_uc.uc_error_event_table;
   6009 	if (base < 0x800000 || base >= 0x80C000) {
   6010 		aprint_error_dev(sc->sc_dev,
   6011 		    "Not valid error log pointer 0x%08x\n", base);
   6012 		return;
   6013 	}
   6014 
   6015 	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t)) != 0) {
   6016 		aprint_error_dev(sc->sc_dev, "reading errlog failed\n");
   6017 		return;
   6018 	}
   6019 
   6020 	if (!table.valid) {
   6021 		aprint_error_dev(sc->sc_dev, "errlog not found, skipping\n");
   6022 		return;
   6023 	}
   6024 
   6025 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
   6026 		aprint_error_dev(sc->sc_dev, "Start IWL Error Log Dump:\n");
   6027 		aprint_error_dev(sc->sc_dev, "Status: 0x%x, count: %d\n",
   6028 		    sc->sc_flags, table.valid);
   6029 	}
   6030 
   6031 	aprint_error_dev(sc->sc_dev, "%08X | %-28s\n", table.error_id,
   6032 		iwm_desc_lookup(table.error_id));
   6033 	aprint_error_dev(sc->sc_dev, "%08X | uPc\n", table.pc);
   6034 	aprint_error_dev(sc->sc_dev, "%08X | branchlink1\n", table.blink1);
   6035 	aprint_error_dev(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
   6036 	aprint_error_dev(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
   6037 	aprint_error_dev(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
   6038 	aprint_error_dev(sc->sc_dev, "%08X | data1\n", table.data1);
   6039 	aprint_error_dev(sc->sc_dev, "%08X | data2\n", table.data2);
   6040 	aprint_error_dev(sc->sc_dev, "%08X | data3\n", table.data3);
   6041 	aprint_error_dev(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
   6042 	aprint_error_dev(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
   6043 	aprint_error_dev(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
   6044 	aprint_error_dev(sc->sc_dev, "%08X | time gp1\n", table.gp1);
   6045 	aprint_error_dev(sc->sc_dev, "%08X | time gp2\n", table.gp2);
   6046 	aprint_error_dev(sc->sc_dev, "%08X | time gp3\n", table.gp3);
   6047 	aprint_error_dev(sc->sc_dev, "%08X | uCode version\n", table.ucode_ver);
   6048 	aprint_error_dev(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
   6049 	aprint_error_dev(sc->sc_dev, "%08X | board version\n", table.brd_ver);
   6050 	aprint_error_dev(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
   6051 	aprint_error_dev(sc->sc_dev, "%08X | isr0\n", table.isr0);
   6052 	aprint_error_dev(sc->sc_dev, "%08X | isr1\n", table.isr1);
   6053 	aprint_error_dev(sc->sc_dev, "%08X | isr2\n", table.isr2);
   6054 	aprint_error_dev(sc->sc_dev, "%08X | isr3\n", table.isr3);
   6055 	aprint_error_dev(sc->sc_dev, "%08X | isr4\n", table.isr4);
   6056 	aprint_error_dev(sc->sc_dev, "%08X | isr_pref\n", table.isr_pref);
   6057 	aprint_error_dev(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
   6058 	aprint_error_dev(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
   6059 	aprint_error_dev(sc->sc_dev, "%08X | l2p_duration\n",
   6060 	    table.l2p_duration);
   6061 	aprint_error_dev(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
   6062 	aprint_error_dev(sc->sc_dev, "%08X | l2p_addr_match\n",
   6063 	    table.l2p_addr_match);
   6064 	aprint_error_dev(sc->sc_dev, "%08X | lmpm_pmg_sel\n",
   6065 	    table.lmpm_pmg_sel);
   6066 	aprint_error_dev(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
   6067 	aprint_error_dev(sc->sc_dev, "%08X | flow_handler\n",
   6068 	    table.flow_handler);
   6069 }
   6070 #endif
   6071 
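         /*
          * Note (added for clarity): the SYNC_RESP_* helpers sync the portion of
          * the RX buffer that follows the packet header before the response
          * payload is dereferenced, and set the given pointer to that payload
          * (the data immediately after *pkt).
          */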
   6072 #define SYNC_RESP_STRUCT(_var_, _pkt_)					\
   6073 do {									\
   6074 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
   6075 	    sizeof(*(_var_)), BUS_DMASYNC_POSTREAD);			\
   6076 	_var_ = (void *)((_pkt_)+1);					\
   6077 } while (/*CONSTCOND*/0)
   6078 
   6079 #define SYNC_RESP_PTR(_ptr_, _len_, _pkt_)				\
   6080 do {									\
   6081 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
    6082 	    (_len_), BUS_DMASYNC_POSTREAD);				\
   6083 	_ptr_ = (void *)((_pkt_)+1);					\
   6084 } while (/*CONSTCOND*/0)
   6085 
   6086 #define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % IWM_RX_RING_COUNT);
   6087 
   6088 /*
   6089  * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
   6090  * Basic structure from if_iwn
   6091  */
   6092 static void
   6093 iwm_notif_intr(struct iwm_softc *sc)
   6094 {
   6095 	uint16_t hw;
   6096 
   6097 	bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
   6098 	    0, sc->rxq.stat_dma.size, BUS_DMASYNC_POSTREAD);
   6099 
   6100 	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
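         	/*
         	 * Note (added for clarity): closed_rb_num is the firmware's index of
         	 * the most recently closed (filled) receive buffer; process ring
         	 * entries until our read index catches up with it.
         	 */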
   6101 	while (sc->rxq.cur != hw) {
   6102 		struct iwm_rx_data *data = &sc->rxq.data[sc->rxq.cur];
   6103 		struct iwm_rx_packet *pkt, tmppkt;
   6104 		struct iwm_cmd_response *cresp;
   6105 		int qid, idx;
   6106 
   6107 		bus_dmamap_sync(sc->sc_dmat, data->map, 0, sizeof(*pkt),
   6108 		    BUS_DMASYNC_POSTREAD);
   6109 		pkt = mtod(data->m, struct iwm_rx_packet *);
   6110 
   6111 		qid = pkt->hdr.qid & ~0x80;
   6112 		idx = pkt->hdr.idx;
   6113 
   6114 		DPRINTFN(12, ("rx packet qid=%d idx=%d flags=%x type=%x %d %d\n",
   6115 		    pkt->hdr.qid & ~0x80, pkt->hdr.idx, pkt->hdr.flags,
   6116 		    pkt->hdr.code, sc->rxq.cur, hw));
   6117 
   6118 		/*
   6119 		 * randomly get these from the firmware, no idea why.
   6120 		 * they at least seem harmless, so just ignore them for now
   6121 		 */
   6122 		if (__predict_false((pkt->hdr.code == 0 && qid == 0 && idx == 0)
   6123 		    || pkt->len_n_flags == htole32(0x55550000))) {
   6124 			ADVANCE_RXQ(sc);
   6125 			continue;
   6126 		}
   6127 
   6128 		switch (pkt->hdr.code) {
   6129 		case IWM_REPLY_RX_PHY_CMD:
   6130 			iwm_mvm_rx_rx_phy_cmd(sc, pkt, data);
   6131 			break;
   6132 
   6133 		case IWM_REPLY_RX_MPDU_CMD:
    6134 			tmppkt = *pkt; /* XXX m is freed by ieee80211_input() */
   6135 			iwm_mvm_rx_rx_mpdu(sc, pkt, data);
   6136 			pkt = &tmppkt;
   6137 			break;
   6138 
   6139 		case IWM_TX_CMD:
   6140 			iwm_mvm_rx_tx_cmd(sc, pkt, data);
   6141 			break;
   6142 
   6143 		case IWM_MISSED_BEACONS_NOTIFICATION:
   6144 			iwm_mvm_rx_missed_beacons_notif(sc, pkt, data);
   6145 			break;
   6146 
   6147 		case IWM_MVM_ALIVE: {
   6148 			struct iwm_mvm_alive_resp *resp;
   6149 			SYNC_RESP_STRUCT(resp, pkt);
   6150 
   6151 			sc->sc_uc.uc_error_event_table
   6152 			    = le32toh(resp->error_event_table_ptr);
   6153 			sc->sc_uc.uc_log_event_table
   6154 			    = le32toh(resp->log_event_table_ptr);
   6155 			sc->sched_base = le32toh(resp->scd_base_ptr);
   6156 			sc->sc_uc.uc_ok = resp->status == IWM_ALIVE_STATUS_OK;
   6157 
   6158 			sc->sc_uc.uc_intr = 1;
   6159 			wakeup(&sc->sc_uc);
   6160 			break; }
   6161 
   6162 		case IWM_CALIB_RES_NOTIF_PHY_DB: {
   6163 			struct iwm_calib_res_notif_phy_db *phy_db_notif;
   6164 			SYNC_RESP_STRUCT(phy_db_notif, pkt);
   6165 
   6166 			uint16_t size = le16toh(phy_db_notif->length);
   6167 			bus_dmamap_sync(sc->sc_dmat, data->map,
   6168 			    sizeof(*pkt) + sizeof(*phy_db_notif),
   6169 			    size, BUS_DMASYNC_POSTREAD);
   6170 			iwm_phy_db_set_section(sc, phy_db_notif, size);
   6171 
   6172 			break; }
   6173 
   6174 		case IWM_STATISTICS_NOTIFICATION: {
   6175 			struct iwm_notif_statistics *stats;
   6176 			SYNC_RESP_STRUCT(stats, pkt);
   6177 			memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
   6178 			sc->sc_noise = iwm_get_noise(&stats->rx.general);
   6179 			break; }
   6180 
   6181 		case IWM_NVM_ACCESS_CMD:
   6182 			if (sc->sc_wantresp == ((qid << 16) | idx)) {
   6183 				bus_dmamap_sync(sc->sc_dmat, data->map, 0,
   6184 				    sizeof(sc->sc_cmd_resp),
   6185 				    BUS_DMASYNC_POSTREAD);
   6186 				memcpy(sc->sc_cmd_resp,
   6187 				    pkt, sizeof(sc->sc_cmd_resp));
   6188 			}
   6189 			break;
   6190 
   6191 		case IWM_PHY_CONFIGURATION_CMD:
   6192 		case IWM_TX_ANT_CONFIGURATION_CMD:
   6193 		case IWM_ADD_STA:
   6194 		case IWM_MAC_CONTEXT_CMD:
   6195 		case IWM_REPLY_SF_CFG_CMD:
   6196 		case IWM_POWER_TABLE_CMD:
   6197 		case IWM_PHY_CONTEXT_CMD:
   6198 		case IWM_BINDING_CONTEXT_CMD:
   6199 		case IWM_TIME_EVENT_CMD:
   6200 		case IWM_SCAN_REQUEST_CMD:
   6201 		case IWM_REPLY_BEACON_FILTERING_CMD:
   6202 		case IWM_MAC_PM_POWER_TABLE:
   6203 		case IWM_TIME_QUOTA_CMD:
   6204 		case IWM_REMOVE_STA:
   6205 		case IWM_TXPATH_FLUSH:
   6206 		case IWM_LQ_CMD:
   6207 			SYNC_RESP_STRUCT(cresp, pkt);
   6208 			if (sc->sc_wantresp == ((qid << 16) | idx)) {
   6209 				memcpy(sc->sc_cmd_resp,
   6210 				    pkt, sizeof(*pkt)+sizeof(*cresp));
   6211 			}
   6212 			break;
   6213 
   6214 		/* ignore */
   6215 		case 0x6c: /* IWM_PHY_DB_CMD, no idea why it's not in fw-api.h */
   6216 			break;
   6217 
   6218 		case IWM_INIT_COMPLETE_NOTIF:
   6219 			sc->sc_init_complete = 1;
   6220 			wakeup(&sc->sc_init_complete);
   6221 			break;
   6222 
   6223 		case IWM_SCAN_COMPLETE_NOTIFICATION: {
   6224 			struct iwm_scan_complete_notif *notif;
   6225 			SYNC_RESP_STRUCT(notif, pkt);
   6226 
   6227 			workqueue_enqueue(sc->sc_eswq, &sc->sc_eswk, NULL);
   6228 			break; }
   6229 
   6230 		case IWM_REPLY_ERROR: {
   6231 			struct iwm_error_resp *resp;
   6232 			SYNC_RESP_STRUCT(resp, pkt);
   6233 
   6234 			aprint_error_dev(sc->sc_dev,
   6235 			    "firmware error 0x%x, cmd 0x%x\n",
   6236 			    le32toh(resp->error_type), resp->cmd_id);
   6237 			break; }
   6238 
   6239 		case IWM_TIME_EVENT_NOTIFICATION: {
   6240 			struct iwm_time_event_notif *notif;
   6241 			SYNC_RESP_STRUCT(notif, pkt);
   6242 
   6243 			if (notif->status) {
   6244 				if (le32toh(notif->action) &
   6245 				    IWM_TE_V2_NOTIF_HOST_EVENT_START)
   6246 					sc->sc_auth_prot = 2;
   6247 				else
   6248 					sc->sc_auth_prot = 0;
   6249 			} else {
   6250 				sc->sc_auth_prot = -1;
   6251 			}
   6252 			wakeup(&sc->sc_auth_prot);
   6253 			break; }
   6254 
   6255 		default:
   6256 			aprint_error_dev(sc->sc_dev,
   6257 			    "frame %d/%d %x UNHANDLED (this should "
   6258 			    "not happen)\n", qid, idx, pkt->len_n_flags);
   6259 			break;
   6260 		}
   6261 
   6262 		/*
   6263 		 * Why test bit 0x80?  The Linux driver:
   6264 		 *
   6265 		 * There is one exception:  uCode sets bit 15 when it
   6266 		 * originates the response/notification, i.e. when the
   6267 		 * response/notification is not a direct response to a
   6268 		 * command sent by the driver.  For example, uCode issues
   6269 		 * IWM_REPLY_RX when it sends a received frame to the driver;
   6270 		 * it is not a direct response to any driver command.
   6271 		 *
   6272 		 * Ok, so since when is 7 == 15?  Well, the Linux driver
   6273 		 * uses a slightly different format for pkt->hdr, and "qid"
   6274 		 * is actually the upper byte of a two-byte field.
   6275 		 */
   6276 		if (!(pkt->hdr.qid & (1 << 7))) {
   6277 			iwm_cmd_done(sc, pkt);
   6278 		}
   6279 
   6280 		ADVANCE_RXQ(sc);
   6281 	}
   6282 
   6283 	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
   6284 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
   6285 
   6286 	/*
   6287 	 * Tell the firmware what we have processed.
   6288 	 * Seems like the hardware gets upset unless we align
   6289 	 * the write by 8??
   6290 	 */
   6291 	hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
   6292 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
   6293 }
   6294 
   6295 static int
   6296 iwm_intr(void *arg)
   6297 {
   6298 	struct iwm_softc *sc = arg;
   6299 	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
   6300 	int handled = 0;
   6301 	int r1, r2, rv = 0;
   6302 	int isperiodic = 0;
   6303 
   6304 	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
   6305 
   6306 	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
   6307 		uint32_t *ict = sc->ict_dma.vaddr;
   6308 		int tmp;
   6309 
   6310 		tmp = htole32(ict[sc->ict_cur]);
   6311 		if (!tmp)
   6312 			goto out_ena;
   6313 
   6314 		/*
   6315 		 * ok, there was something.  keep plowing until we have all.
   6316 		 */
   6317 		r1 = r2 = 0;
   6318 		while (tmp) {
   6319 			r1 |= tmp;
   6320 			ict[sc->ict_cur] = 0;
   6321 			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
   6322 			tmp = htole32(ict[sc->ict_cur]);
   6323 		}
   6324 
   6325 		/* this is where the fun begins.  don't ask */
   6326 		if (r1 == 0xffffffff)
   6327 			r1 = 0;
   6328 
   6329 		/* i am not expected to understand this */
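         		/*
         		 * Note (added for clarity): according to the Linux iwlwifi
         		 * driver, this is a workaround for a hardware bug in ICT
         		 * mode.  The RX bit (bit 15 of the compressed value) can be
         		 * cleared when interrupt coalescing is in use, but bits 18
         		 * and 19 stay set in that case, so they are used to restore
         		 * it.  The last statement then expands the compressed ICT
         		 * value back into the IWM_CSR_INT bit layout (the low byte
         		 * stays put, the second byte moves to the top byte).
         		 */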
   6330 		if (r1 & 0xc0000)
   6331 			r1 |= 0x8000;
   6332 		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
   6333 	} else {
   6334 		r1 = IWM_READ(sc, IWM_CSR_INT);
   6335 		/* "hardware gone" (where, fishing?) */
   6336 		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
   6337 			goto out;
   6338 		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
   6339 	}
   6340 	if (r1 == 0 && r2 == 0) {
   6341 		goto out_ena;
   6342 	}
   6343 
   6344 	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
   6345 
   6346 	/* ignored */
   6347 	handled |= (r1 & (IWM_CSR_INT_BIT_ALIVE /*| IWM_CSR_INT_BIT_SCD*/));
   6348 
   6349 	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
   6350 #ifdef IWM_DEBUG
   6351 		int i;
   6352 
   6353 		iwm_nic_error(sc);
   6354 
   6355 		/* Dump driver status (TX and RX rings) while we're here. */
   6356 		DPRINTF(("driver status:\n"));
   6357 		for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
   6358 			struct iwm_tx_ring *ring = &sc->txq[i];
   6359 			DPRINTF(("  tx ring %2d: qid=%-2d cur=%-3d "
   6360 			    "queued=%-3d\n",
   6361 			    i, ring->qid, ring->cur, ring->queued));
   6362 		}
   6363 		DPRINTF(("  rx ring: cur=%d\n", sc->rxq.cur));
   6364 		DPRINTF(("  802.11 state %d\n", sc->sc_ic.ic_state));
   6365 #endif
   6366 
   6367 		aprint_error_dev(sc->sc_dev, "fatal firmware error\n");
   6368 		ifp->if_flags &= ~IFF_UP;
   6369 		iwm_stop(ifp, 1);
   6370 		rv = 1;
   6371 		goto out;
   6372 
   6373 	}
   6374 
   6375 	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
   6376 		handled |= IWM_CSR_INT_BIT_HW_ERR;
   6377 		aprint_error_dev(sc->sc_dev,
   6378 		    "hardware error, stopping device\n");
   6379 		ifp->if_flags &= ~IFF_UP;
   6380 		iwm_stop(ifp, 1);
   6381 		rv = 1;
   6382 		goto out;
   6383 	}
   6384 
   6385 	/* firmware chunk loaded */
   6386 	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
   6387 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
   6388 		handled |= IWM_CSR_INT_BIT_FH_TX;
   6389 
   6390 		sc->sc_fw_chunk_done = 1;
   6391 		wakeup(&sc->sc_fw);
   6392 	}
   6393 
   6394 	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
   6395 		handled |= IWM_CSR_INT_BIT_RF_KILL;
   6396 		if (iwm_check_rfkill(sc) && (ifp->if_flags & IFF_UP)) {
   6397 			DPRINTF(("%s: rfkill switch, disabling interface\n",
   6398 			    DEVNAME(sc)));
   6399 			ifp->if_flags &= ~IFF_UP;
   6400 			iwm_stop(ifp, 1);
   6401 		}
   6402 	}
   6403 
   6404 	/*
   6405 	 * The Linux driver uses periodic interrupts to avoid races.
   6406 	 * We cargo-cult like it's going out of fashion.
   6407 	 */
   6408 	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
   6409 		handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
   6410 		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
   6411 		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
   6412 			IWM_WRITE_1(sc,
   6413 			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
   6414 		isperiodic = 1;
   6415 	}
   6416 
   6417 	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
   6418 		handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
   6419 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
   6420 
   6421 		iwm_notif_intr(sc);
   6422 
   6423 		/* enable periodic interrupt, see above */
    6424 		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) &&
         		    !isperiodic)
   6425 			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
   6426 			    IWM_CSR_INT_PERIODIC_ENA);
   6427 	}
   6428 
   6429 	if (__predict_false(r1 & ~handled))
   6430 		DPRINTF(("%s: unhandled interrupts: %x\n", DEVNAME(sc), r1));
   6431 	rv = 1;
   6432 
   6433  out_ena:
   6434 	iwm_restore_interrupts(sc);
   6435  out:
   6436 	return rv;
   6437 }
   6438 
   6439 /*
   6440  * Autoconf glue-sniffing
   6441  */
   6442 
   6443 static const pci_product_id_t iwm_devices[] = {
   6444 	PCI_PRODUCT_INTEL_WIFI_LINK_7260_1,
   6445 	PCI_PRODUCT_INTEL_WIFI_LINK_7260_2,
   6446 	PCI_PRODUCT_INTEL_WIFI_LINK_3160_1,
   6447 	PCI_PRODUCT_INTEL_WIFI_LINK_3160_2,
   6448 	PCI_PRODUCT_INTEL_WIFI_LINK_7265_1,
   6449 	PCI_PRODUCT_INTEL_WIFI_LINK_7265_2,
   6450 };
   6451 
   6452 static int
   6453 iwm_match(device_t parent, cfdata_t match __unused, void *aux)
   6454 {
   6455 	struct pci_attach_args *pa = aux;
   6456 
   6457 	if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_INTEL)
   6458 		return 0;
   6459 
   6460 	for (size_t i = 0; i < __arraycount(iwm_devices); i++)
   6461 		if (PCI_PRODUCT(pa->pa_id) == iwm_devices[i])
   6462 			return 1;
   6463 
   6464 	return 0;
   6465 }
   6466 
   6467 static int
   6468 iwm_preinit(struct iwm_softc *sc)
   6469 {
   6470 	int error;
   6471 
   6472 	if ((error = iwm_prepare_card_hw(sc)) != 0) {
   6473 		aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
   6474 		return error;
   6475 	}
   6476 
   6477 	if (sc->sc_flags & IWM_FLAG_ATTACHED)
   6478 		return 0;
   6479 
   6480 	if ((error = iwm_start_hw(sc)) != 0) {
    6481 		aprint_error_dev(sc->sc_dev, "could not start hardware\n");
   6482 		return error;
   6483 	}
   6484 
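         	/*
         	 * Run the init firmware once to read the NVM (MAC address,
         	 * channel list) and gather calibration results, then shut the
         	 * device down again; iwm_init() loads the run-time firmware.
         	 */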
   6485 	error = iwm_run_init_mvm_ucode(sc, 1);
   6486 	iwm_stop_device(sc);
   6487 	return error;
   6488 }
   6489 
   6490 static void
   6491 iwm_attach_hook(device_t dev)
   6492 {
   6493 	struct iwm_softc *sc = device_private(dev);
   6494 	struct ieee80211com *ic = &sc->sc_ic;
   6495 	struct ifnet *ifp = &sc->sc_ec.ec_if;
   6496 
   6497 	KASSERT(!cold);
   6498 
   6499 	if (iwm_preinit(sc) != 0)
   6500 		return;
   6501 
   6502 	sc->sc_flags |= IWM_FLAG_ATTACHED;
   6503 
   6504 	aprint_normal_dev(sc->sc_dev,
   6505 	    "hw rev: 0x%x, fw ver %d.%d (API ver %d), address %s\n",
   6506 	    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
   6507 	    IWM_UCODE_MAJOR(sc->sc_fwver),
   6508 	    IWM_UCODE_MINOR(sc->sc_fwver),
   6509 	    IWM_UCODE_API(sc->sc_fwver),
   6510 	    ether_sprintf(sc->sc_nvm.hw_addr));
   6511 
   6512 	ic->ic_ifp = ifp;
    6513 	ic->ic_phytype = IEEE80211_T_OFDM;	/* not the only PHY type, but unused */
   6514 	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
   6515 	ic->ic_state = IEEE80211_S_INIT;
   6516 
   6517 	/* Set device capabilities. */
   6518 	ic->ic_caps =
   6519 	    IEEE80211_C_WEP |		/* WEP */
   6520 	    IEEE80211_C_WPA |		/* 802.11i */
   6521 	    IEEE80211_C_SHSLOT |	/* short slot time supported */
   6522 	    IEEE80211_C_SHPREAMBLE;	/* short preamble supported */
   6523 
   6524 #ifndef IWM_NO_5GHZ
   6525 	ic->ic_sup_rates[IEEE80211_MODE_11A] = ieee80211_std_rateset_11a;
   6526 #endif
   6527 	ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b;
   6528 	ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g;
   6529 
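         	/* Pre-assign each PHY context its index; channels are bound later. */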
   6530 	for (int i = 0; i < __arraycount(sc->sc_phyctxt); i++) {
   6531 		sc->sc_phyctxt[i].id = i;
   6532 	}
   6533 
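         	/* AMRR (adaptive multi-rate retry) rate control parameters. */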
   6534 	sc->sc_amrr.amrr_min_success_threshold =  1;
   6535 	sc->sc_amrr.amrr_max_success_threshold = 15;
   6536 
   6537 	/* IBSS channel undefined for now. */
   6538 	ic->ic_ibss_chan = &ic->ic_channels[1];
   6539 
   6540 #if 0
   6541 	/* Max RSSI */
   6542 	ic->ic_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
   6543 #endif
   6544 
   6545 	ifp->if_softc = sc;
   6546 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   6547 	ifp->if_init = iwm_init;
   6548 	ifp->if_stop = iwm_stop;
   6549 	ifp->if_ioctl = iwm_ioctl;
   6550 	ifp->if_start = iwm_start;
   6551 	ifp->if_watchdog = iwm_watchdog;
   6552 	IFQ_SET_READY(&ifp->if_snd);
   6553 	memcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
   6554 
   6555 	if_initialize(ifp);
   6556 	ieee80211_ifattach(ic);
   6557 	if_register(ifp);
   6558 
   6559 	ic->ic_node_alloc = iwm_node_alloc;
   6560 
   6561 	/* Override 802.11 state transition machine. */
   6562 	sc->sc_newstate = ic->ic_newstate;
   6563 	ic->ic_newstate = iwm_newstate;
   6564 	ieee80211_media_init(ic, iwm_media_change, ieee80211_media_status);
   6565 	ieee80211_announce(ic);
   6566 
   6567 	iwm_radiotap_attach(sc);
   6568 	callout_init(&sc->sc_calib_to, 0);
   6569 	callout_setfunc(&sc->sc_calib_to, iwm_calib_timeout, sc);
   6570 
   6571 	//task_set(&sc->init_task, iwm_init_task, sc);
   6572 }
   6573 
   6574 static void
   6575 iwm_attach(device_t parent, device_t self, void *aux)
   6576 {
   6577 	struct iwm_softc *sc = device_private(self);
   6578 	struct pci_attach_args *pa = aux;
   6579 	pci_intr_handle_t ih;
   6580 	pcireg_t reg, memtype;
   6581 	const char *intrstr;
   6582 	int error;
   6583 	int txq_i;
   6584 
   6585 	sc->sc_dev = self;
   6586 	sc->sc_pct = pa->pa_pc;
   6587 	sc->sc_pcitag = pa->pa_tag;
   6588 	sc->sc_dmat = pa->pa_dmat;
   6589 	sc->sc_pciid = pa->pa_id;
   6590 
   6591 	pci_aprint_devinfo(pa, NULL);
   6592 
   6593 	/*
   6594 	 * Get the offset of the PCI Express Capability Structure in PCI
   6595 	 * Configuration Space.
   6596 	 */
   6597 	error = pci_get_capability(sc->sc_pct, sc->sc_pcitag,
   6598 	    PCI_CAP_PCIEXPRESS, &sc->sc_cap_off, NULL);
   6599 	if (error == 0) {
   6600 		aprint_error_dev(self,
   6601 		    "PCIe capability structure not found!\n");
   6602 		return;
   6603 	}
   6604 
   6605 	/* Clear device-specific "PCI retry timeout" register (41h). */
   6606 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
   6607 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
   6608 
   6609 	/* Enable bus-mastering and hardware bug workaround. */
   6610 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
   6611 	reg |= PCI_COMMAND_MASTER_ENABLE;
    6612 	/* Using legacy INTx interrupts: make sure they are not disabled. */
   6613 	if (reg & PCI_COMMAND_INTERRUPT_DISABLE) {
   6614 		reg &= ~PCI_COMMAND_INTERRUPT_DISABLE;
   6615 	}
   6616 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, reg);
   6617 
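         	/* Map the device's registers (BAR 0). */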
   6618 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START);
   6619 	error = pci_mapreg_map(pa, PCI_MAPREG_START, memtype, 0,
   6620 	    &sc->sc_st, &sc->sc_sh, NULL, &sc->sc_sz);
   6621 	if (error != 0) {
   6622 		aprint_error_dev(self, "can't map mem space\n");
   6623 		return;
   6624 	}
   6625 
   6626 	/* Install interrupt handler. */
   6627 	if (pci_intr_map(pa, &ih)) {
   6628 		aprint_error_dev(self, "can't map interrupt\n");
   6629 		return;
   6630 	}
   6631 
   6632 	char intrbuf[PCI_INTRSTR_LEN];
   6633 	intrstr = pci_intr_string(sc->sc_pct, ih, intrbuf, sizeof(intrbuf));
   6634 	sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET, iwm_intr, sc);
   6635 	if (sc->sc_ih == NULL) {
   6636 		aprint_error_dev(self, "can't establish interrupt");
   6637 		if (intrstr != NULL)
   6638 			aprint_error(" at %s", intrstr);
   6639 		aprint_error("\n");
   6640 		return;
   6641 	}
   6642 	aprint_normal_dev(self, "interrupting at %s\n", intrstr);
   6643 
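         	/* No synchronous host command is awaiting a response yet. */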
   6644 	sc->sc_wantresp = -1;
   6645 
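         	/*
         	 * Pick the firmware image for this chip; the "-9" suffix is the
         	 * firmware API version this driver expects.
         	 */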
   6646 	switch (PCI_PRODUCT(sc->sc_pciid)) {
   6647 	case PCI_PRODUCT_INTEL_WIFI_LINK_7260_1:
   6648 	case PCI_PRODUCT_INTEL_WIFI_LINK_7260_2:
   6649 		sc->sc_fwname = "iwlwifi-7260-9.ucode";
   6650 		break;
   6651 	case PCI_PRODUCT_INTEL_WIFI_LINK_3160_1:
   6652 	case PCI_PRODUCT_INTEL_WIFI_LINK_3160_2:
   6653 		sc->sc_fwname = "iwlwifi-3160-9.ucode";
   6654 		break;
   6655 	case PCI_PRODUCT_INTEL_WIFI_LINK_7265_1:
   6656 	case PCI_PRODUCT_INTEL_WIFI_LINK_7265_2:
   6657 		sc->sc_fwname = "iwlwifi-7265-9.ucode";
   6658 		break;
   6659 	default:
    6660 		aprint_error_dev(self, "unknown product %#x\n",
   6661 		    PCI_PRODUCT(sc->sc_pciid));
   6662 		return;
   6663 	}
   6664 	DPRINTF(("%s: firmware=%s\n", DEVNAME(sc), sc->sc_fwname));
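         	/* Maximum size of a single firmware chunk we DMA to the device. */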
   6665 	sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
   6666 
   6667 	/*
   6668 	 * We now start fiddling with the hardware
   6669 	 */
   6670 
   6671 	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
   6672 	if (iwm_prepare_card_hw(sc) != 0) {
   6673 		aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
   6674 		return;
   6675 	}
   6676 
   6677 	/* Allocate DMA memory for firmware transfers. */
   6678 	if ((error = iwm_alloc_fwmem(sc)) != 0) {
   6679 		aprint_error_dev(sc->sc_dev,
   6680 		    "could not allocate memory for firmware\n");
   6681 		return;
   6682 	}
   6683 
   6684 	/* Allocate "Keep Warm" page. */
   6685 	if ((error = iwm_alloc_kw(sc)) != 0) {
   6686 		aprint_error_dev(sc->sc_dev,
   6687 		    "could not allocate keep warm page\n");
   6688 		goto fail1;
   6689 	}
   6690 
   6691 	/* We use ICT interrupts */
   6692 	if ((error = iwm_alloc_ict(sc)) != 0) {
   6693 		aprint_error_dev(sc->sc_dev, "could not allocate ICT table\n");
   6694 		goto fail2;
   6695 	}
   6696 
   6697 	/* Allocate TX scheduler "rings". */
   6698 	if ((error = iwm_alloc_sched(sc)) != 0) {
   6699 		aprint_error_dev(sc->sc_dev,
   6700 		    "could not allocate TX scheduler rings\n");
   6701 		goto fail3;
   6702 	}
   6703 
   6704 	/* Allocate TX rings */
   6705 	for (txq_i = 0; txq_i < __arraycount(sc->txq); txq_i++) {
   6706 		if ((error = iwm_alloc_tx_ring(sc,
   6707 		    &sc->txq[txq_i], txq_i)) != 0) {
   6708 			aprint_error_dev(sc->sc_dev,
   6709 			    "could not allocate TX ring %d\n", txq_i);
   6710 			goto fail4;
   6711 		}
   6712 	}
   6713 
   6714 	/* Allocate RX ring. */
   6715 	if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
   6716 		aprint_error_dev(sc->sc_dev, "could not allocate RX ring\n");
   6717 		goto fail4;
   6718 	}
   6719 
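         	/*
         	 * Work queues for deferring end-of-scan and 802.11 state changes
         	 * out of interrupt context.
         	 */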
   6720 	workqueue_create(&sc->sc_eswq, "iwmes",
   6721 	    iwm_endscan_cb, sc, PRI_NONE, IPL_NET, 0);
   6722 	workqueue_create(&sc->sc_nswq, "iwmns",
   6723 	    iwm_newstate_cb, sc, PRI_NONE, IPL_NET, 0);
   6724 
   6725 	/* Clear pending interrupts. */
   6726 	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
   6727 
    6728 	/*
    6729 	 * We can't do a normal attach before the file system is mounted
    6730 	 * because we cannot read the MAC address without loading the
    6731 	 * firmware from disk.  So we postpone the rest of the attach
    6732 	 * until mountroot is done.  Note that a full driver unload/load
    6733 	 * cycle (or a reboot) is required if the firmware is not present
    6734 	 * when the hook runs.
    6735 	 */
   6736 	config_mountroot(self, iwm_attach_hook);
   6737 
   6738 	return;
   6739 
   6740 	/* Free allocated memory if something failed during attachment. */
   6741 fail4:	while (--txq_i >= 0)
   6742 		iwm_free_tx_ring(sc, &sc->txq[txq_i]);
   6743 	iwm_free_sched(sc);
   6744 fail3:	if (sc->ict_dma.vaddr != NULL)
   6745 		iwm_free_ict(sc);
   6746 fail2:	iwm_free_kw(sc);
   6747 fail1:	iwm_free_fwmem(sc);
   6748 }
   6749 
   6750 /*
   6751  * Attach the interface to 802.11 radiotap.
   6752  */
   6753 void
   6754 iwm_radiotap_attach(struct iwm_softc *sc)
   6755 {
   6756 	struct ifnet *ifp = sc->sc_ic.ic_ifp;
   6757 
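         	/*
         	 * Attach a separate bpf tap for radiotap-encapsulated 802.11
         	 * frames and pre-fill the static parts of the rx/tx headers.
         	 */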
   6758 	bpf_attach2(ifp, DLT_IEEE802_11_RADIO,
   6759 	    sizeof (struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN,
   6760 	    &sc->sc_drvbpf);
   6761 
   6762 	sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
   6763 	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
   6764 	sc->sc_rxtap.wr_ihdr.it_present = htole32(IWM_RX_RADIOTAP_PRESENT);
   6765 
   6766 	sc->sc_txtap_len = sizeof sc->sc_txtapu;
   6767 	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
   6768 	sc->sc_txtap.wt_ihdr.it_present = htole32(IWM_TX_RADIOTAP_PRESENT);
   6769 }
   6770 
   6771 #if 0
   6772 static void
   6773 iwm_init_task(void *arg1)
   6774 {
   6775 	struct iwm_softc *sc = arg1;
   6776 	struct ifnet *ifp = &sc->sc_ic.ic_if;
   6777 	int s;
   6778 
   6779 	s = splnet();
   6780 	while (sc->sc_flags & IWM_FLAG_BUSY)
   6781 		tsleep(&sc->sc_flags, 0, "iwmpwr", 0);
   6782 	sc->sc_flags |= IWM_FLAG_BUSY;
   6783 
   6784 	iwm_stop(ifp, 0);
   6785 	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP)
   6786 		iwm_init(ifp);
   6787 
   6788 	sc->sc_flags &= ~IWM_FLAG_BUSY;
   6789 	wakeup(&sc->sc_flags);
   6790 	splx(s);
   6791 }
   6792 
   6793 static void
   6794 iwm_wakeup(struct iwm_softc *sc)
   6795 {
   6796 	pcireg_t reg;
   6797 
   6798 	/* Clear device-specific "PCI retry timeout" register (41h). */
   6799 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
   6800 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
   6801 
   6802 	iwm_init_task(sc);
   6803 }
   6804 
   6805 static int
   6806 iwm_activate(device_t self, enum devact act)
   6807 {
   6808 	struct iwm_softc *sc = device_private(self);
   6809 	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
   6810 
   6811 	switch (act) {
   6812 	case DVACT_DEACTIVATE:
   6813 		if (ifp->if_flags & IFF_RUNNING)
   6814 			iwm_stop(ifp, 0);
   6815 		return 0;
   6816 	default:
   6817 		return EOPNOTSUPP;
   6818 	}
   6819 }
   6820 #endif
   6821 
   6822 CFATTACH_DECL_NEW(iwm, sizeof(struct iwm_softc), iwm_match, iwm_attach,
   6823 	NULL, NULL);
   6824