      1 /*	$NetBSD: if_iwm.c,v 1.29.2.3 2015/06/06 14:40:09 skrll Exp $	*/
      2 /*	OpenBSD: if_iwm.c,v 1.41 2015/05/22 06:50:54 kettenis Exp	*/
      3 
      4 /*
      5  * Copyright (c) 2014 genua mbh <info (at) genua.de>
      6  * Copyright (c) 2014 Fixup Software Ltd.
      7  *
      8  * Permission to use, copy, modify, and distribute this software for any
      9  * purpose with or without fee is hereby granted, provided that the above
     10  * copyright notice and this permission notice appear in all copies.
     11  *
     12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
     13  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
     14  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
     15  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
     16  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
     17  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
     18  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
     19  */
     20 
     21 /*-
     22  * Based on BSD-licensed source modules in the Linux iwlwifi driver,
     23  * which were used as the reference documentation for this implementation.
     24  *
     25  * Driver version we are currently based off of is
     26  * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
     27  *
     28  ***********************************************************************
     29  *
     30  * This file is provided under a dual BSD/GPLv2 license.  When using or
     31  * redistributing this file, you may do so under either license.
     32  *
     33  * GPL LICENSE SUMMARY
     34  *
     35  * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
     36  *
     37  * This program is free software; you can redistribute it and/or modify
     38  * it under the terms of version 2 of the GNU General Public License as
     39  * published by the Free Software Foundation.
     40  *
     41  * This program is distributed in the hope that it will be useful, but
     42  * WITHOUT ANY WARRANTY; without even the implied warranty of
     43  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
     44  * General Public License for more details.
     45  *
     46  * You should have received a copy of the GNU General Public License
     47  * along with this program; if not, write to the Free Software
     48  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
     49  * USA
     50  *
     51  * The full GNU General Public License is included in this distribution
     52  * in the file called COPYING.
     53  *
     54  * Contact Information:
     55  *  Intel Linux Wireless <ilw (at) linux.intel.com>
     56  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
     57  *
     58  *
     59  * BSD LICENSE
     60  *
     61  * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
     62  * All rights reserved.
     63  *
     64  * Redistribution and use in source and binary forms, with or without
     65  * modification, are permitted provided that the following conditions
     66  * are met:
     67  *
     68  *  * Redistributions of source code must retain the above copyright
     69  *    notice, this list of conditions and the following disclaimer.
     70  *  * Redistributions in binary form must reproduce the above copyright
     71  *    notice, this list of conditions and the following disclaimer in
     72  *    the documentation and/or other materials provided with the
     73  *    distribution.
     74  *  * Neither the name Intel Corporation nor the names of its
     75  *    contributors may be used to endorse or promote products derived
     76  *    from this software without specific prior written permission.
     77  *
     78  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
     79  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
     80  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
     81  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
     82  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
     83  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
     84  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     85  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     86  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     87  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
     88  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     89  */
     90 
     91 /*-
     92  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini (at) free.fr>
     93  *
     94  * Permission to use, copy, modify, and distribute this software for any
     95  * purpose with or without fee is hereby granted, provided that the above
     96  * copyright notice and this permission notice appear in all copies.
     97  *
     98  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
     99  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    100  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    101  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    102  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    103  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    104  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
    105  */
    106 
    107 #include <sys/cdefs.h>
    108 __KERNEL_RCSID(0, "$NetBSD: if_iwm.c,v 1.29.2.3 2015/06/06 14:40:09 skrll Exp $");
    109 
    110 #include <sys/param.h>
    111 #include <sys/conf.h>
    112 #include <sys/kernel.h>
    113 #include <sys/kmem.h>
    114 #include <sys/mbuf.h>
    115 #include <sys/mutex.h>
    116 #include <sys/proc.h>
    117 #include <sys/socket.h>
    118 #include <sys/sockio.h>
    119 #include <sys/sysctl.h>
    120 #include <sys/systm.h>
    121 
    122 #include <sys/cpu.h>
    123 #include <sys/bus.h>
    124 #include <sys/workqueue.h>
    125 #include <machine/endian.h>
    126 #include <machine/intr.h>
    127 
    128 #include <dev/pci/pcireg.h>
    129 #include <dev/pci/pcivar.h>
    130 #include <dev/pci/pcidevs.h>
    131 #include <dev/firmload.h>
    132 
    133 #include <net/bpf.h>
    134 #include <net/if.h>
    135 #include <net/if_arp.h>
    136 #include <net/if_dl.h>
    137 #include <net/if_media.h>
    138 #include <net/if_types.h>
    139 #include <net/if_ether.h>
    140 
    141 #include <netinet/in.h>
    142 #include <netinet/in_systm.h>
    143 #include <netinet/ip.h>
    144 
    145 #include <net80211/ieee80211_var.h>
    146 #include <net80211/ieee80211_amrr.h>
    147 #include <net80211/ieee80211_radiotap.h>
    148 
    149 #define DEVNAME(_s)	device_xname((_s)->sc_dev)
    150 #define IC2IFP(_ic_)	((_ic_)->ic_ifp)
    151 
    152 #define le16_to_cpup(_a_) (le16toh(*(const uint16_t *)(_a_)))
    153 #define le32_to_cpup(_a_) (le32toh(*(const uint32_t *)(_a_)))
    154 
    155 #ifdef IWM_DEBUG
    156 #define DPRINTF(x)	do { if (iwm_debug > 0) printf x; } while (0)
    157 #define DPRINTFN(n, x)	do { if (iwm_debug >= (n)) printf x; } while (0)
    158 int iwm_debug = 0;
    159 #else
    160 #define DPRINTF(x)	do { ; } while (0)
    161 #define DPRINTFN(n, x)	do { ; } while (0)
    162 #endif
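
/*
 * Note (added): with IWM_DEBUG defined, DPRINTF() fires whenever
 * iwm_debug > 0 and DPRINTFN(n) only when iwm_debug >= n; without
 * IWM_DEBUG both expand to empty statements, so their argument lists
 * must not carry side effects the driver depends on.
 */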
    163 
    164 #include <dev/pci/if_iwmreg.h>
    165 #include <dev/pci/if_iwmvar.h>
    166 
    167 static const uint8_t iwm_nvm_channels[] = {
    168 	/* 2.4 GHz */
    169 	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
    170 	/* 5 GHz */
     171 	36, 40, 44, 48, 52, 56, 60, 64,
    172 	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
    173 	149, 153, 157, 161, 165
    174 };
    175 #define IWM_NUM_2GHZ_CHANNELS	14
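
/*
 * Note (added): the first IWM_NUM_2GHZ_CHANNELS entries of
 * iwm_nvm_channels[] are the 2.4 GHz channels; the constant is used
 * further down, when the channel map is built (iwm_init_channel_map()),
 * to tell the two bands apart.
 */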
    176 
    177 static const struct iwm_rate {
    178 	uint8_t rate;
    179 	uint8_t plcp;
    180 } iwm_rates[] = {
    181 	{   2,	IWM_RATE_1M_PLCP  },
    182 	{   4,	IWM_RATE_2M_PLCP  },
    183 	{  11,	IWM_RATE_5M_PLCP  },
    184 	{  22,	IWM_RATE_11M_PLCP },
    185 	{  12,	IWM_RATE_6M_PLCP  },
    186 	{  18,	IWM_RATE_9M_PLCP  },
    187 	{  24,	IWM_RATE_12M_PLCP },
    188 	{  36,	IWM_RATE_18M_PLCP },
    189 	{  48,	IWM_RATE_24M_PLCP },
    190 	{  72,	IWM_RATE_36M_PLCP },
    191 	{  96,	IWM_RATE_48M_PLCP },
    192 	{ 108,	IWM_RATE_54M_PLCP },
    193 };
    194 #define IWM_RIDX_CCK	0
    195 #define IWM_RIDX_OFDM	4
    196 #define IWM_RIDX_MAX	(__arraycount(iwm_rates)-1)
    197 #define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
    198 #define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
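
/*
 * Illustrative sketch (added, not driver code): the "rate" column above
 * is in the usual net80211 units of 500 kbit/s (2 = 1 Mbit/s, 108 =
 * 54 Mbit/s).  A rate-to-PLCP lookup over this table could look like the
 * following; the helper name and the 0xff "no match" value are
 * hypothetical.
 */
#if 0
static uint8_t
iwm_rate2plcp_sketch(uint8_t rate)
{
	int i;

	for (i = 0; i <= IWM_RIDX_MAX; i++) {
		if (iwm_rates[i].rate == rate)
			return iwm_rates[i].plcp;
	}
	return 0xff;	/* hypothetical "invalid PLCP" marker */
}
#endif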
    199 
    200 struct iwm_newstate_state {
    201 	struct work ns_wk;
    202 	enum ieee80211_state ns_nstate;
    203 	int ns_arg;
    204 	int ns_generation;
    205 };
    206 
    207 static int	iwm_store_cscheme(struct iwm_softc *, uint8_t *, size_t);
    208 static int	iwm_firmware_store_section(struct iwm_softc *,
    209 		    enum iwm_ucode_type, uint8_t *, size_t);
    210 static int	iwm_set_default_calib(struct iwm_softc *, const void *);
    211 static int	iwm_read_firmware(struct iwm_softc *);
    212 static uint32_t iwm_read_prph(struct iwm_softc *, uint32_t);
    213 static void	iwm_write_prph(struct iwm_softc *, uint32_t, uint32_t);
    214 #ifdef IWM_DEBUG
    215 static int	iwm_read_mem(struct iwm_softc *, uint32_t, void *, int);
    216 #endif
    217 static int	iwm_write_mem(struct iwm_softc *, uint32_t, const void *, int);
    218 static int	iwm_write_mem32(struct iwm_softc *, uint32_t, uint32_t);
    219 static int	iwm_poll_bit(struct iwm_softc *, int, uint32_t, uint32_t, int);
    220 static int	iwm_nic_lock(struct iwm_softc *);
    221 static void	iwm_nic_unlock(struct iwm_softc *);
    222 static void	iwm_set_bits_mask_prph(struct iwm_softc *, uint32_t, uint32_t,
    223 		    uint32_t);
    224 static void	iwm_set_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
    225 static void	iwm_clear_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
    226 static int	iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *,
    227 		    bus_size_t, bus_size_t);
    228 static void	iwm_dma_contig_free(struct iwm_dma_info *);
    229 static int	iwm_alloc_fwmem(struct iwm_softc *);
    230 static void	iwm_free_fwmem(struct iwm_softc *);
    231 static int	iwm_alloc_sched(struct iwm_softc *);
    232 static void	iwm_free_sched(struct iwm_softc *);
    233 static int	iwm_alloc_kw(struct iwm_softc *);
    234 static void	iwm_free_kw(struct iwm_softc *);
    235 static int	iwm_alloc_ict(struct iwm_softc *);
    236 static void	iwm_free_ict(struct iwm_softc *);
    237 static int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
    238 static void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
    239 static void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
    240 static int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
    241 		    int);
    242 static void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
    243 static void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
    244 static void	iwm_enable_rfkill_int(struct iwm_softc *);
    245 static int	iwm_check_rfkill(struct iwm_softc *);
    246 static void	iwm_enable_interrupts(struct iwm_softc *);
    247 static void	iwm_restore_interrupts(struct iwm_softc *);
    248 static void	iwm_disable_interrupts(struct iwm_softc *);
    249 static void	iwm_ict_reset(struct iwm_softc *);
    250 static int	iwm_set_hw_ready(struct iwm_softc *);
    251 static int	iwm_prepare_card_hw(struct iwm_softc *);
    252 static void	iwm_apm_config(struct iwm_softc *);
    253 static int	iwm_apm_init(struct iwm_softc *);
    254 static void	iwm_apm_stop(struct iwm_softc *);
    255 static int	iwm_allow_mcast(struct iwm_softc *);
    256 static int	iwm_start_hw(struct iwm_softc *);
    257 static void	iwm_stop_device(struct iwm_softc *);
    258 static void	iwm_set_pwr(struct iwm_softc *);
    259 static void	iwm_mvm_nic_config(struct iwm_softc *);
    260 static int	iwm_nic_rx_init(struct iwm_softc *);
    261 static int	iwm_nic_tx_init(struct iwm_softc *);
    262 static int	iwm_nic_init(struct iwm_softc *);
    263 static void	iwm_enable_txq(struct iwm_softc *, int, int);
    264 static int	iwm_post_alive(struct iwm_softc *);
    265 static int	iwm_is_valid_channel(uint16_t);
    266 static uint8_t	iwm_ch_id_to_ch_index(uint16_t);
    267 static uint16_t iwm_channel_id_to_papd(uint16_t);
    268 static uint16_t iwm_channel_id_to_txp(struct iwm_softc *, uint16_t);
    269 static int	iwm_phy_db_get_section_data(struct iwm_softc *, uint32_t,
    270 		    uint8_t **, uint16_t *, uint16_t);
    271 static int	iwm_send_phy_db_cmd(struct iwm_softc *, uint16_t, uint16_t,
    272 		    void *);
    273 static int	iwm_send_phy_db_data(struct iwm_softc *);
    275 static void	iwm_mvm_te_v2_to_v1(const struct iwm_time_event_cmd_v2 *,
    276 		    struct iwm_time_event_cmd_v1 *);
    277 static int	iwm_mvm_send_time_event_cmd(struct iwm_softc *,
    278 		    const struct iwm_time_event_cmd_v2 *);
    279 static int	iwm_mvm_time_event_send_add(struct iwm_softc *,
    280 		    struct iwm_node *, void *, struct iwm_time_event_cmd_v2 *);
    281 static void	iwm_mvm_protect_session(struct iwm_softc *, struct iwm_node *,
    282 		    uint32_t, uint32_t, uint32_t);
    283 static int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
    284 		    uint16_t, uint8_t *, uint16_t *);
    285 static int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
    286 		    uint16_t *);
    287 static void	iwm_init_channel_map(struct iwm_softc *,
    288 		    const uint16_t * const);
    289 static int	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
    290 		    const uint16_t *, const uint16_t *, uint8_t, uint8_t);
    291 static int	iwm_nvm_init(struct iwm_softc *);
    292 static int	iwm_firmware_load_chunk(struct iwm_softc *, uint32_t,
    293 		    const uint8_t *, uint32_t);
    294 static int	iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
    295 static int	iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
    296 static int	iwm_fw_alive(struct iwm_softc *, uint32_t);
    297 static int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
    298 static int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
    299 static int	iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
    300 		    enum iwm_ucode_type);
    301 static int	iwm_run_init_mvm_ucode(struct iwm_softc *, int);
    302 static int	iwm_rx_addbuf(struct iwm_softc *, int, int);
    303 static int	iwm_mvm_calc_rssi(struct iwm_softc *, struct iwm_rx_phy_info *);
    304 static int	iwm_mvm_get_signal_strength(struct iwm_softc *,
    305 		    struct iwm_rx_phy_info *);
    306 static void	iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
    307 		    struct iwm_rx_packet *, struct iwm_rx_data *);
    308 static int	iwm_get_noise(const struct iwm_mvm_statistics_rx_non_phy *);
    309 static void	iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct iwm_rx_packet *,
    310 		    struct iwm_rx_data *);
    311 static void	iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
    312 		    struct iwm_rx_packet *, struct iwm_node *);
    313 static void	iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
    314 		    struct iwm_rx_data *);
    315 static int	iwm_mvm_binding_cmd(struct iwm_softc *, struct iwm_node *,
    316 		    uint32_t);
    317 static int	iwm_mvm_binding_update(struct iwm_softc *, struct iwm_node *,
    318 		    int);
    319 static int	iwm_mvm_binding_add_vif(struct iwm_softc *, struct iwm_node *);
    320 static void	iwm_mvm_phy_ctxt_cmd_hdr(struct iwm_softc *,
    321 		    struct iwm_mvm_phy_ctxt *, struct iwm_phy_context_cmd *,
    322 		    uint32_t, uint32_t);
    323 static void	iwm_mvm_phy_ctxt_cmd_data(struct iwm_softc *,
    324 		    struct iwm_phy_context_cmd *, struct ieee80211_channel *,
    325 		    uint8_t, uint8_t);
    326 static int	iwm_mvm_phy_ctxt_apply(struct iwm_softc *,
    327 		    struct iwm_mvm_phy_ctxt *, uint8_t, uint8_t, uint32_t,
    328 		    uint32_t);
    329 static int	iwm_mvm_phy_ctxt_add(struct iwm_softc *,
    330 		    struct iwm_mvm_phy_ctxt *, struct ieee80211_channel *,
    331 		    uint8_t, uint8_t);
    332 static int	iwm_mvm_phy_ctxt_changed(struct iwm_softc *,
    333 		    struct iwm_mvm_phy_ctxt *, struct ieee80211_channel *,
    334 		    uint8_t, uint8_t);
    335 static int	iwm_send_cmd(struct iwm_softc *, struct iwm_host_cmd *);
    336 static int	iwm_mvm_send_cmd_pdu(struct iwm_softc *, uint8_t, uint32_t,
    337 		    uint16_t, const void *);
    338 static int	iwm_mvm_send_cmd_status(struct iwm_softc *,
    339 		    struct iwm_host_cmd *, uint32_t *);
    340 static int	iwm_mvm_send_cmd_pdu_status(struct iwm_softc *, uint8_t,
    341 		    uint16_t, const void *, uint32_t *);
    342 static void	iwm_free_resp(struct iwm_softc *, struct iwm_host_cmd *);
    343 static void	iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
    344 #if 0
    345 static void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
    346 		    uint16_t);
    347 #endif
    348 static const struct iwm_rate *iwm_tx_fill_cmd(struct iwm_softc *,
    349 		    struct iwm_node *, struct ieee80211_frame *,
    350 		    struct iwm_tx_cmd *);
    351 static int	iwm_tx(struct iwm_softc *, struct mbuf *,
    352 		    struct ieee80211_node *, int);
    353 static int	iwm_mvm_beacon_filter_send_cmd(struct iwm_softc *,
    354 		    struct iwm_beacon_filter_cmd *);
    355 static void	iwm_mvm_beacon_filter_set_cqm_params(struct iwm_softc *,
    356 		    struct iwm_node *, struct iwm_beacon_filter_cmd *);
    357 static int	iwm_mvm_update_beacon_abort(struct iwm_softc *,
    358 		    struct iwm_node *, int);
    359 static void	iwm_mvm_power_log(struct iwm_softc *,
    360 		    struct iwm_mac_power_cmd *);
    361 static void	iwm_mvm_power_build_cmd(struct iwm_softc *, struct iwm_node *,
    362 		    struct iwm_mac_power_cmd *);
    363 static int	iwm_mvm_power_mac_update_mode(struct iwm_softc *,
    364 		    struct iwm_node *);
    365 static int	iwm_mvm_power_update_device(struct iwm_softc *);
    366 static int	iwm_mvm_enable_beacon_filter(struct iwm_softc *,
    367 		    struct iwm_node *);
    368 static int	iwm_mvm_disable_beacon_filter(struct iwm_softc *,
    369 		    struct iwm_node *);
    370 static void	iwm_mvm_add_sta_cmd_v6_to_v5(struct iwm_mvm_add_sta_cmd_v6 *,
    371 		    struct iwm_mvm_add_sta_cmd_v5 *);
    372 static int	iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *,
    373 		    struct iwm_mvm_add_sta_cmd_v6 *, int *);
    374 static int	iwm_mvm_sta_send_to_fw(struct iwm_softc *, struct iwm_node *,
    375 		    int);
    376 static int	iwm_mvm_add_sta(struct iwm_softc *, struct iwm_node *);
    377 static int	iwm_mvm_update_sta(struct iwm_softc *, struct iwm_node *);
    378 static int	iwm_mvm_add_int_sta_common(struct iwm_softc *,
    379 		    struct iwm_int_sta *, const uint8_t *, uint16_t, uint16_t);
    380 static int	iwm_mvm_add_aux_sta(struct iwm_softc *);
    381 static uint16_t iwm_mvm_scan_rx_chain(struct iwm_softc *);
    382 static uint32_t iwm_mvm_scan_max_out_time(struct iwm_softc *, uint32_t, int);
    383 static uint32_t iwm_mvm_scan_suspend_time(struct iwm_softc *, int);
    384 static uint32_t iwm_mvm_scan_rxon_flags(struct iwm_softc *, int);
    385 static uint32_t iwm_mvm_scan_rate_n_flags(struct iwm_softc *, int, int);
    386 static uint16_t iwm_mvm_get_active_dwell(struct iwm_softc *, int, int);
    387 static uint16_t iwm_mvm_get_passive_dwell(struct iwm_softc *, int);
    388 static int	iwm_mvm_scan_fill_channels(struct iwm_softc *,
    389 		    struct iwm_scan_cmd *, int, int, int);
    390 static uint16_t iwm_mvm_fill_probe_req(struct iwm_softc *,
    391 		    struct ieee80211_frame *, const uint8_t *, int,
    392 		    const uint8_t *, int, const uint8_t *, int, int);
    393 static int	iwm_mvm_scan_request(struct iwm_softc *, int, int, uint8_t *,
    394 		    int);
    395 static void	iwm_mvm_ack_rates(struct iwm_softc *, struct iwm_node *, int *,
    396 		    int *);
    397 static void	iwm_mvm_mac_ctxt_cmd_common(struct iwm_softc *,
    398 		    struct iwm_node *, struct iwm_mac_ctx_cmd *, uint32_t);
    399 static int	iwm_mvm_mac_ctxt_send_cmd(struct iwm_softc *,
    400 		    struct iwm_mac_ctx_cmd *);
    401 static void	iwm_mvm_mac_ctxt_cmd_fill_sta(struct iwm_softc *,
    402 		    struct iwm_node *, struct iwm_mac_data_sta *, int);
    403 static int	iwm_mvm_mac_ctxt_cmd_station(struct iwm_softc *,
    404 		    struct iwm_node *, uint32_t);
    405 static int	iwm_mvm_mac_ctx_send(struct iwm_softc *, struct iwm_node *,
    406 		    uint32_t);
    407 static int	iwm_mvm_mac_ctxt_add(struct iwm_softc *, struct iwm_node *);
    408 static int	iwm_mvm_mac_ctxt_changed(struct iwm_softc *, struct iwm_node *);
    409 static int	iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_node *);
    410 static int	iwm_auth(struct iwm_softc *);
    411 static int	iwm_assoc(struct iwm_softc *);
    412 static int	iwm_release(struct iwm_softc *, struct iwm_node *);
    413 static void	iwm_calib_timeout(void *);
    414 static void	iwm_setrates(struct iwm_node *);
    415 static int	iwm_media_change(struct ifnet *);
    416 static void	iwm_newstate_cb(struct work *, void *);
    417 static int	iwm_newstate(struct ieee80211com *, enum ieee80211_state, int);
    418 static void	iwm_endscan_cb(struct work *, void *);
    419 static int	iwm_init_hw(struct iwm_softc *);
    420 static int	iwm_init(struct ifnet *);
    421 static void	iwm_start(struct ifnet *);
    422 static void	iwm_stop(struct ifnet *, int);
    423 static void	iwm_watchdog(struct ifnet *);
    424 static int	iwm_ioctl(struct ifnet *, u_long, void *);
    425 #ifdef IWM_DEBUG
    426 static const char *iwm_desc_lookup(uint32_t);
    427 static void	iwm_nic_error(struct iwm_softc *);
    428 #endif
    429 static void	iwm_notif_intr(struct iwm_softc *);
    430 static int	iwm_intr(void *);
    431 static int	iwm_preinit(struct iwm_softc *);
    432 static void	iwm_attach_hook(device_t);
    433 static void	iwm_attach(device_t, device_t, void *);
    434 #if 0
    435 static void	iwm_init_task(void *);
    436 static int	iwm_activate(device_t, enum devact);
    437 static void	iwm_wakeup(struct iwm_softc *);
    438 #endif
    439 static void	iwm_radiotap_attach(struct iwm_softc *);
    440 
    441 static int
    442 iwm_firmload(struct iwm_softc *sc)
    443 {
    444 	struct iwm_fw_info *fw = &sc->sc_fw;
    445 	firmware_handle_t fwh;
    446 	int error;
    447 
    448 	/* Open firmware image. */
    449 	if ((error = firmware_open("if_iwm", sc->sc_fwname, &fwh)) != 0) {
    450 		aprint_error_dev(sc->sc_dev,
    451 		    "could not get firmware handle %s\n", sc->sc_fwname);
    452 		return error;
    453 	}
    454 
    455 	fw->fw_rawsize = firmware_get_size(fwh);
    456 	/*
    457 	 * Well, this is how the Linux driver checks it ....
    458 	 */
    459 	if (fw->fw_rawsize < sizeof(uint32_t)) {
    460 		aprint_error_dev(sc->sc_dev,
    461 		    "firmware too short: %zd bytes\n", fw->fw_rawsize);
    462 		error = EINVAL;
    463 		goto out;
    464 	}
    465 
    466 	/* some sanity */
    467 	if (fw->fw_rawsize > IWM_FWMAXSIZE) {
    468 		aprint_error_dev(sc->sc_dev,
    469 		    "firmware size is ridiculous: %zd bytes\n",
    470 		fw->fw_rawsize);
    471 		error = EINVAL;
    472 		goto out;
    473 	}
    474 
    475 	/* Read the firmware. */
    476 	fw->fw_rawdata = kmem_alloc(fw->fw_rawsize, KM_SLEEP);
    477 	if (fw->fw_rawdata == NULL) {
    478 		aprint_error_dev(sc->sc_dev,
     479 		    "not enough memory to store firmware %s\n", sc->sc_fwname);
    480 		error = ENOMEM;
    481 		goto out;
    482 	}
    483 	error = firmware_read(fwh, 0, fw->fw_rawdata, fw->fw_rawsize);
    484 	if (error) {
    485 		aprint_error_dev(sc->sc_dev,
    486 		    "could not read firmware %s\n", sc->sc_fwname);
    487 		goto out;
    488 	}
    489 
    490  out:
    491 	/* caller will release memory, if necessary */
    492 
    493 	firmware_close(fwh);
    494 	return error;
    495 }
    496 
     497 /*
     498  * Patch ic_curchan from received beacons/probe responses (just maintaining status quo).
     499  */
    500 static void
    501 iwm_fix_channel(struct ieee80211com *ic, struct mbuf *m)
    502 {
    503 	struct iwm_softc *sc = ic->ic_ifp->if_softc;
    504 	struct ieee80211_frame *wh;
    505 	uint8_t subtype;
    506 	uint8_t *frm, *efrm;
    507 
    508 	wh = mtod(m, struct ieee80211_frame *);
    509 
    510 	if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_MGT)
    511 		return;
    512 
    513 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
    514 
    515 	if (subtype != IEEE80211_FC0_SUBTYPE_BEACON &&
    516 	    subtype != IEEE80211_FC0_SUBTYPE_PROBE_RESP)
    517 		return;
    518 
    519 	if (sc->sc_scanband == IEEE80211_CHAN_5GHZ) {
    520 		int chan = le32toh(sc->sc_last_phy_info.channel);
    521 		if (chan < __arraycount(ic->ic_channels))
    522 			ic->ic_curchan = &ic->ic_channels[chan];
    523 		return;
    524 	}
    525 
    526 	frm = (uint8_t *)(wh + 1);
    527 	efrm = mtod(m, uint8_t *) + m->m_len;
    528 
    529 	frm += 12;      /* skip tstamp, bintval and capinfo fields */
    530 	while (frm < efrm) {
    531 		if (*frm == IEEE80211_ELEMID_DSPARMS) {
    532 #if IEEE80211_CHAN_MAX < 255
    533 			if (frm[2] <= IEEE80211_CHAN_MAX)
    534 #endif
    535 				ic->ic_curchan = &ic->ic_channels[frm[2]];
    536 		}
    537 		frm += frm[1] + 2;
    538 	}
    539 }
    540 
    541 /*
    542  * Firmware parser.
    543  */
    544 
    545 static int
    546 iwm_store_cscheme(struct iwm_softc *sc, uint8_t *data, size_t dlen)
    547 {
    548 	struct iwm_fw_cscheme_list *l = (void *)data;
    549 
    550 	if (dlen < sizeof(*l) ||
    551 	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
    552 		return EINVAL;
    553 
    554 	/* we don't actually store anything for now, always use s/w crypto */
    555 
    556 	return 0;
    557 }
    558 
    559 static int
    560 iwm_firmware_store_section(struct iwm_softc *sc,
    561 	enum iwm_ucode_type type, uint8_t *data, size_t dlen)
    562 {
    563 	struct iwm_fw_sects *fws;
    564 	struct iwm_fw_onesect *fwone;
    565 
    566 	if (type >= IWM_UCODE_TYPE_MAX)
    567 		return EINVAL;
    568 	if (dlen < sizeof(uint32_t))
    569 		return EINVAL;
    570 
    571 	fws = &sc->sc_fw.fw_sects[type];
    572 	if (fws->fw_count >= IWM_UCODE_SECT_MAX)
    573 		return EINVAL;
    574 
    575 	fwone = &fws->fw_sect[fws->fw_count];
    576 
    577 	/* first 32bit are device load offset */
    578 	memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));
    579 
    580 	/* rest is data */
    581 	fwone->fws_data = data + sizeof(uint32_t);
    582 	fwone->fws_len = dlen - sizeof(uint32_t);
    583 
    584 	/* for freeing the buffer during driver unload */
    585 	fwone->fws_alloc = data;
    586 	fwone->fws_allocsize = dlen;
    587 
    588 	fws->fw_count++;
    589 	fws->fw_totlen += fwone->fws_len;
    590 
    591 	return 0;
    592 }
    593 
    594 /* iwlwifi: iwl-drv.c */
    595 struct iwm_tlv_calib_data {
    596 	uint32_t ucode_type;
    597 	struct iwm_tlv_calib_ctrl calib;
    598 } __packed;
    599 
    600 static int
    601 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
    602 {
    603 	const struct iwm_tlv_calib_data *def_calib = data;
    604 	uint32_t ucode_type = le32toh(def_calib->ucode_type);
    605 
    606 	if (ucode_type >= IWM_UCODE_TYPE_MAX) {
    607 		DPRINTF(("%s: Wrong ucode_type %u for default "
    608 		    "calibration.\n", DEVNAME(sc), ucode_type));
    609 		return EINVAL;
    610 	}
    611 
    612 	sc->sc_default_calib[ucode_type].flow_trigger =
    613 	    def_calib->calib.flow_trigger;
    614 	sc->sc_default_calib[ucode_type].event_trigger =
    615 	    def_calib->calib.event_trigger;
    616 
    617 	return 0;
    618 }
    619 
    620 static int
    621 iwm_read_firmware(struct iwm_softc *sc)
    622 {
    623 	struct iwm_fw_info *fw = &sc->sc_fw;
    624 	struct iwm_tlv_ucode_header *uhdr;
    625 	struct iwm_ucode_tlv tlv;
    626 	enum iwm_ucode_tlv_type tlv_type;
    627 	uint8_t *data;
    628 	int error, status;
    629 	size_t len;
    630 
    631 	if (fw->fw_status == IWM_FW_STATUS_NONE) {
    632 		fw->fw_status = IWM_FW_STATUS_INPROGRESS;
    633 	} else {
    634 		while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
    635 			tsleep(&sc->sc_fw, 0, "iwmfwp", 0);
    636 	}
    637 	status = fw->fw_status;
    638 
    639 	if (status == IWM_FW_STATUS_DONE)
    640 		return 0;
    641 
    642 	/*
    643 	 * Load firmware into driver memory.
    644 	 * fw_rawdata and fw_rawsize will be set.
    645 	 */
    646 	error = iwm_firmload(sc);
    647 	if (error != 0) {
    648 		aprint_error_dev(sc->sc_dev,
    649 		    "could not read firmware %s (error %d)\n",
    650 		    sc->sc_fwname, error);
    651 		goto out;
    652 	}
    653 
    654 	/*
    655 	 * Parse firmware contents
    656 	 */
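	/*
	 * Note (added): the layout parsed below is a struct
	 * iwm_tlv_ucode_header (whose leading 32-bit word must be zero and
	 * whose magic, ver and data fields are used here), followed by a
	 * sequence of TLVs; each TLV is a (type, length) header plus
	 * "length" bytes of payload, padded to a 4-byte boundary.
	 */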
    657 
    658 	uhdr = (void *)fw->fw_rawdata;
    659 	if (*(uint32_t *)fw->fw_rawdata != 0
    660 	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
    661 		aprint_error_dev(sc->sc_dev, "invalid firmware %s\n",
    662 		    sc->sc_fwname);
    663 		error = EINVAL;
    664 		goto out;
    665 	}
    666 
    667 	sc->sc_fwver = le32toh(uhdr->ver);
    668 	data = uhdr->data;
    669 	len = fw->fw_rawsize - sizeof(*uhdr);
    670 
    671 	while (len >= sizeof(tlv)) {
    672 		size_t tlv_len;
    673 		void *tlv_data;
    674 
    675 		memcpy(&tlv, data, sizeof(tlv));
    676 		tlv_len = le32toh(tlv.length);
    677 		tlv_type = le32toh(tlv.type);
    678 
    679 		len -= sizeof(tlv);
    680 		data += sizeof(tlv);
    681 		tlv_data = data;
    682 
    683 		if (len < tlv_len) {
    684 			aprint_error_dev(sc->sc_dev,
    685 			    "firmware too short: %zu bytes\n", len);
    686 			error = EINVAL;
    687 			goto parse_out;
    688 		}
    689 
    690 		switch ((int)tlv_type) {
    691 		case IWM_UCODE_TLV_PROBE_MAX_LEN:
    692 			if (tlv_len < sizeof(uint32_t)) {
    693 				error = EINVAL;
    694 				goto parse_out;
    695 			}
    696 			sc->sc_capa_max_probe_len
    697 			    = le32toh(*(uint32_t *)tlv_data);
    698 			/* limit it to something sensible */
    699 			if (sc->sc_capa_max_probe_len > (1<<16)) {
    700 				DPRINTF(("%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
    701 				    "ridiculous\n", DEVNAME(sc)));
    702 				error = EINVAL;
    703 				goto parse_out;
    704 			}
    705 			break;
    706 		case IWM_UCODE_TLV_PAN:
    707 			if (tlv_len) {
    708 				error = EINVAL;
    709 				goto parse_out;
    710 			}
    711 			sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
    712 			break;
    713 		case IWM_UCODE_TLV_FLAGS:
    714 			if (tlv_len < sizeof(uint32_t)) {
    715 				error = EINVAL;
    716 				goto parse_out;
    717 			}
    718 			/*
     719 			 * Apparently there can be many flags, but the Linux
     720 			 * driver parses only the first one, and so do we.
    721 			 *
    722 			 * XXX: why does this override IWM_UCODE_TLV_PAN?
    723 			 * Intentional or a bug?  Observations from
    724 			 * current firmware file:
    725 			 *  1) TLV_PAN is parsed first
    726 			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
    727 			 * ==> this resets TLV_PAN to itself... hnnnk
    728 			 */
    729 			sc->sc_capaflags = le32toh(*(uint32_t *)tlv_data);
    730 			break;
    731 		case IWM_UCODE_TLV_CSCHEME:
    732 			if ((error = iwm_store_cscheme(sc,
    733 			    tlv_data, tlv_len)) != 0)
    734 				goto parse_out;
    735 			break;
    736 		case IWM_UCODE_TLV_NUM_OF_CPU:
    737 			if (tlv_len != sizeof(uint32_t)) {
    738 				error = EINVAL;
    739 				goto parse_out;
    740 			}
    741 			if (le32toh(*(uint32_t*)tlv_data) != 1) {
    742 				DPRINTF(("%s: driver supports "
    743 				    "only TLV_NUM_OF_CPU == 1", DEVNAME(sc)));
    744 				error = EINVAL;
    745 				goto parse_out;
    746 			}
    747 			break;
    748 		case IWM_UCODE_TLV_SEC_RT:
    749 			if ((error = iwm_firmware_store_section(sc,
    750 			    IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len)) != 0)
    751 				goto parse_out;
    752 			break;
    753 		case IWM_UCODE_TLV_SEC_INIT:
    754 			if ((error = iwm_firmware_store_section(sc,
    755 			    IWM_UCODE_TYPE_INIT, tlv_data, tlv_len)) != 0)
    756 				goto parse_out;
    757 			break;
    758 		case IWM_UCODE_TLV_SEC_WOWLAN:
    759 			if ((error = iwm_firmware_store_section(sc,
    760 			    IWM_UCODE_TYPE_WOW, tlv_data, tlv_len)) != 0)
    761 				goto parse_out;
    762 			break;
    763 		case IWM_UCODE_TLV_DEF_CALIB:
    764 			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
    765 				error = EINVAL;
    766 				goto parse_out;
    767 			}
    768 			if ((error = iwm_set_default_calib(sc, tlv_data)) != 0)
    769 				goto parse_out;
    770 			break;
    771 		case IWM_UCODE_TLV_PHY_SKU:
    772 			if (tlv_len != sizeof(uint32_t)) {
    773 				error = EINVAL;
    774 				goto parse_out;
    775 			}
    776 			sc->sc_fw_phy_config = le32toh(*(uint32_t *)tlv_data);
    777 			break;
    778 
    779 		case IWM_UCODE_TLV_API_CHANGES_SET:
    780 		case IWM_UCODE_TLV_ENABLED_CAPABILITIES:
    781 			/* ignore, not used by current driver */
    782 			break;
    783 
    784 		default:
    785 			DPRINTF(("%s: unknown firmware section %d, abort\n",
    786 			    DEVNAME(sc), tlv_type));
    787 			error = EINVAL;
    788 			goto parse_out;
    789 		}
    790 
    791 		len -= roundup(tlv_len, 4);
    792 		data += roundup(tlv_len, 4);
    793 	}
    794 
    795 	KASSERT(error == 0);
    796 
    797  parse_out:
    798 	if (error) {
    799 		aprint_error_dev(sc->sc_dev,
    800 		    "firmware parse error, section type %d\n", tlv_type);
    801 	}
    802 
    803 	if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
    804 		aprint_error_dev(sc->sc_dev,
    805 		    "device uses unsupported power ops\n");
    806 		error = ENOTSUP;
    807 	}
    808 
    809  out:
    810 	if (error)
    811 		fw->fw_status = IWM_FW_STATUS_NONE;
    812 	else
    813 		fw->fw_status = IWM_FW_STATUS_DONE;
    814 	wakeup(&sc->sc_fw);
    815 
    816 	if (error && fw->fw_rawdata != NULL) {
    817 		kmem_free(fw->fw_rawdata, fw->fw_rawsize);
    818 		fw->fw_rawdata = NULL;
    819 	}
    820 	return error;
    821 }
    822 
    823 /*
    824  * basic device access
    825  */
    826 
    827 static uint32_t
    828 iwm_read_prph(struct iwm_softc *sc, uint32_t addr)
    829 {
    830 	IWM_WRITE(sc,
    831 	    IWM_HBUS_TARG_PRPH_RADDR, ((addr & 0x000fffff) | (3 << 24)));
    832 	IWM_BARRIER_READ_WRITE(sc);
    833 	return IWM_READ(sc, IWM_HBUS_TARG_PRPH_RDAT);
    834 }
    835 
    836 static void
    837 iwm_write_prph(struct iwm_softc *sc, uint32_t addr, uint32_t val)
    838 {
    839 	IWM_WRITE(sc,
    840 	    IWM_HBUS_TARG_PRPH_WADDR, ((addr & 0x000fffff) | (3 << 24)));
    841 	IWM_BARRIER_WRITE(sc);
    842 	IWM_WRITE(sc, IWM_HBUS_TARG_PRPH_WDAT, val);
    843 }
    844 
    845 #ifdef IWM_DEBUG
    846 /* iwlwifi: pcie/trans.c */
    847 static int
    848 iwm_read_mem(struct iwm_softc *sc, uint32_t addr, void *buf, int dwords)
    849 {
    850 	int offs, ret = 0;
    851 	uint32_t *vals = buf;
    852 
    853 	if (iwm_nic_lock(sc)) {
    854 		IWM_WRITE(sc, IWM_HBUS_TARG_MEM_RADDR, addr);
    855 		for (offs = 0; offs < dwords; offs++)
    856 			vals[offs] = IWM_READ(sc, IWM_HBUS_TARG_MEM_RDAT);
    857 		iwm_nic_unlock(sc);
    858 	} else {
    859 		ret = EBUSY;
    860 	}
    861 	return ret;
    862 }
    863 #endif
    864 
    865 /* iwlwifi: pcie/trans.c */
    866 static int
    867 iwm_write_mem(struct iwm_softc *sc, uint32_t addr, const void *buf, int dwords)
    868 {
    869 	int offs;
    870 	const uint32_t *vals = buf;
    871 
    872 	if (iwm_nic_lock(sc)) {
    873 		IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WADDR, addr);
    874 		/* WADDR auto-increments */
    875 		for (offs = 0; offs < dwords; offs++) {
    876 			uint32_t val = vals ? vals[offs] : 0;
    877 			IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WDAT, val);
    878 		}
    879 		iwm_nic_unlock(sc);
    880 	} else {
    881 		DPRINTF(("%s: write_mem failed\n", DEVNAME(sc)));
    882 		return EBUSY;
    883 	}
    884 	return 0;
    885 }
    886 
    887 static int
    888 iwm_write_mem32(struct iwm_softc *sc, uint32_t addr, uint32_t val)
    889 {
    890 	return iwm_write_mem(sc, addr, &val, 1);
    891 }
    892 
    893 static int
    894 iwm_poll_bit(struct iwm_softc *sc, int reg,
    895 	uint32_t bits, uint32_t mask, int timo)
    896 {
    897 	for (;;) {
    898 		if ((IWM_READ(sc, reg) & mask) == (bits & mask)) {
    899 			return 1;
    900 		}
    901 		if (timo < 10) {
    902 			return 0;
    903 		}
    904 		timo -= 10;
    905 		DELAY(10);
    906 	}
    907 }
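
/*
 * Note (added): iwm_poll_bit() busy-waits in 10 us steps until
 * (reg & mask) == (bits & mask), returning 1 on success and 0 after
 * roughly "timo" microseconds.  A typical call (taken from
 * iwm_apm_init() below) waits up to ~25 ms for clock stabilization:
 */
#if 0
	if (!iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000))
		aprint_error_dev(sc->sc_dev,
		    "timeout waiting for clock stabilization\n");
#endif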
    908 
    909 static int
    910 iwm_nic_lock(struct iwm_softc *sc)
    911 {
    912 	int rv = 0;
    913 
    914 	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
    915 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
    916 
    917 	if (iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
    918 	    IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
    919 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
    920 	     | IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP, 15000)) {
     921 		rv = 1;
    922 	} else {
    923 		/* jolt */
    924 		IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_FORCE_NMI);
    925 	}
    926 
    927 	return rv;
    928 }
    929 
    930 static void
    931 iwm_nic_unlock(struct iwm_softc *sc)
    932 {
    933 	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
    934 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
    935 }
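
/*
 * Note (added): direct PRPH/SRAM accesses in this driver are expected to
 * be bracketed by iwm_nic_lock()/iwm_nic_unlock(), e.g. (illustrative
 * fragment only):
 */
#if 0
	if (iwm_nic_lock(sc)) {
		uint32_t val = iwm_read_prph(sc, reg);
		/* ... use val ... */
		iwm_nic_unlock(sc);
	}
#endif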
    936 
    937 static void
    938 iwm_set_bits_mask_prph(struct iwm_softc *sc,
    939 	uint32_t reg, uint32_t bits, uint32_t mask)
    940 {
    941 	uint32_t val;
    942 
    943 	/* XXX: no error path? */
    944 	if (iwm_nic_lock(sc)) {
    945 		val = iwm_read_prph(sc, reg) & mask;
    946 		val |= bits;
    947 		iwm_write_prph(sc, reg, val);
    948 		iwm_nic_unlock(sc);
    949 	}
    950 }
    951 
    952 static void
    953 iwm_set_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
    954 {
    955 	iwm_set_bits_mask_prph(sc, reg, bits, ~0);
    956 }
    957 
    958 static void
    959 iwm_clear_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
    960 {
    961 	iwm_set_bits_mask_prph(sc, reg, 0, ~bits);
    962 }
    963 
    964 /*
    965  * DMA resource routines
    966  */
    967 
    968 static int
    969 iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
    970     bus_size_t size, bus_size_t alignment)
    971 {
    972 	int nsegs, error;
    973 	void *va;
    974 
    975 	dma->tag = tag;
    976 	dma->size = size;
    977 
    978 	error = bus_dmamap_create(tag, size, 1, size, 0, BUS_DMA_NOWAIT,
    979 	    &dma->map);
    980 	if (error != 0)
    981 		goto fail;
    982 
    983 	error = bus_dmamem_alloc(tag, size, alignment, 0, &dma->seg, 1, &nsegs,
    984 	    BUS_DMA_NOWAIT);
    985 	if (error != 0)
    986 		goto fail;
    987 
    988 	error = bus_dmamem_map(tag, &dma->seg, 1, size, &va,
    989 	    BUS_DMA_NOWAIT);
    990 	if (error != 0)
    991 		goto fail;
    992 	dma->vaddr = va;
    993 
    994 	error = bus_dmamap_load(tag, dma->map, dma->vaddr, size, NULL,
    995 	    BUS_DMA_NOWAIT);
    996 	if (error != 0)
    997 		goto fail;
    998 
    999 	memset(dma->vaddr, 0, size);
   1000 	bus_dmamap_sync(tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE);
   1001 	dma->paddr = dma->map->dm_segs[0].ds_addr;
   1002 
   1003 	return 0;
   1004 
   1005 fail:	iwm_dma_contig_free(dma);
   1006 	return error;
   1007 }
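
/*
 * Note (added): iwm_dma_contig_alloc() bundles the usual bus_dma(9)
 * sequence -- bus_dmamap_create(), bus_dmamem_alloc(), bus_dmamem_map()
 * and bus_dmamap_load() -- zeroes the memory and records vaddr/paddr in
 * the iwm_dma_info, so callers only pass a size and alignment, e.g.:
 */
#if 0
	/* 4 KB keep-warm page, 4 KB aligned (cf. iwm_alloc_kw() below) */
	error = iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
#endif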
   1008 
   1009 static void
   1010 iwm_dma_contig_free(struct iwm_dma_info *dma)
   1011 {
   1012 	if (dma->map != NULL) {
   1013 		if (dma->vaddr != NULL) {
   1014 			bus_dmamap_sync(dma->tag, dma->map, 0, dma->size,
   1015 			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
   1016 			bus_dmamap_unload(dma->tag, dma->map);
   1017 			bus_dmamem_unmap(dma->tag, dma->vaddr, dma->size);
   1018 			bus_dmamem_free(dma->tag, &dma->seg, 1);
   1019 			dma->vaddr = NULL;
   1020 		}
   1021 		bus_dmamap_destroy(dma->tag, dma->map);
   1022 		dma->map = NULL;
   1023 	}
   1024 }
   1025 
   1026 /* fwmem is used to load firmware onto the card */
   1027 static int
   1028 iwm_alloc_fwmem(struct iwm_softc *sc)
   1029 {
   1030 	/* Must be aligned on a 16-byte boundary. */
   1031 	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
   1032 	    sc->sc_fwdmasegsz, 16);
   1033 }
   1034 
   1035 static void
   1036 iwm_free_fwmem(struct iwm_softc *sc)
   1037 {
   1038 	iwm_dma_contig_free(&sc->fw_dma);
   1039 }
   1040 
   1041 /* tx scheduler rings.  not used? */
   1042 static int
   1043 iwm_alloc_sched(struct iwm_softc *sc)
   1044 {
   1045 	int rv;
   1046 
   1047 	/* TX scheduler rings must be aligned on a 1KB boundary. */
   1048 	rv = iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
   1049 	    __arraycount(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
   1050 	return rv;
   1051 }
   1052 
   1053 static void
   1054 iwm_free_sched(struct iwm_softc *sc)
   1055 {
   1056 	iwm_dma_contig_free(&sc->sched_dma);
   1057 }
   1058 
   1059 /* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
   1060 static int
   1061 iwm_alloc_kw(struct iwm_softc *sc)
   1062 {
   1063 	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
   1064 }
   1065 
   1066 static void
   1067 iwm_free_kw(struct iwm_softc *sc)
   1068 {
   1069 	iwm_dma_contig_free(&sc->kw_dma);
   1070 }
   1071 
   1072 /* interrupt cause table */
   1073 static int
   1074 iwm_alloc_ict(struct iwm_softc *sc)
   1075 {
   1076 	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
   1077 	    IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
   1078 }
   1079 
   1080 static void
   1081 iwm_free_ict(struct iwm_softc *sc)
   1082 {
   1083 	iwm_dma_contig_free(&sc->ict_dma);
   1084 }
   1085 
   1086 static int
   1087 iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
   1088 {
   1089 	bus_size_t size;
   1090 	int i, error;
   1091 
   1092 	ring->cur = 0;
   1093 
   1094 	/* Allocate RX descriptors (256-byte aligned). */
   1095 	size = IWM_RX_RING_COUNT * sizeof(uint32_t);
   1096 	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
   1097 	if (error != 0) {
   1098 		aprint_error_dev(sc->sc_dev,
   1099 		    "could not allocate RX ring DMA memory\n");
   1100 		goto fail;
   1101 	}
   1102 	ring->desc = ring->desc_dma.vaddr;
   1103 
   1104 	/* Allocate RX status area (16-byte aligned). */
   1105 	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
   1106 	    sizeof(*ring->stat), 16);
   1107 	if (error != 0) {
   1108 		aprint_error_dev(sc->sc_dev,
   1109 		    "could not allocate RX status DMA memory\n");
   1110 		goto fail;
   1111 	}
   1112 	ring->stat = ring->stat_dma.vaddr;
   1113 
   1114 	/*
   1115 	 * Allocate and map RX buffers.
   1116 	 */
   1117 	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
   1118 		struct iwm_rx_data *data = &ring->data[i];
   1119 
   1120 		memset(data, 0, sizeof(*data));
   1121 		error = bus_dmamap_create(sc->sc_dmat, IWM_RBUF_SIZE, 1,
   1122 		    IWM_RBUF_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
   1123 		    &data->map);
   1124 		if (error != 0) {
   1125 			aprint_error_dev(sc->sc_dev,
   1126 			    "could not create RX buf DMA map\n");
   1127 			goto fail;
   1128 		}
   1129 
   1130 		if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
   1131 			goto fail;
   1132 		}
   1133 	}
   1134 	return 0;
   1135 
   1136 fail:	iwm_free_rx_ring(sc, ring);
   1137 	return error;
   1138 }
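
/*
 * Note (added): each RX ring slot is backed by one mbuf of up to
 * IWM_RBUF_SIZE bytes mapped through its own DMA map; the descriptor
 * array above only holds the bus addresses handed to the device (hence
 * IWM_RX_RING_COUNT * sizeof(uint32_t)), and the separate status area is
 * where the hardware reports how far it has advanced in the ring.
 */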
   1139 
   1140 static void
   1141 iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
   1142 {
   1143 	int ntries;
   1144 
   1145 	if (iwm_nic_lock(sc)) {
   1146 		IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
   1147 		for (ntries = 0; ntries < 1000; ntries++) {
   1148 			if (IWM_READ(sc, IWM_FH_MEM_RSSR_RX_STATUS_REG) &
   1149 			    IWM_FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE)
   1150 				break;
   1151 			DELAY(10);
   1152 		}
   1153 		iwm_nic_unlock(sc);
   1154 	}
   1155 	ring->cur = 0;
   1156 }
   1157 
   1158 static void
   1159 iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
   1160 {
   1161 	int i;
   1162 
   1163 	iwm_dma_contig_free(&ring->desc_dma);
   1164 	iwm_dma_contig_free(&ring->stat_dma);
   1165 
   1166 	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
   1167 		struct iwm_rx_data *data = &ring->data[i];
   1168 
   1169 		if (data->m != NULL) {
   1170 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
   1171 			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
   1172 			bus_dmamap_unload(sc->sc_dmat, data->map);
   1173 			m_freem(data->m);
   1174 		}
   1175 		if (data->map != NULL)
   1176 			bus_dmamap_destroy(sc->sc_dmat, data->map);
   1177 	}
   1178 }
   1179 
   1180 static int
   1181 iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
   1182 {
   1183 	bus_addr_t paddr;
   1184 	bus_size_t size;
   1185 	int i, error;
   1186 
   1187 	ring->qid = qid;
   1188 	ring->queued = 0;
   1189 	ring->cur = 0;
   1190 
   1191 	/* Allocate TX descriptors (256-byte aligned). */
   1192 	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
   1193 	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
   1194 	if (error != 0) {
   1195 		aprint_error_dev(sc->sc_dev,
   1196 		    "could not allocate TX ring DMA memory\n");
   1197 		goto fail;
   1198 	}
   1199 	ring->desc = ring->desc_dma.vaddr;
   1200 
   1201 	/*
   1202 	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
    1203 	 * to allocate command space for other rings.
   1204 	 */
   1205 	if (qid > IWM_MVM_CMD_QUEUE)
   1206 		return 0;
   1207 
   1208 	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
   1209 	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
   1210 	if (error != 0) {
   1211 		aprint_error_dev(sc->sc_dev,
   1212 		    "could not allocate TX cmd DMA memory\n");
   1213 		goto fail;
   1214 	}
   1215 	ring->cmd = ring->cmd_dma.vaddr;
   1216 
   1217 	paddr = ring->cmd_dma.paddr;
   1218 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
   1219 		struct iwm_tx_data *data = &ring->data[i];
   1220 
   1221 		data->cmd_paddr = paddr;
   1222 		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
   1223 		    + offsetof(struct iwm_tx_cmd, scratch);
   1224 		paddr += sizeof(struct iwm_device_cmd);
   1225 
   1226 		error = bus_dmamap_create(sc->sc_dmat, IWM_RBUF_SIZE,
   1227 		    IWM_NUM_OF_TBS - 2, IWM_RBUF_SIZE, 0, BUS_DMA_NOWAIT,
   1228 		    &data->map);
   1229 		if (error != 0) {
   1230 			aprint_error_dev(sc->sc_dev,
   1231 			    "could not create TX buf DMA map\n");
   1232 			goto fail;
   1233 		}
   1234 	}
   1235 	KASSERT(paddr == ring->cmd_dma.paddr + size);
   1236 	return 0;
   1237 
   1238 fail:	iwm_free_tx_ring(sc, ring);
   1239 	return error;
   1240 }
   1241 
   1242 static void
   1243 iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
   1244 {
   1245 	int i;
   1246 
   1247 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
   1248 		struct iwm_tx_data *data = &ring->data[i];
   1249 
   1250 		if (data->m != NULL) {
   1251 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
   1252 			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   1253 			bus_dmamap_unload(sc->sc_dmat, data->map);
   1254 			m_freem(data->m);
   1255 			data->m = NULL;
   1256 		}
   1257 	}
   1258 	/* Clear TX descriptors. */
   1259 	memset(ring->desc, 0, ring->desc_dma.size);
   1260 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0,
   1261 	    ring->desc_dma.size, BUS_DMASYNC_PREWRITE);
   1262 	sc->qfullmsk &= ~(1 << ring->qid);
   1263 	ring->queued = 0;
   1264 	ring->cur = 0;
   1265 }
   1266 
   1267 static void
   1268 iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
   1269 {
   1270 	int i;
   1271 
   1272 	iwm_dma_contig_free(&ring->desc_dma);
   1273 	iwm_dma_contig_free(&ring->cmd_dma);
   1274 
   1275 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
   1276 		struct iwm_tx_data *data = &ring->data[i];
   1277 
   1278 		if (data->m != NULL) {
   1279 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
   1280 			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   1281 			bus_dmamap_unload(sc->sc_dmat, data->map);
   1282 			m_freem(data->m);
   1283 		}
   1284 		if (data->map != NULL)
   1285 			bus_dmamap_destroy(sc->sc_dmat, data->map);
   1286 	}
   1287 }
   1288 
   1289 /*
   1290  * High-level hardware frobbing routines
   1291  */
   1292 
   1293 static void
   1294 iwm_enable_rfkill_int(struct iwm_softc *sc)
   1295 {
   1296 	sc->sc_intmask = IWM_CSR_INT_BIT_RF_KILL;
   1297 	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
   1298 }
   1299 
   1300 static int
   1301 iwm_check_rfkill(struct iwm_softc *sc)
   1302 {
   1303 	uint32_t v;
   1304 	int s;
   1305 	int rv;
   1306 
   1307 	s = splnet();
   1308 
   1309 	/*
   1310 	 * "documentation" is not really helpful here:
   1311 	 *  27:	HW_RF_KILL_SW
   1312 	 *	Indicates state of (platform's) hardware RF-Kill switch
   1313 	 *
   1314 	 * But apparently when it's off, it's on ...
   1315 	 */
   1316 	v = IWM_READ(sc, IWM_CSR_GP_CNTRL);
   1317 	rv = (v & IWM_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) == 0;
   1318 	if (rv) {
   1319 		sc->sc_flags |= IWM_FLAG_RFKILL;
   1320 	} else {
   1321 		sc->sc_flags &= ~IWM_FLAG_RFKILL;
   1322 	}
   1323 
   1324 	splx(s);
   1325 	return rv;
   1326 }
   1327 
   1328 static void
   1329 iwm_enable_interrupts(struct iwm_softc *sc)
   1330 {
   1331 	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
   1332 	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
   1333 }
   1334 
   1335 static void
   1336 iwm_restore_interrupts(struct iwm_softc *sc)
   1337 {
   1338 	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
   1339 }
   1340 
   1341 static void
   1342 iwm_disable_interrupts(struct iwm_softc *sc)
   1343 {
   1344 	int s = splnet();
   1345 
   1346 	/* disable interrupts */
   1347 	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
   1348 
   1349 	/* acknowledge all interrupts */
   1350 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
   1351 	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
   1352 
   1353 	splx(s);
   1354 }
   1355 
   1356 static void
   1357 iwm_ict_reset(struct iwm_softc *sc)
   1358 {
   1359 	iwm_disable_interrupts(sc);
   1360 
   1361 	/* Reset ICT table. */
   1362 	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
   1363 	sc->ict_cur = 0;
   1364 
   1365 	/* Set physical address of ICT table (4KB aligned). */
   1366 	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
   1367 	    IWM_CSR_DRAM_INT_TBL_ENABLE
   1368 	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
   1369 	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);
   1370 
   1371 	/* Switch to ICT interrupt mode in driver. */
   1372 	sc->sc_flags |= IWM_FLAG_USE_ICT;
   1373 
   1374 	/* Re-enable interrupts. */
   1375 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
   1376 	iwm_enable_interrupts(sc);
   1377 }
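
/*
 * Note (added): the ICT table is allocated (1 << IWM_ICT_PADDR_SHIFT)
 * aligned (see iwm_alloc_ict() above) so its physical address can be
 * programmed into IWM_CSR_DRAM_INT_TBL_REG as a single shifted field
 * alongside the enable and wrap-check bits.
 */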
   1378 
   1379 #define IWM_HW_READY_TIMEOUT 50
   1380 static int
   1381 iwm_set_hw_ready(struct iwm_softc *sc)
   1382 {
   1383 	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
   1384 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
   1385 
   1386 	return iwm_poll_bit(sc, IWM_CSR_HW_IF_CONFIG_REG,
   1387 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
   1388 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
   1389 	    IWM_HW_READY_TIMEOUT);
   1390 }
   1391 #undef IWM_HW_READY_TIMEOUT
   1392 
   1393 static int
   1394 iwm_prepare_card_hw(struct iwm_softc *sc)
   1395 {
   1396 	int rv = 0;
   1397 	int t = 0;
   1398 
   1399 	if (iwm_set_hw_ready(sc))
   1400 		goto out;
   1401 
   1402 	/* If HW is not ready, prepare the conditions to check again */
   1403 	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
   1404 	    IWM_CSR_HW_IF_CONFIG_REG_PREPARE);
   1405 
   1406 	do {
   1407 		if (iwm_set_hw_ready(sc))
   1408 			goto out;
   1409 		DELAY(200);
   1410 		t += 200;
   1411 	} while (t < 150000);
   1412 
   1413 	rv = ETIMEDOUT;
   1414 
   1415  out:
   1416 	return rv;
   1417 }
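
/*
 * Note (added): iwm_prepare_card_hw() re-checks "HW ready" every 200 us
 * for a total of about 150 ms before giving up with ETIMEDOUT.
 */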
   1418 
   1419 static void
   1420 iwm_apm_config(struct iwm_softc *sc)
   1421 {
   1422 	pcireg_t reg;
   1423 
   1424 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
   1425 	    sc->sc_cap_off + PCIE_LCSR);
   1426 	if (reg & PCIE_LCSR_ASPM_L1) {
    1427 		/* The Linux driver prints "Disabling L0S" for this case ... */
   1428 		IWM_SETBITS(sc, IWM_CSR_GIO_REG,
   1429 		    IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
   1430 	} else {
    1431 		/* ... and "Enabling L0S" here */
   1432 		IWM_CLRBITS(sc, IWM_CSR_GIO_REG,
   1433 		    IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
   1434 	}
   1435 }
   1436 
   1437 /*
   1438  * Start up NIC's basic functionality after it has been reset
   1439  * (e.g. after platform boot, or shutdown via iwm_pcie_apm_stop())
   1440  * NOTE:  This does not load uCode nor start the embedded processor
   1441  */
   1442 static int
   1443 iwm_apm_init(struct iwm_softc *sc)
   1444 {
   1445 	int error = 0;
   1446 
   1447 	DPRINTF(("iwm apm start\n"));
   1448 
   1449 	/* Disable L0S exit timer (platform NMI Work/Around) */
   1450 	IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
   1451 	    IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
   1452 
   1453 	/*
   1454 	 * Disable L0s without affecting L1;
   1455 	 *  don't wait for ICH L0s (ICH bug W/A)
   1456 	 */
   1457 	IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
   1458 	    IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
   1459 
   1460 	/* Set FH wait threshold to maximum (HW error during stress W/A) */
   1461 	IWM_SETBITS(sc, IWM_CSR_DBG_HPET_MEM_REG, IWM_CSR_DBG_HPET_MEM_REG_VAL);
   1462 
   1463 	/*
   1464 	 * Enable HAP INTA (interrupt from management bus) to
   1465 	 * wake device's PCI Express link L1a -> L0s
   1466 	 */
   1467 	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
   1468 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
   1469 
   1470 	iwm_apm_config(sc);
   1471 
   1472 #if 0 /* not for 7k */
   1473 	/* Configure analog phase-lock-loop before activating to D0A */
   1474 	if (trans->cfg->base_params->pll_cfg_val)
   1475 		IWM_SETBITS(trans, IWM_CSR_ANA_PLL_CFG,
   1476 		    trans->cfg->base_params->pll_cfg_val);
   1477 #endif
   1478 
   1479 	/*
   1480 	 * Set "initialization complete" bit to move adapter from
   1481 	 * D0U* --> D0A* (powered-up active) state.
   1482 	 */
   1483 	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL, IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
   1484 
   1485 	/*
   1486 	 * Wait for clock stabilization; once stabilized, access to
   1487 	 * device-internal resources is supported, e.g. iwm_write_prph()
   1488 	 * and accesses to uCode SRAM.
   1489 	 */
   1490 	if (!iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
   1491 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
   1492 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
   1493 		aprint_error_dev(sc->sc_dev,
   1494 		    "timeout waiting for clock stabilization\n");
   1495 		goto out;
   1496 	}
   1497 
   1498 	if (sc->host_interrupt_operation_mode) {
   1499 		/*
    1500 		 * This is a bit of an abuse: the workaround below is needed for
    1501 		 * the 7260 / 3160 only, and we key it off
    1502 		 * host_interrupt_operation_mode even though it is not strictly related.
   1503 		 *
   1504 		 * Enable the oscillator to count wake up time for L1 exit. This
   1505 		 * consumes slightly more power (100uA) - but allows to be sure
   1506 		 * that we wake up from L1 on time.
   1507 		 *
   1508 		 * This looks weird: read twice the same register, discard the
   1509 		 * value, set a bit, and yet again, read that same register
   1510 		 * just to discard the value. But that's the way the hardware
   1511 		 * seems to like it.
   1512 		 */
   1513 		iwm_read_prph(sc, IWM_OSC_CLK);
   1514 		iwm_read_prph(sc, IWM_OSC_CLK);
   1515 		iwm_set_bits_prph(sc, IWM_OSC_CLK, IWM_OSC_CLK_FORCE_CONTROL);
   1516 		iwm_read_prph(sc, IWM_OSC_CLK);
   1517 		iwm_read_prph(sc, IWM_OSC_CLK);
   1518 	}
   1519 
   1520 	/*
   1521 	 * Enable DMA clock and wait for it to stabilize.
   1522 	 *
   1523 	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
   1524 	 * do not disable clocks.  This preserves any hardware bits already
   1525 	 * set by default in "CLK_CTRL_REG" after reset.
   1526 	 */
   1527 	iwm_write_prph(sc, IWM_APMG_CLK_EN_REG, IWM_APMG_CLK_VAL_DMA_CLK_RQT);
   1528 	//kpause("iwmapm", 0, mstohz(20), NULL);
   1529 	DELAY(20);
   1530 
   1531 	/* Disable L1-Active */
   1532 	iwm_set_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
   1533 	    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
   1534 
   1535 	/* Clear the interrupt in APMG if the NIC is in RFKILL */
   1536 	iwm_write_prph(sc, IWM_APMG_RTC_INT_STT_REG,
   1537 	    IWM_APMG_RTC_INT_STT_RFKILL);
   1538 
   1539  out:
   1540 	if (error)
   1541 		aprint_error_dev(sc->sc_dev, "apm init error %d\n", error);
   1542 	return error;
   1543 }
   1544 
   1545 /* iwlwifi/pcie/trans.c */
   1546 static void
   1547 iwm_apm_stop(struct iwm_softc *sc)
   1548 {
   1549 	/* stop device's busmaster DMA activity */
   1550 	IWM_SETBITS(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_STOP_MASTER);
   1551 
   1552 	if (!iwm_poll_bit(sc, IWM_CSR_RESET,
   1553 	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED,
   1554 	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100))
   1555 		aprint_error_dev(sc->sc_dev, "timeout waiting for master\n");
   1556 	DPRINTF(("iwm apm stop\n"));
   1557 }
   1558 
   1559 /* iwlwifi pcie/trans.c */
   1560 static int
   1561 iwm_start_hw(struct iwm_softc *sc)
   1562 {
   1563 	int error;
   1564 
   1565 	if ((error = iwm_prepare_card_hw(sc)) != 0)
   1566 		return error;
   1567 
   1568 	/* Reset the entire device */
   1569 	IWM_WRITE(sc, IWM_CSR_RESET,
   1570 	    IWM_CSR_RESET_REG_FLAG_SW_RESET |
   1571 	    IWM_CSR_RESET_REG_FLAG_NEVO_RESET);
   1572 	DELAY(10);
   1573 
   1574 	if ((error = iwm_apm_init(sc)) != 0)
   1575 		return error;
   1576 
   1577 	iwm_enable_rfkill_int(sc);
   1578 	iwm_check_rfkill(sc);
   1579 
   1580 	return 0;
   1581 }
   1582 
   1583 /* iwlwifi pcie/trans.c */
   1584 
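         /*
          * Full device stop: mask interrupts, halt the TX scheduler and the
          * FH DMA channels, reset the RX and TX rings, power down the
          * busmaster DMA clocks and stop the APM, leaving only the RF-kill
          * interrupt armed so switch changes are still noticed.
          */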
   1585 static void
   1586 iwm_stop_device(struct iwm_softc *sc)
   1587 {
   1588 	int chnl, ntries;
   1589 	int qid;
   1590 
   1591 	/* tell the device to stop sending interrupts */
   1592 	iwm_disable_interrupts(sc);
   1593 
   1594 	/* device going down, Stop using ICT table */
   1595 	sc->sc_flags &= ~IWM_FLAG_USE_ICT;
   1596 
   1597 	/* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */
   1598 
   1599 	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
   1600 
   1601 	/* Stop all DMA channels. */
   1602 	if (iwm_nic_lock(sc)) {
   1603 		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
   1604 			IWM_WRITE(sc,
   1605 			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
   1606 			for (ntries = 0; ntries < 200; ntries++) {
   1607 				uint32_t r;
   1608 
   1609 				r = IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG);
   1610 				if (r & IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(
   1611 				    chnl))
   1612 					break;
   1613 				DELAY(20);
   1614 			}
   1615 		}
   1616 		iwm_nic_unlock(sc);
   1617 	}
   1618 
   1619 	/* Stop RX ring. */
   1620 	iwm_reset_rx_ring(sc, &sc->rxq);
   1621 
   1622 	/* Reset all TX rings. */
   1623 	for (qid = 0; qid < __arraycount(sc->txq); qid++)
   1624 		iwm_reset_tx_ring(sc, &sc->txq[qid]);
   1625 
   1626 	/*
   1627 	 * Power-down device's busmaster DMA clocks
   1628 	 */
   1629 	iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG, IWM_APMG_CLK_VAL_DMA_CLK_RQT);
   1630 	DELAY(5);
   1631 
   1632 	/* Make sure (redundant) we've released our request to stay awake */
   1633 	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
   1634 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
   1635 
   1636 	/* Stop the device, and put it in low power state */
   1637 	iwm_apm_stop(sc);
   1638 
    1639 	/* Upon stop, the APM issues an interrupt if HW RF kill is set,
    1640 	 * so clear the interrupt again here.
    1641 	 */
   1642 	iwm_disable_interrupts(sc);
   1643 	/* stop and reset the on-board processor */
   1644 	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_NEVO_RESET);
   1645 
   1646 	/*
   1647 	 * Even if we stop the HW, we still want the RF kill
   1648 	 * interrupt
   1649 	 */
   1650 	iwm_enable_rfkill_int(sc);
   1651 	iwm_check_rfkill(sc);
   1652 }
   1653 
   1654 /* iwlwifi pcie/trans.c (always main power) */
   1655 static void
   1656 iwm_set_pwr(struct iwm_softc *sc)
   1657 {
   1658 	iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
   1659 	    IWM_APMG_PS_CTRL_VAL_PWR_SRC_VMAIN, ~IWM_APMG_PS_CTRL_MSK_PWR_SRC);
   1660 }
   1661 
   1662 /* iwlwifi: mvm/ops.c */
   1663 static void
   1664 iwm_mvm_nic_config(struct iwm_softc *sc)
   1665 {
   1666 	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
   1667 	uint32_t reg_val = 0;
   1668 
   1669 	radio_cfg_type = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
   1670 	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
   1671 	radio_cfg_step = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
   1672 	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
   1673 	radio_cfg_dash = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
   1674 	    IWM_FW_PHY_CFG_RADIO_DASH_POS;
   1675 
   1676 	/* SKU control */
   1677 	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
   1678 	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
   1679 	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
   1680 	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
   1681 
   1682 	/* radio configuration */
   1683 	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
   1684 	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
   1685 	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
   1686 
   1687 	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);
   1688 
   1689 	DPRINTF(("Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
   1690 	    radio_cfg_step, radio_cfg_dash));
   1691 
   1692 	/*
    1693 	 * W/A: the NIC gets stuck in a reset state after an early PCIe power
    1694 	 * off (PCIe power is lost before PERST# is asserted), causing the ME
    1695 	 * firmware to lose ownership and be unable to obtain it back.
   1696 	 */
   1697 	iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
   1698 	    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
   1699 	    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
   1700 }
   1701 
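         /*
          * Program the RX ring: stop RX DMA, load the descriptor and status
          * base addresses, enable the DMA channel for 4KB receive buffers
          * and set the interrupt coalescing timer.
          */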
   1702 static int
   1703 iwm_nic_rx_init(struct iwm_softc *sc)
   1704 {
   1705 	if (!iwm_nic_lock(sc))
   1706 		return EBUSY;
   1707 
   1708 	/*
   1709 	 * Initialize RX ring.  This is from the iwn driver.
   1710 	 */
   1711 	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
   1712 
   1713 	/* stop DMA */
   1714 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
   1715 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
   1716 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
   1717 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
   1718 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
   1719 
   1720 	/* Set physical address of RX ring (256-byte aligned). */
   1721 	IWM_WRITE(sc,
   1722 	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);
   1723 
   1724 	/* Set physical address of RX status (16-byte aligned). */
   1725 	IWM_WRITE(sc,
   1726 	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);
   1727 
   1728 	/* Enable RX. */
   1729 	/*
   1730 	 * Note: Linux driver also sets this:
   1731 	 *  (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
   1732 	 *
   1733 	 * It causes weird behavior.  YMMV.
   1734 	 */
   1735 	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
   1736 	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL		|
   1737 	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY		|  /* HW bug */
   1738 	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL	|
   1739 	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K		|
   1740 	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);
   1741 
   1742 	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
   1743 
   1744 	/* W/A for interrupt coalescing bug in 7260 and 3160 */
   1745 	if (sc->host_interrupt_operation_mode)
   1746 		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);
   1747 
   1748 	/*
   1749 	 * Thus sayeth el jefe (iwlwifi) via a comment:
   1750 	 *
    1751 	 * This value should initially be 0 (before preparing any RBs),
    1752 	 * and should be 8 after preparing the first 8 RBs (for example).
   1753 	 */
   1754 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);
   1755 
   1756 	iwm_nic_unlock(sc);
   1757 
   1758 	return 0;
   1759 }
   1760 
   1761 static int
   1762 iwm_nic_tx_init(struct iwm_softc *sc)
   1763 {
   1764 	int qid;
   1765 
   1766 	if (!iwm_nic_lock(sc))
   1767 		return EBUSY;
   1768 
   1769 	/* Deactivate TX scheduler. */
   1770 	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
   1771 
   1772 	/* Set physical address of "keep warm" page (16-byte aligned). */
   1773 	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);
   1774 
   1775 	/* Initialize TX rings. */
   1776 	for (qid = 0; qid < __arraycount(sc->txq); qid++) {
   1777 		struct iwm_tx_ring *txq = &sc->txq[qid];
   1778 
   1779 		/* Set physical address of TX ring (256-byte aligned). */
   1780 		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
   1781 		    txq->desc_dma.paddr >> 8);
   1782 		DPRINTF(("loading ring %d descriptors (%p) at %"PRIxMAX"\n",
   1783 		    qid, txq->desc, (uintmax_t)(txq->desc_dma.paddr >> 8)));
   1784 	}
   1785 	iwm_nic_unlock(sc);
   1786 
   1787 	return 0;
   1788 }
   1789 
   1790 static int
   1791 iwm_nic_init(struct iwm_softc *sc)
   1792 {
   1793 	int error;
   1794 
   1795 	iwm_apm_init(sc);
   1796 	iwm_set_pwr(sc);
   1797 
   1798 	iwm_mvm_nic_config(sc);
   1799 
   1800 	if ((error = iwm_nic_rx_init(sc)) != 0)
   1801 		return error;
   1802 
   1803 	/*
   1804 	 * Ditto for TX, from iwn
   1805 	 */
   1806 	if ((error = iwm_nic_tx_init(sc)) != 0)
   1807 		return error;
   1808 
   1809 	DPRINTF(("shadow registers enabled\n"));
   1810 	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
   1811 
   1812 	return 0;
   1813 }
   1814 
   1815 #if 0
   1816 enum iwm_mvm_tx_fifo {
   1817 	IWM_MVM_TX_FIFO_BK = 0,
   1818 	IWM_MVM_TX_FIFO_BE,
   1819 	IWM_MVM_TX_FIFO_VI,
   1820 	IWM_MVM_TX_FIFO_VO,
   1821 	IWM_MVM_TX_FIFO_MCAST = 5,
   1822 };
   1823 
   1824 static const uint8_t iwm_mvm_ac_to_tx_fifo[] = {
   1825 	IWM_MVM_TX_FIFO_VO,
   1826 	IWM_MVM_TX_FIFO_VI,
   1827 	IWM_MVM_TX_FIFO_BE,
   1828 	IWM_MVM_TX_FIFO_BK,
   1829 };
   1830 #endif
   1831 
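         /*
          * Activate a TX queue in the scheduler and bind it to a FIFO:
          * deactivate it first, reset its read/write pointers, program the
          * scheduler window size and frame limit in SRAM, and finally mark
          * the queue active on the requested FIFO.
          */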
   1832 static void
   1833 iwm_enable_txq(struct iwm_softc *sc, int qid, int fifo)
   1834 {
   1835 	if (!iwm_nic_lock(sc)) {
   1836 		DPRINTF(("%s: cannot enable txq %d\n", DEVNAME(sc), qid));
   1837 		return; /* XXX return EBUSY */
   1838 	}
   1839 
    1840 	/* deactivate before configuration */
   1841 	iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
   1842 	    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
   1843 	    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
   1844 
   1845 	if (qid != IWM_MVM_CMD_QUEUE) {
   1846 		iwm_set_bits_prph(sc, IWM_SCD_QUEUECHAIN_SEL, (1 << qid));
   1847 	}
   1848 
   1849 	iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));
   1850 
   1851 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
   1852 	iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
   1853 
   1854 	iwm_write_mem32(sc, sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
   1855 	/* Set scheduler window size and frame limit. */
   1856 	iwm_write_mem32(sc,
   1857 	    sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
   1858 	    sizeof(uint32_t),
   1859 	    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
   1860 	    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
   1861 	    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
   1862 	    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
   1863 
   1864 	iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
   1865 	    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
   1866 	    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
   1867 	    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
   1868 	    IWM_SCD_QUEUE_STTS_REG_MSK);
   1869 
   1870 	iwm_nic_unlock(sc);
   1871 
   1872 	DPRINTF(("enabled txq %d FIFO %d\n", qid, fifo));
   1873 }
   1874 
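         /*
          * Finish bringing the NIC up once the firmware has reported ALIVE:
          * verify the scheduler SRAM base address, reset the ICT table,
          * clear the scheduler context area, enable the command queue and
          * the FH TX DMA channels, and re-enable L1-Active.
          */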
   1875 static int
   1876 iwm_post_alive(struct iwm_softc *sc)
   1877 {
   1878 	int nwords;
   1879 	int error, chnl;
   1880 
   1881 	if (!iwm_nic_lock(sc))
   1882 		return EBUSY;
   1883 
   1884 	if (sc->sched_base != iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR)) {
   1885 		DPRINTF(("%s: sched addr mismatch\n", DEVNAME(sc)));
   1886 		error = EINVAL;
   1887 		goto out;
   1888 	}
   1889 
   1890 	iwm_ict_reset(sc);
   1891 
   1892 	/* Clear TX scheduler state in SRAM. */
   1893 	nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
   1894 	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND)
   1895 	    / sizeof(uint32_t);
   1896 	error = iwm_write_mem(sc,
   1897 	    sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
   1898 	    NULL, nwords);
   1899 	if (error)
   1900 		goto out;
   1901 
   1902 	/* Set physical address of TX scheduler rings (1KB aligned). */
   1903 	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);
   1904 
   1905 	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);
   1906 
   1907 	/* enable command channel */
   1908 	iwm_enable_txq(sc, IWM_MVM_CMD_QUEUE, 7);
   1909 
   1910 	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
   1911 
   1912 	/* Enable DMA channels. */
   1913 	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
   1914 		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
   1915 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
   1916 		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
   1917 	}
   1918 
   1919 	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
   1920 	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
   1921 
   1922 	/* Enable L1-Active */
   1923 	iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
   1924 	    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
   1925 
   1926  out:
    1927 	iwm_nic_unlock(sc);
   1928 	return error;
   1929 }
   1930 
   1931 /*
   1932  * PHY db
   1933  * iwlwifi/iwl-phy-db.c
   1934  */
   1935 
   1936 /*
   1937  * BEGIN iwl-phy-db.c
   1938  */
   1939 
   1940 enum iwm_phy_db_section_type {
   1941 	IWM_PHY_DB_CFG = 1,
   1942 	IWM_PHY_DB_CALIB_NCH,
   1943 	IWM_PHY_DB_UNUSED,
   1944 	IWM_PHY_DB_CALIB_CHG_PAPD,
   1945 	IWM_PHY_DB_CALIB_CHG_TXP,
   1946 	IWM_PHY_DB_MAX
   1947 };
   1948 
   1949 #define IWM_PHY_DB_CMD 0x6c /* TEMP API - The actual is 0x8c */
   1950 
   1951 /*
   1952  * phy db - configure operational ucode
   1953  */
   1954 struct iwm_phy_db_cmd {
   1955 	uint16_t type;
   1956 	uint16_t length;
   1957 	uint8_t data[];
   1958 } __packed;
   1959 
    1960 /* For parsing TX power channel group data that comes from the firmware */
   1961 struct iwm_phy_db_chg_txp {
   1962 	uint32_t space;
   1963 	uint16_t max_channel_idx;
   1964 } __packed;
   1965 
   1966 /*
   1967  * phy db - Receive phy db chunk after calibrations
   1968  */
   1969 struct iwm_calib_res_notif_phy_db {
   1970 	uint16_t type;
   1971 	uint16_t length;
   1972 	uint8_t data[];
   1973 } __packed;
   1974 
   1975 /*
   1976  * get phy db section: returns a pointer to a phy db section specified by
   1977  * type and channel group id.
   1978  */
   1979 static struct iwm_phy_db_entry *
   1980 iwm_phy_db_get_section(struct iwm_softc *sc,
   1981 	enum iwm_phy_db_section_type type, uint16_t chg_id)
   1982 {
   1983 	struct iwm_phy_db *phy_db = &sc->sc_phy_db;
   1984 
   1985 	if (type >= IWM_PHY_DB_MAX)
   1986 		return NULL;
   1987 
   1988 	switch (type) {
   1989 	case IWM_PHY_DB_CFG:
   1990 		return &phy_db->cfg;
   1991 	case IWM_PHY_DB_CALIB_NCH:
   1992 		return &phy_db->calib_nch;
   1993 	case IWM_PHY_DB_CALIB_CHG_PAPD:
   1994 		if (chg_id >= IWM_NUM_PAPD_CH_GROUPS)
   1995 			return NULL;
   1996 		return &phy_db->calib_ch_group_papd[chg_id];
   1997 	case IWM_PHY_DB_CALIB_CHG_TXP:
   1998 		if (chg_id >= IWM_NUM_TXP_CH_GROUPS)
   1999 			return NULL;
   2000 		return &phy_db->calib_ch_group_txp[chg_id];
   2001 	default:
   2002 		return NULL;
   2003 	}
   2004 	return NULL;
   2005 }
   2006 
   2007 static int
   2008 iwm_phy_db_set_section(struct iwm_softc *sc,
   2009     struct iwm_calib_res_notif_phy_db *phy_db_notif, uint16_t size)
   2010 {
   2011 	enum iwm_phy_db_section_type type = le16toh(phy_db_notif->type);
   2012 	struct iwm_phy_db_entry *entry;
   2013 	uint16_t chg_id = 0;
   2014 
   2015 	if (type == IWM_PHY_DB_CALIB_CHG_PAPD ||
   2016 	    type == IWM_PHY_DB_CALIB_CHG_TXP)
   2017 		chg_id = le16toh(*(uint16_t *)phy_db_notif->data);
   2018 
   2019 	entry = iwm_phy_db_get_section(sc, type, chg_id);
   2020 	if (!entry)
   2021 		return EINVAL;
   2022 
   2023 	if (entry->data)
   2024 		kmem_intr_free(entry->data, entry->size);
   2025 	entry->data = kmem_intr_alloc(size, KM_NOSLEEP);
   2026 	if (!entry->data) {
   2027 		entry->size = 0;
   2028 		return ENOMEM;
   2029 	}
   2030 	memcpy(entry->data, phy_db_notif->data, size);
   2031 	entry->size = size;
   2032 
    2033 	DPRINTFN(10, ("%s(%d): [PHYDB] SET: Type %d, Size: %d, data: %p\n",
   2034 	    __func__, __LINE__, type, size, entry->data));
   2035 
   2036 	return 0;
   2037 }
   2038 
   2039 static int
   2040 iwm_is_valid_channel(uint16_t ch_id)
   2041 {
   2042 	if (ch_id <= 14 ||
   2043 	    (36 <= ch_id && ch_id <= 64 && ch_id % 4 == 0) ||
   2044 	    (100 <= ch_id && ch_id <= 140 && ch_id % 4 == 0) ||
   2045 	    (145 <= ch_id && ch_id <= 165 && ch_id % 4 == 1))
   2046 		return 1;
   2047 	return 0;
   2048 }
   2049 
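         /*
          * Map a channel number to its index in the NVM channel table: the
          * 2GHz channels 1-14 occupy indices 0-13 and the 5GHz channels
          * (36-64, 100-140 and 145-165) follow contiguously, e.g. channel 36
          * maps to index 14 and channel 165 to index 38, as computed below.
          */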
   2050 static uint8_t
   2051 iwm_ch_id_to_ch_index(uint16_t ch_id)
   2052 {
   2053 	if (!iwm_is_valid_channel(ch_id))
   2054 		return 0xff;
   2055 
   2056 	if (ch_id <= 14)
   2057 		return ch_id - 1;
   2058 	if (ch_id <= 64)
   2059 		return (ch_id + 20) / 4;
   2060 	if (ch_id <= 140)
   2061 		return (ch_id - 12) / 4;
   2062 	return (ch_id - 13) / 4;
   2063 }
   2064 
   2065 
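         /*
          * Map a channel number to its PAPD calibration channel group:
          * group 0 covers the 2GHz channels 1-14, and groups 1-3 cover the
          * 5GHz sub-bands 36-64, 100-140 and 145-165 respectively.
          */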
   2066 static uint16_t
   2067 iwm_channel_id_to_papd(uint16_t ch_id)
   2068 {
   2069 	if (!iwm_is_valid_channel(ch_id))
   2070 		return 0xff;
   2071 
   2072 	if (1 <= ch_id && ch_id <= 14)
   2073 		return 0;
   2074 	if (36 <= ch_id && ch_id <= 64)
   2075 		return 1;
   2076 	if (100 <= ch_id && ch_id <= 140)
   2077 		return 2;
   2078 	return 3;
   2079 }
   2080 
   2081 static uint16_t
   2082 iwm_channel_id_to_txp(struct iwm_softc *sc, uint16_t ch_id)
   2083 {
   2084 	struct iwm_phy_db *phy_db = &sc->sc_phy_db;
   2085 	struct iwm_phy_db_chg_txp *txp_chg;
   2086 	int i;
   2087 	uint8_t ch_index = iwm_ch_id_to_ch_index(ch_id);
   2088 
   2089 	if (ch_index == 0xff)
   2090 		return 0xff;
   2091 
   2092 	for (i = 0; i < IWM_NUM_TXP_CH_GROUPS; i++) {
   2093 		txp_chg = (void *)phy_db->calib_ch_group_txp[i].data;
   2094 		if (!txp_chg)
   2095 			return 0xff;
   2096 		/*
    2097 		 * Look for the first channel group whose max channel is
    2098 		 * higher than the wanted channel.
   2099 		 */
   2100 		if (le16toh(txp_chg->max_channel_idx) >= ch_index)
   2101 			return i;
   2102 	}
   2103 	return 0xff;
   2104 }
   2105 
   2106 static int
   2107 iwm_phy_db_get_section_data(struct iwm_softc *sc,
   2108 	uint32_t type, uint8_t **data, uint16_t *size, uint16_t ch_id)
   2109 {
   2110 	struct iwm_phy_db_entry *entry;
   2111 	uint16_t ch_group_id = 0;
   2112 
   2113 	/* find wanted channel group */
   2114 	if (type == IWM_PHY_DB_CALIB_CHG_PAPD)
   2115 		ch_group_id = iwm_channel_id_to_papd(ch_id);
   2116 	else if (type == IWM_PHY_DB_CALIB_CHG_TXP)
   2117 		ch_group_id = iwm_channel_id_to_txp(sc, ch_id);
   2118 
   2119 	entry = iwm_phy_db_get_section(sc, type, ch_group_id);
   2120 	if (!entry)
   2121 		return EINVAL;
   2122 
   2123 	*data = entry->data;
   2124 	*size = entry->size;
   2125 
   2126 	DPRINTFN(10, ("%s(%d): [PHYDB] GET: Type %d , Size: %d\n",
   2127 		       __func__, __LINE__, type, *size));
   2128 
   2129 	return 0;
   2130 }
   2131 
   2132 static int
   2133 iwm_send_phy_db_cmd(struct iwm_softc *sc, uint16_t type,
   2134 	uint16_t length, void *data)
   2135 {
   2136 	struct iwm_phy_db_cmd phy_db_cmd;
   2137 	struct iwm_host_cmd cmd = {
   2138 		.id = IWM_PHY_DB_CMD,
   2139 		.flags = IWM_CMD_SYNC,
   2140 	};
   2141 
   2142 	DPRINTFN(10, ("Sending PHY-DB hcmd of type %d, of length %d\n",
   2143 	    type, length));
   2144 
   2145 	/* Set phy db cmd variables */
   2146 	phy_db_cmd.type = le16toh(type);
   2147 	phy_db_cmd.length = le16toh(length);
   2148 
   2149 	/* Set hcmd variables */
   2150 	cmd.data[0] = &phy_db_cmd;
   2151 	cmd.len[0] = sizeof(struct iwm_phy_db_cmd);
   2152 	cmd.data[1] = data;
   2153 	cmd.len[1] = length;
   2154 	cmd.dataflags[1] = IWM_HCMD_DFL_NOCOPY;
   2155 
   2156 	return iwm_send_cmd(sc, &cmd);
   2157 }
   2158 
   2159 static int
   2160 iwm_phy_db_send_all_channel_groups(struct iwm_softc *sc,
   2161 	enum iwm_phy_db_section_type type, uint8_t max_ch_groups)
   2162 {
   2163 	uint16_t i;
   2164 	int err;
   2165 	struct iwm_phy_db_entry *entry;
   2166 
   2167 	/* Send all the channel-specific groups to operational fw */
   2168 	for (i = 0; i < max_ch_groups; i++) {
   2169 		entry = iwm_phy_db_get_section(sc, type, i);
   2170 		if (!entry)
   2171 			return EINVAL;
   2172 
   2173 		if (!entry->size)
   2174 			continue;
   2175 
   2176 		/* Send the requested PHY DB section */
   2177 		err = iwm_send_phy_db_cmd(sc, type, entry->size, entry->data);
   2178 		if (err) {
   2179 			DPRINTF(("%s: Can't SEND phy_db section %d (%d), "
   2180 			    "err %d\n", DEVNAME(sc), type, i, err));
   2181 			return err;
   2182 		}
   2183 
   2184 		DPRINTFN(10, ("Sent PHY_DB HCMD, type = %d num = %d\n", type, i));
   2185 	}
   2186 
   2187 	return 0;
   2188 }
   2189 
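         /*
          * Push the cached PHY DB to the runtime firmware: first the CFG and
          * non-channel-specific calibration sections, then all per-channel
          * PAPD and TX power groups.
          */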
   2190 static int
   2191 iwm_send_phy_db_data(struct iwm_softc *sc)
   2192 {
   2193 	uint8_t *data = NULL;
   2194 	uint16_t size = 0;
   2195 	int err;
   2196 
   2197 	DPRINTF(("Sending phy db data and configuration to runtime image\n"));
   2198 
   2199 	/* Send PHY DB CFG section */
   2200 	err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CFG, &data, &size, 0);
   2201 	if (err) {
   2202 		DPRINTF(("%s: Cannot get Phy DB cfg section, %d\n",
   2203 		    DEVNAME(sc), err));
   2204 		return err;
   2205 	}
   2206 
   2207 	err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CFG, size, data);
   2208 	if (err) {
   2209 		DPRINTF(("%s: Cannot send HCMD of Phy DB cfg section, %d\n",
   2210 		    DEVNAME(sc), err));
   2211 		return err;
   2212 	}
   2213 
   2214 	err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CALIB_NCH,
   2215 	    &data, &size, 0);
   2216 	if (err) {
   2217 		DPRINTF(("%s: Cannot get Phy DB non specific channel section, "
   2218 		    "%d\n", DEVNAME(sc), err));
   2219 		return err;
   2220 	}
   2221 
   2222 	err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CALIB_NCH, size, data);
   2223 	if (err) {
   2224 		DPRINTF(("%s: Cannot send HCMD of Phy DB non specific channel "
   2225 		    "sect, %d\n", DEVNAME(sc), err));
   2226 		return err;
   2227 	}
   2228 
   2229 	/* Send all the TXP channel specific data */
   2230 	err = iwm_phy_db_send_all_channel_groups(sc,
   2231 	    IWM_PHY_DB_CALIB_CHG_PAPD, IWM_NUM_PAPD_CH_GROUPS);
   2232 	if (err) {
   2233 		DPRINTF(("%s: Cannot send channel specific PAPD groups, %d\n",
   2234 		    DEVNAME(sc), err));
   2235 		return err;
   2236 	}
   2237 
   2238 	/* Send all the TXP channel specific data */
   2239 	err = iwm_phy_db_send_all_channel_groups(sc,
   2240 	    IWM_PHY_DB_CALIB_CHG_TXP, IWM_NUM_TXP_CH_GROUPS);
   2241 	if (err) {
   2242 		DPRINTF(("%s: Cannot send channel specific TX power groups, "
   2243 		    "%d\n", DEVNAME(sc), err));
   2244 		return err;
   2245 	}
   2246 
   2247 	DPRINTF(("Finished sending phy db non channel data\n"));
   2248 	return 0;
   2249 }
   2250 
   2251 /*
   2252  * END iwl-phy-db.c
   2253  */
   2254 
   2255 /*
   2256  * BEGIN iwlwifi/mvm/time-event.c
   2257  */
   2258 
   2259 /*
   2260  * For the high priority TE use a time event type that has similar priority to
   2261  * the FW's action scan priority.
   2262  */
   2263 #define IWM_MVM_ROC_TE_TYPE_NORMAL IWM_TE_P2P_DEVICE_DISCOVERABLE
   2264 #define IWM_MVM_ROC_TE_TYPE_MGMT_TX IWM_TE_P2P_CLIENT_ASSOC
   2265 
   2266 /* used to convert from time event API v2 to v1 */
   2267 #define IWM_TE_V2_DEP_POLICY_MSK (IWM_TE_V2_DEP_OTHER | IWM_TE_V2_DEP_TSF |\
   2268 			     IWM_TE_V2_EVENT_SOCIOPATHIC)
   2269 static inline uint16_t
   2270 iwm_te_v2_get_notify(uint16_t policy)
   2271 {
   2272 	return le16toh(policy) & IWM_TE_V2_NOTIF_MSK;
   2273 }
   2274 
   2275 static inline uint16_t
   2276 iwm_te_v2_get_dep_policy(uint16_t policy)
   2277 {
   2278 	return (le16toh(policy) & IWM_TE_V2_DEP_POLICY_MSK) >>
   2279 		IWM_TE_V2_PLACEMENT_POS;
   2280 }
   2281 
   2282 static inline uint16_t
   2283 iwm_te_v2_get_absence(uint16_t policy)
   2284 {
   2285 	return (le16toh(policy) & IWM_TE_V2_ABSENCE) >> IWM_TE_V2_ABSENCE_POS;
   2286 }
   2287 
   2288 static void
   2289 iwm_mvm_te_v2_to_v1(const struct iwm_time_event_cmd_v2 *cmd_v2,
   2290 	struct iwm_time_event_cmd_v1 *cmd_v1)
   2291 {
   2292 	cmd_v1->id_and_color = cmd_v2->id_and_color;
   2293 	cmd_v1->action = cmd_v2->action;
   2294 	cmd_v1->id = cmd_v2->id;
   2295 	cmd_v1->apply_time = cmd_v2->apply_time;
   2296 	cmd_v1->max_delay = cmd_v2->max_delay;
   2297 	cmd_v1->depends_on = cmd_v2->depends_on;
   2298 	cmd_v1->interval = cmd_v2->interval;
   2299 	cmd_v1->duration = cmd_v2->duration;
   2300 	if (cmd_v2->repeat == IWM_TE_V2_REPEAT_ENDLESS)
   2301 		cmd_v1->repeat = htole32(IWM_TE_V1_REPEAT_ENDLESS);
   2302 	else
   2303 		cmd_v1->repeat = htole32(cmd_v2->repeat);
   2304 	cmd_v1->max_frags = htole32(cmd_v2->max_frags);
   2305 	cmd_v1->interval_reciprocal = 0; /* unused */
   2306 
   2307 	cmd_v1->dep_policy = htole32(iwm_te_v2_get_dep_policy(cmd_v2->policy));
   2308 	cmd_v1->is_present = htole32(!iwm_te_v2_get_absence(cmd_v2->policy));
   2309 	cmd_v1->notify = htole32(iwm_te_v2_get_notify(cmd_v2->policy));
   2310 }
   2311 
   2312 static int
   2313 iwm_mvm_send_time_event_cmd(struct iwm_softc *sc,
   2314 	const struct iwm_time_event_cmd_v2 *cmd)
   2315 {
   2316 	struct iwm_time_event_cmd_v1 cmd_v1;
   2317 
   2318 	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_TIME_EVENT_API_V2)
   2319 		return iwm_mvm_send_cmd_pdu(sc, IWM_TIME_EVENT_CMD,
   2320 		    IWM_CMD_SYNC, sizeof(*cmd), cmd);
   2321 
   2322 	iwm_mvm_te_v2_to_v1(cmd, &cmd_v1);
   2323 	return iwm_mvm_send_cmd_pdu(sc, IWM_TIME_EVENT_CMD, IWM_CMD_SYNC,
   2324 	    sizeof(cmd_v1), &cmd_v1);
   2325 }
   2326 
   2327 static int
   2328 iwm_mvm_time_event_send_add(struct iwm_softc *sc, struct iwm_node *in,
   2329 	void *te_data, struct iwm_time_event_cmd_v2 *te_cmd)
   2330 {
   2331 	int ret;
   2332 
   2333 	DPRINTF(("Add new TE, duration %d TU\n", le32toh(te_cmd->duration)));
   2334 
   2335 	ret = iwm_mvm_send_time_event_cmd(sc, te_cmd);
   2336 	if (ret) {
   2337 		DPRINTF(("%s: Couldn't send IWM_TIME_EVENT_CMD: %d\n",
   2338 		    DEVNAME(sc), ret));
   2339 	}
   2340 
   2341 	return ret;
   2342 }
   2343 
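         /*
          * Schedule a session-protection time event
          * (IWM_TE_BSS_STA_AGGRESSIVE_ASSOC) of the given duration,
          * presumably so the firmware keeps the device available while
          * an association completes.
          */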
   2344 static void
   2345 iwm_mvm_protect_session(struct iwm_softc *sc, struct iwm_node *in,
   2346 	uint32_t duration, uint32_t min_duration, uint32_t max_delay)
   2347 {
   2348 	struct iwm_time_event_cmd_v2 time_cmd;
   2349 
   2350 	memset(&time_cmd, 0, sizeof(time_cmd));
   2351 
   2352 	time_cmd.action = htole32(IWM_FW_CTXT_ACTION_ADD);
   2353 	time_cmd.id_and_color =
   2354 	    htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
   2355 	time_cmd.id = htole32(IWM_TE_BSS_STA_AGGRESSIVE_ASSOC);
   2356 
   2357 	time_cmd.apply_time = htole32(iwm_read_prph(sc,
   2358 	    IWM_DEVICE_SYSTEM_TIME_REG));
   2359 
   2360 	time_cmd.max_frags = IWM_TE_V2_FRAG_NONE;
   2361 	time_cmd.max_delay = htole32(max_delay);
    2362 	/* TODO: why do we need to set interval = beacon interval if it is not periodic? */
   2363 	time_cmd.interval = htole32(1);
   2364 	time_cmd.duration = htole32(duration);
   2365 	time_cmd.repeat = 1;
   2366 	time_cmd.policy
   2367 	    = htole32(IWM_TE_V2_NOTIF_HOST_EVENT_START |
   2368 	        IWM_TE_V2_NOTIF_HOST_EVENT_END);
   2369 
   2370 	iwm_mvm_time_event_send_add(sc, in, /*te_data*/NULL, &time_cmd);
   2371 }
   2372 
   2373 /*
   2374  * END iwlwifi/mvm/time-event.c
   2375  */
   2376 
   2377 /*
   2378  * NVM read access and content parsing.  We do not support
   2379  * external NVM or writing NVM.
   2380  * iwlwifi/mvm/nvm.c
   2381  */
   2382 
   2383 /* list of NVM sections we are allowed/need to read */
   2384 static const int nvm_to_read[] = {
   2385 	IWM_NVM_SECTION_TYPE_HW,
   2386 	IWM_NVM_SECTION_TYPE_SW,
   2387 	IWM_NVM_SECTION_TYPE_CALIBRATION,
   2388 	IWM_NVM_SECTION_TYPE_PRODUCTION,
   2389 };
   2390 
   2391 /* Default NVM size to read */
   2392 #define IWM_NVM_DEFAULT_CHUNK_SIZE (2*1024)
   2393 #define IWM_MAX_NVM_SECTION_SIZE 7000
   2394 
   2395 #define IWM_NVM_WRITE_OPCODE 1
   2396 #define IWM_NVM_READ_OPCODE 0
   2397 
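         /*
          * Read one chunk of an NVM section with IWM_NVM_ACCESS_CMD.  The
          * response carries a status word, the number of bytes actually read
          * and the offset that was read, which are checked before copying
          * the payload into the caller's buffer.
          */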
   2398 static int
   2399 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
   2400 	uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
   2401 {
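         	/*
         	 * Note: the offset passed in by the caller is overridden with 0
         	 * below, so every chunk request is issued for the start of the
         	 * section.
         	 */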
   2402 	offset = 0;
   2403 	struct iwm_nvm_access_cmd nvm_access_cmd = {
   2404 		.offset = htole16(offset),
   2405 		.length = htole16(length),
   2406 		.type = htole16(section),
   2407 		.op_code = IWM_NVM_READ_OPCODE,
   2408 	};
   2409 	struct iwm_nvm_access_resp *nvm_resp;
   2410 	struct iwm_rx_packet *pkt;
   2411 	struct iwm_host_cmd cmd = {
   2412 		.id = IWM_NVM_ACCESS_CMD,
   2413 		.flags = IWM_CMD_SYNC | IWM_CMD_WANT_SKB |
   2414 		    IWM_CMD_SEND_IN_RFKILL,
   2415 		.data = { &nvm_access_cmd, },
   2416 	};
   2417 	int ret, bytes_read, offset_read;
   2418 	uint8_t *resp_data;
   2419 
   2420 	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
   2421 
   2422 	ret = iwm_send_cmd(sc, &cmd);
   2423 	if (ret)
   2424 		return ret;
   2425 
   2426 	pkt = cmd.resp_pkt;
   2427 	if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
    2428 		DPRINTF(("%s: Bad return from IWM_NVM_ACCESS_CMD (0x%08X)\n",
   2429 		    DEVNAME(sc), pkt->hdr.flags));
   2430 		ret = EIO;
   2431 		goto exit;
   2432 	}
   2433 
   2434 	/* Extract NVM response */
   2435 	nvm_resp = (void *)pkt->data;
   2436 
   2437 	ret = le16toh(nvm_resp->status);
   2438 	bytes_read = le16toh(nvm_resp->length);
   2439 	offset_read = le16toh(nvm_resp->offset);
   2440 	resp_data = nvm_resp->data;
   2441 	if (ret) {
   2442 		DPRINTF(("%s: NVM access command failed with status %d\n",
   2443 		    DEVNAME(sc), ret));
   2444 		ret = EINVAL;
   2445 		goto exit;
   2446 	}
   2447 
   2448 	if (offset_read != offset) {
   2449 		DPRINTF(("%s: NVM ACCESS response with invalid offset %d\n",
   2450 		    DEVNAME(sc), offset_read));
   2451 		ret = EINVAL;
   2452 		goto exit;
   2453 	}
   2454 
   2455 	memcpy(data + offset, resp_data, bytes_read);
   2456 	*len = bytes_read;
   2457 
   2458  exit:
   2459 	iwm_free_resp(sc, &cmd);
   2460 	return ret;
   2461 }
   2462 
   2463 /*
   2464  * Reads an NVM section completely.
    2465  * NICs prior to the 7000 family don't have a real NVM, but just read
    2466  * section 0, which is the EEPROM.  Because EEPROM reads are not limited
    2467  * by the uCode, we need to manually check in this case that we don't
    2468  * overflow and try to read more than the EEPROM size.
    2469  * For 7000 family NICs, we supply the maximal size we can read, and
    2470  * the uCode fills the response with as much data as it can
    2471  * without overflowing, so no check is needed.
   2472  */
   2473 static int
   2474 iwm_nvm_read_section(struct iwm_softc *sc,
   2475 	uint16_t section, uint8_t *data, uint16_t *len)
   2476 {
   2477 	uint16_t length, seglen;
   2478 	int error;
   2479 
   2480 	/* Set nvm section read length */
   2481 	length = seglen = IWM_NVM_DEFAULT_CHUNK_SIZE;
   2482 	*len = 0;
   2483 
   2484 	/* Read the NVM until exhausted (reading less than requested) */
   2485 	while (seglen == length) {
   2486 		error = iwm_nvm_read_chunk(sc,
   2487 		    section, *len, length, data, &seglen);
   2488 		if (error) {
   2489 			aprint_error_dev(sc->sc_dev,
   2490 			    "Cannot read NVM from section %d offset %d, "
   2491 			    "length %d\n", section, *len, length);
   2492 			return error;
   2493 		}
   2494 		*len += seglen;
   2495 	}
   2496 
   2497 	DPRINTFN(4, ("NVM section %d read completed\n", section));
   2498 	return 0;
   2499 }
   2500 
   2501 /*
   2502  * BEGIN IWM_NVM_PARSE
   2503  */
   2504 
   2505 /* iwlwifi/iwl-nvm-parse.c */
   2506 
   2507 /* NVM offsets (in words) definitions */
   2508 enum wkp_nvm_offsets {
   2509 	/* NVM HW-Section offset (in words) definitions */
   2510 	IWM_HW_ADDR = 0x15,
   2511 
   2512 /* NVM SW-Section offset (in words) definitions */
   2513 	IWM_NVM_SW_SECTION = 0x1C0,
   2514 	IWM_NVM_VERSION = 0,
   2515 	IWM_RADIO_CFG = 1,
   2516 	IWM_SKU = 2,
   2517 	IWM_N_HW_ADDRS = 3,
   2518 	IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,
   2519 
   2520 /* NVM calibration section offset (in words) definitions */
   2521 	IWM_NVM_CALIB_SECTION = 0x2B8,
   2522 	IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
   2523 };
   2524 
   2525 /* SKU Capabilities (actual values from NVM definition) */
   2526 enum nvm_sku_bits {
   2527 	IWM_NVM_SKU_CAP_BAND_24GHZ	= (1 << 0),
   2528 	IWM_NVM_SKU_CAP_BAND_52GHZ	= (1 << 1),
   2529 	IWM_NVM_SKU_CAP_11N_ENABLE	= (1 << 2),
   2530 	IWM_NVM_SKU_CAP_11AC_ENABLE	= (1 << 3),
   2531 };
   2532 
   2533 /* radio config bits (actual values from NVM definition) */
   2534 #define IWM_NVM_RF_CFG_DASH_MSK(x)   (x & 0x3)         /* bits 0-1   */
   2535 #define IWM_NVM_RF_CFG_STEP_MSK(x)   ((x >> 2)  & 0x3) /* bits 2-3   */
   2536 #define IWM_NVM_RF_CFG_TYPE_MSK(x)   ((x >> 4)  & 0x3) /* bits 4-5   */
   2537 #define IWM_NVM_RF_CFG_PNUM_MSK(x)   ((x >> 6)  & 0x3) /* bits 6-7   */
   2538 #define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8)  & 0xF) /* bits 8-11  */
   2539 #define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
   2540 
   2541 #define DEFAULT_MAX_TX_POWER 16
   2542 
   2543 /**
   2544  * enum iwm_nvm_channel_flags - channel flags in NVM
   2545  * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
   2546  * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
   2547  * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
   2548  * @IWM_NVM_CHANNEL_RADAR: radar detection required
   2549  * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
   2550  * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
   2551  * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
   2552  * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
   2553  * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
   2554  */
   2555 enum iwm_nvm_channel_flags {
   2556 	IWM_NVM_CHANNEL_VALID = (1 << 0),
   2557 	IWM_NVM_CHANNEL_IBSS = (1 << 1),
   2558 	IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
   2559 	IWM_NVM_CHANNEL_RADAR = (1 << 4),
   2560 	IWM_NVM_CHANNEL_DFS = (1 << 7),
   2561 	IWM_NVM_CHANNEL_WIDE = (1 << 8),
   2562 	IWM_NVM_CHANNEL_40MHZ = (1 << 9),
   2563 	IWM_NVM_CHANNEL_80MHZ = (1 << 10),
   2564 	IWM_NVM_CHANNEL_160MHZ = (1 << 11),
   2565 };
   2566 
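         /*
          * Build the net80211 channel list from the NVM channel flags:
          * entries below IWM_NUM_2GHZ_CHANNELS become 2GHz CCK/OFDM
          * channels, the rest become 5GHz (11a) channels, and channels
          * lacking the ACTIVE flag are marked for passive scanning only.
          */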
   2567 static void
   2568 iwm_init_channel_map(struct iwm_softc *sc, const uint16_t * const nvm_ch_flags)
   2569 {
   2570 	struct ieee80211com *ic = &sc->sc_ic;
   2571 	struct iwm_nvm_data *data = &sc->sc_nvm;
   2572 	int ch_idx;
   2573 	struct ieee80211_channel *channel;
   2574 	uint16_t ch_flags;
   2575 	int is_5ghz;
   2576 	int flags, hw_value;
   2577 
   2578 	for (ch_idx = 0; ch_idx < __arraycount(iwm_nvm_channels); ch_idx++) {
   2579 		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
   2580 
   2581 		if (ch_idx >= IWM_NUM_2GHZ_CHANNELS &&
   2582 		    !data->sku_cap_band_52GHz_enable)
   2583 			ch_flags &= ~IWM_NVM_CHANNEL_VALID;
   2584 
   2585 		if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
   2586 			DPRINTF(("Ch. %d Flags %x [%sGHz] - No traffic\n",
   2587 			    iwm_nvm_channels[ch_idx],
   2588 			    ch_flags,
   2589 			    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
   2590 			    "5.2" : "2.4"));
   2591 			continue;
   2592 		}
   2593 
   2594 		hw_value = iwm_nvm_channels[ch_idx];
   2595 		channel = &ic->ic_channels[hw_value];
   2596 
   2597 		is_5ghz = ch_idx >= IWM_NUM_2GHZ_CHANNELS;
   2598 		if (!is_5ghz) {
   2599 			flags = IEEE80211_CHAN_2GHZ;
   2600 			channel->ic_flags
   2601 			    = IEEE80211_CHAN_CCK
   2602 			    | IEEE80211_CHAN_OFDM
   2603 			    | IEEE80211_CHAN_DYN
   2604 			    | IEEE80211_CHAN_2GHZ;
   2605 		} else {
   2606 			flags = IEEE80211_CHAN_5GHZ;
   2607 			channel->ic_flags =
   2608 			    IEEE80211_CHAN_A;
   2609 		}
   2610 		channel->ic_freq = ieee80211_ieee2mhz(hw_value, flags);
   2611 
   2612 		if (!(ch_flags & IWM_NVM_CHANNEL_ACTIVE))
   2613 			channel->ic_flags |= IEEE80211_CHAN_PASSIVE;
   2614 	}
   2615 }
   2616 
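         /*
          * Parse the HW, SW and calibration NVM sections: extract the NVM
          * version, radio configuration, SKU capabilities, number of MAC
          * addresses, crystal calibration words and MAC address, then build
          * the channel map.
          */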
   2617 static int
   2618 iwm_parse_nvm_data(struct iwm_softc *sc,
   2619 	const uint16_t *nvm_hw, const uint16_t *nvm_sw,
   2620 	const uint16_t *nvm_calib, uint8_t tx_chains, uint8_t rx_chains)
   2621 {
   2622 	struct iwm_nvm_data *data = &sc->sc_nvm;
   2623 	uint8_t hw_addr[ETHER_ADDR_LEN];
   2624 	uint16_t radio_cfg, sku;
   2625 
   2626 	data->nvm_version = le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
   2627 
   2628 	radio_cfg = le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
   2629 	data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
   2630 	data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
   2631 	data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
   2632 	data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
   2633 	data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK(radio_cfg);
   2634 	data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK(radio_cfg);
   2635 
   2636 	sku = le16_to_cpup(nvm_sw + IWM_SKU);
   2637 	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
   2638 	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
   2639 	data->sku_cap_11n_enable = 0;
   2640 
   2641 	if (!data->valid_tx_ant || !data->valid_rx_ant) {
   2642 		DPRINTF(("%s: invalid antennas (0x%x, 0x%x)\n", DEVNAME(sc),
   2643 		    data->valid_tx_ant, data->valid_rx_ant));
   2644 		return EINVAL;
   2645 	}
   2646 
   2647 	data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
   2648 
   2649 	data->xtal_calib[0] = *(nvm_calib + IWM_XTAL_CALIB);
   2650 	data->xtal_calib[1] = *(nvm_calib + IWM_XTAL_CALIB + 1);
   2651 
    2652 	/* The address is stored as little-endian 16-bit words (byte order 2-1-4-3-6-5), so swap each pair. */
   2653 	memcpy(hw_addr, nvm_hw + IWM_HW_ADDR, ETHER_ADDR_LEN);
   2654 	data->hw_addr[0] = hw_addr[1];
   2655 	data->hw_addr[1] = hw_addr[0];
   2656 	data->hw_addr[2] = hw_addr[3];
   2657 	data->hw_addr[3] = hw_addr[2];
   2658 	data->hw_addr[4] = hw_addr[5];
   2659 	data->hw_addr[5] = hw_addr[4];
   2660 
   2661 	iwm_init_channel_map(sc, &nvm_sw[IWM_NVM_CHANNELS]);
    2662 	data->calib_version = 255;   /* TODO:
    2663 					this value will prevent some checks from
    2664 					failing; we need to check whether this
    2665 					field is still needed, and if so,
    2666 					where it lives in the NVM */
   2667 
   2668 	return 0;
   2669 }
   2670 
   2671 /*
   2672  * END NVM PARSE
   2673  */
   2674 
   2675 struct iwm_nvm_section {
   2676 	uint16_t length;
   2677 	const uint8_t *data;
   2678 };
   2679 
   2680 #define IWM_FW_VALID_TX_ANT(sc) \
   2681     ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_TX_CHAIN) \
   2682     >> IWM_FW_PHY_CFG_TX_CHAIN_POS)
   2683 #define IWM_FW_VALID_RX_ANT(sc) \
   2684     ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RX_CHAIN) \
   2685     >> IWM_FW_PHY_CFG_RX_CHAIN_POS)
   2686 
   2687 static int
   2688 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
   2689 {
   2690 	const uint16_t *hw, *sw, *calib;
   2691 
   2692 	/* Checking for required sections */
   2693 	if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
   2694 	    !sections[IWM_NVM_SECTION_TYPE_HW].data) {
   2695 		DPRINTF(("%s: Can't parse empty NVM sections\n", DEVNAME(sc)));
   2696 		return ENOENT;
   2697 	}
   2698 
   2699 	hw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_HW].data;
   2700 	sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
   2701 	calib = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
   2702 	return iwm_parse_nvm_data(sc, hw, sw, calib,
   2703 	    IWM_FW_VALID_TX_ANT(sc), IWM_FW_VALID_RX_ANT(sc));
   2704 }
   2705 
   2706 static int
   2707 iwm_nvm_init(struct iwm_softc *sc)
   2708 {
   2709 	struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
   2710 	int i, section, error;
   2711 	uint16_t len;
   2712 	uint8_t *nvm_buffer, *temp;
   2713 
   2714 	/* Read From FW NVM */
   2715 	DPRINTF(("Read NVM\n"));
   2716 
   2717 	/* TODO: find correct NVM max size for a section */
   2718 	nvm_buffer = kmem_alloc(IWM_OTP_LOW_IMAGE_SIZE, KM_SLEEP);
   2719 	for (i = 0; i < __arraycount(nvm_to_read); i++) {
   2720 		section = nvm_to_read[i];
    2721 		KASSERT(section < __arraycount(nvm_sections));
   2722 
   2723 		error = iwm_nvm_read_section(sc, section, nvm_buffer, &len);
   2724 		if (error)
   2725 			break;
   2726 
   2727 		temp = kmem_alloc(len, KM_SLEEP);
   2728 		memcpy(temp, nvm_buffer, len);
   2729 		nvm_sections[section].data = temp;
   2730 		nvm_sections[section].length = len;
   2731 	}
   2732 	kmem_free(nvm_buffer, IWM_OTP_LOW_IMAGE_SIZE);
   2733 	if (error)
   2734 		return error;
   2735 
   2736 	return iwm_parse_nvm_sections(sc, nvm_sections);
   2737 }
   2738 
   2739 /*
   2740  * Firmware loading gunk.  This is kind of a weird hybrid between the
   2741  * iwn driver and the Linux iwlwifi driver.
   2742  */
   2743 
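         /*
          * Load one firmware section: copy it into the pre-allocated
          * DMA-safe buffer, point the FH service channel at it, kick off
          * the transfer and sleep until the "chunk done" interrupt arrives
          * (waiting up to about a second per attempt).
          */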
   2744 static int
   2745 iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
   2746 	const uint8_t *section, uint32_t byte_cnt)
   2747 {
   2748 	struct iwm_dma_info *dma = &sc->fw_dma;
   2749 	int error;
   2750 
   2751 	/* Copy firmware section into pre-allocated DMA-safe memory. */
   2752 	memcpy(dma->vaddr, section, byte_cnt);
   2753 	bus_dmamap_sync(sc->sc_dmat,
   2754 	    dma->map, 0, byte_cnt, BUS_DMASYNC_PREWRITE);
   2755 
   2756 	if (!iwm_nic_lock(sc))
   2757 		return EBUSY;
   2758 
   2759 	sc->sc_fw_chunk_done = 0;
   2760 
   2761 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
   2762 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
   2763 	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
   2764 	    dst_addr);
   2765 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
   2766 	    dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
   2767 	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
   2768 	    (iwm_get_dma_hi_addr(dma->paddr)
   2769 	      << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
   2770 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
   2771 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
   2772 	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
   2773 	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
   2774 	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
   2775 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
   2776 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
   2777 	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
   2778 
   2779 	iwm_nic_unlock(sc);
   2780 
   2781 	/* wait 1s for this segment to load */
   2782 	while (!sc->sc_fw_chunk_done)
   2783 		if ((error = tsleep(&sc->sc_fw, 0, "iwmfw", hz)) != 0)
   2784 			break;
   2785 
   2786 	return error;
   2787 }
   2788 
   2789 static int
   2790 iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
   2791 {
   2792 	struct iwm_fw_sects *fws;
   2793 	int error, i, w;
   2794 	void *data;
   2795 	uint32_t dlen;
   2796 	uint32_t offset;
   2797 
   2798 	sc->sc_uc.uc_intr = 0;
   2799 
   2800 	fws = &sc->sc_fw.fw_sects[ucode_type];
   2801 	for (i = 0; i < fws->fw_count; i++) {
   2802 		data = fws->fw_sect[i].fws_data;
   2803 		dlen = fws->fw_sect[i].fws_len;
   2804 		offset = fws->fw_sect[i].fws_devoff;
   2805 		DPRINTF(("LOAD FIRMWARE type %d offset %u len %d\n",
   2806 		    ucode_type, offset, dlen));
   2807 		error = iwm_firmware_load_chunk(sc, offset, data, dlen);
   2808 		if (error) {
   2809 			DPRINTF(("iwm_firmware_load_chunk() chunk %u of %u "
   2810 			    "returned error %02d\n", i, fws->fw_count, error));
   2811 			return error;
   2812 		}
   2813 	}
   2814 
   2815 	/* wait for the firmware to load */
   2816 	IWM_WRITE(sc, IWM_CSR_RESET, 0);
   2817 
   2818 	for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++) {
   2819 		error = tsleep(&sc->sc_uc, 0, "iwmuc", hz/10);
   2820 	}
   2821 
   2822 	return error;
   2823 }
   2824 
   2825 /* iwlwifi: pcie/trans.c */
   2826 static int
   2827 iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
   2828 {
   2829 	int error;
   2830 
   2831 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
   2832 
   2833 	if ((error = iwm_nic_init(sc)) != 0) {
   2834 		aprint_error_dev(sc->sc_dev, "Unable to init nic\n");
   2835 		return error;
   2836 	}
   2837 
   2838 	/* make sure rfkill handshake bits are cleared */
   2839 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
   2840 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
   2841 	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
   2842 
   2843 	/* clear (again), then enable host interrupts */
   2844 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
   2845 	iwm_enable_interrupts(sc);
   2846 
   2847 	/* really make sure rfkill handshake bits are cleared */
   2848 	/* maybe we should write a few times more?  just to make sure */
   2849 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
   2850 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
   2851 
   2852 	/* Load the given image to the HW */
   2853 	error = iwm_load_firmware(sc, ucode_type);
   2854 	if (error) {
   2855 		aprint_error_dev(sc->sc_dev, "failed to load firmware: %d\n",
   2856 		    error);
   2857 	}
   2858 	return error;
   2859 }
   2860 
   2861 static int
   2862 iwm_fw_alive(struct iwm_softc *sc, uint32_t sched_base)
   2863 {
   2864 	return iwm_post_alive(sc);
   2865 }
   2866 
   2867 static int
   2868 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
   2869 {
   2870 	struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
   2871 		.valid = htole32(valid_tx_ant),
   2872 	};
   2873 
   2874 	return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
   2875 	    IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
   2876 }
   2877 
   2878 /* iwlwifi: mvm/fw.c */
   2879 static int
   2880 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
   2881 {
   2882 	struct iwm_phy_cfg_cmd phy_cfg_cmd;
   2883 	enum iwm_ucode_type ucode_type = sc->sc_uc_current;
   2884 
   2885 	/* Set parameters */
   2886 	phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
   2887 	phy_cfg_cmd.calib_control.event_trigger =
   2888 	    sc->sc_default_calib[ucode_type].event_trigger;
   2889 	phy_cfg_cmd.calib_control.flow_trigger =
   2890 	    sc->sc_default_calib[ucode_type].flow_trigger;
   2891 
   2892 	DPRINTFN(10, ("Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg));
   2893 	return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
   2894 	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
   2895 }
   2896 
   2897 static int
   2898 iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
   2899 	enum iwm_ucode_type ucode_type)
   2900 {
   2901 	enum iwm_ucode_type old_type = sc->sc_uc_current;
   2902 	int error;
   2903 
   2904 	if ((error = iwm_read_firmware(sc)) != 0)
   2905 		return error;
   2906 
   2907 	sc->sc_uc_current = ucode_type;
   2908 	error = iwm_start_fw(sc, ucode_type);
   2909 	if (error) {
   2910 		sc->sc_uc_current = old_type;
   2911 		return error;
   2912 	}
   2913 
   2914 	return iwm_fw_alive(sc, sc->sched_base);
   2915 }
   2916 
   2917 /*
   2918  * mvm misc bits
   2919  */
   2920 
   2921 /*
   2922  * follows iwlwifi/fw.c
   2923  */
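         /*
          * Run the INIT firmware image.  With justnvm set, only the NVM is
          * read and the MAC address and scan command buffer are set up;
          * otherwise the TX antenna and PHY configuration commands are sent
          * and we wait for the init-complete notification from the firmware.
          */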
   2924 static int
   2925 iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
   2926 {
   2927 	int error;
   2928 
   2929 	/* do not operate with rfkill switch turned on */
   2930 	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
   2931 		aprint_error_dev(sc->sc_dev,
   2932 		    "radio is disabled by hardware switch\n");
   2933 		return EPERM;
   2934 	}
   2935 
   2936 	sc->sc_init_complete = 0;
   2937 	if ((error = iwm_mvm_load_ucode_wait_alive(sc,
   2938 	    IWM_UCODE_TYPE_INIT)) != 0)
   2939 		return error;
   2940 
   2941 	if (justnvm) {
   2942 		if ((error = iwm_nvm_init(sc)) != 0) {
   2943 			aprint_error_dev(sc->sc_dev, "failed to read nvm\n");
   2944 			return error;
   2945 		}
   2946 		memcpy(&sc->sc_ic.ic_myaddr,
   2947 		    &sc->sc_nvm.hw_addr, ETHER_ADDR_LEN);
   2948 
   2949 		sc->sc_scan_cmd_len = sizeof(struct iwm_scan_cmd)
   2950 		    + sc->sc_capa_max_probe_len
   2951 		    + IWM_MAX_NUM_SCAN_CHANNELS
   2952 		    * sizeof(struct iwm_scan_channel);
   2953 		sc->sc_scan_cmd = kmem_alloc(sc->sc_scan_cmd_len, KM_SLEEP);
   2954 
   2955 		return 0;
   2956 	}
   2957 
   2958 	/* Send TX valid antennas before triggering calibrations */
   2959 	if ((error = iwm_send_tx_ant_cfg(sc, IWM_FW_VALID_TX_ANT(sc))) != 0)
   2960 		return error;
   2961 
   2962 	/*
    2963 	 * Send the phy configuration command to the init uCode
    2964 	 * to start the 16.0 uCode init image internal calibrations.
    2965 	 */
    2966 	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
   2967 		DPRINTF(("%s: failed to run internal calibration: %d\n",
   2968 		    DEVNAME(sc), error));
   2969 		return error;
   2970 	}
   2971 
   2972 	/*
   2973 	 * Nothing to do but wait for the init complete notification
   2974 	 * from the firmware
   2975 	 */
   2976 	while (!sc->sc_init_complete)
   2977 		if ((error = tsleep(&sc->sc_init_complete,
   2978 		    0, "iwminit", 2*hz)) != 0)
   2979 			break;
   2980 
   2981 	return error;
   2982 }
   2983 
   2984 /*
   2985  * receive side
   2986  */
   2987 
   2988 /* (re)stock rx ring, called at init-time and at runtime */
   2989 static int
   2990 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
   2991 {
   2992 	struct iwm_rx_ring *ring = &sc->rxq;
   2993 	struct iwm_rx_data *data = &ring->data[idx];
   2994 	struct mbuf *m;
   2995 	int error;
   2996 	int fatal = 0;
   2997 
   2998 	m = m_gethdr(M_DONTWAIT, MT_DATA);
   2999 	if (m == NULL)
   3000 		return ENOBUFS;
   3001 
   3002 	if (size <= MCLBYTES) {
   3003 		MCLGET(m, M_DONTWAIT);
   3004 	} else {
   3005 		MEXTMALLOC(m, IWM_RBUF_SIZE, M_DONTWAIT);
   3006 	}
   3007 	if ((m->m_flags & M_EXT) == 0) {
   3008 		m_freem(m);
   3009 		return ENOBUFS;
   3010 	}
   3011 
   3012 	if (data->m != NULL) {
   3013 		bus_dmamap_unload(sc->sc_dmat, data->map);
   3014 		fatal = 1;
   3015 	}
   3016 
   3017 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   3018 	if ((error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
   3019 	    BUS_DMA_READ|BUS_DMA_NOWAIT)) != 0) {
   3020 		/* XXX */
   3021 		if (fatal)
   3022 			panic("iwm: could not load RX mbuf");
   3023 		m_freem(m);
   3024 		return error;
   3025 	}
   3026 	data->m = m;
   3027 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, size, BUS_DMASYNC_PREREAD);
   3028 
   3029 	/* Update RX descriptor. */
   3030 	ring->desc[idx] = htole32(data->map->dm_segs[0].ds_addr >> 8);
   3031 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
   3032 	    idx * sizeof(uint32_t), sizeof(uint32_t), BUS_DMASYNC_PREWRITE);
   3033 
   3034 	return 0;
   3035 }
   3036 
   3037 /* iwlwifi: mvm/rx.c */
   3038 #define IWM_RSSI_OFFSET 50
   3039 static int
   3040 iwm_mvm_calc_rssi(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
   3041 {
   3042 	int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
   3043 	uint32_t agc_a, agc_b;
   3044 	uint32_t val;
   3045 
   3046 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_AGC_IDX]);
   3047 	agc_a = (val & IWM_OFDM_AGC_A_MSK) >> IWM_OFDM_AGC_A_POS;
   3048 	agc_b = (val & IWM_OFDM_AGC_B_MSK) >> IWM_OFDM_AGC_B_POS;
   3049 
   3050 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_RSSI_AB_IDX]);
   3051 	rssi_a = (val & IWM_OFDM_RSSI_INBAND_A_MSK) >> IWM_OFDM_RSSI_A_POS;
   3052 	rssi_b = (val & IWM_OFDM_RSSI_INBAND_B_MSK) >> IWM_OFDM_RSSI_B_POS;
   3053 
   3054 	/*
   3055 	 * dBm = rssi dB - agc dB - constant.
   3056 	 * Higher AGC (higher radio gain) means lower signal.
   3057 	 */
   3058 	rssi_a_dbm = rssi_a - IWM_RSSI_OFFSET - agc_a;
   3059 	rssi_b_dbm = rssi_b - IWM_RSSI_OFFSET - agc_b;
   3060 	max_rssi_dbm = MAX(rssi_a_dbm, rssi_b_dbm);
   3061 
   3062 	DPRINTF(("Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
   3063 	    rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b));
   3064 
   3065 	return max_rssi_dbm;
   3066 }
   3067 
   3068 /* iwlwifi: mvm/rx.c */
   3069 /*
   3070  * iwm_mvm_get_signal_strength - use new rx PHY INFO API
    3071  * Values are reported by the fw as positive values - need to negate
    3072  * to obtain their dBm.  Account for missing antennas by replacing 0
    3073  * values by -256dBm: practically 0 power and a non-feasible 8-bit value.
   3074  */
   3075 static int
   3076 iwm_mvm_get_signal_strength(struct iwm_softc *sc,
   3077     struct iwm_rx_phy_info *phy_info)
   3078 {
   3079 	int energy_a, energy_b, energy_c, max_energy;
   3080 	uint32_t val;
   3081 
   3082 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
   3083 	energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
   3084 	    IWM_RX_INFO_ENERGY_ANT_A_POS;
   3085 	energy_a = energy_a ? -energy_a : -256;
   3086 	energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
   3087 	    IWM_RX_INFO_ENERGY_ANT_B_POS;
   3088 	energy_b = energy_b ? -energy_b : -256;
   3089 	energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
   3090 	    IWM_RX_INFO_ENERGY_ANT_C_POS;
   3091 	energy_c = energy_c ? -energy_c : -256;
   3092 	max_energy = MAX(energy_a, energy_b);
   3093 	max_energy = MAX(max_energy, energy_c);
   3094 
   3095 	DPRINTFN(12, ("energy In A %d B %d C %d, and max %d\n",
   3096 	    energy_a, energy_b, energy_c, max_energy));
   3097 
   3098 	return max_energy;
   3099 }
   3100 
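         /*
          * Handle an RX PHY notification: cache the PHY info so that the
          * MPDU delivered by the subsequent IWM_REPLY_RX_MPDU_CMD can be
          * tagged with its signal, channel and timestamp data.
          */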
   3101 static void
   3102 iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc,
   3103 	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
   3104 {
   3105 	struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
   3106 
   3107 	DPRINTFN(20, ("received PHY stats\n"));
   3108 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
   3109 	    sizeof(*phy_info), BUS_DMASYNC_POSTREAD);
   3110 
   3111 	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
   3112 }
   3113 
   3114 /*
   3115  * Retrieve the average noise (in dBm) among receivers.
   3116  */
   3117 static int
   3118 iwm_get_noise(const struct iwm_mvm_statistics_rx_non_phy *stats)
   3119 {
   3120 	int i, total, nbant, noise;
   3121 
   3122 	total = nbant = noise = 0;
   3123 	for (i = 0; i < 3; i++) {
   3124 		noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
   3125 		if (noise) {
   3126 			total += noise;
   3127 			nbant++;
   3128 		}
   3129 	}
   3130 
   3131 	/* There should be at least one antenna but check anyway. */
   3132 	return (nbant == 0) ? -127 : (total / nbant) - 107;
   3133 }
   3134 
   3135 /*
   3136  * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
   3137  *
   3138  * Handles the actual data of the Rx packet from the fw
   3139  */
   3140 static void
   3141 iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc,
   3142 	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
   3143 {
   3144 	struct ieee80211com *ic = &sc->sc_ic;
   3145 	struct ieee80211_frame *wh;
   3146 	struct ieee80211_node *ni;
   3147 	struct ieee80211_channel *c = NULL;
   3148 	struct mbuf *m;
   3149 	struct iwm_rx_phy_info *phy_info;
   3150 	struct iwm_rx_mpdu_res_start *rx_res;
   3151 	int device_timestamp;
   3152 	uint32_t len;
   3153 	uint32_t rx_pkt_status;
   3154 	int rssi;
   3155 
   3156 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
   3157 	    BUS_DMASYNC_POSTREAD);
   3158 
   3159 	phy_info = &sc->sc_last_phy_info;
   3160 	rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
   3161 	wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
   3162 	len = le16toh(rx_res->byte_count);
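	/* The 32-bit RX status word immediately follows the frame payload. */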
   3163 	rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
   3164 
   3165 	m = data->m;
   3166 	m->m_data = pkt->data + sizeof(*rx_res);
   3167 	m->m_pkthdr.len = m->m_len = len;
   3168 
   3169 	if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
   3170 		DPRINTF(("dsp size out of range [0,20]: %d\n",
   3171 		    phy_info->cfg_phy_cnt));
   3172 		return;
   3173 	}
   3174 
   3175 	if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
   3176 	    !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
   3177 		DPRINTF(("Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status));
   3178 		return; /* drop */
   3179 	}
   3180 
   3181 	device_timestamp = le32toh(phy_info->system_timestamp);
   3182 
   3183 	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_RX_ENERGY_API) {
   3184 		rssi = iwm_mvm_get_signal_strength(sc, phy_info);
   3185 	} else {
   3186 		rssi = iwm_mvm_calc_rssi(sc, phy_info);
   3187 	}
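	/*
	 * Both helpers return a (negative) dBm value; flip the sign since
	 * the net80211 input path expects a positive RSSI here.
	 */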
   3188 	rssi = -rssi;
   3189 
   3190 	if (ic->ic_state == IEEE80211_S_SCAN)
   3191 		iwm_fix_channel(ic, m);
   3192 
    3193 	/* Replenish the RX ring before handing this buffer up the stack. */
   3194 	if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0)
   3195 		return;
   3196 
   3197 	m->m_pkthdr.rcvif = IC2IFP(ic);
   3198 
   3199 	if (sc->sc_scanband == IEEE80211_CHAN_5GHZ) {
   3200 		if (le32toh(phy_info->channel) < __arraycount(ic->ic_channels))
   3201 			c = &ic->ic_channels[le32toh(phy_info->channel)];
   3202 	}
   3203 
   3204 	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
   3205 	if (c)
   3206 		ni->ni_chan = c;
   3207 
   3208 	if (sc->sc_drvbpf != NULL) {
   3209 		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
   3210 
   3211 		tap->wr_flags = 0;
   3212 		if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
   3213 			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
   3214 		tap->wr_chan_freq =
   3215 		    htole16(ic->ic_channels[phy_info->channel].ic_freq);
   3216 		tap->wr_chan_flags =
   3217 		    htole16(ic->ic_channels[phy_info->channel].ic_flags);
   3218 		tap->wr_dbm_antsignal = (int8_t)rssi;
   3219 		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
   3220 		tap->wr_tsft = phy_info->system_timestamp;
   3221 		switch (phy_info->rate) {
   3222 		/* CCK rates. */
   3223 		case  10: tap->wr_rate =   2; break;
   3224 		case  20: tap->wr_rate =   4; break;
   3225 		case  55: tap->wr_rate =  11; break;
   3226 		case 110: tap->wr_rate =  22; break;
   3227 		/* OFDM rates. */
   3228 		case 0xd: tap->wr_rate =  12; break;
   3229 		case 0xf: tap->wr_rate =  18; break;
   3230 		case 0x5: tap->wr_rate =  24; break;
   3231 		case 0x7: tap->wr_rate =  36; break;
   3232 		case 0x9: tap->wr_rate =  48; break;
   3233 		case 0xb: tap->wr_rate =  72; break;
   3234 		case 0x1: tap->wr_rate =  96; break;
   3235 		case 0x3: tap->wr_rate = 108; break;
   3236 		/* Unknown rate: should not happen. */
   3237 		default:  tap->wr_rate =   0;
   3238 		}
   3239 
   3240 		bpf_mtap2(sc->sc_drvbpf, tap, sc->sc_rxtap_len, m);
   3241 	}
   3242 	ieee80211_input(ic, m, ni, rssi, device_timestamp);
   3243 	ieee80211_free_node(ni);
   3244 }
   3245 
   3246 static void
   3247 iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
   3248 	struct iwm_node *in)
   3249 {
   3250 	struct ieee80211com *ic = &sc->sc_ic;
   3251 	struct ifnet *ifp = IC2IFP(ic);
   3252 	struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
   3253 	int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
   3254 	int failack = tx_resp->failure_frame;
   3255 
   3256 	KASSERT(tx_resp->frame_count == 1);
   3257 
   3258 	/* Update rate control statistics. */
   3259 	in->in_amn.amn_txcnt++;
   3260 	if (failack > 0) {
   3261 		in->in_amn.amn_retrycnt++;
   3262 	}
   3263 
   3264 	if (status != IWM_TX_STATUS_SUCCESS &&
   3265 	    status != IWM_TX_STATUS_DIRECT_DONE)
   3266 		ifp->if_oerrors++;
   3267 	else
   3268 		ifp->if_opackets++;
   3269 }
   3270 
   3271 static void
   3272 iwm_mvm_rx_tx_cmd(struct iwm_softc *sc,
   3273 	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
   3274 {
   3275 	struct ieee80211com *ic = &sc->sc_ic;
   3276 	struct ifnet *ifp = IC2IFP(ic);
   3277 	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
   3278 	int idx = cmd_hdr->idx;
   3279 	int qid = cmd_hdr->qid;
   3280 	struct iwm_tx_ring *ring = &sc->txq[qid];
   3281 	struct iwm_tx_data *txd = &ring->data[idx];
   3282 	struct iwm_node *in = txd->in;
   3283 
   3284 	if (txd->done) {
   3285 		DPRINTF(("%s: got tx interrupt that's already been handled!\n",
   3286 		    DEVNAME(sc)));
   3287 		return;
   3288 	}
   3289 
   3290 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
   3291 	    BUS_DMASYNC_POSTREAD);
   3292 
   3293 	sc->sc_tx_timer = 0;
   3294 
   3295 	iwm_mvm_rx_tx_cmd_single(sc, pkt, in);
   3296 
   3297 	/* Unmap and free mbuf. */
   3298 	bus_dmamap_sync(sc->sc_dmat, txd->map, 0, txd->map->dm_mapsize,
   3299 	    BUS_DMASYNC_POSTWRITE);
   3300 	bus_dmamap_unload(sc->sc_dmat, txd->map);
   3301 	m_freem(txd->m);
   3302 
   3303 	DPRINTFN(8, ("free txd %p, in %p\n", txd, txd->in));
   3304 	KASSERT(txd->done == 0);
   3305 	txd->done = 1;
   3306 	KASSERT(txd->in);
   3307 
   3308 	txd->m = NULL;
   3309 	txd->in = NULL;
   3310 	ieee80211_free_node(&in->in_ni);
   3311 
   3312 	if (--ring->queued < IWM_TX_RING_LOMARK) {
   3313 		sc->qfullmsk &= ~(1 << ring->qid);
   3314 		if (sc->qfullmsk == 0 && (ifp->if_flags & IFF_OACTIVE)) {
   3315 			ifp->if_flags &= ~IFF_OACTIVE;
    3316 			/*
    3317 			 * We are in interrupt context, but net80211's
    3318 			 * start routines are expected to cope with that,
    3319 			 * so kick the output queue directly.
    3320 			 */
   3321 			(*ifp->if_start)(ifp);
   3322 		}
   3323 	}
   3324 }
   3325 
   3326 /*
   3327  * BEGIN iwlwifi/mvm/binding.c
   3328  */
   3329 
   3330 static int
   3331 iwm_mvm_binding_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action)
   3332 {
   3333 	struct iwm_binding_cmd cmd;
   3334 	struct iwm_mvm_phy_ctxt *phyctxt = in->in_phyctxt;
   3335 	int i, ret;
   3336 	uint32_t status;
   3337 
   3338 	memset(&cmd, 0, sizeof(cmd));
   3339 
   3340 	cmd.id_and_color
   3341 	    = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
   3342 	cmd.action = htole32(action);
   3343 	cmd.phy = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
   3344 
   3345 	cmd.macs[0] = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
   3346 	for (i = 1; i < IWM_MAX_MACS_IN_BINDING; i++)
   3347 		cmd.macs[i] = htole32(IWM_FW_CTXT_INVALID);
   3348 
   3349 	status = 0;
   3350 	ret = iwm_mvm_send_cmd_pdu_status(sc, IWM_BINDING_CONTEXT_CMD,
   3351 	    sizeof(cmd), &cmd, &status);
   3352 	if (ret) {
   3353 		DPRINTF(("%s: Failed to send binding (action:%d): %d\n",
   3354 		    DEVNAME(sc), action, ret));
   3355 		return ret;
   3356 	}
   3357 
   3358 	if (status) {
   3359 		DPRINTF(("%s: Binding command failed: %u\n", DEVNAME(sc),
   3360 		    status));
   3361 		ret = EIO;
   3362 	}
   3363 
   3364 	return ret;
   3365 }
   3366 
   3367 static int
   3368 iwm_mvm_binding_update(struct iwm_softc *sc, struct iwm_node *in, int add)
   3369 {
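	/* XXX The "add" argument is currently unused; only ACTION_ADD is sent. */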
   3370 	return iwm_mvm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD);
   3371 }
   3372 
   3373 static int
   3374 iwm_mvm_binding_add_vif(struct iwm_softc *sc, struct iwm_node *in)
   3375 {
   3376 	return iwm_mvm_binding_update(sc, in, IWM_FW_CTXT_ACTION_ADD);
   3377 }
   3378 
   3379 /*
   3380  * END iwlwifi/mvm/binding.c
   3381  */
   3382 
   3383 /*
   3384  * BEGIN iwlwifi/mvm/phy-ctxt.c
   3385  */
   3386 
   3387 /*
   3388  * Construct the generic fields of the PHY context command
   3389  */
   3390 static void
   3391 iwm_mvm_phy_ctxt_cmd_hdr(struct iwm_softc *sc, struct iwm_mvm_phy_ctxt *ctxt,
   3392 	struct iwm_phy_context_cmd *cmd, uint32_t action, uint32_t apply_time)
   3393 {
   3394 	memset(cmd, 0, sizeof(struct iwm_phy_context_cmd));
   3395 
   3396 	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(ctxt->id,
   3397 	    ctxt->color));
   3398 	cmd->action = htole32(action);
   3399 	cmd->apply_time = htole32(apply_time);
   3400 }
   3401 
   3402 /*
   3403  * Add the phy configuration to the PHY context command
   3404  */
   3405 static void
   3406 iwm_mvm_phy_ctxt_cmd_data(struct iwm_softc *sc,
   3407 	struct iwm_phy_context_cmd *cmd, struct ieee80211_channel *chan,
   3408 	uint8_t chains_static, uint8_t chains_dynamic)
   3409 {
   3410 	struct ieee80211com *ic = &sc->sc_ic;
   3411 	uint8_t active_cnt, idle_cnt;
   3412 
   3413 	cmd->ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
   3414 	    IWM_PHY_BAND_24 : IWM_PHY_BAND_5;
   3415 
   3416 	cmd->ci.channel = ieee80211_chan2ieee(ic, chan);
   3417 	cmd->ci.width = IWM_PHY_VHT_CHANNEL_MODE20;
   3418 	cmd->ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
   3419 
    3420 	/* Set the RX chains. */
   3421 	idle_cnt = chains_static;
   3422 	active_cnt = chains_dynamic;
   3423 
   3424 	cmd->rxchain_info = htole32(IWM_FW_VALID_RX_ANT(sc) <<
   3425 					IWM_PHY_RX_CHAIN_VALID_POS);
   3426 	cmd->rxchain_info |= htole32(idle_cnt << IWM_PHY_RX_CHAIN_CNT_POS);
   3427 	cmd->rxchain_info |= htole32(active_cnt <<
   3428 	    IWM_PHY_RX_CHAIN_MIMO_CNT_POS);
   3429 
   3430 	cmd->txchain_info = htole32(IWM_FW_VALID_TX_ANT(sc));
   3431 }
   3432 
    3433 /*
    3434  * Send a PHY context command to apply the given configuration.
    3435  * This is invoked either the first time the PHY configuration is
    3436  * applied or when the configuration has changed since the previous
    3437  * apply.
    3438  */
   3439 static int
   3440 iwm_mvm_phy_ctxt_apply(struct iwm_softc *sc,
   3441 	struct iwm_mvm_phy_ctxt *ctxt,
   3442 	uint8_t chains_static, uint8_t chains_dynamic,
   3443 	uint32_t action, uint32_t apply_time)
   3444 {
   3445 	struct iwm_phy_context_cmd cmd;
   3446 	int ret;
   3447 
   3448 	/* Set the command header fields */
   3449 	iwm_mvm_phy_ctxt_cmd_hdr(sc, ctxt, &cmd, action, apply_time);
   3450 
   3451 	/* Set the command data */
   3452 	iwm_mvm_phy_ctxt_cmd_data(sc, &cmd, ctxt->channel,
   3453 	    chains_static, chains_dynamic);
   3454 
   3455 	ret = iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONTEXT_CMD, IWM_CMD_SYNC,
   3456 	    sizeof(struct iwm_phy_context_cmd), &cmd);
   3457 	if (ret) {
   3458 		DPRINTF(("PHY ctxt cmd error. ret=%d\n", ret));
   3459 	}
   3460 	return ret;
   3461 }
   3462 
   3463 /*
   3464  * Send a command to add a PHY context based on the current HW configuration.
   3465  */
   3466 static int
   3467 iwm_mvm_phy_ctxt_add(struct iwm_softc *sc, struct iwm_mvm_phy_ctxt *ctxt,
   3468 	struct ieee80211_channel *chan,
   3469 	uint8_t chains_static, uint8_t chains_dynamic)
   3470 {
   3471 	ctxt->channel = chan;
   3472 	return iwm_mvm_phy_ctxt_apply(sc, ctxt,
   3473 	    chains_static, chains_dynamic, IWM_FW_CTXT_ACTION_ADD, 0);
   3474 }
   3475 
   3476 /*
   3477  * Send a command to modify the PHY context based on the current HW
   3478  * configuration. Note that the function does not check that the configuration
   3479  * changed.
   3480  */
   3481 static int
   3482 iwm_mvm_phy_ctxt_changed(struct iwm_softc *sc,
   3483 	struct iwm_mvm_phy_ctxt *ctxt, struct ieee80211_channel *chan,
   3484 	uint8_t chains_static, uint8_t chains_dynamic)
   3485 {
   3486 	ctxt->channel = chan;
   3487 	return iwm_mvm_phy_ctxt_apply(sc, ctxt,
   3488 	    chains_static, chains_dynamic, IWM_FW_CTXT_ACTION_MODIFY, 0);
   3489 }
   3490 
   3491 /*
   3492  * END iwlwifi/mvm/phy-ctxt.c
   3493  */
   3494 
   3495 /*
   3496  * transmit side
   3497  */
   3498 
    3499 /*
    3500  * Send a command to the firmware.  The interface roughly follows the
    3501  * Linux driver's; the implementation is mostly from if_iwn (iwn_cmd()).
    3502  *
    3503  * Small payloads are copied into the pre-allocated command-ring slot;
    3504  * larger ones are copied into a freshly allocated, DMA-mapped mbuf.
    3505  */
   3506 static int
   3507 iwm_send_cmd(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
   3508 {
   3509 	struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
   3510 	struct iwm_tfd *desc;
   3511 	struct iwm_tx_data *data;
   3512 	struct iwm_device_cmd *cmd;
   3513 	struct mbuf *m;
   3514 	bus_addr_t paddr;
   3515 	uint32_t addr_lo;
   3516 	int error = 0, i, paylen, off, s;
   3517 	int code;
   3518 	int async, wantresp;
   3519 
   3520 	code = hcmd->id;
   3521 	async = hcmd->flags & IWM_CMD_ASYNC;
   3522 	wantresp = hcmd->flags & IWM_CMD_WANT_SKB;
   3523 
   3524 	for (i = 0, paylen = 0; i < __arraycount(hcmd->len); i++) {
   3525 		paylen += hcmd->len[i];
   3526 	}
   3527 
   3528 	/* if the command wants an answer, busy sc_cmd_resp */
   3529 	if (wantresp) {
   3530 		KASSERT(!async);
   3531 		while (sc->sc_wantresp != -1)
   3532 			tsleep(&sc->sc_wantresp, 0, "iwmcmdsl", 0);
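		/*
		 * Record which queue/ring slot the response belongs to so the
		 * interrupt path can recognize the matching reply and make it
		 * available in sc_cmd_resp.
		 */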
   3533 		sc->sc_wantresp = ring->qid << 16 | ring->cur;
   3534 		DPRINTFN(12, ("wantresp is %x\n", sc->sc_wantresp));
   3535 	}
   3536 
   3537 	/*
   3538 	 * Is the hardware still available?  (after e.g. above wait).
   3539 	 */
   3540 	s = splnet();
   3541 	if (sc->sc_flags & IWM_FLAG_STOPPED) {
   3542 		error = ENXIO;
   3543 		goto out;
   3544 	}
   3545 
   3546 	desc = &ring->desc[ring->cur];
   3547 	data = &ring->data[ring->cur];
   3548 
   3549 	if (paylen > sizeof(cmd->data)) {
   3550 		/* Command is too large */
   3551 		if (sizeof(cmd->hdr) + paylen > IWM_RBUF_SIZE) {
   3552 			error = EINVAL;
   3553 			goto out;
   3554 		}
   3555 		m = m_gethdr(M_DONTWAIT, MT_DATA);
   3556 		if (m == NULL) {
   3557 			error = ENOMEM;
   3558 			goto out;
   3559 		}
   3560 		MEXTMALLOC(m, IWM_RBUF_SIZE, M_DONTWAIT);
   3561 		if (!(m->m_flags & M_EXT)) {
   3562 			m_freem(m);
   3563 			error = ENOMEM;
   3564 			goto out;
   3565 		}
   3566 		cmd = mtod(m, struct iwm_device_cmd *);
   3567 		error = bus_dmamap_load(sc->sc_dmat, data->map, cmd,
   3568 		    IWM_RBUF_SIZE, NULL, BUS_DMA_NOWAIT | BUS_DMA_WRITE);
   3569 		if (error != 0) {
   3570 			m_freem(m);
   3571 			goto out;
   3572 		}
   3573 		data->m = m;
   3574 		paddr = data->map->dm_segs[0].ds_addr;
   3575 	} else {
   3576 		cmd = &ring->cmd[ring->cur];
   3577 		paddr = data->cmd_paddr;
   3578 	}
   3579 
   3580 	cmd->hdr.code = code;
   3581 	cmd->hdr.flags = 0;
   3582 	cmd->hdr.qid = ring->qid;
   3583 	cmd->hdr.idx = ring->cur;
   3584 
   3585 	for (i = 0, off = 0; i < __arraycount(hcmd->data); i++) {
   3586 		if (hcmd->len[i] == 0)
   3587 			continue;
   3588 		memcpy(cmd->data + off, hcmd->data[i], hcmd->len[i]);
   3589 		off += hcmd->len[i];
   3590 	}
   3591 	KASSERT(off == paylen);
   3592 
   3593 	/* lo field is not aligned */
   3594 	addr_lo = htole32((uint32_t)paddr);
   3595 	memcpy(&desc->tbs[0].lo, &addr_lo, sizeof(uint32_t));
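	/*
	 * hi_n_len packs the upper 4 bits of the 36-bit DMA address in its
	 * low nibble and the buffer length in the remaining 12 bits.
	 */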
   3596 	desc->tbs[0].hi_n_len  = htole16(iwm_get_dma_hi_addr(paddr)
   3597 	    | ((sizeof(cmd->hdr) + paylen) << 4));
   3598 	desc->num_tbs = 1;
   3599 
   3600 	DPRINTFN(8, ("iwm_send_cmd 0x%x size=%zu %s\n",
   3601 	    code, sizeof(cmd->hdr) + paylen, async ? " (async)" : ""));
   3602 
   3603 	if (paylen > sizeof(cmd->data)) {
   3604 		bus_dmamap_sync(sc->sc_dmat, data->map, 0,
   3605 		    sizeof(cmd->hdr) + paylen, BUS_DMASYNC_PREWRITE);
   3606 	} else {
   3607 		bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
   3608 		    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
   3609 		    sizeof(cmd->hdr) + paylen, BUS_DMASYNC_PREWRITE);
   3610 	}
   3611 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
   3612 	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
   3613 	    sizeof (*desc), BUS_DMASYNC_PREWRITE);
   3614 
   3615 	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
   3616 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
   3617 	if (!iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
   3618 	    IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
   3619 	    (IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
   3620 	     IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000)) {
   3621 		DPRINTF(("%s: acquiring device failed\n", DEVNAME(sc)));
   3622 		error = EBUSY;
   3623 		goto out;
   3624 	}
   3625 
   3626 #if 0
   3627 	iwm_update_sched(sc, ring->qid, ring->cur, 0, 0);
   3628 #endif
   3629 	DPRINTF(("sending command 0x%x qid %d, idx %d\n",
   3630 	    code, ring->qid, ring->cur));
   3631 
   3632 	/* Kick command ring. */
   3633 	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
   3634 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
   3635 
   3636 	if (!async) {
    3637 		/* Record the generation so we can tell if the device was reset while we slept. */
   3638 		int generation = sc->sc_generation;
   3639 		error = tsleep(desc, PCATCH, "iwmcmd", hz);
   3640 		if (error == 0) {
   3641 			/* if hardware is no longer up, return error */
   3642 			if (generation != sc->sc_generation) {
   3643 				error = ENXIO;
   3644 			} else {
   3645 				hcmd->resp_pkt = (void *)sc->sc_cmd_resp;
   3646 			}
   3647 		}
   3648 	}
   3649  out:
   3650 	if (wantresp && error != 0) {
   3651 		iwm_free_resp(sc, hcmd);
   3652 	}
   3653 	splx(s);
   3654 
   3655 	return error;
   3656 }
   3657 
   3658 /* iwlwifi: mvm/utils.c */
   3659 static int
   3660 iwm_mvm_send_cmd_pdu(struct iwm_softc *sc, uint8_t id,
   3661 	uint32_t flags, uint16_t len, const void *data)
   3662 {
   3663 	struct iwm_host_cmd cmd = {
   3664 		.id = id,
   3665 		.len = { len, },
   3666 		.data = { data, },
   3667 		.flags = flags,
   3668 	};
   3669 
   3670 	return iwm_send_cmd(sc, &cmd);
   3671 }
   3672 
   3673 /* iwlwifi: mvm/utils.c */
   3674 static int
   3675 iwm_mvm_send_cmd_status(struct iwm_softc *sc,
   3676 	struct iwm_host_cmd *cmd, uint32_t *status)
   3677 {
   3678 	struct iwm_rx_packet *pkt;
   3679 	struct iwm_cmd_response *resp;
   3680 	int error, resp_len;
   3681 
   3682 	//lockdep_assert_held(&mvm->mutex);
   3683 
   3684 	KASSERT((cmd->flags & IWM_CMD_WANT_SKB) == 0);
   3685 	cmd->flags |= IWM_CMD_SYNC | IWM_CMD_WANT_SKB;
   3686 
   3687 	if ((error = iwm_send_cmd(sc, cmd)) != 0)
   3688 		return error;
   3689 	pkt = cmd->resp_pkt;
   3690 
   3691 	/* Can happen if RFKILL is asserted */
   3692 	if (!pkt) {
   3693 		error = 0;
   3694 		goto out_free_resp;
   3695 	}
   3696 
   3697 	if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
   3698 		error = EIO;
   3699 		goto out_free_resp;
   3700 	}
   3701 
   3702 	resp_len = iwm_rx_packet_payload_len(pkt);
   3703 	if (resp_len != sizeof(*resp)) {
   3704 		error = EIO;
   3705 		goto out_free_resp;
   3706 	}
   3707 
   3708 	resp = (void *)pkt->data;
   3709 	*status = le32toh(resp->status);
   3710  out_free_resp:
   3711 	iwm_free_resp(sc, cmd);
   3712 	return error;
   3713 }
   3714 
   3715 /* iwlwifi/mvm/utils.c */
   3716 static int
   3717 iwm_mvm_send_cmd_pdu_status(struct iwm_softc *sc, uint8_t id,
   3718 	uint16_t len, const void *data, uint32_t *status)
   3719 {
   3720 	struct iwm_host_cmd cmd = {
   3721 		.id = id,
   3722 		.len = { len, },
   3723 		.data = { data, },
   3724 	};
   3725 
   3726 	return iwm_mvm_send_cmd_status(sc, &cmd, status);
   3727 }
   3728 
   3729 static void
   3730 iwm_free_resp(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
   3731 {
   3732 	KASSERT(sc->sc_wantresp != -1);
   3733 	KASSERT((hcmd->flags & (IWM_CMD_WANT_SKB|IWM_CMD_SYNC))
   3734 	    == (IWM_CMD_WANT_SKB|IWM_CMD_SYNC));
   3735 	sc->sc_wantresp = -1;
   3736 	wakeup(&sc->sc_wantresp);
   3737 }
   3738 
   3739 /*
   3740  * Process a "command done" firmware notification.  This is where we wakeup
   3741  * processes waiting for a synchronous command completion.
   3742  * from if_iwn
   3743  */
   3744 static void
   3745 iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
   3746 {
   3747 	struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
   3748 	struct iwm_tx_data *data;
   3749 
   3750 	if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
   3751 		return;	/* Not a command ack. */
   3752 	}
   3753 
   3754 	data = &ring->data[pkt->hdr.idx];
   3755 
   3756 	/* If the command was mapped in an mbuf, free it. */
   3757 	if (data->m != NULL) {
   3758 		bus_dmamap_sync(sc->sc_dmat, data->map, 0,
   3759 		    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   3760 		bus_dmamap_unload(sc->sc_dmat, data->map);
   3761 		m_freem(data->m);
   3762 		data->m = NULL;
   3763 	}
   3764 	wakeup(&ring->desc[pkt->hdr.idx]);
   3765 }
   3766 
   3767 #if 0
   3768 /*
   3769  * necessary only for block ack mode
   3770  */
   3771 void
   3772 iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
   3773 	uint16_t len)
   3774 {
   3775 	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
   3776 	uint16_t w_val;
   3777 
   3778 	scd_bc_tbl = sc->sched_dma.vaddr;
   3779 
   3780 	len += 8; /* magic numbers came naturally from paris */
   3781 	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
   3782 		len = roundup(len, 4) / 4;
   3783 
   3784 	w_val = htole16(sta_id << 12 | len);
   3785 
   3786 	/* Update TX scheduler. */
   3787 	scd_bc_tbl[qid].tfd_offset[idx] = w_val;
   3788 	bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
    3789 	    (char *)(void *)&scd_bc_tbl[qid].tfd_offset[idx] -
    3790 	    (char *)(void *)sc->sched_dma.vaddr, sizeof(uint16_t), BUS_DMASYNC_PREWRITE);
   3791 
    3792 	/* First entries are mirrored past the ring end so the scheduler can read ahead across the wrap. */
   3793 	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
   3794 		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
   3795 		bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
    3796 		    (char *)(void *)&scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] -
   3797 		    (char *)(void *)sc->sched_dma.vaddr,
   3798 		    sizeof (uint16_t), BUS_DMASYNC_PREWRITE);
   3799 	}
   3800 }
   3801 #endif
   3802 
    3803 /*
    3804  * Fill in the rate-related fields for management frames, and leave them
    3805  * unfilled for data frames (the firmware's rate scaling takes care of
    3806  * those).  Return the selected TX rate.
    3807  */
   3808 static const struct iwm_rate *
   3809 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
   3810 	struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
   3811 {
   3812 	struct ieee80211com *ic = &sc->sc_ic;
   3813 	struct ieee80211_node *ni = &in->in_ni;
   3814 	const struct iwm_rate *rinfo;
   3815 	int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
   3816 	int ridx, rate_flags;
   3817 	int nrates = ni->ni_rates.rs_nrates;
   3818 
   3819 	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
   3820 	tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
   3821 
   3822 	if (type != IEEE80211_FC0_TYPE_DATA) {
   3823 		/* for non-data, use the lowest supported rate */
   3824 		ridx = (ic->ic_curmode == IEEE80211_MODE_11A) ?
   3825 		    IWM_RIDX_OFDM : IWM_RIDX_CCK;
   3826 	} else if (ic->ic_fixed_rate != -1) {
   3827 		ridx = sc->sc_fixed_ridx;
   3828 	} else {
   3829 		/* for data frames, use RS table */
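		/* The firmware's rate table is filled from the highest rate down, so invert net80211's index. */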
   3830 		tx->initial_rate_index = (nrates - 1) - ni->ni_txrate;
   3831 		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
   3832 		DPRINTFN(12, ("start with txrate %d\n", tx->initial_rate_index));
   3833 		ridx = in->in_ridx[ni->ni_txrate];
   3834 		return &iwm_rates[ridx];
   3835 	}
   3836 
   3837 	rinfo = &iwm_rates[ridx];
   3838 	rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
   3839 	if (IWM_RIDX_IS_CCK(ridx))
   3840 		rate_flags |= IWM_RATE_MCS_CCK_MSK;
   3841 	tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
   3842 
   3843 	return rinfo;
   3844 }
   3845 
   3846 #define TB0_SIZE 16
   3847 static int
   3848 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
   3849 {
   3850 	struct ieee80211com *ic = &sc->sc_ic;
   3851 	struct iwm_node *in = (void *)ni;
   3852 	struct iwm_tx_ring *ring;
   3853 	struct iwm_tx_data *data;
   3854 	struct iwm_tfd *desc;
   3855 	struct iwm_device_cmd *cmd;
   3856 	struct iwm_tx_cmd *tx;
   3857 	struct ieee80211_frame *wh;
   3858 	struct ieee80211_key *k = NULL;
   3859 	struct mbuf *m1;
   3860 	const struct iwm_rate *rinfo;
   3861 	uint32_t flags;
   3862 	u_int hdrlen;
   3863 	bus_dma_segment_t *seg;
   3864 	uint8_t tid, type;
   3865 	int i, totlen, error, pad;
   3866 	int hdrlen2;
   3867 
   3868 	wh = mtod(m, struct ieee80211_frame *);
   3869 	hdrlen = ieee80211_anyhdrsize(wh);
   3870 	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
   3871 
   3872 	hdrlen2 = (ieee80211_has_qos(wh)) ?
   3873 	    sizeof (struct ieee80211_qosframe) :
   3874 	    sizeof (struct ieee80211_frame);
   3875 
   3876 	if (hdrlen != hdrlen2)
   3877 		DPRINTF(("%s: hdrlen error (%d != %d)\n",
   3878 		    DEVNAME(sc), hdrlen, hdrlen2));
   3879 
   3880 	tid = 0;
   3881 
   3882 	ring = &sc->txq[ac];
   3883 	desc = &ring->desc[ring->cur];
   3884 	memset(desc, 0, sizeof(*desc));
   3885 	data = &ring->data[ring->cur];
   3886 
   3887 	/* Fill out iwm_tx_cmd to send to the firmware */
   3888 	cmd = &ring->cmd[ring->cur];
   3889 	cmd->hdr.code = IWM_TX_CMD;
   3890 	cmd->hdr.flags = 0;
   3891 	cmd->hdr.qid = ring->qid;
   3892 	cmd->hdr.idx = ring->cur;
   3893 
   3894 	tx = (void *)cmd->data;
   3895 	memset(tx, 0, sizeof(*tx));
   3896 
   3897 	rinfo = iwm_tx_fill_cmd(sc, in, wh, tx);
   3898 
   3899 	if (sc->sc_drvbpf != NULL) {
   3900 		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
   3901 
   3902 		tap->wt_flags = 0;
   3903 		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
   3904 		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
   3905 		tap->wt_rate = rinfo->rate;
   3906 		tap->wt_hwqueue = ac;
   3907 		if (wh->i_fc[1] & IEEE80211_FC1_WEP)
   3908 			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
   3909 
   3910 		bpf_mtap2(sc->sc_drvbpf, tap, sc->sc_txtap_len, m);
   3911 	}
   3912 
   3913 	/* Encrypt the frame if need be. */
   3914 	if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
   3915 		k = ieee80211_crypto_encap(ic, ni, m);
   3916 		if (k == NULL) {
   3917 			m_freem(m);
   3918 			return ENOBUFS;
   3919 		}
   3920 		/* Packet header may have moved, reset our local pointer. */
   3921 		wh = mtod(m, struct ieee80211_frame *);
   3922 	}
   3923 	totlen = m->m_pkthdr.len;
   3924 
   3925 	flags = 0;
   3926 	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
   3927 		flags |= IWM_TX_CMD_FLG_ACK;
   3928 	}
   3929 
    3930 	/* RTS/CTS protection applies to large unicast data frames. */
    3931 	if (type == IEEE80211_FC0_TYPE_DATA
    3932 	    && (totlen + IEEE80211_CRC_LEN > ic->ic_rtsthreshold)
    3933 	    && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
    3934 		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
    3935 	}
   3935 
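	/*
	 * Multicast and non-data frames go through the auxiliary station;
	 * unicast data frames use the BSS station entry.
	 */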
   3936 	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
   3937 	    type != IEEE80211_FC0_TYPE_DATA)
   3938 		tx->sta_id = sc->sc_aux_sta.sta_id;
   3939 	else
   3940 		tx->sta_id = IWM_STATION_ID;
   3941 
   3942 	if (type == IEEE80211_FC0_TYPE_MGT) {
   3943 		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
   3944 
   3945 		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
   3946 		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
   3947 			tx->pm_frame_timeout = htole16(3);
   3948 		else
   3949 			tx->pm_frame_timeout = htole16(2);
   3950 	} else {
   3951 		tx->pm_frame_timeout = htole16(0);
   3952 	}
   3953 
   3954 	if (hdrlen & 3) {
   3955 		/* First segment length must be a multiple of 4. */
   3956 		flags |= IWM_TX_CMD_FLG_MH_PAD;
   3957 		pad = 4 - (hdrlen & 3);
   3958 	} else
   3959 		pad = 0;
   3960 
   3961 	tx->driver_txop = 0;
   3962 	tx->next_frame_len = 0;
   3963 
   3964 	tx->len = htole16(totlen);
   3965 	tx->tid_tspec = tid;
   3966 	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
   3967 
   3968 	/* Set physical address of "scratch area". */
   3969 	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
   3970 	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
   3971 
   3972 	/* Copy 802.11 header in TX command. */
   3973 	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);
   3974 
   3975 	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;
   3976 
   3977 	tx->sec_ctl = 0;
   3978 	tx->tx_flags |= htole32(flags);
   3979 
   3980 	/* Trim 802.11 header. */
   3981 	m_adj(m, hdrlen);
   3982 
   3983 	error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
   3984 	    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
   3985 	if (error != 0) {
   3986 		if (error != EFBIG) {
   3987 			aprint_error_dev(sc->sc_dev,
   3988 			    "can't map mbuf (error %d)\n", error);
   3989 			m_freem(m);
   3990 			return error;
   3991 		}
   3992 		/* Too many DMA segments, linearize mbuf. */
   3993 		MGETHDR(m1, M_DONTWAIT, MT_DATA);
   3994 		if (m1 == NULL) {
   3995 			m_freem(m);
   3996 			return ENOBUFS;
   3997 		}
   3998 		if (m->m_pkthdr.len > MHLEN) {
   3999 			MCLGET(m1, M_DONTWAIT);
   4000 			if (!(m1->m_flags & M_EXT)) {
   4001 				m_freem(m);
   4002 				m_freem(m1);
   4003 				return ENOBUFS;
   4004 			}
   4005 		}
   4006 		m_copydata(m, 0, m->m_pkthdr.len, mtod(m1, void *));
   4007 		m1->m_pkthdr.len = m1->m_len = m->m_pkthdr.len;
   4008 		m_freem(m);
   4009 		m = m1;
   4010 
   4011 		error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
   4012 		    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
   4013 		if (error != 0) {
   4014 			aprint_error_dev(sc->sc_dev,
   4015 			    "can't map mbuf (error %d)\n", error);
   4016 			m_freem(m);
   4017 			return error;
   4018 		}
   4019 	}
   4020 	data->m = m;
   4021 	data->in = in;
   4022 	data->done = 0;
   4023 
   4024 	DPRINTFN(8, ("sending txd %p, in %p\n", data, data->in));
   4025 	KASSERT(data->in != NULL);
   4026 
   4027 	DPRINTFN(8, ("sending data: qid=%d idx=%d len=%d nsegs=%d\n",
   4028 	    ring->qid, ring->cur, totlen, data->map->dm_nsegs));
   4029 
   4030 	/* Fill TX descriptor. */
   4031 	desc->num_tbs = 2 + data->map->dm_nsegs;
   4032 
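	/*
	 * The first two TBs cover the TX command (header, tx cmd, 802.11
	 * header and pad), split at TB0_SIZE bytes; the remaining TBs map
	 * the mbuf data segments.
	 */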
   4033 	desc->tbs[0].lo = htole32(data->cmd_paddr);
   4034 	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
   4035 	    (TB0_SIZE << 4);
   4036 	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
   4037 	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
   4038 	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
   4039 	      + hdrlen + pad - TB0_SIZE) << 4);
   4040 
   4041 	/* Other DMA segments are for data payload. */
   4042 	seg = data->map->dm_segs;
   4043 	for (i = 0; i < data->map->dm_nsegs; i++, seg++) {
   4044 		desc->tbs[i+2].lo = htole32(seg->ds_addr);
   4045 		desc->tbs[i+2].hi_n_len = \
   4046 		    htole16(iwm_get_dma_hi_addr(seg->ds_addr))
   4047 		    | ((seg->ds_len) << 4);
   4048 	}
   4049 
   4050 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize,
   4051 	    BUS_DMASYNC_PREWRITE);
   4052 	bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
   4053 	    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
   4054 	    sizeof (*cmd), BUS_DMASYNC_PREWRITE);
   4055 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
   4056 	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
   4057 	    sizeof (*desc), BUS_DMASYNC_PREWRITE);
   4058 
   4059 #if 0
   4060 	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
   4061 #endif
   4062 
   4063 	/* Kick TX ring. */
   4064 	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
   4065 	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
   4066 
   4067 	/* Mark TX ring as full if we reach a certain threshold. */
   4068 	if (++ring->queued > IWM_TX_RING_HIMARK) {
   4069 		sc->qfullmsk |= 1 << ring->qid;
   4070 	}
   4071 
   4072 	return 0;
   4073 }
   4074 
   4075 #if 0
   4076 /* not necessary? */
   4077 static int
   4078 iwm_mvm_flush_tx_path(struct iwm_softc *sc, int tfd_msk, int sync)
   4079 {
   4080 	struct iwm_tx_path_flush_cmd flush_cmd = {
   4081 		.queues_ctl = htole32(tfd_msk),
   4082 		.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
   4083 	};
   4084 	int ret;
   4085 
   4086 	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH,
   4087 	    sync ? IWM_CMD_SYNC : IWM_CMD_ASYNC,
   4088 	    sizeof(flush_cmd), &flush_cmd);
   4089 	if (ret)
   4090 		aprint_error_dev(sc->sc_dev, "Flushing tx queue failed: %d\n",
   4091 		    ret);
   4092 	return ret;
   4093 }
   4094 #endif
   4095 
   4096 
   4097 /*
   4098  * BEGIN mvm/power.c
   4099  */
   4100 
   4101 #define IWM_POWER_KEEP_ALIVE_PERIOD_SEC    25
   4102 
   4103 static int
   4104 iwm_mvm_beacon_filter_send_cmd(struct iwm_softc *sc,
   4105 	struct iwm_beacon_filter_cmd *cmd)
   4106 {
   4107 	int ret;
   4108 
   4109 	ret = iwm_mvm_send_cmd_pdu(sc, IWM_REPLY_BEACON_FILTERING_CMD,
   4110 	    IWM_CMD_SYNC, sizeof(struct iwm_beacon_filter_cmd), cmd);
   4111 
   4112 	if (!ret) {
   4113 		DPRINTF(("ba_enable_beacon_abort is: %d\n",
   4114 		    le32toh(cmd->ba_enable_beacon_abort)));
   4115 		DPRINTF(("ba_escape_timer is: %d\n",
   4116 		    le32toh(cmd->ba_escape_timer)));
   4117 		DPRINTF(("bf_debug_flag is: %d\n",
   4118 		    le32toh(cmd->bf_debug_flag)));
   4119 		DPRINTF(("bf_enable_beacon_filter is: %d\n",
   4120 		    le32toh(cmd->bf_enable_beacon_filter)));
   4121 		DPRINTF(("bf_energy_delta is: %d\n",
   4122 		    le32toh(cmd->bf_energy_delta)));
   4123 		DPRINTF(("bf_escape_timer is: %d\n",
   4124 		    le32toh(cmd->bf_escape_timer)));
   4125 		DPRINTF(("bf_roaming_energy_delta is: %d\n",
   4126 		    le32toh(cmd->bf_roaming_energy_delta)));
   4127 		DPRINTF(("bf_roaming_state is: %d\n",
   4128 		    le32toh(cmd->bf_roaming_state)));
   4129 		DPRINTF(("bf_temp_threshold is: %d\n",
   4130 		    le32toh(cmd->bf_temp_threshold)));
   4131 		DPRINTF(("bf_temp_fast_filter is: %d\n",
   4132 		    le32toh(cmd->bf_temp_fast_filter)));
   4133 		DPRINTF(("bf_temp_slow_filter is: %d\n",
   4134 		    le32toh(cmd->bf_temp_slow_filter)));
   4135 	}
   4136 	return ret;
   4137 }
   4138 
   4139 static void
   4140 iwm_mvm_beacon_filter_set_cqm_params(struct iwm_softc *sc,
   4141 	struct iwm_node *in, struct iwm_beacon_filter_cmd *cmd)
   4142 {
   4143 	cmd->ba_enable_beacon_abort = htole32(sc->sc_bf.ba_enabled);
   4144 }
   4145 
   4146 static int
   4147 iwm_mvm_update_beacon_abort(struct iwm_softc *sc, struct iwm_node *in,
   4148 	int enable)
   4149 {
   4150 	struct iwm_beacon_filter_cmd cmd = {
   4151 		IWM_BF_CMD_CONFIG_DEFAULTS,
   4152 		.bf_enable_beacon_filter = htole32(1),
   4153 		.ba_enable_beacon_abort = htole32(enable),
   4154 	};
   4155 
   4156 	if (!sc->sc_bf.bf_enabled)
   4157 		return 0;
   4158 
   4159 	sc->sc_bf.ba_enabled = enable;
   4160 	iwm_mvm_beacon_filter_set_cqm_params(sc, in, &cmd);
   4161 	return iwm_mvm_beacon_filter_send_cmd(sc, &cmd);
   4162 }
   4163 
   4164 static void
   4165 iwm_mvm_power_log(struct iwm_softc *sc, struct iwm_mac_power_cmd *cmd)
   4166 {
   4167 	DPRINTF(("Sending power table command on mac id 0x%X for "
   4168 	    "power level %d, flags = 0x%X\n",
   4169 	    cmd->id_and_color, IWM_POWER_SCHEME_CAM, le16toh(cmd->flags)));
   4170 	DPRINTF(("Keep alive = %u sec\n", le16toh(cmd->keep_alive_seconds)));
   4171 
   4172 	if (!(cmd->flags & htole16(IWM_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK))) {
   4173 		DPRINTF(("Disable power management\n"));
   4174 		return;
   4175 	}
   4176 	KASSERT(0);
   4177 
   4178 #if 0
   4179 	DPRINTF(mvm, "Rx timeout = %u usec\n",
   4180 			le32_to_cpu(cmd->rx_data_timeout));
   4181 	DPRINTF(mvm, "Tx timeout = %u usec\n",
   4182 			le32_to_cpu(cmd->tx_data_timeout));
   4183 	if (cmd->flags & cpu_to_le16(IWM_POWER_FLAGS_SKIP_OVER_DTIM_MSK))
   4184 		DPRINTF(mvm, "DTIM periods to skip = %u\n",
   4185 				cmd->skip_dtim_periods);
   4186 	if (cmd->flags & cpu_to_le16(IWM_POWER_FLAGS_LPRX_ENA_MSK))
   4187 		DPRINTF(mvm, "LP RX RSSI threshold = %u\n",
   4188 				cmd->lprx_rssi_threshold);
   4189 	if (cmd->flags & cpu_to_le16(IWM_POWER_FLAGS_ADVANCE_PM_ENA_MSK)) {
   4190 		DPRINTF(mvm, "uAPSD enabled\n");
   4191 		DPRINTF(mvm, "Rx timeout (uAPSD) = %u usec\n",
   4192 				le32_to_cpu(cmd->rx_data_timeout_uapsd));
   4193 		DPRINTF(mvm, "Tx timeout (uAPSD) = %u usec\n",
   4194 				le32_to_cpu(cmd->tx_data_timeout_uapsd));
   4195 		DPRINTF(mvm, "QNDP TID = %d\n", cmd->qndp_tid);
   4196 		DPRINTF(mvm, "ACs flags = 0x%x\n", cmd->uapsd_ac_flags);
   4197 		DPRINTF(mvm, "Max SP = %d\n", cmd->uapsd_max_sp);
   4198 	}
   4199 #endif
   4200 }
   4201 
   4202 static void
   4203 iwm_mvm_power_build_cmd(struct iwm_softc *sc, struct iwm_node *in,
   4204 	struct iwm_mac_power_cmd *cmd)
   4205 {
   4206 	struct ieee80211com *ic = &sc->sc_ic;
   4207 	struct ieee80211_node *ni = &in->in_ni;
   4208 	int dtimper, dtimper_msec;
   4209 	int keep_alive;
   4210 
   4211 	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
   4212 	    in->in_color));
   4213 	dtimper = ic->ic_dtim_period ?: 1;
   4214 
   4215 	/*
   4216 	 * Regardless of power management state the driver must set
   4217 	 * keep alive period. FW will use it for sending keep alive NDPs
   4218 	 * immediately after association. Check that keep alive period
   4219 	 * is at least 3 * DTIM
   4220 	 */
   4221 	dtimper_msec = dtimper * ni->ni_intval;
   4222 	keep_alive
   4223 	    = MAX(3 * dtimper_msec, 1000 * IWM_POWER_KEEP_ALIVE_PERIOD_SEC);
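	/* Convert from milliseconds to seconds, rounding up. */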
   4224 	keep_alive = roundup(keep_alive, 1000) / 1000;
   4225 	cmd->keep_alive_seconds = htole16(keep_alive);
   4226 }
   4227 
   4228 static int
   4229 iwm_mvm_power_mac_update_mode(struct iwm_softc *sc, struct iwm_node *in)
   4230 {
   4231 	int ret;
   4232 	int ba_enable;
   4233 	struct iwm_mac_power_cmd cmd;
   4234 
   4235 	memset(&cmd, 0, sizeof(cmd));
   4236 
   4237 	iwm_mvm_power_build_cmd(sc, in, &cmd);
   4238 	iwm_mvm_power_log(sc, &cmd);
   4239 
   4240 	if ((ret = iwm_mvm_send_cmd_pdu(sc, IWM_MAC_PM_POWER_TABLE,
   4241 	    IWM_CMD_SYNC, sizeof(cmd), &cmd)) != 0)
   4242 		return ret;
   4243 
   4244 	ba_enable = !!(cmd.flags &
   4245 	    htole16(IWM_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
   4246 	return iwm_mvm_update_beacon_abort(sc, in, ba_enable);
   4247 }
   4248 
   4249 static int
   4250 iwm_mvm_power_update_device(struct iwm_softc *sc)
   4251 {
   4252 	struct iwm_device_power_cmd cmd = {
   4253 		.flags = htole16(IWM_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK),
   4254 	};
   4255 
   4256 	if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
   4257 		return 0;
   4258 
   4259 	cmd.flags |= htole16(IWM_DEVICE_POWER_FLAGS_CAM_MSK);
   4260 	DPRINTF(("Sending device power command with flags = 0x%X\n", cmd.flags));
   4261 
   4262 	return iwm_mvm_send_cmd_pdu(sc,
   4263 	    IWM_POWER_TABLE_CMD, IWM_CMD_SYNC, sizeof(cmd), &cmd);
   4264 }
   4265 
   4266 static int
   4267 iwm_mvm_enable_beacon_filter(struct iwm_softc *sc, struct iwm_node *in)
   4268 {
   4269 	struct iwm_beacon_filter_cmd cmd = {
   4270 		IWM_BF_CMD_CONFIG_DEFAULTS,
   4271 		.bf_enable_beacon_filter = htole32(1),
   4272 	};
   4273 	int ret;
   4274 
   4275 	iwm_mvm_beacon_filter_set_cqm_params(sc, in, &cmd);
   4276 	ret = iwm_mvm_beacon_filter_send_cmd(sc, &cmd);
   4277 
   4278 	if (ret == 0)
   4279 		sc->sc_bf.bf_enabled = 1;
   4280 
   4281 	return ret;
   4282 }
   4283 
   4284 static int
   4285 iwm_mvm_disable_beacon_filter(struct iwm_softc *sc, struct iwm_node *in)
   4286 {
   4287 	struct iwm_beacon_filter_cmd cmd;
   4288 	int ret;
   4289 
   4290 	memset(&cmd, 0, sizeof(cmd));
   4291 	if ((sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_BF_UPDATED) == 0)
   4292 		return 0;
   4293 
   4294 	ret = iwm_mvm_beacon_filter_send_cmd(sc, &cmd);
   4295 	if (ret == 0)
   4296 		sc->sc_bf.bf_enabled = 0;
   4297 
   4298 	return ret;
   4299 }
   4300 
   4301 #if 0
   4302 static int
   4303 iwm_mvm_update_beacon_filter(struct iwm_softc *sc, struct iwm_node *in)
   4304 {
   4305 	if (!sc->sc_bf.bf_enabled)
   4306 		return 0;
   4307 
   4308 	return iwm_mvm_enable_beacon_filter(sc, in);
   4309 }
   4310 #endif
   4311 
   4312 /*
   4313  * END mvm/power.c
   4314  */
   4315 
   4316 /*
   4317  * BEGIN mvm/sta.c
   4318  */
   4319 
   4320 static void
   4321 iwm_mvm_add_sta_cmd_v6_to_v5(struct iwm_mvm_add_sta_cmd_v6 *cmd_v6,
   4322 	struct iwm_mvm_add_sta_cmd_v5 *cmd_v5)
   4323 {
   4324 	memset(cmd_v5, 0, sizeof(*cmd_v5));
   4325 
   4326 	cmd_v5->add_modify = cmd_v6->add_modify;
   4327 	cmd_v5->tid_disable_tx = cmd_v6->tid_disable_tx;
   4328 	cmd_v5->mac_id_n_color = cmd_v6->mac_id_n_color;
   4329 	memcpy(cmd_v5->addr, cmd_v6->addr, ETHER_ADDR_LEN);
   4330 	cmd_v5->sta_id = cmd_v6->sta_id;
   4331 	cmd_v5->modify_mask = cmd_v6->modify_mask;
   4332 	cmd_v5->station_flags = cmd_v6->station_flags;
   4333 	cmd_v5->station_flags_msk = cmd_v6->station_flags_msk;
   4334 	cmd_v5->add_immediate_ba_tid = cmd_v6->add_immediate_ba_tid;
   4335 	cmd_v5->remove_immediate_ba_tid = cmd_v6->remove_immediate_ba_tid;
   4336 	cmd_v5->add_immediate_ba_ssn = cmd_v6->add_immediate_ba_ssn;
   4337 	cmd_v5->sleep_tx_count = cmd_v6->sleep_tx_count;
   4338 	cmd_v5->sleep_state_flags = cmd_v6->sleep_state_flags;
   4339 	cmd_v5->assoc_id = cmd_v6->assoc_id;
   4340 	cmd_v5->beamform_flags = cmd_v6->beamform_flags;
   4341 	cmd_v5->tfd_queue_msk = cmd_v6->tfd_queue_msk;
   4342 }
   4343 
   4344 static int
   4345 iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *sc,
   4346 	struct iwm_mvm_add_sta_cmd_v6 *cmd, int *status)
   4347 {
   4348 	struct iwm_mvm_add_sta_cmd_v5 cmd_v5;
   4349 
   4350 	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_STA_KEY_CMD) {
   4351 		return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA,
   4352 		    sizeof(*cmd), cmd, status);
   4353 	}
   4354 
   4355 	iwm_mvm_add_sta_cmd_v6_to_v5(cmd, &cmd_v5);
   4356 
   4357 	return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(cmd_v5),
   4358 	    &cmd_v5, status);
   4359 }
   4360 
   4361 /* send station add/update command to firmware */
   4362 static int
   4363 iwm_mvm_sta_send_to_fw(struct iwm_softc *sc, struct iwm_node *in, int update)
   4364 {
   4365 	struct iwm_mvm_add_sta_cmd_v6 add_sta_cmd;
   4366 	int ret;
   4367 	uint32_t status;
   4368 
   4369 	memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
   4370 
   4371 	add_sta_cmd.sta_id = IWM_STATION_ID;
   4372 	add_sta_cmd.mac_id_n_color
   4373 	    = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
   4374 	if (!update) {
   4375 		add_sta_cmd.tfd_queue_msk = htole32(0xf);
   4376 		IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
   4377 	}
   4378 	add_sta_cmd.add_modify = update ? 1 : 0;
   4379 	add_sta_cmd.station_flags_msk
   4380 	    |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
   4381 
   4382 	status = IWM_ADD_STA_SUCCESS;
   4383 	ret = iwm_mvm_send_add_sta_cmd_status(sc, &add_sta_cmd, &status);
   4384 	if (ret)
   4385 		return ret;
   4386 
   4387 	switch (status) {
   4388 	case IWM_ADD_STA_SUCCESS:
   4389 		break;
   4390 	default:
   4391 		ret = EIO;
   4392 		DPRINTF(("IWM_ADD_STA failed\n"));
   4393 		break;
   4394 	}
   4395 
   4396 	return ret;
   4397 }
   4398 
   4399 static int
   4400 iwm_mvm_add_sta(struct iwm_softc *sc, struct iwm_node *in)
   4401 {
   4402 	int ret;
   4403 
   4404 	ret = iwm_mvm_sta_send_to_fw(sc, in, 0);
   4405 	if (ret)
   4406 		return ret;
   4407 
   4408 	return 0;
   4409 }
   4410 
   4411 static int
   4412 iwm_mvm_update_sta(struct iwm_softc *sc, struct iwm_node *in)
   4413 {
   4414 	return iwm_mvm_sta_send_to_fw(sc, in, 1);
   4415 }
   4416 
   4417 static int
   4418 iwm_mvm_add_int_sta_common(struct iwm_softc *sc, struct iwm_int_sta *sta,
   4419 	const uint8_t *addr, uint16_t mac_id, uint16_t color)
   4420 {
   4421 	struct iwm_mvm_add_sta_cmd_v6 cmd;
   4422 	int ret;
   4423 	uint32_t status;
   4424 
   4425 	memset(&cmd, 0, sizeof(cmd));
   4426 	cmd.sta_id = sta->sta_id;
   4427 	cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(mac_id, color));
   4428 
   4429 	cmd.tfd_queue_msk = htole32(sta->tfd_queue_msk);
   4430 
   4431 	if (addr)
   4432 		memcpy(cmd.addr, addr, ETHER_ADDR_LEN);
   4433 
   4434 	ret = iwm_mvm_send_add_sta_cmd_status(sc, &cmd, &status);
   4435 	if (ret)
   4436 		return ret;
   4437 
   4438 	switch (status) {
   4439 	case IWM_ADD_STA_SUCCESS:
   4440 		DPRINTF(("Internal station added.\n"));
   4441 		return 0;
   4442 	default:
   4443 		DPRINTF(("%s: Add internal station failed, status=0x%x\n",
   4444 		    DEVNAME(sc), status));
   4445 		ret = EIO;
   4446 		break;
   4447 	}
   4448 	return ret;
   4449 }
   4450 
   4451 static int
   4452 iwm_mvm_add_aux_sta(struct iwm_softc *sc)
   4453 {
   4454 	int ret;
   4455 
   4456 	sc->sc_aux_sta.sta_id = 3;
   4457 	sc->sc_aux_sta.tfd_queue_msk = 0;
   4458 
   4459 	ret = iwm_mvm_add_int_sta_common(sc,
   4460 	    &sc->sc_aux_sta, NULL, IWM_MAC_INDEX_AUX, 0);
   4461 
   4462 	if (ret)
   4463 		memset(&sc->sc_aux_sta, 0, sizeof(sc->sc_aux_sta));
   4464 	return ret;
   4465 }
   4466 
   4467 /*
   4468  * END mvm/sta.c
   4469  */
   4470 
   4471 /*
   4472  * BEGIN mvm/scan.c
   4473  */
   4474 
   4475 #define IWM_PLCP_QUIET_THRESH 1
   4476 #define IWM_ACTIVE_QUIET_TIME 10
   4477 #define LONG_OUT_TIME_PERIOD 600
   4478 #define SHORT_OUT_TIME_PERIOD 200
   4479 #define SUSPEND_TIME_PERIOD 100
   4480 
   4481 static uint16_t
   4482 iwm_mvm_scan_rx_chain(struct iwm_softc *sc)
   4483 {
   4484 	uint16_t rx_chain;
   4485 	uint8_t rx_ant;
   4486 
   4487 	rx_ant = IWM_FW_VALID_RX_ANT(sc);
   4488 	rx_chain = rx_ant << IWM_PHY_RX_CHAIN_VALID_POS;
   4489 	rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
   4490 	rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_SEL_POS;
   4491 	rx_chain |= 0x1 << IWM_PHY_RX_CHAIN_DRIVER_FORCE_POS;
   4492 	return htole16(rx_chain);
   4493 }
   4494 
   4495 #define ieee80211_tu_to_usec(a) (1024*(a))
   4496 
   4497 static uint32_t
   4498 iwm_mvm_scan_max_out_time(struct iwm_softc *sc, uint32_t flags, int is_assoc)
   4499 {
   4500 	if (!is_assoc)
   4501 		return 0;
   4502 	if (flags & 0x1)
   4503 		return htole32(ieee80211_tu_to_usec(SHORT_OUT_TIME_PERIOD));
   4504 	return htole32(ieee80211_tu_to_usec(LONG_OUT_TIME_PERIOD));
   4505 }
   4506 
   4507 static uint32_t
   4508 iwm_mvm_scan_suspend_time(struct iwm_softc *sc, int is_assoc)
   4509 {
   4510 	if (!is_assoc)
   4511 		return 0;
   4512 	return htole32(ieee80211_tu_to_usec(SUSPEND_TIME_PERIOD));
   4513 }
   4514 
   4515 static uint32_t
   4516 iwm_mvm_scan_rxon_flags(struct iwm_softc *sc, int flags)
   4517 {
   4518 	if (flags & IEEE80211_CHAN_2GHZ)
   4519 		return htole32(IWM_PHY_BAND_24);
   4520 	else
   4521 		return htole32(IWM_PHY_BAND_5);
   4522 }
   4523 
   4524 static uint32_t
   4525 iwm_mvm_scan_rate_n_flags(struct iwm_softc *sc, int flags, int no_cck)
   4526 {
   4527 	uint32_t tx_ant;
   4528 	int i, ind;
   4529 
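	/*
	 * Rotate through the valid TX antennas so successive scan requests
	 * do not always transmit on the same antenna.
	 */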
   4530 	for (i = 0, ind = sc->sc_scan_last_antenna;
   4531 	    i < IWM_RATE_MCS_ANT_NUM; i++) {
   4532 		ind = (ind + 1) % IWM_RATE_MCS_ANT_NUM;
   4533 		if (IWM_FW_VALID_TX_ANT(sc) & (1 << ind)) {
   4534 			sc->sc_scan_last_antenna = ind;
   4535 			break;
   4536 		}
   4537 	}
   4538 	tx_ant = (1 << sc->sc_scan_last_antenna) << IWM_RATE_MCS_ANT_POS;
   4539 
   4540 	if ((flags & IEEE80211_CHAN_2GHZ) && !no_cck)
   4541 		return htole32(IWM_RATE_1M_PLCP | IWM_RATE_MCS_CCK_MSK |
   4542 				   tx_ant);
   4543 	else
   4544 		return htole32(IWM_RATE_6M_PLCP | tx_ant);
   4545 }
   4546 
   4547 /*
   4548  * If req->n_ssids > 0, it means we should do an active scan.
   4549  * In case of active scan w/o directed scan, we receive a zero-length SSID
   4550  * just to notify that this scan is active and not passive.
   4551  * In order to notify the FW of the number of SSIDs we wish to scan (including
   4552  * the zero-length one), we need to set the corresponding bits in chan->type,
    4553  * one for each SSID, and set the active bit (first).  The first SSID is
    4554  * already included in the probe template, so we need to set only
    4555  * req->n_ssids - 1 bits in addition to the first bit.
   4556  */
   4557 static uint16_t
   4558 iwm_mvm_get_active_dwell(struct iwm_softc *sc, int flags, int n_ssids)
   4559 {
   4560 	if (flags & IEEE80211_CHAN_2GHZ)
   4561 		return 30  + 3 * (n_ssids + 1);
   4562 	return 20  + 2 * (n_ssids + 1);
   4563 }
   4564 
   4565 static uint16_t
   4566 iwm_mvm_get_passive_dwell(struct iwm_softc *sc, int flags)
   4567 {
   4568 	return (flags & IEEE80211_CHAN_2GHZ) ? 100 + 20 : 100 + 10;
   4569 }
   4570 
   4571 static int
   4572 iwm_mvm_scan_fill_channels(struct iwm_softc *sc, struct iwm_scan_cmd *cmd,
   4573 	int flags, int n_ssids, int basic_ssid)
   4574 {
   4575 	struct ieee80211com *ic = &sc->sc_ic;
   4576 	uint16_t passive_dwell = iwm_mvm_get_passive_dwell(sc, flags);
   4577 	uint16_t active_dwell = iwm_mvm_get_active_dwell(sc, flags, n_ssids);
   4578 	struct iwm_scan_channel *chan = (struct iwm_scan_channel *)
   4579 		(cmd->data + le16toh(cmd->tx_cmd.len));
   4580 	int type = (1 << n_ssids) - 1;
   4581 	struct ieee80211_channel *c;
   4582 	int nchan;
   4583 
   4584 	if (!basic_ssid)
   4585 		type |= (1 << n_ssids);
   4586 
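	/*
	 * net80211 leaves ic_channels[0] unused, so start at index 1 and
	 * pick up every channel whose flags match the requested band.
	 */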
   4587 	for (nchan = 0, c = &ic->ic_channels[1];
   4588 	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX];
   4589 	    c++) {
   4590 		if ((c->ic_flags & flags) != flags)
   4591 			continue;
   4592 
   4593 		chan->channel = htole16(ieee80211_mhz2ieee(c->ic_freq, flags));
   4594 		chan->type = htole32(type);
   4595 		if (c->ic_flags & IEEE80211_CHAN_PASSIVE)
   4596 			chan->type &= htole32(~IWM_SCAN_CHANNEL_TYPE_ACTIVE);
   4597 		chan->active_dwell = htole16(active_dwell);
   4598 		chan->passive_dwell = htole16(passive_dwell);
   4599 		chan->iteration_count = htole16(1);
   4600 		chan++;
   4601 		nchan++;
   4602 	}
   4603 	if (nchan == 0)
   4604 		DPRINTF(("%s: NO CHANNEL!\n", DEVNAME(sc)));
   4605 	return nchan;
   4606 }
   4607 
   4608 /*
   4609  * Fill in probe request with the following parameters:
    4610  * TA is our own MAC address.  The frame is broadcast, so both DA and
    4611  * the BSSID are the broadcast address.
    4612  * The probe request IEs come in two parts: first the most prioritized
    4613  * SSID, if a directed scan was requested, then whatever extra
    4614  * information was handed to us as the scan request IE.
   4615  */
   4616 static uint16_t
   4617 iwm_mvm_fill_probe_req(struct iwm_softc *sc, struct ieee80211_frame *frame,
   4618 	const uint8_t *ta, int n_ssids, const uint8_t *ssid, int ssid_len,
   4619 	const uint8_t *ie, int ie_len, int left)
   4620 {
   4621 	int len = 0;
   4622 	uint8_t *pos = NULL;
   4623 
   4624 	/* Make sure there is enough space for the probe request,
   4625 	 * two mandatory IEs and the data */
   4626 	left -= sizeof(*frame);
   4627 	if (left < 0)
   4628 		return 0;
   4629 
   4630 	frame->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
   4631 	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
   4632 	frame->i_fc[1] = IEEE80211_FC1_DIR_NODS;
   4633 	IEEE80211_ADDR_COPY(frame->i_addr1, etherbroadcastaddr);
   4634 	memcpy(frame->i_addr2, ta, ETHER_ADDR_LEN);
   4635 	IEEE80211_ADDR_COPY(frame->i_addr3, etherbroadcastaddr);
   4636 
   4637 	len += sizeof(*frame);
   4638 	CTASSERT(sizeof(*frame) == 24);
   4639 
   4640 	/* for passive scans, no need to fill anything */
   4641 	if (n_ssids == 0)
   4642 		return (uint16_t)len;
   4643 
   4644 	/* points to the payload of the request */
   4645 	pos = (uint8_t *)frame + sizeof(*frame);
   4646 
   4647 	/* fill in our SSID IE */
   4648 	left -= ssid_len + 2;
   4649 	if (left < 0)
   4650 		return 0;
   4651 	*pos++ = IEEE80211_ELEMID_SSID;
   4652 	*pos++ = ssid_len;
   4653 	if (ssid && ssid_len) { /* ssid_len may be == 0 even if ssid is valid */
   4654 		memcpy(pos, ssid, ssid_len);
   4655 		pos += ssid_len;
   4656 	}
   4657 
   4658 	len += ssid_len + 2;
   4659 
   4660 	if (left < ie_len)
   4661 		return len;
   4662 
   4663 	if (ie && ie_len) {
   4664 		memcpy(pos, ie, ie_len);
   4665 		len += ie_len;
   4666 	}
   4667 
   4668 	return (uint16_t)len;
   4669 }
   4670 
   4671 static int
   4672 iwm_mvm_scan_request(struct iwm_softc *sc, int flags,
   4673 	int n_ssids, uint8_t *ssid, int ssid_len)
   4674 {
   4675 	struct ieee80211com *ic = &sc->sc_ic;
   4676 	struct iwm_host_cmd hcmd = {
   4677 		.id = IWM_SCAN_REQUEST_CMD,
   4678 		.len = { 0, },
   4679 		.data = { sc->sc_scan_cmd, },
   4680 		.flags = IWM_CMD_SYNC,
   4681 		.dataflags = { IWM_HCMD_DFL_NOCOPY, },
   4682 	};
   4683 	struct iwm_scan_cmd *cmd = sc->sc_scan_cmd;
   4684 	int is_assoc = 0;
   4685 	int ret;
   4686 	uint32_t status;
   4687 	int basic_ssid = !(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_NO_BASIC_SSID);
   4688 
   4689 	//lockdep_assert_held(&mvm->mutex);
   4690 
   4691 	sc->sc_scanband = flags & (IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_5GHZ);
   4692 
   4693 	DPRINTF(("Handling ieee80211 scan request\n"));
   4694 	memset(cmd, 0, sc->sc_scan_cmd_len);
   4695 
   4696 	cmd->quiet_time = htole16(IWM_ACTIVE_QUIET_TIME);
   4697 	cmd->quiet_plcp_th = htole16(IWM_PLCP_QUIET_THRESH);
   4698 	cmd->rxchain_sel_flags = iwm_mvm_scan_rx_chain(sc);
   4699 	cmd->max_out_time = iwm_mvm_scan_max_out_time(sc, 0, is_assoc);
   4700 	cmd->suspend_time = iwm_mvm_scan_suspend_time(sc, is_assoc);
   4701 	cmd->rxon_flags = iwm_mvm_scan_rxon_flags(sc, flags);
   4702 	cmd->filter_flags = htole32(IWM_MAC_FILTER_ACCEPT_GRP |
   4703 	    IWM_MAC_FILTER_IN_BEACON);
   4704 
   4705 	cmd->type = htole32(IWM_SCAN_TYPE_FORCED);
   4706 	cmd->repeats = htole32(1);
   4707 
    4708 	/*
    4709 	 * For a directed (active) scan, let the firmware promote passive channels
    4710 	 * to active once activity is seen; a passive scan remains passive.
    4711 	 */
   4712 	if (n_ssids > 0) {
   4713 		cmd->passive2active = htole16(1);
   4714 		cmd->scan_flags |= IWM_SCAN_FLAGS_PASSIVE2ACTIVE;
   4715 #if 0
   4716 		if (basic_ssid) {
   4717 			ssid = req->ssids[0].ssid;
   4718 			ssid_len = req->ssids[0].ssid_len;
   4719 		}
   4720 #endif
   4721 	} else {
   4722 		cmd->passive2active = 0;
   4723 		cmd->scan_flags &= ~IWM_SCAN_FLAGS_PASSIVE2ACTIVE;
   4724 	}
   4725 
   4726 	cmd->tx_cmd.tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
   4727 	    IWM_TX_CMD_FLG_BT_DIS);
   4728 	cmd->tx_cmd.sta_id = sc->sc_aux_sta.sta_id;
   4729 	cmd->tx_cmd.life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
   4730 	cmd->tx_cmd.rate_n_flags = iwm_mvm_scan_rate_n_flags(sc, flags, 1/*XXX*/);
   4731 
   4732 	cmd->tx_cmd.len = htole16(iwm_mvm_fill_probe_req(sc,
   4733 			    (struct ieee80211_frame *)cmd->data,
   4734 			    ic->ic_myaddr, n_ssids, ssid, ssid_len,
   4735 			    NULL, 0, sc->sc_capa_max_probe_len));
   4736 
   4737 	cmd->channel_count
   4738 	    = iwm_mvm_scan_fill_channels(sc, cmd, flags, n_ssids, basic_ssid);
   4739 
   4740 	cmd->len = htole16(sizeof(struct iwm_scan_cmd) +
   4741 		le16toh(cmd->tx_cmd.len) +
   4742 		(cmd->channel_count * sizeof(struct iwm_scan_channel)));
   4743 	hcmd.len[0] = le16toh(cmd->len);
   4744 
   4745 	status = IWM_SCAN_RESPONSE_OK;
   4746 	ret = iwm_mvm_send_cmd_status(sc, &hcmd, &status);
   4747 	if (!ret && status == IWM_SCAN_RESPONSE_OK) {
   4748 		DPRINTF(("Scan request was sent successfully\n"));
   4749 	} else {
   4750 		/*
   4751 		 * If the scan failed, it usually means that the FW was unable
   4752 		 * to allocate the time events. Warn on it, but maybe we
   4753 		 * should try to send the command again with different params.
   4754 		 */
   4755 		sc->sc_scanband = 0;
   4756 		ret = EIO;
   4757 	}
   4758 	return ret;
   4759 }
   4760 
   4761 /*
   4762  * END mvm/scan.c
   4763  */
   4764 
   4765 /*
   4766  * BEGIN mvm/mac-ctxt.c
   4767  */
   4768 
   4769 static void
   4770 iwm_mvm_ack_rates(struct iwm_softc *sc, struct iwm_node *in,
   4771 	int *cck_rates, int *ofdm_rates)
   4772 {
   4773 	struct ieee80211_node *ni = &in->in_ni;
   4774 	int lowest_present_ofdm = 100;
   4775 	int lowest_present_cck = 100;
   4776 	uint8_t cck = 0;
   4777 	uint8_t ofdm = 0;
   4778 	int i;
   4779 
   4780 	if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) {
   4781 		for (i = 0; i <= IWM_LAST_CCK_RATE; i++) {
   4782 			cck |= (1 << i);
   4783 			if (lowest_present_cck > i)
   4784 				lowest_present_cck = i;
   4785 		}
   4786 	}
   4787 	for (i = IWM_FIRST_OFDM_RATE; i <= IWM_LAST_NON_HT_RATE; i++) {
   4788 		int adj = i - IWM_FIRST_OFDM_RATE;
   4789 		ofdm |= (1 << adj);
   4790 		if (lowest_present_ofdm > i)
   4791 			lowest_present_ofdm = i;
   4792 	}
   4793 
   4794 	/*
   4795 	 * Now we've got the basic rates as bitmaps in the ofdm and cck
   4796 	 * variables. This isn't sufficient though, as there might not
   4797 	 * be all the right rates in the bitmap. E.g. if the only basic
   4798 	 * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
   4799 	 * and 6 Mbps because the 802.11-2007 standard says in 9.6:
   4800 	 *
   4801 	 *    [...] a STA responding to a received frame shall transmit
   4802 	 *    its Control Response frame [...] at the highest rate in the
   4803 	 *    BSSBasicRateSet parameter that is less than or equal to the
   4804 	 *    rate of the immediately previous frame in the frame exchange
   4805 	 *    sequence ([...]) and that is of the same modulation class
   4806 	 *    ([...]) as the received frame. If no rate contained in the
   4807 	 *    BSSBasicRateSet parameter meets these conditions, then the
   4808 	 *    control frame sent in response to a received frame shall be
   4809 	 *    transmitted at the highest mandatory rate of the PHY that is
   4810 	 *    less than or equal to the rate of the received frame, and
   4811 	 *    that is of the same modulation class as the received frame.
   4812 	 *
   4813 	 * As a consequence, we need to add all mandatory rates that are
   4814 	 * lower than all of the basic rates to these bitmaps.
   4815 	 */
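         	/*
         	 * The ofdm and cck bitmaps are relative to the first rate of
         	 * their modulation class: bit 0 of "ofdm" is 6 Mbps
         	 * (IWM_FIRST_OFDM_RATE) and bit 0 of "cck" is 1 Mbps, hence the
         	 * shifts below.  For example, if the lowest rate present were
         	 * 36 Mbps, all three mandatory OFDM rates (24, 12 and 6 Mbps)
         	 * would be added; if it were 12 Mbps, only 6 Mbps would be added.
         	 */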
   4816 
   4817 	if (IWM_RATE_24M_INDEX < lowest_present_ofdm)
   4818 		ofdm |= IWM_RATE_BIT_MSK(24) >> IWM_FIRST_OFDM_RATE;
   4819 	if (IWM_RATE_12M_INDEX < lowest_present_ofdm)
   4820 		ofdm |= IWM_RATE_BIT_MSK(12) >> IWM_FIRST_OFDM_RATE;
   4821 	/* 6M already there or needed so always add */
   4822 	ofdm |= IWM_RATE_BIT_MSK(6) >> IWM_FIRST_OFDM_RATE;
   4823 
   4824 	/*
   4825 	 * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
   4826 	 * Note, however:
   4827 	 *  - if no CCK rates are basic, it must be ERP since there must
   4828 	 *    be some basic rates at all, so they're OFDM => ERP PHY
   4829 	 *    (or we're in 5 GHz, and the cck bitmap will never be used)
   4830 	 *  - if 11M is a basic rate, it must be ERP as well, so add 5.5M
   4831 	 *  - if 5.5M is basic, 1M and 2M are mandatory
   4832 	 *  - if 2M is basic, 1M is mandatory
   4833 	 *  - if 1M is basic, that's the only valid ACK rate.
    4834 	 * As a consequence, it's not as complicated as it sounds: just add
   4835 	 * any lower rates to the ACK rate bitmap.
   4836 	 */
   4837 	if (IWM_RATE_11M_INDEX < lowest_present_cck)
   4838 		cck |= IWM_RATE_BIT_MSK(11) >> IWM_FIRST_CCK_RATE;
   4839 	if (IWM_RATE_5M_INDEX < lowest_present_cck)
   4840 		cck |= IWM_RATE_BIT_MSK(5) >> IWM_FIRST_CCK_RATE;
   4841 	if (IWM_RATE_2M_INDEX < lowest_present_cck)
   4842 		cck |= IWM_RATE_BIT_MSK(2) >> IWM_FIRST_CCK_RATE;
   4843 	/* 1M already there or needed so always add */
   4844 	cck |= IWM_RATE_BIT_MSK(1) >> IWM_FIRST_CCK_RATE;
   4845 
   4846 	*cck_rates = cck;
   4847 	*ofdm_rates = ofdm;
   4848 }
   4849 
   4850 static void
   4851 iwm_mvm_mac_ctxt_cmd_common(struct iwm_softc *sc, struct iwm_node *in,
   4852 	struct iwm_mac_ctx_cmd *cmd, uint32_t action)
   4853 {
   4854 	struct ieee80211com *ic = &sc->sc_ic;
   4855 	struct ieee80211_node *ni = ic->ic_bss;
   4856 	int cck_ack_rates, ofdm_ack_rates;
   4857 	int i;
   4858 
   4859 	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
   4860 	    in->in_color));
   4861 	cmd->action = htole32(action);
   4862 
   4863 	cmd->mac_type = htole32(IWM_FW_MAC_TYPE_BSS_STA);
   4864 	cmd->tsf_id = htole32(in->in_tsfid);
   4865 
   4866 	IEEE80211_ADDR_COPY(cmd->node_addr, ic->ic_myaddr);
   4867 	if (in->in_assoc) {
   4868 		IEEE80211_ADDR_COPY(cmd->bssid_addr, ni->ni_bssid);
   4869 	} else {
   4870 		memset(cmd->bssid_addr, 0, sizeof(cmd->bssid_addr));
   4871 	}
   4872 	iwm_mvm_ack_rates(sc, in, &cck_ack_rates, &ofdm_ack_rates);
   4873 	cmd->cck_rates = htole32(cck_ack_rates);
   4874 	cmd->ofdm_rates = htole32(ofdm_ack_rates);
   4875 
   4876 	cmd->cck_short_preamble
   4877 	    = htole32((ic->ic_flags & IEEE80211_F_SHPREAMBLE)
   4878 	      ? IWM_MAC_FLG_SHORT_PREAMBLE : 0);
   4879 	cmd->short_slot
   4880 	    = htole32((ic->ic_flags & IEEE80211_F_SHSLOT)
   4881 	      ? IWM_MAC_FLG_SHORT_SLOT : 0);
   4882 
   4883 	for (i = 0; i < IWM_AC_NUM+1; i++) {
   4884 		int txf = i;
   4885 
   4886 		cmd->ac[txf].cw_min = htole16(0x0f);
   4887 		cmd->ac[txf].cw_max = htole16(0x3f);
   4888 		cmd->ac[txf].aifsn = 1;
   4889 		cmd->ac[txf].fifos_mask = (1 << txf);
   4890 		cmd->ac[txf].edca_txop = 0;
   4891 	}
   4892 
   4893 	if (ic->ic_flags & IEEE80211_F_USEPROT)
   4894 		cmd->protection_flags |= htole32(IWM_MAC_PROT_FLG_TGG_PROTECT);
   4895 
   4896 	cmd->filter_flags = htole32(IWM_MAC_FILTER_ACCEPT_GRP);
   4897 }
   4898 
   4899 static int
   4900 iwm_mvm_mac_ctxt_send_cmd(struct iwm_softc *sc, struct iwm_mac_ctx_cmd *cmd)
   4901 {
   4902 	int ret = iwm_mvm_send_cmd_pdu(sc, IWM_MAC_CONTEXT_CMD, IWM_CMD_SYNC,
   4903 				       sizeof(*cmd), cmd);
   4904 	if (ret)
   4905 		DPRINTF(("%s: Failed to send MAC context (action:%d): %d\n",
   4906 		    DEVNAME(sc), le32toh(cmd->action), ret));
   4907 	return ret;
   4908 }
   4909 
   4910 /*
    4911  * Fill the data specific to a MAC context of type station or P2P client
   4912  */
   4913 static void
   4914 iwm_mvm_mac_ctxt_cmd_fill_sta(struct iwm_softc *sc, struct iwm_node *in,
   4915 	struct iwm_mac_data_sta *ctxt_sta, int force_assoc_off)
   4916 {
   4917 	struct ieee80211_node *ni = &in->in_ni;
   4918 	unsigned dtim_period, dtim_count;
   4919 
   4920 	dtim_period = ni->ni_dtim_period;
   4921 	dtim_count = ni->ni_dtim_count;
   4922 
   4923 	/* We need the dtim_period to set the MAC as associated */
   4924 	if (in->in_assoc && dtim_period && !force_assoc_off) {
   4925 		uint64_t tsf;
   4926 		uint32_t dtim_offs;
   4927 
   4928 		/*
   4929 		 * The DTIM count counts down, so when it is N that means N
   4930 		 * more beacon intervals happen until the DTIM TBTT. Therefore
   4931 		 * add this to the current time. If that ends up being in the
   4932 		 * future, the firmware will handle it.
   4933 		 *
   4934 		 * Also note that the system_timestamp (which we get here as
   4935 		 * "sync_device_ts") and TSF timestamp aren't at exactly the
   4936 		 * same offset in the frame -- the TSF is at the first symbol
   4937 		 * of the TSF, the system timestamp is at signal acquisition
   4938 		 * time. This means there's an offset between them of at most
   4939 		 * a few hundred microseconds (24 * 8 bits + PLCP time gives
    4940 		 * 384us in the longest case); this is currently not relevant
   4941 		 * as the firmware wakes up around 2ms before the TBTT.
   4942 		 */
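         		/*
         		 * Worked example with illustrative values: dtim_count == 2
         		 * and a beacon interval of 100 TU give dtim_offs = 200 TU,
         		 * i.e. 200 * 1024 = 204800 usecs added to both timestamps.
         		 */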
   4943 		dtim_offs = dtim_count * ni->ni_intval;
   4944 		/* convert TU to usecs */
   4945 		dtim_offs *= 1024;
   4946 
   4947 		tsf = ni->ni_tstamp.tsf;
   4948 
   4949 		ctxt_sta->dtim_tsf = htole64(tsf + dtim_offs);
   4950 		ctxt_sta->dtim_time = htole64(ni->ni_rstamp + dtim_offs);
   4951 
   4952 		DPRINTF(("DTIM TBTT is 0x%llx/0x%x, offset %d\n",
   4953 		    (long long)le64toh(ctxt_sta->dtim_tsf),
   4954 		    le32toh(ctxt_sta->dtim_time), dtim_offs));
   4955 
   4956 		ctxt_sta->is_assoc = htole32(1);
   4957 	} else {
   4958 		ctxt_sta->is_assoc = htole32(0);
   4959 	}
   4960 
   4961 	ctxt_sta->bi = htole32(ni->ni_intval);
   4962 	ctxt_sta->bi_reciprocal = htole32(iwm_mvm_reciprocal(ni->ni_intval));
   4963 	ctxt_sta->dtim_interval = htole32(ni->ni_intval * dtim_period);
   4964 	ctxt_sta->dtim_reciprocal =
   4965 	    htole32(iwm_mvm_reciprocal(ni->ni_intval * dtim_period));
   4966 
   4967 	/* 10 = CONN_MAX_LISTEN_INTERVAL */
   4968 	ctxt_sta->listen_interval = htole32(10);
   4969 	ctxt_sta->assoc_id = htole32(ni->ni_associd);
   4970 }
   4971 
   4972 static int
   4973 iwm_mvm_mac_ctxt_cmd_station(struct iwm_softc *sc, struct iwm_node *in,
   4974 	uint32_t action)
   4975 {
   4976 	struct iwm_mac_ctx_cmd cmd;
   4977 
   4978 	memset(&cmd, 0, sizeof(cmd));
   4979 
   4980 	/* Fill the common data for all mac context types */
   4981 	iwm_mvm_mac_ctxt_cmd_common(sc, in, &cmd, action);
   4982 
    4983 	/* Allow beacons to pass through as long as we are not associated, or
    4984 	 * we do not have dtim period information */
   4985 	if (!in->in_assoc || !sc->sc_ic.ic_dtim_period)
   4986 		cmd.filter_flags |= htole32(IWM_MAC_FILTER_IN_BEACON);
   4987 	else
   4988 		cmd.filter_flags &= ~htole32(IWM_MAC_FILTER_IN_BEACON);
   4989 
   4990 	/* Fill the data specific for station mode */
   4991 	iwm_mvm_mac_ctxt_cmd_fill_sta(sc, in,
   4992 	    &cmd.sta, action == IWM_FW_CTXT_ACTION_ADD);
   4993 
   4994 	return iwm_mvm_mac_ctxt_send_cmd(sc, &cmd);
   4995 }
   4996 
   4997 static int
   4998 iwm_mvm_mac_ctx_send(struct iwm_softc *sc, struct iwm_node *in, uint32_t action)
   4999 {
   5000 	return iwm_mvm_mac_ctxt_cmd_station(sc, in, action);
   5001 }
   5002 
   5003 static int
   5004 iwm_mvm_mac_ctxt_add(struct iwm_softc *sc, struct iwm_node *in)
   5005 {
   5006 	int ret;
   5007 
   5008 	ret = iwm_mvm_mac_ctx_send(sc, in, IWM_FW_CTXT_ACTION_ADD);
   5009 	if (ret)
   5010 		return ret;
   5011 
   5012 	return 0;
   5013 }
   5014 
   5015 static int
   5016 iwm_mvm_mac_ctxt_changed(struct iwm_softc *sc, struct iwm_node *in)
   5017 {
   5018 	return iwm_mvm_mac_ctx_send(sc, in, IWM_FW_CTXT_ACTION_MODIFY);
   5019 }
   5020 
   5021 #if 0
   5022 static int
   5023 iwm_mvm_mac_ctxt_remove(struct iwm_softc *sc, struct iwm_node *in)
   5024 {
   5025 	struct iwm_mac_ctx_cmd cmd;
   5026 	int ret;
   5027 
   5028 	if (!in->in_uploaded) {
    5029 		printf("%s: attempt to remove !uploaded node %p\n", DEVNAME(sc), in);
   5030 		return EIO;
   5031 	}
   5032 
   5033 	memset(&cmd, 0, sizeof(cmd));
   5034 
   5035 	cmd.id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
   5036 	    in->in_color));
   5037 	cmd.action = htole32(IWM_FW_CTXT_ACTION_REMOVE);
   5038 
   5039 	ret = iwm_mvm_send_cmd_pdu(sc,
   5040 	    IWM_MAC_CONTEXT_CMD, IWM_CMD_SYNC, sizeof(cmd), &cmd);
   5041 	if (ret) {
   5042 		aprint_error_dev(sc->sc_dev,
   5043 		    "Failed to remove MAC context: %d\n", ret);
   5044 		return ret;
   5045 	}
   5046 	in->in_uploaded = 0;
   5047 
   5048 	return 0;
   5049 }
   5050 #endif
   5051 
   5052 #define IWM_MVM_MISSED_BEACONS_THRESHOLD 8
   5053 
   5054 static void
   5055 iwm_mvm_rx_missed_beacons_notif(struct iwm_softc *sc,
   5056 	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
   5057 {
   5058 	struct iwm_missed_beacons_notif *mb = (void *)pkt->data;
   5059 
   5060 	DPRINTF(("missed bcn mac_id=%u, consecutive=%u (%u, %u, %u)\n",
   5061 	    le32toh(mb->mac_id),
   5062 	    le32toh(mb->consec_missed_beacons),
   5063 	    le32toh(mb->consec_missed_beacons_since_last_rx),
   5064 	    le32toh(mb->num_recvd_beacons),
   5065 	    le32toh(mb->num_expected_beacons)));
   5066 
   5067 	/*
   5068 	 * TODO: the threshold should be adjusted based on latency conditions,
   5069 	 * and/or in case of a CS flow on one of the other AP vifs.
   5070 	 */
   5071 	if (le32toh(mb->consec_missed_beacons_since_last_rx) >
   5072 	    IWM_MVM_MISSED_BEACONS_THRESHOLD)
   5073 		ieee80211_beacon_miss(&sc->sc_ic);
   5074 }
   5075 
   5076 /*
   5077  * END mvm/mac-ctxt.c
   5078  */
   5079 
   5080 /*
   5081  * BEGIN mvm/quota.c
   5082  */
   5083 
   5084 static int
   5085 iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_node *in)
   5086 {
   5087 	struct iwm_time_quota_cmd cmd;
   5088 	int i, idx, ret, num_active_macs, quota, quota_rem;
   5089 	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
   5090 	int n_ifs[IWM_MAX_BINDINGS] = {0, };
   5091 	uint16_t id;
   5092 
   5093 	memset(&cmd, 0, sizeof(cmd));
   5094 
   5095 	/* currently, PHY ID == binding ID */
   5096 	if (in) {
   5097 		id = in->in_phyctxt->id;
   5098 		KASSERT(id < IWM_MAX_BINDINGS);
   5099 		colors[id] = in->in_phyctxt->color;
   5100 
   5101 		if (1)
   5102 			n_ifs[id] = 1;
   5103 	}
   5104 
   5105 	/*
   5106 	 * The FW's scheduling session consists of
   5107 	 * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
   5108 	 * equally between all the bindings that require quota
   5109 	 */
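         	/*
         	 * For example, assuming IWM_MVM_MAX_QUOTA were 128: a single
         	 * active MAC would get all 128 fragments; three active MACs
         	 * would get 128 / 3 = 42 fragments each, with the remaining
         	 * 128 % 3 = 2 fragments handed to the first binding further
         	 * below.
         	 */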
   5110 	num_active_macs = 0;
   5111 	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
   5112 		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
   5113 		num_active_macs += n_ifs[i];
   5114 	}
   5115 
   5116 	quota = 0;
   5117 	quota_rem = 0;
   5118 	if (num_active_macs) {
   5119 		quota = IWM_MVM_MAX_QUOTA / num_active_macs;
   5120 		quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
   5121 	}
   5122 
   5123 	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
   5124 		if (colors[i] < 0)
   5125 			continue;
   5126 
   5127 		cmd.quotas[idx].id_and_color =
   5128 			htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
   5129 
   5130 		if (n_ifs[i] <= 0) {
   5131 			cmd.quotas[idx].quota = htole32(0);
   5132 			cmd.quotas[idx].max_duration = htole32(0);
   5133 		} else {
   5134 			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
   5135 			cmd.quotas[idx].max_duration = htole32(0);
   5136 		}
   5137 		idx++;
   5138 	}
   5139 
   5140 	/* Give the remainder of the session to the first binding */
   5141 	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
   5142 
   5143 	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
   5144 	    sizeof(cmd), &cmd);
   5145 	if (ret)
   5146 		DPRINTF(("%s: Failed to send quota: %d\n", DEVNAME(sc), ret));
   5147 	return ret;
   5148 }
   5149 
   5150 /*
   5151  * END mvm/quota.c
   5152  */
   5153 
   5154 /*
    5155  * ieee80211 routines
   5156  */
   5157 
   5158 /*
    5159  * Change to AUTH state in the 802.11 state machine.  Roughly matches what
   5160  * Linux does in bss_info_changed().
   5161  */
   5162 static int
   5163 iwm_auth(struct iwm_softc *sc)
   5164 {
   5165 	struct ieee80211com *ic = &sc->sc_ic;
   5166 	struct iwm_node *in = (void *)ic->ic_bss;
   5167 	uint32_t duration;
   5168 	uint32_t min_duration;
   5169 	int error;
   5170 
   5171 	in->in_assoc = 0;
   5172 
   5173 	if ((error = iwm_allow_mcast(sc)) != 0)
   5174 		return error;
   5175 
   5176 	if ((error = iwm_mvm_mac_ctxt_add(sc, in)) != 0) {
   5177 		DPRINTF(("%s: failed to add MAC\n", DEVNAME(sc)));
   5178 		return error;
   5179 	}
   5180 
   5181 	if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
   5182 	    in->in_ni.ni_chan, 1, 1)) != 0) {
    5183 		DPRINTF(("%s: failed to add phy ctxt\n", DEVNAME(sc)));
   5184 		return error;
   5185 	}
   5186 	in->in_phyctxt = &sc->sc_phyctxt[0];
   5187 
   5188 	if ((error = iwm_mvm_binding_add_vif(sc, in)) != 0) {
    5189 		DPRINTF(("%s: binding cmd failed\n", DEVNAME(sc)));
   5190 		return error;
   5191 	}
   5192 
   5193 	if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
    5194 		DPRINTF(("%s: failed to add sta\n", DEVNAME(sc)));
   5195 		return error;
   5196 	}
   5197 
   5198 	/* a bit superfluous? */
   5199 	while (sc->sc_auth_prot)
   5200 		tsleep(&sc->sc_auth_prot, 0, "iwmauth", 0);
   5201 	sc->sc_auth_prot = 1;
   5202 
   5203 	duration = min(IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS,
   5204 	    200 + in->in_ni.ni_intval);
   5205 	min_duration = min(IWM_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS,
   5206 	    100 + in->in_ni.ni_intval);
   5207 	iwm_mvm_protect_session(sc, in, duration, min_duration, 500);
   5208 
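         	/*
         	 * sc_auth_prot is a small handshake with the interrupt path (see
         	 * the IWM_TIME_EVENT_NOTIFICATION handling): 1 means protection
         	 * was requested, 2 means the firmware started the time event,
         	 * 0 means it ended or never arrived, and -1 means the request
         	 * was denied.
         	 */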
   5209 	while (sc->sc_auth_prot != 2) {
   5210 		/*
   5211 		 * well, meh, but if the kernel is sleeping for half a
   5212 		 * second, we have bigger problems
   5213 		 */
   5214 		if (sc->sc_auth_prot == 0) {
   5215 			DPRINTF(("%s: missed auth window!\n", DEVNAME(sc)));
   5216 			return ETIMEDOUT;
   5217 		} else if (sc->sc_auth_prot == -1) {
   5218 			DPRINTF(("%s: no time event, denied!\n", DEVNAME(sc)));
   5219 			sc->sc_auth_prot = 0;
   5220 			return EAUTH;
   5221 		}
   5222 		tsleep(&sc->sc_auth_prot, 0, "iwmau2", 0);
   5223 	}
   5224 
   5225 	return 0;
   5226 }
   5227 
   5228 static int
   5229 iwm_assoc(struct iwm_softc *sc)
   5230 {
   5231 	struct ieee80211com *ic = &sc->sc_ic;
   5232 	struct iwm_node *in = (void *)ic->ic_bss;
   5233 	int error;
   5234 
   5235 	if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
   5236 		DPRINTF(("%s: failed to update STA\n", DEVNAME(sc)));
   5237 		return error;
   5238 	}
   5239 
   5240 	in->in_assoc = 1;
   5241 	if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
   5242 		DPRINTF(("%s: failed to update MAC\n", DEVNAME(sc)));
   5243 		return error;
   5244 	}
   5245 
   5246 	return 0;
   5247 }
   5248 
   5249 static int
   5250 iwm_release(struct iwm_softc *sc, struct iwm_node *in)
   5251 {
   5252 	/*
   5253 	 * Ok, so *technically* the proper set of calls for going
   5254 	 * from RUN back to SCAN is:
   5255 	 *
   5256 	 * iwm_mvm_power_mac_disable(sc, in);
   5257 	 * iwm_mvm_mac_ctxt_changed(sc, in);
   5258 	 * iwm_mvm_rm_sta(sc, in);
   5259 	 * iwm_mvm_update_quotas(sc, NULL);
   5260 	 * iwm_mvm_mac_ctxt_changed(sc, in);
   5261 	 * iwm_mvm_binding_remove_vif(sc, in);
   5262 	 * iwm_mvm_mac_ctxt_remove(sc, in);
   5263 	 *
    5264 	 * However, that freezes the device no matter which permutations
   5265 	 * and modifications are attempted.  Obviously, this driver is missing
   5266 	 * something since it works in the Linux driver, but figuring out what
   5267 	 * is missing is a little more complicated.  Now, since we're going
   5268 	 * back to nothing anyway, we'll just do a complete device reset.
    5269 	 * Up yours, device!
   5270 	 */
   5271 	//iwm_mvm_flush_tx_path(sc, 0xf, 1);
   5272 	iwm_stop_device(sc);
   5273 	iwm_init_hw(sc);
   5274 	if (in)
   5275 		in->in_assoc = 0;
   5276 	return 0;
   5277 
   5278 #if 0
   5279 	int error;
   5280 
   5281 	iwm_mvm_power_mac_disable(sc, in);
   5282 
   5283 	if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
   5284 		aprint_error_dev(sc->sc_dev, "mac ctxt change fail 1 %d\n",
   5285 		    error);
   5286 		return error;
   5287 	}
   5288 
   5289 	if ((error = iwm_mvm_rm_sta(sc, in)) != 0) {
   5290 		aprint_error_dev(sc->sc_dev, "sta remove fail %d\n", error);
   5291 		return error;
   5292 	}
   5294 	in->in_assoc = 0;
   5295 	iwm_mvm_update_quotas(sc, NULL);
   5296 	if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
   5297 		aprint_error_dev(sc->sc_dev, "mac ctxt change fail 2 %d\n",
   5298 		    error);
   5299 		return error;
   5300 	}
   5301 	iwm_mvm_binding_remove_vif(sc, in);
   5302 
   5303 	iwm_mvm_mac_ctxt_remove(sc, in);
   5304 
   5305 	return error;
   5306 #endif
   5307 }
   5308 
   5309 
   5310 static struct ieee80211_node *
   5311 iwm_node_alloc(struct ieee80211_node_table *nt)
   5312 {
   5313 	return malloc(sizeof(struct iwm_node), M_80211_NODE, M_NOWAIT | M_ZERO);
   5314 }
   5315 
   5316 static void
   5317 iwm_calib_timeout(void *arg)
   5318 {
   5319 	struct iwm_softc *sc = arg;
   5320 	struct ieee80211com *ic = &sc->sc_ic;
   5321 	int s;
   5322 
   5323 	s = splnet();
   5324 	if (ic->ic_fixed_rate == -1
   5325 	    && ic->ic_opmode == IEEE80211_M_STA
   5326 	    && ic->ic_bss) {
   5327 		struct iwm_node *in = (void *)ic->ic_bss;
   5328 		ieee80211_amrr_choose(&sc->sc_amrr, &in->in_ni, &in->in_amn);
   5329 	}
   5330 	splx(s);
   5331 
   5332 	callout_schedule(&sc->sc_calib_to, hz/2);
   5333 }
   5334 
   5335 static void
   5336 iwm_setrates(struct iwm_node *in)
   5337 {
   5338 	struct ieee80211_node *ni = &in->in_ni;
   5339 	struct ieee80211com *ic = ni->ni_ic;
   5340 	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
   5341 	struct iwm_lq_cmd *lq = &in->in_lq;
   5342 	int nrates = ni->ni_rates.rs_nrates;
   5343 	int i, ridx, tab = 0;
   5344 	int txant = 0;
   5345 
   5346 	if (nrates > __arraycount(lq->rs_table) ||
   5347 	    nrates > IEEE80211_RATE_MAXSIZE) {
   5348 		DPRINTF(("%s: node supports %d rates, driver handles only "
   5349 		    "%zu\n", DEVNAME(sc), nrates, __arraycount(lq->rs_table)));
   5350 		return;
   5351 	}
   5352 
   5353 	/* first figure out which rates we should support */
   5354 	memset(&in->in_ridx, -1, sizeof(in->in_ridx));
   5355 	for (i = 0; i < nrates; i++) {
   5356 		int rate = ni->ni_rates.rs_rates[i] & IEEE80211_RATE_VAL;
   5357 
   5358 		/* Map 802.11 rate to HW rate index. */
   5359 		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
   5360 			if (iwm_rates[ridx].rate == rate)
   5361 				break;
   5362 		if (ridx > IWM_RIDX_MAX)
   5363 			DPRINTF(("%s: WARNING: device rate for %d not found!\n",
   5364 			    DEVNAME(sc), rate));
   5365 		else
   5366 			in->in_ridx[i] = ridx;
   5367 	}
   5368 
   5369 	/* then construct a lq_cmd based on those */
   5370 	memset(lq, 0, sizeof(*lq));
   5371 	lq->sta_id = IWM_STATION_ID;
   5372 
   5373 	/*
   5374 	 * are these used? (we don't do SISO or MIMO)
   5375 	 * need to set them to non-zero, though, or we get an error.
   5376 	 */
   5377 	lq->single_stream_ant_msk = 1;
   5378 	lq->dual_stream_ant_msk = 1;
   5379 
   5380 	/*
   5381 	 * Build the actual rate selection table.
   5382 	 * The lowest bits are the rates.  Additionally,
   5383 	 * CCK needs bit 9 to be set.  The rest of the bits
    5384 	 * we add to the table select the tx antenna.
    5385 	 * Note that we add the rates with the highest rate first
    5386 	 * (the opposite of the ni_rates ordering).
   5387 	 */
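         	/*
         	 * For example, with nrates == 3, rs_table[0..2] end up holding
         	 * the three supported rates (highest first) and rs_table[3..]
         	 * all repeat the lowest rate, presumably so that retries keep
         	 * falling back to the most robust rate.
         	 */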
   5388 	for (i = 0; i < nrates; i++) {
   5389 		int nextant;
   5390 
   5391 		if (txant == 0)
   5392 			txant = IWM_FW_VALID_TX_ANT(sc);
   5393 		nextant = 1<<(ffs(txant)-1);
   5394 		txant &= ~nextant;
   5395 
   5396 		ridx = in->in_ridx[(nrates-1)-i];
   5397 		tab = iwm_rates[ridx].plcp;
   5398 		tab |= nextant << IWM_RATE_MCS_ANT_POS;
   5399 		if (IWM_RIDX_IS_CCK(ridx))
   5400 			tab |= IWM_RATE_MCS_CCK_MSK;
   5401 		DPRINTFN(2, ("station rate %d %x\n", i, tab));
   5402 		lq->rs_table[i] = htole32(tab);
   5403 	}
   5404 	/* then fill the rest with the lowest possible rate */
   5405 	for (i = nrates; i < __arraycount(lq->rs_table); i++) {
   5406 		KASSERT(tab != 0);
   5407 		lq->rs_table[i] = htole32(tab);
   5408 	}
   5409 
   5410 	/* init amrr */
   5411 	ieee80211_amrr_node_init(&sc->sc_amrr, &in->in_amn);
    5412 	/* Start at the lowest available bit-rate; AMRR will raise it. */
   5413 	ni->ni_txrate = 0;
   5414 }
   5415 
   5416 static int
   5417 iwm_media_change(struct ifnet *ifp)
   5418 {
   5419 	struct iwm_softc *sc = ifp->if_softc;
   5420 	struct ieee80211com *ic = &sc->sc_ic;
   5421 	uint8_t rate, ridx;
   5422 	int error;
   5423 
   5424 	error = ieee80211_media_change(ifp);
   5425 	if (error != ENETRESET)
   5426 		return error;
   5427 
   5428 	if (ic->ic_fixed_rate != -1) {
   5429 		rate = ic->ic_sup_rates[ic->ic_curmode].
   5430 		    rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL;
   5431 		/* Map 802.11 rate to HW rate index. */
   5432 		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
   5433 			if (iwm_rates[ridx].rate == rate)
   5434 				break;
   5435 		sc->sc_fixed_ridx = ridx;
   5436 	}
   5437 
   5438 	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
   5439 	    (IFF_UP | IFF_RUNNING)) {
   5440 		iwm_stop(ifp, 0);
   5441 		error = iwm_init(ifp);
   5442 	}
   5443 	return error;
   5444 }
   5445 
   5446 static void
   5447 iwm_newstate_cb(struct work *wk, void *v)
   5448 {
   5449 	struct iwm_softc *sc = v;
   5450 	struct ieee80211com *ic = &sc->sc_ic;
   5451 	struct iwm_newstate_state *iwmns = (void *)wk;
   5452 	enum ieee80211_state nstate = iwmns->ns_nstate;
   5453 	int generation = iwmns->ns_generation;
   5454 	struct iwm_node *in;
   5455 	int arg = iwmns->ns_arg;
   5456 	int error;
   5457 
   5458 	kmem_free(iwmns, sizeof(*iwmns));
   5459 
   5460 	DPRINTF(("Prepare to switch state %d->%d\n", ic->ic_state, nstate));
   5461 	if (sc->sc_generation != generation) {
   5462 		DPRINTF(("newstate_cb: someone pulled the plug meanwhile\n"));
   5463 		if (nstate == IEEE80211_S_INIT) {
   5464 			DPRINTF(("newstate_cb: nstate == IEEE80211_S_INIT: calling sc_newstate()\n"));
   5465 			sc->sc_newstate(ic, nstate, arg);
   5466 		}
   5467 		return;
   5468 	}
   5469 
   5470 	DPRINTF(("switching state %d->%d\n", ic->ic_state, nstate));
   5471 
   5472 	/* disable beacon filtering if we're hopping out of RUN */
   5473 	if (ic->ic_state == IEEE80211_S_RUN && nstate != ic->ic_state) {
   5474 		iwm_mvm_disable_beacon_filter(sc, (void *)ic->ic_bss);
   5475 
    5476 		if ((in = (void *)ic->ic_bss) != NULL)
   5477 			in->in_assoc = 0;
   5478 		iwm_release(sc, NULL);
   5479 
   5480 		/*
   5481 		 * It's impossible to directly go RUN->SCAN. If we iwm_release()
   5482 		 * above then the card will be completely reinitialized,
   5483 		 * so the driver must do everything necessary to bring the card
   5484 		 * from INIT to SCAN.
   5485 		 *
   5486 		 * Additionally, upon receiving deauth frame from AP,
   5487 		 * OpenBSD 802.11 stack puts the driver in IEEE80211_S_AUTH
   5488 		 * state. This will also fail with this driver, so bring the FSM
   5489 		 * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
   5490 		 */
   5491 		if (nstate == IEEE80211_S_SCAN ||
   5492 		    nstate == IEEE80211_S_AUTH ||
   5493 		    nstate == IEEE80211_S_ASSOC) {
   5494 			DPRINTF(("Force transition to INIT; MGT=%d\n", arg));
   5495 			sc->sc_newstate(ic, IEEE80211_S_INIT, arg);
   5496 			DPRINTF(("Going INIT->SCAN\n"));
   5497 			nstate = IEEE80211_S_SCAN;
   5498 		}
   5499 	}
   5500 
   5501 	switch (nstate) {
   5502 	case IEEE80211_S_INIT:
   5503 		sc->sc_scanband = 0;
   5504 		break;
   5505 
   5506 	case IEEE80211_S_SCAN:
   5507 		if (sc->sc_scanband)
   5508 			break;
   5509 
   5510 		if ((error = iwm_mvm_scan_request(sc, IEEE80211_CHAN_2GHZ,
   5511 		    ic->ic_des_esslen != 0,
   5512 		    ic->ic_des_essid, ic->ic_des_esslen)) != 0) {
   5513 			DPRINTF(("%s: could not initiate scan\n", DEVNAME(sc)));
   5514 			return;
   5515 		}
   5516 		ic->ic_state = nstate;
   5517 		return;
   5518 
   5519 	case IEEE80211_S_AUTH:
   5520 		if ((error = iwm_auth(sc)) != 0) {
   5521 			DPRINTF(("%s: could not move to auth state: %d\n",
   5522 			    DEVNAME(sc), error));
   5523 			return;
   5524 		}
   5525 
   5526 		break;
   5527 
   5528 	case IEEE80211_S_ASSOC:
   5529 		if ((error = iwm_assoc(sc)) != 0) {
   5530 			DPRINTF(("%s: failed to associate: %d\n", DEVNAME(sc),
   5531 			    error));
   5532 			return;
   5533 		}
   5534 		break;
   5535 
   5536 	case IEEE80211_S_RUN: {
   5537 		struct iwm_host_cmd cmd = {
   5538 			.id = IWM_LQ_CMD,
   5539 			.len = { sizeof(in->in_lq), },
   5540 			.flags = IWM_CMD_SYNC,
   5541 		};
   5542 
   5543 		in = (struct iwm_node *)ic->ic_bss;
   5544 		iwm_mvm_power_mac_update_mode(sc, in);
   5545 		iwm_mvm_enable_beacon_filter(sc, in);
   5546 		iwm_mvm_update_quotas(sc, in);
   5547 		iwm_setrates(in);
   5548 
   5549 		cmd.data[0] = &in->in_lq;
   5550 		if ((error = iwm_send_cmd(sc, &cmd)) != 0) {
   5551 			DPRINTF(("%s: IWM_LQ_CMD failed\n", DEVNAME(sc)));
   5552 		}
   5553 
   5554 		callout_schedule(&sc->sc_calib_to, hz/2);
   5555 
   5556 		break; }
   5557 
   5558 	default:
   5559 		DPRINTF(("%s: unsupported state %d\n", DEVNAME(sc), nstate));
   5560 		break;
   5561 	}
   5562 
   5563 	sc->sc_newstate(ic, nstate, arg);
   5564 }
   5565 
   5566 static int
   5567 iwm_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
   5568 {
   5569 	struct iwm_newstate_state *iwmns;
   5570 	struct ifnet *ifp = IC2IFP(ic);
   5571 	struct iwm_softc *sc = ifp->if_softc;
   5572 
   5573 	callout_stop(&sc->sc_calib_to);
   5574 
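         	/*
         	 * Hand the state change off to sc_nswq instead of doing it
         	 * inline: the handlers run from iwm_newstate_cb() (iwm_auth()
         	 * in particular) sleep while waiting for firmware responses,
         	 * which may not be safe in this context.
         	 */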
   5575 	iwmns = kmem_intr_alloc(sizeof(*iwmns), KM_NOSLEEP);
   5576 	if (!iwmns) {
   5577 		DPRINTF(("%s: allocating state cb mem failed\n", DEVNAME(sc)));
   5578 		return ENOMEM;
   5579 	}
   5580 
   5581 	iwmns->ns_nstate = nstate;
   5582 	iwmns->ns_arg = arg;
   5583 	iwmns->ns_generation = sc->sc_generation;
   5584 
   5585 	workqueue_enqueue(sc->sc_nswq, &iwmns->ns_wk, NULL);
   5586 
   5587 	return 0;
   5588 }
   5589 
   5590 static void
   5591 iwm_endscan_cb(struct work *work __unused, void *arg)
   5592 {
   5593 	struct iwm_softc *sc = arg;
   5594 	struct ieee80211com *ic = &sc->sc_ic;
   5595 	int done;
   5596 
   5597 	DPRINTF(("scan ended\n"));
   5598 
   5599 	if (sc->sc_scanband == IEEE80211_CHAN_2GHZ &&
   5600 	    sc->sc_nvm.sku_cap_band_52GHz_enable) {
   5601 		int error;
   5602 		done = 0;
   5603 		if ((error = iwm_mvm_scan_request(sc,
   5604 		    IEEE80211_CHAN_5GHZ, ic->ic_des_esslen != 0,
   5605 		    ic->ic_des_essid, ic->ic_des_esslen)) != 0) {
   5606 			DPRINTF(("%s: could not initiate scan\n", DEVNAME(sc)));
   5607 			done = 1;
   5608 		}
   5609 	} else {
   5610 		done = 1;
   5611 	}
   5612 
   5613 	if (done) {
   5614 		if (!sc->sc_scanband) {
   5615 			ieee80211_cancel_scan(ic);
   5616 		} else {
   5617 			ieee80211_end_scan(ic);
   5618 		}
   5619 		sc->sc_scanband = 0;
   5620 	}
   5621 }
   5622 
   5623 static int
   5624 iwm_init_hw(struct iwm_softc *sc)
   5625 {
   5626 	struct ieee80211com *ic = &sc->sc_ic;
   5627 	int error, i, qid;
   5628 
   5629 	if ((error = iwm_preinit(sc)) != 0)
   5630 		return error;
   5631 
   5632 	if ((error = iwm_start_hw(sc)) != 0)
   5633 		return error;
   5634 
   5635 	if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
   5636 		return error;
   5637 	}
   5638 
   5639 	/*
    5640 	 * We should stop and restart the HW since the INIT
    5641 	 * image has just been loaded.
   5642 	 */
   5643 	iwm_stop_device(sc);
   5644 	if ((error = iwm_start_hw(sc)) != 0) {
   5645 		aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
   5646 		return error;
   5647 	}
   5648 
    5649 	/* restart, this time with the regular firmware */
   5650 	error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_REGULAR);
   5651 	if (error) {
   5652 		aprint_error_dev(sc->sc_dev, "could not load firmware\n");
   5653 		goto error;
   5654 	}
   5655 
   5656 	if ((error = iwm_send_tx_ant_cfg(sc, IWM_FW_VALID_TX_ANT(sc))) != 0)
   5657 		goto error;
   5658 
    5659 	/* Send phy db control command and then phy db calibration */
   5660 	if ((error = iwm_send_phy_db_data(sc)) != 0)
   5661 		goto error;
   5662 
   5663 	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0)
   5664 		goto error;
   5665 
   5666 	/* Add auxiliary station for scanning */
   5667 	if ((error = iwm_mvm_add_aux_sta(sc)) != 0)
   5668 		goto error;
   5669 
   5670 	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
   5671 		/*
   5672 		 * The channel used here isn't relevant as it's
   5673 		 * going to be overwritten in the other flows.
   5674 		 * For now use the first channel we have.
   5675 		 */
   5676 		if ((error = iwm_mvm_phy_ctxt_add(sc,
   5677 		    &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
   5678 			goto error;
   5679 	}
   5680 
   5681 	error = iwm_mvm_power_update_device(sc);
   5682 	if (error)
   5683 		goto error;
   5684 
   5685 	/* Mark TX rings as active. */
   5686 	for (qid = 0; qid < 4; qid++) {
   5687 		iwm_enable_txq(sc, qid, qid);
   5688 	}
   5689 
   5690 	return 0;
   5691 
   5692  error:
   5693 	iwm_stop_device(sc);
   5694 	return error;
   5695 }
   5696 
   5697 /* Allow multicast from our BSSID. */
   5698 static int
   5699 iwm_allow_mcast(struct iwm_softc *sc)
   5700 {
   5701 	struct ieee80211com *ic = &sc->sc_ic;
   5702 	struct ieee80211_node *ni = ic->ic_bss;
   5703 	struct iwm_mcast_filter_cmd *cmd;
   5704 	size_t size;
   5705 	int error;
   5706 
   5707 	size = roundup(sizeof(*cmd), 4);
   5708 	cmd = kmem_intr_zalloc(size, KM_NOSLEEP);
   5709 	if (cmd == NULL)
   5710 		return ENOMEM;
   5711 	cmd->filter_own = 1;
   5712 	cmd->port_id = 0;
   5713 	cmd->count = 0;
   5714 	cmd->pass_all = 1;
   5715 	IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
   5716 
   5717 	error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
   5718 	    IWM_CMD_SYNC, size, cmd);
   5719 	kmem_intr_free(cmd, size);
   5720 	return error;
   5721 }
   5722 
   5723 /*
   5724  * ifnet interfaces
   5725  */
   5726 
   5727 static int
   5728 iwm_init(struct ifnet *ifp)
   5729 {
   5730 	struct iwm_softc *sc = ifp->if_softc;
   5731 	int error;
   5732 
   5733 	if (sc->sc_flags & IWM_FLAG_HW_INITED) {
   5734 		return 0;
   5735 	}
   5736 	sc->sc_generation++;
   5737 	sc->sc_flags &= ~IWM_FLAG_STOPPED;
   5738 
   5739 	if ((error = iwm_init_hw(sc)) != 0) {
   5740 		iwm_stop(ifp, 1);
   5741 		return error;
   5742 	}
   5743 
   5744 	/*
    5745 	 * Ok, firmware loaded and we are jogging
   5746 	 */
   5747 
   5748 	ifp->if_flags &= ~IFF_OACTIVE;
   5749 	ifp->if_flags |= IFF_RUNNING;
   5750 
   5751 	ieee80211_begin_scan(&sc->sc_ic, 0);
   5752 	sc->sc_flags |= IWM_FLAG_HW_INITED;
   5753 
   5754 	return 0;
   5755 }
   5756 
   5757 /*
   5758  * Dequeue packets from sendq and call send.
    5759  * Mostly taken from the iwn driver.
   5760  */
   5761 static void
   5762 iwm_start(struct ifnet *ifp)
   5763 {
   5764 	struct iwm_softc *sc = ifp->if_softc;
   5765 	struct ieee80211com *ic = &sc->sc_ic;
   5766 	struct ieee80211_node *ni;
   5767 	struct ether_header *eh;
   5768 	struct mbuf *m;
   5769 	int ac;
   5770 
   5771 	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
   5772 		return;
   5773 
   5774 	for (;;) {
   5775 		/* why isn't this done per-queue? */
   5776 		if (sc->qfullmsk != 0) {
   5777 			ifp->if_flags |= IFF_OACTIVE;
   5778 			break;
   5779 		}
   5780 
   5781 		/* need to send management frames even if we're not RUNning */
   5782 		IF_DEQUEUE(&ic->ic_mgtq, m);
   5783 		if (m) {
   5784 			ni = (void *)m->m_pkthdr.rcvif;
   5785 			ac = 0;
   5786 			goto sendit;
   5787 		}
   5788 		if (ic->ic_state != IEEE80211_S_RUN) {
   5789 			break;
   5790 		}
   5791 
   5792 		IFQ_DEQUEUE(&ifp->if_snd, m);
   5793 		if (!m)
   5794 			break;
   5795 		if (m->m_len < sizeof (*eh) &&
   5796 		   (m = m_pullup(m, sizeof (*eh))) == NULL) {
   5797 			ifp->if_oerrors++;
   5798 			continue;
   5799 		}
   5800 		if (ifp->if_bpf != NULL)
   5801 			bpf_mtap(ifp, m);
   5802 
   5803 		eh = mtod(m, struct ether_header *);
   5804 		ni = ieee80211_find_txnode(ic, eh->ether_dhost);
   5805 		if (ni == NULL) {
   5806 			m_freem(m);
   5807 			ifp->if_oerrors++;
   5808 			continue;
   5809 		}
   5810 		/* classify mbuf so we can find which tx ring to use */
   5811 		if (ieee80211_classify(ic, m, ni) != 0) {
   5812 			m_freem(m);
   5813 			ieee80211_free_node(ni);
   5814 			ifp->if_oerrors++;
   5815 			continue;
   5816 		}
   5817 
   5818 		/* No QoS encapsulation for EAPOL frames. */
   5819 		ac = (eh->ether_type != htons(ETHERTYPE_PAE)) ?
   5820 		    M_WME_GETAC(m) : WME_AC_BE;
   5821 
   5822 		if ((m = ieee80211_encap(ic, m, ni)) == NULL) {
   5823 			ieee80211_free_node(ni);
   5824 			ifp->if_oerrors++;
   5825 			continue;
   5826 		}
   5827 
   5828  sendit:
   5829 		if (ic->ic_rawbpf != NULL)
   5830 			bpf_mtap3(ic->ic_rawbpf, m);
   5831 		if (iwm_tx(sc, m, ni, ac) != 0) {
   5832 			ieee80211_free_node(ni);
   5833 			ifp->if_oerrors++;
   5834 			continue;
   5835 		}
   5836 
   5837 		if (ifp->if_flags & IFF_UP) {
   5838 			sc->sc_tx_timer = 15;
   5839 			ifp->if_timer = 1;
   5840 		}
   5841 	}
   5842 
   5843 	return;
   5844 }
   5845 
   5846 static void
   5847 iwm_stop(struct ifnet *ifp, int disable)
   5848 {
   5849 	struct iwm_softc *sc = ifp->if_softc;
   5850 	struct ieee80211com *ic = &sc->sc_ic;
   5851 
   5852 	sc->sc_flags &= ~IWM_FLAG_HW_INITED;
   5853 	sc->sc_flags |= IWM_FLAG_STOPPED;
   5854 	sc->sc_generation++;
   5855 	sc->sc_scanband = 0;
   5856 	sc->sc_auth_prot = 0;
   5857 	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
   5858 
   5859 	if (ic->ic_state != IEEE80211_S_INIT)
   5860 		ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
   5861 
   5862 	callout_stop(&sc->sc_calib_to);
   5863 	ifp->if_timer = sc->sc_tx_timer = 0;
   5864 	iwm_stop_device(sc);
   5865 }
   5866 
   5867 static void
   5868 iwm_watchdog(struct ifnet *ifp)
   5869 {
   5870 	struct iwm_softc *sc = ifp->if_softc;
   5871 
   5872 	ifp->if_timer = 0;
   5873 	if (sc->sc_tx_timer > 0) {
   5874 		if (--sc->sc_tx_timer == 0) {
   5875 			aprint_error_dev(sc->sc_dev, "device timeout\n");
   5876 #ifdef IWM_DEBUG
   5877 			iwm_nic_error(sc);
   5878 #endif
   5879 			ifp->if_flags &= ~IFF_UP;
   5880 			iwm_stop(ifp, 1);
   5881 			ifp->if_oerrors++;
   5882 			return;
   5883 		}
   5884 		ifp->if_timer = 1;
   5885 	}
   5886 
   5887 	ieee80211_watchdog(&sc->sc_ic);
   5888 }
   5889 
   5890 static int
   5891 iwm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
   5892 {
   5893 	struct iwm_softc *sc = ifp->if_softc;
   5894 	struct ieee80211com *ic = &sc->sc_ic;
   5895 	const struct sockaddr *sa;
   5896 	int s, error = 0;
   5897 
   5898 	s = splnet();
   5899 
   5900 	switch (cmd) {
   5901 	case SIOCSIFADDR:
   5902 		ifp->if_flags |= IFF_UP;
   5903 		/* FALLTHROUGH */
   5904 	case SIOCSIFFLAGS:
   5905 		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
   5906 			break;
   5907 		if (ifp->if_flags & IFF_UP) {
   5908 			if (!(ifp->if_flags & IFF_RUNNING)) {
   5909 				if ((error = iwm_init(ifp)) != 0)
   5910 					ifp->if_flags &= ~IFF_UP;
   5911 			}
   5912 		} else {
   5913 			if (ifp->if_flags & IFF_RUNNING)
   5914 				iwm_stop(ifp, 1);
   5915 		}
   5916 		break;
   5917 
   5918 	case SIOCADDMULTI:
   5919 	case SIOCDELMULTI:
   5920 		sa = ifreq_getaddr(SIOCADDMULTI, (struct ifreq *)data);
   5921 		error = (cmd == SIOCADDMULTI) ?
   5922 		    ether_addmulti(sa, &sc->sc_ec) :
   5923 		    ether_delmulti(sa, &sc->sc_ec);
   5924 
   5925 		if (error == ENETRESET)
   5926 			error = 0;
   5927 		break;
   5928 
   5929 	default:
   5930 		error = ieee80211_ioctl(ic, cmd, data);
   5931 	}
   5932 
   5933 	if (error == ENETRESET) {
   5934 		error = 0;
   5935 		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
   5936 		    (IFF_UP | IFF_RUNNING)) {
   5937 			iwm_stop(ifp, 0);
   5938 			error = iwm_init(ifp);
   5939 		}
   5940 	}
   5941 
   5942 	splx(s);
   5943 	return error;
   5944 }
   5945 
   5946 /*
   5947  * The interrupt side of things
   5948  */
   5949 
   5950 /*
   5951  * error dumping routines are from iwlwifi/mvm/utils.c
   5952  */
   5953 
   5954 /*
   5955  * Note: This structure is read from the device with IO accesses,
   5956  * and the reading already does the endian conversion. As it is
   5957  * read with uint32_t-sized accesses, any members with a different size
   5958  * need to be ordered correctly though!
   5959  */
   5960 struct iwm_error_event_table {
   5961 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
   5962 	uint32_t error_id;		/* type of error */
   5963 	uint32_t pc;			/* program counter */
   5964 	uint32_t blink1;		/* branch link */
   5965 	uint32_t blink2;		/* branch link */
   5966 	uint32_t ilink1;		/* interrupt link */
   5967 	uint32_t ilink2;		/* interrupt link */
   5968 	uint32_t data1;		/* error-specific data */
   5969 	uint32_t data2;		/* error-specific data */
   5970 	uint32_t data3;		/* error-specific data */
   5971 	uint32_t bcon_time;		/* beacon timer */
   5972 	uint32_t tsf_low;		/* network timestamp function timer */
   5973 	uint32_t tsf_hi;		/* network timestamp function timer */
   5974 	uint32_t gp1;		/* GP1 timer register */
   5975 	uint32_t gp2;		/* GP2 timer register */
   5976 	uint32_t gp3;		/* GP3 timer register */
   5977 	uint32_t ucode_ver;		/* uCode version */
   5978 	uint32_t hw_ver;		/* HW Silicon version */
   5979 	uint32_t brd_ver;		/* HW board version */
   5980 	uint32_t log_pc;		/* log program counter */
   5981 	uint32_t frame_ptr;		/* frame pointer */
   5982 	uint32_t stack_ptr;		/* stack pointer */
   5983 	uint32_t hcmd;		/* last host command header */
   5984 	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
   5985 				 * rxtx_flag */
   5986 	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
   5987 				 * host_flag */
   5988 	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
   5989 				 * enc_flag */
   5990 	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
   5991 				 * time_flag */
   5992 	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
   5993 				 * wico interrupt */
   5994 	uint32_t isr_pref;		/* isr status register LMPM_NIC_PREF_STAT */
   5995 	uint32_t wait_event;		/* wait event() caller address */
   5996 	uint32_t l2p_control;	/* L2pControlField */
   5997 	uint32_t l2p_duration;	/* L2pDurationField */
   5998 	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
   5999 	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
   6000 	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
   6001 				 * (LMPM_PMG_SEL) */
    6002 	uint32_t u_timestamp;	/* indicates the date and time of the
   6003 				 * compilation */
   6004 	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
   6005 } __packed;
   6006 
   6007 #define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
   6008 #define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
   6009 
   6010 #ifdef IWM_DEBUG
   6011 static const struct {
   6012 	const char *name;
   6013 	uint8_t num;
   6014 } advanced_lookup[] = {
   6015 	{ "NMI_INTERRUPT_WDG", 0x34 },
   6016 	{ "SYSASSERT", 0x35 },
   6017 	{ "UCODE_VERSION_MISMATCH", 0x37 },
   6018 	{ "BAD_COMMAND", 0x38 },
   6019 	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
   6020 	{ "FATAL_ERROR", 0x3D },
   6021 	{ "NMI_TRM_HW_ERR", 0x46 },
   6022 	{ "NMI_INTERRUPT_TRM", 0x4C },
   6023 	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
   6024 	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
   6025 	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
   6026 	{ "NMI_INTERRUPT_HOST", 0x66 },
   6027 	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
   6028 	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
   6029 	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
   6030 	{ "ADVANCED_SYSASSERT", 0 },
   6031 };
   6032 
   6033 static const char *
   6034 iwm_desc_lookup(uint32_t num)
   6035 {
   6036 	int i;
   6037 
   6038 	for (i = 0; i < __arraycount(advanced_lookup) - 1; i++)
   6039 		if (advanced_lookup[i].num == num)
   6040 			return advanced_lookup[i].name;
   6041 
   6042 	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
   6043 	return advanced_lookup[i].name;
   6044 }
   6045 
   6046 /*
   6047  * Support for dumping the error log seemed like a good idea ...
   6048  * but it's mostly hex junk and the only sensible thing is the
   6049  * hw/ucode revision (which we know anyway).  Since it's here,
   6050  * I'll just leave it in, just in case e.g. the Intel guys want to
   6051  * help us decipher some "ADVANCED_SYSASSERT" later.
   6052  */
   6053 static void
   6054 iwm_nic_error(struct iwm_softc *sc)
   6055 {
   6056 	struct iwm_error_event_table table;
   6057 	uint32_t base;
   6058 
   6059 	aprint_error_dev(sc->sc_dev, "dumping device error log\n");
   6060 	base = sc->sc_uc.uc_error_event_table;
   6061 	if (base < 0x800000 || base >= 0x80C000) {
   6062 		aprint_error_dev(sc->sc_dev,
   6063 		    "Not valid error log pointer 0x%08x\n", base);
   6064 		return;
   6065 	}
   6066 
   6067 	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t)) != 0) {
   6068 		aprint_error_dev(sc->sc_dev, "reading errlog failed\n");
   6069 		return;
   6070 	}
   6071 
   6072 	if (!table.valid) {
   6073 		aprint_error_dev(sc->sc_dev, "errlog not found, skipping\n");
   6074 		return;
   6075 	}
   6076 
   6077 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
   6078 		aprint_error_dev(sc->sc_dev, "Start IWL Error Log Dump:\n");
   6079 		aprint_error_dev(sc->sc_dev, "Status: 0x%x, count: %d\n",
   6080 		    sc->sc_flags, table.valid);
   6081 	}
   6082 
   6083 	aprint_error_dev(sc->sc_dev, "%08X | %-28s\n", table.error_id,
   6084 		iwm_desc_lookup(table.error_id));
   6085 	aprint_error_dev(sc->sc_dev, "%08X | uPc\n", table.pc);
   6086 	aprint_error_dev(sc->sc_dev, "%08X | branchlink1\n", table.blink1);
   6087 	aprint_error_dev(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
   6088 	aprint_error_dev(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
   6089 	aprint_error_dev(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
   6090 	aprint_error_dev(sc->sc_dev, "%08X | data1\n", table.data1);
   6091 	aprint_error_dev(sc->sc_dev, "%08X | data2\n", table.data2);
   6092 	aprint_error_dev(sc->sc_dev, "%08X | data3\n", table.data3);
   6093 	aprint_error_dev(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
   6094 	aprint_error_dev(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
   6095 	aprint_error_dev(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
   6096 	aprint_error_dev(sc->sc_dev, "%08X | time gp1\n", table.gp1);
   6097 	aprint_error_dev(sc->sc_dev, "%08X | time gp2\n", table.gp2);
   6098 	aprint_error_dev(sc->sc_dev, "%08X | time gp3\n", table.gp3);
   6099 	aprint_error_dev(sc->sc_dev, "%08X | uCode version\n", table.ucode_ver);
   6100 	aprint_error_dev(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
   6101 	aprint_error_dev(sc->sc_dev, "%08X | board version\n", table.brd_ver);
   6102 	aprint_error_dev(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
   6103 	aprint_error_dev(sc->sc_dev, "%08X | isr0\n", table.isr0);
   6104 	aprint_error_dev(sc->sc_dev, "%08X | isr1\n", table.isr1);
   6105 	aprint_error_dev(sc->sc_dev, "%08X | isr2\n", table.isr2);
   6106 	aprint_error_dev(sc->sc_dev, "%08X | isr3\n", table.isr3);
   6107 	aprint_error_dev(sc->sc_dev, "%08X | isr4\n", table.isr4);
   6108 	aprint_error_dev(sc->sc_dev, "%08X | isr_pref\n", table.isr_pref);
   6109 	aprint_error_dev(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
   6110 	aprint_error_dev(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
   6111 	aprint_error_dev(sc->sc_dev, "%08X | l2p_duration\n",
   6112 	    table.l2p_duration);
   6113 	aprint_error_dev(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
   6114 	aprint_error_dev(sc->sc_dev, "%08X | l2p_addr_match\n",
   6115 	    table.l2p_addr_match);
   6116 	aprint_error_dev(sc->sc_dev, "%08X | lmpm_pmg_sel\n",
   6117 	    table.lmpm_pmg_sel);
   6118 	aprint_error_dev(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
   6119 	aprint_error_dev(sc->sc_dev, "%08X | flow_handler\n",
   6120 	    table.flow_handler);
   6121 }
   6122 #endif
   6123 
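         /*
          * Helpers for reading a command response or notification that follows
          * the iwm_rx_packet header in an RX buffer: sync the relevant part of
          * the DMA map and point the given variable just past the packet header.
          * SYNC_RESP_STRUCT syncs sizeof(*(_var_)) bytes; SYNC_RESP_PTR syncs a
          * caller-supplied length.
          */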
   6124 #define SYNC_RESP_STRUCT(_var_, _pkt_)					\
   6125 do {									\
   6126 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
   6127 	    sizeof(*(_var_)), BUS_DMASYNC_POSTREAD);			\
   6128 	_var_ = (void *)((_pkt_)+1);					\
   6129 } while (/*CONSTCOND*/0)
   6130 
   6131 #define SYNC_RESP_PTR(_ptr_, _len_, _pkt_)				\
   6132 do {									\
   6133 	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
    6134 	    (_len_), BUS_DMASYNC_POSTREAD);				\
   6135 	_ptr_ = (void *)((_pkt_)+1);					\
   6136 } while (/*CONSTCOND*/0)
   6137 
   6138 #define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % IWM_RX_RING_COUNT);
   6139 
   6140 /*
   6141  * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
   6142  * Basic structure from if_iwn
   6143  */
   6144 static void
   6145 iwm_notif_intr(struct iwm_softc *sc)
   6146 {
   6147 	uint16_t hw;
   6148 
   6149 	bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
   6150 	    0, sc->rxq.stat_dma.size, BUS_DMASYNC_POSTREAD);
   6151 
   6152 	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
   6153 	while (sc->rxq.cur != hw) {
   6154 		struct iwm_rx_data *data = &sc->rxq.data[sc->rxq.cur];
   6155 		struct iwm_rx_packet *pkt, tmppkt;
   6156 		struct iwm_cmd_response *cresp;
   6157 		int qid, idx;
   6158 
   6159 		bus_dmamap_sync(sc->sc_dmat, data->map, 0, sizeof(*pkt),
   6160 		    BUS_DMASYNC_POSTREAD);
   6161 		pkt = mtod(data->m, struct iwm_rx_packet *);
   6162 
   6163 		qid = pkt->hdr.qid & ~0x80;
   6164 		idx = pkt->hdr.idx;
   6165 
   6166 		DPRINTFN(12, ("rx packet qid=%d idx=%d flags=%x type=%x %d %d\n",
   6167 		    pkt->hdr.qid & ~0x80, pkt->hdr.idx, pkt->hdr.flags,
   6168 		    pkt->hdr.code, sc->rxq.cur, hw));
   6169 
   6170 		/*
    6171 		 * We randomly get these from the firmware; no idea why.
    6172 		 * They at least seem harmless, so just ignore them for now.
   6173 		 */
   6174 		if (__predict_false((pkt->hdr.code == 0 && qid == 0 && idx == 0)
   6175 		    || pkt->len_n_flags == htole32(0x55550000))) {
   6176 			ADVANCE_RXQ(sc);
   6177 			continue;
   6178 		}
   6179 
   6180 		switch (pkt->hdr.code) {
   6181 		case IWM_REPLY_RX_PHY_CMD:
   6182 			iwm_mvm_rx_rx_phy_cmd(sc, pkt, data);
   6183 			break;
   6184 
   6185 		case IWM_REPLY_RX_MPDU_CMD:
    6186 			tmppkt = *pkt; /* XXX m is freed by ieee80211_input() */
   6187 			iwm_mvm_rx_rx_mpdu(sc, pkt, data);
   6188 			pkt = &tmppkt;
   6189 			break;
   6190 
   6191 		case IWM_TX_CMD:
   6192 			iwm_mvm_rx_tx_cmd(sc, pkt, data);
   6193 			break;
   6194 
   6195 		case IWM_MISSED_BEACONS_NOTIFICATION:
   6196 			iwm_mvm_rx_missed_beacons_notif(sc, pkt, data);
   6197 			break;
   6198 
   6199 		case IWM_MVM_ALIVE: {
   6200 			struct iwm_mvm_alive_resp *resp;
   6201 			SYNC_RESP_STRUCT(resp, pkt);
   6202 
   6203 			sc->sc_uc.uc_error_event_table
   6204 			    = le32toh(resp->error_event_table_ptr);
   6205 			sc->sc_uc.uc_log_event_table
   6206 			    = le32toh(resp->log_event_table_ptr);
   6207 			sc->sched_base = le32toh(resp->scd_base_ptr);
   6208 			sc->sc_uc.uc_ok = resp->status == IWM_ALIVE_STATUS_OK;
   6209 
   6210 			sc->sc_uc.uc_intr = 1;
   6211 			wakeup(&sc->sc_uc);
   6212 			break; }
   6213 
   6214 		case IWM_CALIB_RES_NOTIF_PHY_DB: {
   6215 			struct iwm_calib_res_notif_phy_db *phy_db_notif;
   6216 			SYNC_RESP_STRUCT(phy_db_notif, pkt);
   6217 
   6218 			uint16_t size = le16toh(phy_db_notif->length);
   6219 			bus_dmamap_sync(sc->sc_dmat, data->map,
   6220 			    sizeof(*pkt) + sizeof(*phy_db_notif),
   6221 			    size, BUS_DMASYNC_POSTREAD);
   6222 			iwm_phy_db_set_section(sc, phy_db_notif, size);
   6223 
   6224 			break; }
   6225 
   6226 		case IWM_STATISTICS_NOTIFICATION: {
   6227 			struct iwm_notif_statistics *stats;
   6228 			SYNC_RESP_STRUCT(stats, pkt);
   6229 			memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
   6230 			sc->sc_noise = iwm_get_noise(&stats->rx.general);
   6231 			break; }
   6232 
   6233 		case IWM_NVM_ACCESS_CMD:
   6234 			if (sc->sc_wantresp == ((qid << 16) | idx)) {
   6235 				bus_dmamap_sync(sc->sc_dmat, data->map, 0,
   6236 				    sizeof(sc->sc_cmd_resp),
   6237 				    BUS_DMASYNC_POSTREAD);
   6238 				memcpy(sc->sc_cmd_resp,
   6239 				    pkt, sizeof(sc->sc_cmd_resp));
   6240 			}
   6241 			break;
   6242 
   6243 		case IWM_PHY_CONFIGURATION_CMD:
   6244 		case IWM_TX_ANT_CONFIGURATION_CMD:
   6245 		case IWM_ADD_STA:
   6246 		case IWM_MAC_CONTEXT_CMD:
   6247 		case IWM_REPLY_SF_CFG_CMD:
   6248 		case IWM_POWER_TABLE_CMD:
   6249 		case IWM_PHY_CONTEXT_CMD:
   6250 		case IWM_BINDING_CONTEXT_CMD:
   6251 		case IWM_TIME_EVENT_CMD:
   6252 		case IWM_SCAN_REQUEST_CMD:
   6253 		case IWM_REPLY_BEACON_FILTERING_CMD:
   6254 		case IWM_MAC_PM_POWER_TABLE:
   6255 		case IWM_TIME_QUOTA_CMD:
   6256 		case IWM_REMOVE_STA:
   6257 		case IWM_TXPATH_FLUSH:
   6258 		case IWM_LQ_CMD:
   6259 			SYNC_RESP_STRUCT(cresp, pkt);
   6260 			if (sc->sc_wantresp == ((qid << 16) | idx)) {
   6261 				memcpy(sc->sc_cmd_resp,
   6262 				    pkt, sizeof(*pkt)+sizeof(*cresp));
   6263 			}
   6264 			break;
   6265 
   6266 		/* ignore */
   6267 		case 0x6c: /* IWM_PHY_DB_CMD, no idea why it's not in fw-api.h */
   6268 			break;
   6269 
   6270 		case IWM_INIT_COMPLETE_NOTIF:
   6271 			sc->sc_init_complete = 1;
   6272 			wakeup(&sc->sc_init_complete);
   6273 			break;
   6274 
   6275 		case IWM_SCAN_COMPLETE_NOTIFICATION: {
   6276 			struct iwm_scan_complete_notif *notif;
   6277 			SYNC_RESP_STRUCT(notif, pkt);
   6278 
   6279 			workqueue_enqueue(sc->sc_eswq, &sc->sc_eswk, NULL);
   6280 			break; }
   6281 
   6282 		case IWM_REPLY_ERROR: {
   6283 			struct iwm_error_resp *resp;
   6284 			SYNC_RESP_STRUCT(resp, pkt);
   6285 
   6286 			aprint_error_dev(sc->sc_dev,
   6287 			    "firmware error 0x%x, cmd 0x%x\n",
   6288 			    le32toh(resp->error_type), resp->cmd_id);
   6289 			break; }
   6290 
   6291 		case IWM_TIME_EVENT_NOTIFICATION: {
   6292 			struct iwm_time_event_notif *notif;
   6293 			SYNC_RESP_STRUCT(notif, pkt);
   6294 
   6295 			if (notif->status) {
   6296 				if (le32toh(notif->action) &
   6297 				    IWM_TE_V2_NOTIF_HOST_EVENT_START)
   6298 					sc->sc_auth_prot = 2;
   6299 				else
   6300 					sc->sc_auth_prot = 0;
   6301 			} else {
   6302 				sc->sc_auth_prot = -1;
   6303 			}
   6304 			wakeup(&sc->sc_auth_prot);
   6305 			break; }
   6306 
   6307 		case IWM_MCAST_FILTER_CMD:
   6308 			break;
   6309 
   6310 		default:
   6311 			aprint_error_dev(sc->sc_dev,
   6312 			    "code %02x frame %d/%d %x UNHANDLED "
   6313 			    "(this should not happen)\n",
   6314 			    pkt->hdr.code, qid, idx, pkt->len_n_flags);
   6315 			break;
   6316 		}
   6317 
   6318 		/*
   6319 		 * Why test bit 0x80?  The Linux driver:
   6320 		 *
   6321 		 * There is one exception:  uCode sets bit 15 when it
   6322 		 * originates the response/notification, i.e. when the
   6323 		 * response/notification is not a direct response to a
   6324 		 * command sent by the driver.  For example, uCode issues
   6325 		 * IWM_REPLY_RX when it sends a received frame to the driver;
   6326 		 * it is not a direct response to any driver command.
   6327 		 *
   6328 		 * Ok, so since when is 7 == 15?  Well, the Linux driver
   6329 		 * uses a slightly different format for pkt->hdr, and "qid"
   6330 		 * is actually the upper byte of a two-byte field.
   6331 		 */
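         		/*
         		 * In other words: bit 7 set in hdr.qid marks a
         		 * notification that the firmware generated on its own
         		 * (e.g. a received frame); bit 7 clear means this packet
         		 * completes a command we issued, so finish it via
         		 * iwm_cmd_done().
         		 */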
   6332 		if (!(pkt->hdr.qid & (1 << 7))) {
   6333 			iwm_cmd_done(sc, pkt);
   6334 		}
   6335 
   6336 		ADVANCE_RXQ(sc);
   6337 	}
   6338 
   6339 	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
   6340 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
   6341 
   6342 	/*
   6343 	 * Tell the firmware what we have processed.
   6344 	 * Seems like the hardware gets upset unless we align
   6345 	 * the write by 8??
   6346 	 */
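         	/*
         	 * For example, if hw is 0 the pointer wraps back to
         	 * IWM_RX_RING_COUNT - 1, and the "& ~7" below rounds the index
         	 * down to a multiple of 8 before it is written to the device.
         	 */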
   6347 	hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
   6348 	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
   6349 }
   6350 
   6351 static int
   6352 iwm_intr(void *arg)
   6353 {
   6354 	struct iwm_softc *sc = arg;
   6355 	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
   6356 	int handled = 0;
   6357 	int r1, r2, rv = 0;
   6358 	int isperiodic = 0;
   6359 
   6360 	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
   6361 
   6362 	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
   6363 		uint32_t *ict = sc->ict_dma.vaddr;
   6364 		int tmp;
   6365 
   6366 		tmp = htole32(ict[sc->ict_cur]);
   6367 		if (!tmp)
   6368 			goto out_ena;
   6369 
   6370 		/*
    6371 		 * Ok, there was something.  Keep plowing until we have it all.
   6372 		 */
   6373 		r1 = r2 = 0;
   6374 		while (tmp) {
   6375 			r1 |= tmp;
   6376 			ict[sc->ict_cur] = 0;
   6377 			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
   6378 			tmp = htole32(ict[sc->ict_cur]);
   6379 		}
   6380 
   6381 		/* this is where the fun begins.  don't ask */
   6382 		if (r1 == 0xffffffff)
   6383 			r1 = 0;
   6384 
   6385 		/* i am not expected to understand this */
   6386 		if (r1 & 0xc0000)
   6387 			r1 |= 0x8000;
   6388 		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
   6389 	} else {
   6390 		r1 = IWM_READ(sc, IWM_CSR_INT);
   6391 		/* "hardware gone" (where, fishing?) */
   6392 		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
   6393 			goto out;
   6394 		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
   6395 	}
   6396 	if (r1 == 0 && r2 == 0) {
   6397 		goto out_ena;
   6398 	}
   6399 
   6400 	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
   6401 
   6402 	/* ignored */
   6403 	handled |= (r1 & (IWM_CSR_INT_BIT_ALIVE /*| IWM_CSR_INT_BIT_SCD*/));
   6404 
   6405 	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
   6406 #ifdef IWM_DEBUG
   6407 		int i;
   6408 
   6409 		iwm_nic_error(sc);
   6410 
   6411 		/* Dump driver status (TX and RX rings) while we're here. */
   6412 		DPRINTF(("driver status:\n"));
   6413 		for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
   6414 			struct iwm_tx_ring *ring = &sc->txq[i];
   6415 			DPRINTF(("  tx ring %2d: qid=%-2d cur=%-3d "
   6416 			    "queued=%-3d\n",
   6417 			    i, ring->qid, ring->cur, ring->queued));
   6418 		}
   6419 		DPRINTF(("  rx ring: cur=%d\n", sc->rxq.cur));
   6420 		DPRINTF(("  802.11 state %d\n", sc->sc_ic.ic_state));
   6421 #endif
   6422 
   6423 		aprint_error_dev(sc->sc_dev, "fatal firmware error\n");
   6424 		ifp->if_flags &= ~IFF_UP;
   6425 		iwm_stop(ifp, 1);
   6426 		rv = 1;
   6427 		goto out;
   6429 	}
   6430 
   6431 	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
   6432 		handled |= IWM_CSR_INT_BIT_HW_ERR;
   6433 		aprint_error_dev(sc->sc_dev,
   6434 		    "hardware error, stopping device\n");
   6435 		ifp->if_flags &= ~IFF_UP;
   6436 		iwm_stop(ifp, 1);
   6437 		rv = 1;
   6438 		goto out;
   6439 	}
   6440 
   6441 	/* firmware chunk loaded */
   6442 	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
   6443 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
   6444 		handled |= IWM_CSR_INT_BIT_FH_TX;
   6445 
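		/*
		 * The firmware loading path sleeps on sc->sc_fw until each
		 * DMA chunk has been transferred; flag completion and wake
		 * it up.
		 */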
   6446 		sc->sc_fw_chunk_done = 1;
   6447 		wakeup(&sc->sc_fw);
   6448 	}
   6449 
   6450 	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
   6451 		handled |= IWM_CSR_INT_BIT_RF_KILL;
   6452 		if (iwm_check_rfkill(sc) && (ifp->if_flags & IFF_UP)) {
   6453 			DPRINTF(("%s: rfkill switch, disabling interface\n",
   6454 			    DEVNAME(sc)));
   6455 			ifp->if_flags &= ~IFF_UP;
   6456 			iwm_stop(ifp, 1);
   6457 		}
   6458 	}
   6459 
	/*
	 * The Linux driver uses periodic interrupts to avoid races; we
	 * follow the same scheme without claiming to fully understand it.
	 */
   6464 	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
   6465 		handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
   6466 		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
   6467 		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
   6468 			IWM_WRITE_1(sc,
   6469 			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
   6470 		isperiodic = 1;
   6471 	}
   6472 
	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) ||
	    isperiodic) {
   6474 		handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
   6475 		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
   6476 
   6477 		iwm_notif_intr(sc);
   6478 
   6479 		/* enable periodic interrupt, see above */
		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) &&
		    !isperiodic)
			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
			    IWM_CSR_INT_PERIODIC_ENA);
   6483 	}
   6484 
   6485 	if (__predict_false(r1 & ~handled))
   6486 		DPRINTF(("%s: unhandled interrupts: %x\n", DEVNAME(sc), r1));
   6487 	rv = 1;
   6488 
   6489  out_ena:
   6490 	iwm_restore_interrupts(sc);
   6491  out:
   6492 	return rv;
   6493 }
   6494 
/*
 * Autoconf glue: PCI match, attach and supporting hooks.
 */
   6498 
   6499 static const pci_product_id_t iwm_devices[] = {
   6500 	PCI_PRODUCT_INTEL_WIFI_LINK_7260_1,
   6501 	PCI_PRODUCT_INTEL_WIFI_LINK_7260_2,
   6502 	PCI_PRODUCT_INTEL_WIFI_LINK_3160_1,
   6503 	PCI_PRODUCT_INTEL_WIFI_LINK_3160_2,
   6504 	PCI_PRODUCT_INTEL_WIFI_LINK_7265_1,
   6505 	PCI_PRODUCT_INTEL_WIFI_LINK_7265_2,
   6506 };
   6507 
   6508 static int
   6509 iwm_match(device_t parent, cfdata_t match __unused, void *aux)
   6510 {
   6511 	struct pci_attach_args *pa = aux;
   6512 
   6513 	if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_INTEL)
   6514 		return 0;
   6515 
   6516 	for (size_t i = 0; i < __arraycount(iwm_devices); i++)
   6517 		if (PCI_PRODUCT(pa->pa_id) == iwm_devices[i])
   6518 			return 1;
   6519 
   6520 	return 0;
   6521 }
   6522 
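/*
 * Bring the hardware up just far enough to run the init firmware and read
 * the NVM contents (notably the MAC address reported by iwm_attach_hook),
 * then stop the device again.
 */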
   6523 static int
   6524 iwm_preinit(struct iwm_softc *sc)
   6525 {
   6526 	int error;
   6527 
   6528 	if ((error = iwm_prepare_card_hw(sc)) != 0) {
   6529 		aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
   6530 		return error;
   6531 	}
   6532 
   6533 	if (sc->sc_flags & IWM_FLAG_ATTACHED)
   6534 		return 0;
   6535 
   6536 	if ((error = iwm_start_hw(sc)) != 0) {
		aprint_error_dev(sc->sc_dev, "could not start hardware\n");
   6538 		return error;
   6539 	}
   6540 
   6541 	error = iwm_run_init_mvm_ucode(sc, 1);
   6542 	iwm_stop_device(sc);
   6543 	return error;
   6544 }
   6545 
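/*
 * Finish attachment.  Runs from the config_mountroot() hook established in
 * iwm_attach below, once the root file system (and with it the firmware
 * image) can be read.
 */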
   6546 static void
   6547 iwm_attach_hook(device_t dev)
   6548 {
   6549 	struct iwm_softc *sc = device_private(dev);
   6550 	struct ieee80211com *ic = &sc->sc_ic;
   6551 	struct ifnet *ifp = &sc->sc_ec.ec_if;
   6552 
   6553 	KASSERT(!cold);
   6554 
   6555 	if (iwm_preinit(sc) != 0)
   6556 		return;
   6557 
   6558 	sc->sc_flags |= IWM_FLAG_ATTACHED;
   6559 
   6560 	aprint_normal_dev(sc->sc_dev,
   6561 	    "hw rev: 0x%x, fw ver %d.%d (API ver %d), address %s\n",
   6562 	    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
   6563 	    IWM_UCODE_MAJOR(sc->sc_fwver),
   6564 	    IWM_UCODE_MINOR(sc->sc_fwver),
   6565 	    IWM_UCODE_API(sc->sc_fwver),
   6566 	    ether_sprintf(sc->sc_nvm.hw_addr));
   6567 
   6568 	ic->ic_ifp = ifp;
	ic->ic_phytype = IEEE80211_T_OFDM;	/* not the only type, but not used */
   6570 	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
   6571 	ic->ic_state = IEEE80211_S_INIT;
   6572 
   6573 	/* Set device capabilities. */
   6574 	ic->ic_caps =
   6575 	    IEEE80211_C_WEP |		/* WEP */
   6576 	    IEEE80211_C_WPA |		/* 802.11i */
   6577 	    IEEE80211_C_SHSLOT |	/* short slot time supported */
   6578 	    IEEE80211_C_SHPREAMBLE;	/* short preamble supported */
   6579 
   6580 	/* not all hardware can do 5GHz band */
   6581 	if (sc->sc_nvm.sku_cap_band_52GHz_enable)
   6582 		ic->ic_sup_rates[IEEE80211_MODE_11A] = ieee80211_std_rateset_11a;
   6583 	ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b;
   6584 	ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g;
   6585 
   6586 	for (int i = 0; i < __arraycount(sc->sc_phyctxt); i++) {
   6587 		sc->sc_phyctxt[i].id = i;
   6588 	}
   6589 
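	/* AMRR (adaptive multi-rate retry) rate control parameters. */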
   6590 	sc->sc_amrr.amrr_min_success_threshold =  1;
   6591 	sc->sc_amrr.amrr_max_success_threshold = 15;
   6592 
   6593 	/* IBSS channel undefined for now. */
   6594 	ic->ic_ibss_chan = &ic->ic_channels[1];
   6595 
   6596 #if 0
   6597 	/* Max RSSI */
   6598 	ic->ic_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
   6599 #endif
   6600 
   6601 	ifp->if_softc = sc;
   6602 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   6603 	ifp->if_init = iwm_init;
   6604 	ifp->if_stop = iwm_stop;
   6605 	ifp->if_ioctl = iwm_ioctl;
   6606 	ifp->if_start = iwm_start;
   6607 	ifp->if_watchdog = iwm_watchdog;
   6608 	IFQ_SET_READY(&ifp->if_snd);
   6609 	memcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
   6610 
   6611 	if_initialize(ifp);
   6612 	ieee80211_ifattach(ic);
   6613 	if_register(ifp);
   6614 
   6615 	ic->ic_node_alloc = iwm_node_alloc;
   6616 
   6617 	/* Override 802.11 state transition machine. */
   6618 	sc->sc_newstate = ic->ic_newstate;
   6619 	ic->ic_newstate = iwm_newstate;
   6620 	ieee80211_media_init(ic, iwm_media_change, ieee80211_media_status);
   6621 	ieee80211_announce(ic);
   6622 
   6623 	iwm_radiotap_attach(sc);
   6624 	callout_init(&sc->sc_calib_to, 0);
   6625 	callout_setfunc(&sc->sc_calib_to, iwm_calib_timeout, sc);
   6626 
#if 0
	task_set(&sc->init_task, iwm_init_task, sc);
#endif
   6628 
   6629 	if (pmf_device_register(dev, NULL, NULL))
   6630 		pmf_class_network_register(dev, ifp);
   6631 	else
   6632 		aprint_error_dev(dev, "couldn't establish power handler\n");
   6633 }
   6634 
   6635 static void
   6636 iwm_attach(device_t parent, device_t self, void *aux)
   6637 {
   6638 	struct iwm_softc *sc = device_private(self);
   6639 	struct pci_attach_args *pa = aux;
   6640 #ifndef __HAVE_PCI_MSI_MSIX
   6641 	pci_intr_handle_t ih;
   6642 #endif
   6643 	pcireg_t reg, memtype;
   6644 	const char *intrstr;
   6645 	int error;
   6646 	int txq_i;
   6647 
   6648 	sc->sc_dev = self;
   6649 	sc->sc_pct = pa->pa_pc;
   6650 	sc->sc_pcitag = pa->pa_tag;
   6651 	sc->sc_dmat = pa->pa_dmat;
   6652 	sc->sc_pciid = pa->pa_id;
   6653 
   6654 	pci_aprint_devinfo(pa, NULL);
   6655 
   6656 	/*
   6657 	 * Get the offset of the PCI Express Capability Structure in PCI
   6658 	 * Configuration Space.
   6659 	 */
   6660 	error = pci_get_capability(sc->sc_pct, sc->sc_pcitag,
   6661 	    PCI_CAP_PCIEXPRESS, &sc->sc_cap_off, NULL);
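	/*
	 * pci_get_capability() returns nonzero only when the capability
	 * was found, so zero here means no PCIe capability structure.
	 */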
   6662 	if (error == 0) {
   6663 		aprint_error_dev(self,
   6664 		    "PCIe capability structure not found!\n");
   6665 		return;
   6666 	}
   6667 
   6668 	/* Clear device-specific "PCI retry timeout" register (41h). */
   6669 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
   6670 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
   6671 
   6672 	/* Enable bus-mastering and hardware bug workaround. */
   6673 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
   6674 	reg |= PCI_COMMAND_MASTER_ENABLE;
	/*
	 * Make sure the INTx disable bit is clear; legacy (non-MSI)
	 * interrupts are not delivered while it is set.
	 */
   6676 	if (reg & PCI_COMMAND_INTERRUPT_DISABLE) {
   6677 		reg &= ~PCI_COMMAND_INTERRUPT_DISABLE;
   6678 	}
   6679 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, reg);
   6680 
   6681 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START);
   6682 	error = pci_mapreg_map(pa, PCI_MAPREG_START, memtype, 0,
   6683 	    &sc->sc_st, &sc->sc_sh, NULL, &sc->sc_sz);
   6684 	if (error != 0) {
   6685 		aprint_error_dev(self, "can't map mem space\n");
   6686 		return;
   6687 	}
   6688 
   6689 	/* Install interrupt handler. */
   6690 #ifdef __HAVE_PCI_MSI_MSIX
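	/* Prefer a single MSI vector; fall back to INTx if unavailable. */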
   6691 	error = ENODEV;
   6692 	if (pci_msi_count(pa) > 0)
   6693 		error = pci_msi_alloc_exact(pa, &sc->sc_pihp, 1);
   6694 	if (error != 0) {
   6695 		if (pci_intx_alloc(pa, &sc->sc_pihp)) {
   6696 			aprint_error_dev(self, "can't map interrupt\n");
   6697 			return;
   6698 		}
   6699 	}
   6700 #else	/* !__HAVE_PCI_MSI_MSIX */
   6701 	if (pci_intr_map(pa, &ih)) {
   6702 		aprint_error_dev(self, "can't map interrupt\n");
   6703 		return;
   6704 	}
   6705 #endif	/* __HAVE_PCI_MSI_MSIX */
   6706 
   6707 	char intrbuf[PCI_INTRSTR_LEN];
   6708 #ifdef __HAVE_PCI_MSI_MSIX
   6709 	intrstr = pci_intr_string(sc->sc_pct, sc->sc_pihp[0], intrbuf,
   6710 	    sizeof(intrbuf));
   6711 	sc->sc_ih = pci_intr_establish(sc->sc_pct, sc->sc_pihp[0], IPL_NET,
   6712 	    iwm_intr, sc);
   6713 #else	/* !__HAVE_PCI_MSI_MSIX */
   6714 	intrstr = pci_intr_string(sc->sc_pct, ih, intrbuf, sizeof(intrbuf));
   6715 	sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET, iwm_intr, sc);
   6716 #endif	/* __HAVE_PCI_MSI_MSIX */
   6717 	if (sc->sc_ih == NULL) {
   6718 		aprint_error_dev(self, "can't establish interrupt");
   6719 		if (intrstr != NULL)
   6720 			aprint_error(" at %s", intrstr);
   6721 		aprint_error("\n");
   6722 		return;
   6723 	}
   6724 	aprint_normal_dev(self, "interrupting at %s\n", intrstr);
   6725 
   6726 	sc->sc_wantresp = -1;
   6727 
   6728 	switch (PCI_PRODUCT(sc->sc_pciid)) {
   6729 	case PCI_PRODUCT_INTEL_WIFI_LINK_7260_1:
   6730 	case PCI_PRODUCT_INTEL_WIFI_LINK_7260_2:
   6731 		sc->sc_fwname = "iwlwifi-7260-9.ucode";
   6732 		sc->host_interrupt_operation_mode = 1;
   6733 		break;
   6734 	case PCI_PRODUCT_INTEL_WIFI_LINK_3160_1:
   6735 	case PCI_PRODUCT_INTEL_WIFI_LINK_3160_2:
   6736 		sc->sc_fwname = "iwlwifi-3160-9.ucode";
   6737 		sc->host_interrupt_operation_mode = 1;
   6738 		break;
   6739 	case PCI_PRODUCT_INTEL_WIFI_LINK_7265_1:
   6740 	case PCI_PRODUCT_INTEL_WIFI_LINK_7265_2:
   6741 		sc->sc_fwname = "iwlwifi-7265-9.ucode";
   6742 		sc->host_interrupt_operation_mode = 0;
   6743 		break;
   6744 	default:
		aprint_error_dev(self, "unknown product %#x\n",
   6746 		    PCI_PRODUCT(sc->sc_pciid));
   6747 		return;
   6748 	}
   6749 	DPRINTF(("%s: firmware=%s\n", DEVNAME(sc), sc->sc_fwname));
   6750 	sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
   6751 
	/*
	 * From here on we start touching the hardware.
	 */
   6755 
   6756 	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
   6757 	if (iwm_prepare_card_hw(sc) != 0) {
   6758 		aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
   6759 		return;
   6760 	}
   6761 
   6762 	/* Allocate DMA memory for firmware transfers. */
   6763 	if ((error = iwm_alloc_fwmem(sc)) != 0) {
   6764 		aprint_error_dev(sc->sc_dev,
   6765 		    "could not allocate memory for firmware\n");
   6766 		return;
   6767 	}
   6768 
   6769 	/* Allocate "Keep Warm" page. */
   6770 	if ((error = iwm_alloc_kw(sc)) != 0) {
   6771 		aprint_error_dev(sc->sc_dev,
   6772 		    "could not allocate keep warm page\n");
   6773 		goto fail1;
   6774 	}
   6775 
   6776 	/* We use ICT interrupts */
   6777 	if ((error = iwm_alloc_ict(sc)) != 0) {
   6778 		aprint_error_dev(sc->sc_dev, "could not allocate ICT table\n");
   6779 		goto fail2;
   6780 	}
   6781 
   6782 	/* Allocate TX scheduler "rings". */
   6783 	if ((error = iwm_alloc_sched(sc)) != 0) {
   6784 		aprint_error_dev(sc->sc_dev,
   6785 		    "could not allocate TX scheduler rings\n");
   6786 		goto fail3;
   6787 	}
   6788 
   6789 	/* Allocate TX rings */
   6790 	for (txq_i = 0; txq_i < __arraycount(sc->txq); txq_i++) {
   6791 		if ((error = iwm_alloc_tx_ring(sc,
   6792 		    &sc->txq[txq_i], txq_i)) != 0) {
   6793 			aprint_error_dev(sc->sc_dev,
   6794 			    "could not allocate TX ring %d\n", txq_i);
   6795 			goto fail4;
   6796 		}
   6797 	}
   6798 
   6799 	/* Allocate RX ring. */
   6800 	if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
   6801 		aprint_error_dev(sc->sc_dev, "could not allocate RX ring\n");
   6802 		goto fail4;
   6803 	}
   6804 
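	/*
	 * Create workqueues for deferred work: "iwmes" handles end-of-scan
	 * processing and "iwmns" handles 802.11 state transitions, both at
	 * IPL_NET.
	 */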
   6805 	workqueue_create(&sc->sc_eswq, "iwmes",
   6806 	    iwm_endscan_cb, sc, PRI_NONE, IPL_NET, 0);
   6807 	workqueue_create(&sc->sc_nswq, "iwmns",
   6808 	    iwm_newstate_cb, sc, PRI_NONE, IPL_NET, 0);
   6809 
   6810 	/* Clear pending interrupts. */
   6811 	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
   6812 
	/*
	 * We can't do a normal attach before the file system is mounted,
	 * because we cannot read the MAC address without loading the
	 * firmware from disk.  So postpone attaching until mountroot is
	 * done.  Note that if the firmware is not present when the hook
	 * runs, recovery requires a full driver unload/load cycle (or a
	 * reboot).
	 */
   6821 	config_mountroot(self, iwm_attach_hook);
   6822 
   6823 	return;
   6824 
   6825 	/* Free allocated memory if something failed during attachment. */
   6826 fail4:	while (--txq_i >= 0)
   6827 		iwm_free_tx_ring(sc, &sc->txq[txq_i]);
   6828 	iwm_free_sched(sc);
   6829 fail3:	if (sc->ict_dma.vaddr != NULL)
   6830 		iwm_free_ict(sc);
   6831 fail2:	iwm_free_kw(sc);
   6832 fail1:	iwm_free_fwmem(sc);
   6833 }
   6834 
   6835 /*
   6836  * Attach the interface to 802.11 radiotap.
   6837  */
   6838 void
   6839 iwm_radiotap_attach(struct iwm_softc *sc)
   6840 {
   6841 	struct ifnet *ifp = sc->sc_ic.ic_ifp;
   6842 
   6843 	bpf_attach2(ifp, DLT_IEEE802_11_RADIO,
   6844 	    sizeof (struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN,
   6845 	    &sc->sc_drvbpf);
   6846 
   6847 	sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
   6848 	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
   6849 	sc->sc_rxtap.wr_ihdr.it_present = htole32(IWM_RX_RADIOTAP_PRESENT);
   6850 
   6851 	sc->sc_txtap_len = sizeof sc->sc_txtapu;
   6852 	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
   6853 	sc->sc_txtap.wt_ihdr.it_present = htole32(IWM_TX_RADIOTAP_PRESENT);
   6854 }
   6855 
   6856 #if 0
   6857 static void
   6858 iwm_init_task(void *arg1)
   6859 {
   6860 	struct iwm_softc *sc = arg1;
   6861 	struct ifnet *ifp = &sc->sc_ic.ic_if;
   6862 	int s;
   6863 
   6864 	s = splnet();
   6865 	while (sc->sc_flags & IWM_FLAG_BUSY)
   6866 		tsleep(&sc->sc_flags, 0, "iwmpwr", 0);
   6867 	sc->sc_flags |= IWM_FLAG_BUSY;
   6868 
   6869 	iwm_stop(ifp, 0);
   6870 	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP)
   6871 		iwm_init(ifp);
   6872 
   6873 	sc->sc_flags &= ~IWM_FLAG_BUSY;
   6874 	wakeup(&sc->sc_flags);
   6875 	splx(s);
   6876 }
   6877 
   6878 static void
   6879 iwm_wakeup(struct iwm_softc *sc)
   6880 {
   6881 	pcireg_t reg;
   6882 
   6883 	/* Clear device-specific "PCI retry timeout" register (41h). */
   6884 	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
   6885 	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
   6886 
   6887 	iwm_init_task(sc);
   6888 }
   6889 
   6890 static int
   6891 iwm_activate(device_t self, enum devact act)
   6892 {
   6893 	struct iwm_softc *sc = device_private(self);
   6894 	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
   6895 
   6896 	switch (act) {
   6897 	case DVACT_DEACTIVATE:
   6898 		if (ifp->if_flags & IFF_RUNNING)
   6899 			iwm_stop(ifp, 0);
   6900 		return 0;
   6901 	default:
   6902 		return EOPNOTSUPP;
   6903 	}
   6904 }
   6905 #endif
   6906 
   6907 CFATTACH_DECL_NEW(iwm, sizeof(struct iwm_softc), iwm_match, iwm_attach,
   6908 	NULL, NULL);
   6909 
   6910 #ifdef IWM_DEBUG
   6911 SYSCTL_SETUP(sysctl_iwm, "sysctl iwm(4) subtree setup")
   6912 {
   6913 	const struct sysctlnode *rnode, *cnode;
   6914 	int rc;
   6915 
   6916 	if ((rc = sysctl_createv(clog, 0, NULL, &rnode,
   6917 	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "iwm",
   6918 	    SYSCTL_DESCR("iwm global controls"),
   6919 	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
   6920 		goto err;
   6921 
   6922 	/* control debugging printfs */
   6923 	if ((rc = sysctl_createv(clog, 0, &rnode, &cnode,
   6924 	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT,
   6925 	    "debug", SYSCTL_DESCR("Enable debugging output"),
   6926 	    NULL, 0, &iwm_debug, 0, CTL_CREATE, CTL_EOL)) != 0)
   6927 		goto err;
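
	/*
	 * The "debug" knob created above should then be tunable at run time
	 * with something like:  sysctl -w hw.iwm.debug=1
	 */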
   6928 
   6929 	return;
   6930 
   6931  err:
   6932 	aprint_error("%s: sysctl_createv failed (rc = %d)\n", __func__, rc);
   6933 }
   6934 #endif /* IWM_DEBUG */
   6935