/* if_iwm.c revision 1.3 (NetBSD source browser header; file lives in sys/dev/pci) */
      1 /*	$NetBSD: if_iwm.c,v 1.3 2015/02/13 17:40:13 nonaka Exp $	*/
      2 /*	OpenBSD: if_iwm.c,v 1.18 2015/02/11 01:12:42 brad Exp	*/
      3 
      4 /*
      5  * Copyright (c) 2014 genua mbh <info (at) genua.de>
      6  * Copyright (c) 2014 Fixup Software Ltd.
      7  *
      8  * Permission to use, copy, modify, and distribute this software for any
      9  * purpose with or without fee is hereby granted, provided that the above
     10  * copyright notice and this permission notice appear in all copies.
     11  *
     12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
     13  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
     14  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
     15  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
     16  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
     17  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
     18  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
     19  */
     20 
     21 /*-
     22  * Based on BSD-licensed source modules in the Linux iwlwifi driver,
     23  * which were used as the reference documentation for this implementation.
     24  *
     25  * Driver version we are currently based off of is
     26  * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
     27  *
     28  ***********************************************************************
     29  *
     30  * This file is provided under a dual BSD/GPLv2 license.  When using or
     31  * redistributing this file, you may do so under either license.
     32  *
     33  * GPL LICENSE SUMMARY
     34  *
     35  * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
     36  *
     37  * This program is free software; you can redistribute it and/or modify
     38  * it under the terms of version 2 of the GNU General Public License as
     39  * published by the Free Software Foundation.
     40  *
     41  * This program is distributed in the hope that it will be useful, but
     42  * WITHOUT ANY WARRANTY; without even the implied warranty of
     43  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
     44  * General Public License for more details.
     45  *
     46  * You should have received a copy of the GNU General Public License
     47  * along with this program; if not, write to the Free Software
     48  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
     49  * USA
     50  *
     51  * The full GNU General Public License is included in this distribution
     52  * in the file called COPYING.
     53  *
     54  * Contact Information:
     55  *  Intel Linux Wireless <ilw (at) linux.intel.com>
     56  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
     57  *
     58  *
     59  * BSD LICENSE
     60  *
     61  * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
     62  * All rights reserved.
     63  *
     64  * Redistribution and use in source and binary forms, with or without
     65  * modification, are permitted provided that the following conditions
     66  * are met:
     67  *
     68  *  * Redistributions of source code must retain the above copyright
     69  *    notice, this list of conditions and the following disclaimer.
     70  *  * Redistributions in binary form must reproduce the above copyright
     71  *    notice, this list of conditions and the following disclaimer in
     72  *    the documentation and/or other materials provided with the
     73  *    distribution.
     74  *  * Neither the name Intel Corporation nor the names of its
     75  *    contributors may be used to endorse or promote products derived
     76  *    from this software without specific prior written permission.
     77  *
     78  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
     79  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
     80  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
     81  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
     82  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
     83  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
     84  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     85  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     86  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     87  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
     88  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     89  */
     90 
     91 /*-
     92  * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini (at) free.fr>
     93  *
     94  * Permission to use, copy, modify, and distribute this software for any
     95  * purpose with or without fee is hereby granted, provided that the above
     96  * copyright notice and this permission notice appear in all copies.
     97  *
     98  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
     99  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    100  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    101  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    102  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    103  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    104  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
    105  */
    106 
    107 #include <sys/cdefs.h>
    108 __KERNEL_RCSID(0, "$NetBSD: if_iwm.c,v 1.3 2015/02/13 17:40:13 nonaka Exp $");
    109 
    110 #include <sys/param.h>
    111 #include <sys/conf.h>
    112 #include <sys/kernel.h>
    113 #include <sys/kmem.h>
    114 #include <sys/mbuf.h>
    115 #include <sys/mutex.h>
    116 #include <sys/proc.h>
    117 #include <sys/socket.h>
    118 #include <sys/sockio.h>
    119 #include <sys/systm.h>
    120 
    121 #include <sys/cpu.h>
    122 #include <sys/bus.h>
    123 #include <sys/workqueue.h>
    124 #include <machine/endian.h>
    125 #include <machine/intr.h>
    126 
    127 #include <dev/pci/pcireg.h>
    128 #include <dev/pci/pcivar.h>
    129 #include <dev/pci/pcidevs.h>
    130 #include <dev/firmload.h>
    131 
    132 #include <net/bpf.h>
    133 #include <net/if.h>
    134 #include <net/if_arp.h>
    135 #include <net/if_dl.h>
    136 #include <net/if_media.h>
    137 #include <net/if_types.h>
    138 #include <net/if_ether.h>
    139 
    140 #include <netinet/in.h>
    141 #include <netinet/in_systm.h>
    142 #include <netinet/ip.h>
    143 
    144 #include <net80211/ieee80211_var.h>
    145 #include <net80211/ieee80211_amrr.h>
    146 #include <net80211/ieee80211_radiotap.h>
    147 
    148 #define DEVNAME(_s)	device_xname((_s)->sc_dev)
    149 #define IC2IFP(_ic_)	((_ic_)->ic_ifp)
    150 
    151 #define le16_to_cpup(_a_) (le16toh(*(const uint16_t *)(_a_)))
    152 #define le32_to_cpup(_a_) (le32toh(*(const uint32_t *)(_a_)))
    153 
    154 #ifdef IWM_DEBUG
    155 #define DPRINTF(x)	do { if (iwm_debug > 0) printf x; } while (0)
    156 #define DPRINTFN(n, x)	do { if (iwm_debug >= (n)) printf x; } while (0)
    157 int iwm_debug = 1;
    158 #else
    159 #define DPRINTF(x)	do { ; } while (0)
    160 #define DPRINTFN(n, x)	do { ; } while (0)
    161 #endif
    162 
    163 #include <dev/pci/if_iwmreg.h>
    164 #include <dev/pci/if_iwmvar.h>
    165 
/*
 * Channel numbers the device NVM can report, in firmware index order:
 * the 14 2.4 GHz channels first, then the 5 GHz set.  The first
 * IWM_NUM_2GHZ_CHANNELS entries are the 2.4 GHz band.
 */
const uint8_t iwm_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44 , 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};
#define IWM_NUM_2GHZ_CHANNELS	14
    175 
    176 /* It looks like 11a TX is broken, unfortunately. */
    177 #define IWM_NO_5GHZ		1
    178 
/*
 * Rate table mapping 802.11b/g rates to the PLCP values the firmware
 * expects.  CCK rates occupy the first four entries, OFDM rates the
 * rest (see the IWM_RIDX_* macros following this table).
 */
const struct iwm_rate {
	uint8_t rate;	/* rate in units of 500 kb/s (2 == 1 Mb/s) */
	uint8_t plcp;	/* PLCP signal value for the firmware */
} iwm_rates[] = {
	{   2,	IWM_RATE_1M_PLCP  },
	{   4,	IWM_RATE_2M_PLCP  },
	{  11,	IWM_RATE_5M_PLCP  },
	{  22,	IWM_RATE_11M_PLCP },
	{  12,	IWM_RATE_6M_PLCP  },
	{  18,	IWM_RATE_9M_PLCP  },
	{  24,	IWM_RATE_12M_PLCP },
	{  36,	IWM_RATE_18M_PLCP },
	{  48,	IWM_RATE_24M_PLCP },
	{  72,	IWM_RATE_36M_PLCP },
	{  96,	IWM_RATE_48M_PLCP },
	{ 108,	IWM_RATE_54M_PLCP },
};
    196 #define IWM_RIDX_CCK	0
    197 #define IWM_RIDX_OFDM	4
    198 #define IWM_RIDX_MAX	(__arraycount(iwm_rates)-1)
    199 #define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
    200 #define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
    201 
/*
 * Argument bundle for a deferred net80211 state transition, carried
 * through the workqueue (presumably queued by iwm_newstate() and
 * handled in iwm_newstate_cb() -- confirm at those functions).
 */
struct iwm_newstate_state {
	struct work ns_wk;		/* workqueue linkage */
	struct ieee80211com *ns_ic;	/* interface requesting the change */
	enum ieee80211_state ns_nstate;	/* target state */
	int ns_arg;			/* opaque argument from net80211 */
	int ns_generation;	/* NOTE(review): looks like a staleness
				 * counter to drop outdated requests --
				 * verify against iwm_newstate_cb() */
};
    209 
    210 int	iwm_store_cscheme(struct iwm_softc *, uint8_t *, size_t);
    211 int	iwm_firmware_store_section(struct iwm_softc *, enum iwm_ucode_type,
    212 					uint8_t *, size_t);
    213 int	iwm_set_default_calib(struct iwm_softc *, const void *);
    214 int	iwm_read_firmware(struct iwm_softc *);
    215 uint32_t iwm_read_prph(struct iwm_softc *, uint32_t);
    216 void	iwm_write_prph(struct iwm_softc *, uint32_t, uint32_t);
    217 int	iwm_read_mem(struct iwm_softc *, uint32_t, void *, int);
    218 int	iwm_write_mem(struct iwm_softc *, uint32_t, const void *, int);
    219 int	iwm_write_mem32(struct iwm_softc *, uint32_t, uint32_t);
    220 int	iwm_poll_bit(struct iwm_softc *, int, uint32_t, uint32_t, int);
    221 int	iwm_nic_lock(struct iwm_softc *);
    222 void	iwm_nic_unlock(struct iwm_softc *);
    223 void	iwm_set_bits_mask_prph(struct iwm_softc *, uint32_t, uint32_t,
    224 				uint32_t);
    225 void	iwm_set_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
    226 void	iwm_clear_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
    227 int	iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *,
    228 				bus_size_t, bus_size_t);
    229 void	iwm_dma_contig_free(struct iwm_dma_info *);
    230 int	iwm_alloc_fwmem(struct iwm_softc *);
    231 void	iwm_free_fwmem(struct iwm_softc *);
    232 int	iwm_alloc_sched(struct iwm_softc *);
    233 void	iwm_free_sched(struct iwm_softc *);
    234 int	iwm_alloc_kw(struct iwm_softc *);
    235 void	iwm_free_kw(struct iwm_softc *);
    236 int	iwm_alloc_ict(struct iwm_softc *);
    237 void	iwm_free_ict(struct iwm_softc *);
    238 int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
    239 void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
    240 void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
    241 int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *, int);
    242 void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
    243 void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
    244 void	iwm_enable_rfkill_int(struct iwm_softc *);
    245 int	iwm_check_rfkill(struct iwm_softc *);
    246 void	iwm_enable_interrupts(struct iwm_softc *);
    247 void	iwm_restore_interrupts(struct iwm_softc *);
    248 void	iwm_disable_interrupts(struct iwm_softc *);
    249 void	iwm_ict_reset(struct iwm_softc *);
    250 int	iwm_set_hw_ready(struct iwm_softc *);
    251 int	iwm_prepare_card_hw(struct iwm_softc *);
    252 void	iwm_apm_config(struct iwm_softc *);
    253 int	iwm_apm_init(struct iwm_softc *);
    254 void	iwm_apm_stop(struct iwm_softc *);
    255 int	iwm_start_hw(struct iwm_softc *);
    256 void	iwm_stop_device(struct iwm_softc *);
    257 void	iwm_set_pwr(struct iwm_softc *);
    258 void	iwm_mvm_nic_config(struct iwm_softc *);
    259 int	iwm_nic_rx_init(struct iwm_softc *);
    260 int	iwm_nic_tx_init(struct iwm_softc *);
    261 int	iwm_nic_init(struct iwm_softc *);
    262 void	iwm_enable_txq(struct iwm_softc *, int, int);
    263 int	iwm_post_alive(struct iwm_softc *);
    264 int	iwm_is_valid_channel(uint16_t);
    265 uint8_t	iwm_ch_id_to_ch_index(uint16_t);
    266 uint16_t iwm_channel_id_to_papd(uint16_t);
    267 uint16_t iwm_channel_id_to_txp(struct iwm_softc *, uint16_t);
    268 int	iwm_phy_db_get_section_data(struct iwm_softc *, uint32_t, uint8_t **,
    269 					uint16_t *, uint16_t);
    270 int	iwm_send_phy_db_cmd(struct iwm_softc *, uint16_t, uint16_t, void *);
int	iwm_send_phy_db_data(struct iwm_softc *);
    273 void	iwm_mvm_te_v2_to_v1(const struct iwm_time_event_cmd_v2 *,
    274 				struct iwm_time_event_cmd_v1 *);
    275 int	iwm_mvm_send_time_event_cmd(struct iwm_softc *,
    276 					const struct iwm_time_event_cmd_v2 *);
    277 int	iwm_mvm_time_event_send_add(struct iwm_softc *, struct iwm_node *,
    278 					void *, struct iwm_time_event_cmd_v2 *);
    279 void	iwm_mvm_protect_session(struct iwm_softc *, struct iwm_node *,
    280 				uint32_t, uint32_t, uint32_t);
    281 int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t, uint16_t,
    282 				uint8_t *, uint16_t *);
    283 int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
    284 				uint16_t *);
    285 void	iwm_init_channel_map(struct iwm_softc *, const uint16_t * const);
    286 int	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
    287 				const uint16_t *, const uint16_t *, uint8_t,
    288 				uint8_t);
    289 int	iwm_nvm_init(struct iwm_softc *);
    290 int	iwm_firmware_load_chunk(struct iwm_softc *, uint32_t, const uint8_t *,
    291 				uint32_t);
    292 int	iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
    293 int	iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
    294 int	iwm_fw_alive(struct iwm_softc *, uint32_t);
    295 int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
    296 int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
    297 int	iwm_mvm_load_ucode_wait_alive(struct iwm_softc *, enum iwm_ucode_type);
    298 int	iwm_run_init_mvm_ucode(struct iwm_softc *, int);
    299 int	iwm_rx_addbuf(struct iwm_softc *, int, int);
    300 int	iwm_mvm_calc_rssi(struct iwm_softc *, struct iwm_rx_phy_info *);
    301 int	iwm_mvm_get_signal_strength(struct iwm_softc *,
    302 					struct iwm_rx_phy_info *);
    303 void	iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *, struct iwm_rx_packet *,
    304 				struct iwm_rx_data *);
    305 int	iwm_get_noise(const struct iwm_mvm_statistics_rx_non_phy *);
    306 void	iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct iwm_rx_packet *,
    307 				struct iwm_rx_data *);
    308 void	iwm_mvm_rx_tx_cmd_single(struct iwm_softc *, struct iwm_rx_packet *,
    309 				struct iwm_node *);
    310 void	iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
    311 			struct iwm_rx_data *);
    312 int	iwm_mvm_binding_cmd(struct iwm_softc *, struct iwm_node *, uint32_t);
    313 int	iwm_mvm_binding_update(struct iwm_softc *, struct iwm_node *, int);
    314 int	iwm_mvm_binding_add_vif(struct iwm_softc *, struct iwm_node *);
    315 void	iwm_mvm_phy_ctxt_cmd_hdr(struct iwm_softc *, struct iwm_mvm_phy_ctxt *,
    316 			struct iwm_phy_context_cmd *, uint32_t, uint32_t);
    317 void	iwm_mvm_phy_ctxt_cmd_data(struct iwm_softc *,
    318 		struct iwm_phy_context_cmd *, struct ieee80211_channel *,
    319 		uint8_t, uint8_t);
    320 int	iwm_mvm_phy_ctxt_apply(struct iwm_softc *, struct iwm_mvm_phy_ctxt *,
    321 				uint8_t, uint8_t, uint32_t, uint32_t);
    322 int	iwm_mvm_phy_ctxt_add(struct iwm_softc *, struct iwm_mvm_phy_ctxt *,
    323 				struct ieee80211_channel *, uint8_t, uint8_t);
    324 int	iwm_mvm_phy_ctxt_changed(struct iwm_softc *, struct iwm_mvm_phy_ctxt *,
    325 				struct ieee80211_channel *, uint8_t, uint8_t);
    326 int	iwm_send_cmd(struct iwm_softc *, struct iwm_host_cmd *);
    327 int	iwm_mvm_send_cmd_pdu(struct iwm_softc *, uint8_t, uint32_t, uint16_t,
    328 				const void *);
    329 int	iwm_mvm_send_cmd_status(struct iwm_softc *, struct iwm_host_cmd *,
    330 				uint32_t *);
    331 int	iwm_mvm_send_cmd_pdu_status(struct iwm_softc *, uint8_t,
    332 					uint16_t, const void *, uint32_t *);
    333 void	iwm_free_resp(struct iwm_softc *, struct iwm_host_cmd *);
    334 void	iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
    335 void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t, uint16_t);
    336 const struct iwm_rate *iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
    337 			struct ieee80211_frame *, struct iwm_tx_cmd *);
    338 int	iwm_tx(struct iwm_softc *, struct mbuf *, struct ieee80211_node *, int);
    339 int	iwm_mvm_beacon_filter_send_cmd(struct iwm_softc *,
    340 					struct iwm_beacon_filter_cmd *);
    341 void	iwm_mvm_beacon_filter_set_cqm_params(struct iwm_softc *,
    342 			struct iwm_node *, struct iwm_beacon_filter_cmd *);
    343 int	iwm_mvm_update_beacon_abort(struct iwm_softc *, struct iwm_node *, int);
    344 void	iwm_mvm_power_log(struct iwm_softc *, struct iwm_mac_power_cmd *);
    345 void	iwm_mvm_power_build_cmd(struct iwm_softc *, struct iwm_node *,
    346 				struct iwm_mac_power_cmd *);
    347 int	iwm_mvm_power_mac_update_mode(struct iwm_softc *, struct iwm_node *);
    348 int	iwm_mvm_power_update_device(struct iwm_softc *);
    349 int	iwm_mvm_enable_beacon_filter(struct iwm_softc *, struct iwm_node *);
    350 int	iwm_mvm_disable_beacon_filter(struct iwm_softc *, struct iwm_node *);
    351 void	iwm_mvm_add_sta_cmd_v6_to_v5(struct iwm_mvm_add_sta_cmd_v6 *,
    352 					struct iwm_mvm_add_sta_cmd_v5 *);
    353 int	iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *,
    354 					struct iwm_mvm_add_sta_cmd_v6 *, int *);
    355 int	iwm_mvm_sta_send_to_fw(struct iwm_softc *, struct iwm_node *, int);
    356 int	iwm_mvm_add_sta(struct iwm_softc *, struct iwm_node *);
    357 int	iwm_mvm_update_sta(struct iwm_softc *, struct iwm_node *);
    358 int	iwm_mvm_add_int_sta_common(struct iwm_softc *, struct iwm_int_sta *,
    359 				const uint8_t *, uint16_t, uint16_t);
    360 int	iwm_mvm_add_aux_sta(struct iwm_softc *);
    361 uint16_t iwm_mvm_scan_rx_chain(struct iwm_softc *);
    362 uint32_t iwm_mvm_scan_max_out_time(struct iwm_softc *, uint32_t, int);
    363 uint32_t iwm_mvm_scan_suspend_time(struct iwm_softc *, int);
    364 uint32_t iwm_mvm_scan_rxon_flags(struct iwm_softc *, int);
    365 uint32_t iwm_mvm_scan_rate_n_flags(struct iwm_softc *, int, int);
    366 uint16_t iwm_mvm_get_active_dwell(struct iwm_softc *, int, int);
    367 uint16_t iwm_mvm_get_passive_dwell(struct iwm_softc *, int);
    368 int	iwm_mvm_scan_fill_channels(struct iwm_softc *, struct iwm_scan_cmd *,
    369 				int, int, int);
    370 uint16_t iwm_mvm_fill_probe_req(struct iwm_softc *, struct ieee80211_frame *,
    371 	const uint8_t *, int, const uint8_t *, int, const uint8_t *, int, int);
    372 int	iwm_mvm_scan_request(struct iwm_softc *, int, int, uint8_t *, int);
    373 void	iwm_mvm_ack_rates(struct iwm_softc *, struct iwm_node *, int *, int *);
    374 void	iwm_mvm_mac_ctxt_cmd_common(struct iwm_softc *, struct iwm_node *,
    375 					struct iwm_mac_ctx_cmd *, uint32_t);
    376 int	iwm_mvm_mac_ctxt_send_cmd(struct iwm_softc *, struct iwm_mac_ctx_cmd *);
    377 void	iwm_mvm_mac_ctxt_cmd_fill_sta(struct iwm_softc *, struct iwm_node *,
    378 					struct iwm_mac_data_sta *, int);
    379 int	iwm_mvm_mac_ctxt_cmd_station(struct iwm_softc *, struct iwm_node *,
    380 					uint32_t);
    381 int	iwm_mvm_mac_ctx_send(struct iwm_softc *, struct iwm_node *, uint32_t);
    382 int	iwm_mvm_mac_ctxt_add(struct iwm_softc *, struct iwm_node *);
    383 int	iwm_mvm_mac_ctxt_changed(struct iwm_softc *, struct iwm_node *);
    384 int	iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_node *);
    385 int	iwm_auth(struct iwm_softc *);
    386 int	iwm_assoc(struct iwm_softc *);
    387 int	iwm_release(struct iwm_softc *, struct iwm_node *);
    388 void	iwm_calib_timeout(void *);
    389 void	iwm_setrates(struct iwm_node *);
    390 int	iwm_media_change(struct ifnet *);
    391 void	iwm_newstate_cb(void *);
    392 int	iwm_newstate(struct ieee80211com *, enum ieee80211_state, int);
    393 void	iwm_endscan_cb(void *);
    394 int	iwm_init_hw(struct iwm_softc *);
    395 int	iwm_init(struct ifnet *);
    396 void	iwm_start(struct ifnet *);
    397 void	iwm_stop(struct ifnet *, int);
    398 void	iwm_watchdog(struct ifnet *);
    399 int	iwm_ioctl(struct ifnet *, u_long, void *);
    400 const char *iwm_desc_lookup(uint32_t);
    401 #ifdef IWM_DEBUG
    402 void	iwm_nic_error(struct iwm_softc *);
    403 #endif
    404 void	iwm_notif_intr(struct iwm_softc *);
    405 int	iwm_intr(void *);
    406 int	iwm_preinit(struct iwm_softc *);
    407 void	iwm_attach_hook(struct device *);
    408 void	iwm_attach(struct device *, struct device *, void *);
    409 void	iwm_init_task(void *);
    410 int	iwm_activate(struct device *, int);
    411 void	iwm_wakeup(struct iwm_softc *);
    412 
    413 void	iwm_radiotap_attach(struct iwm_softc *);
    414 
    415 static int
    416 iwm_firmload(struct iwm_softc *sc)
    417 {
    418 	struct iwm_fw_info *fw = &sc->sc_fw;
    419 	firmware_handle_t fwh;
    420 	int error;
    421 
    422 	/* Open firmware image. */
    423 	if ((error = firmware_open("if_iwm", sc->sc_fwname, &fwh)) != 0) {
    424 		aprint_error_dev(sc->sc_dev,
    425 		    "could not get firmware handle %s\n", sc->sc_fwname);
    426 		return error;
    427 	}
    428 
    429 	fw->fw_rawsize = firmware_get_size(fwh);
    430 	/*
    431 	 * Well, this is how the Linux driver checks it ....
    432 	 */
    433 	if (fw->fw_rawsize < sizeof(uint32_t)) {
    434 		aprint_error_dev(sc->sc_dev,
    435 		    "firmware too short: %zd bytes\n", fw->fw_rawsize);
    436 		error = EINVAL;
    437 		goto out;
    438 	}
    439 
    440 	/* some sanity */
    441 	if (fw->fw_rawsize > IWM_FWMAXSIZE) {
    442 		aprint_error_dev(sc->sc_dev,
    443 		    "firmware size is ridiculous: %zd bytes\n",
    444 		fw->fw_rawsize);
    445 		error = EINVAL;
    446 		goto out;
    447 	}
    448 
    449 	/* Read the firmware. */
    450 	fw->fw_rawdata = kmem_alloc(fw->fw_rawsize, KM_SLEEP);
    451 	if (fw->fw_rawdata == NULL) {
    452 		aprint_error_dev(sc->sc_dev,
    453 		    "not enough memory to stock firmware %s\n", sc->sc_fwname);
    454 		error = ENOMEM;
    455 		goto out;
    456 	}
    457 	error = firmware_read(fwh, 0, fw->fw_rawdata, fw->fw_rawsize);
    458 	if (error) {
    459 		aprint_error_dev(sc->sc_dev,
    460 		    "could not read firmware %s\n", sc->sc_fwname);
    461 		goto out;
    462 	}
    463 
    464  out:
    465 	/* caller will release memory, if necessary */
    466 
    467 	firmware_close(fwh);
    468 	return error;
    469 }
    470 
    471 /*
    472  * just maintaining status quo.
    473  */
    474 static void
    475 iwm_fix_channel(struct ieee80211com *ic, struct mbuf *m)
    476 {
    477 	struct ieee80211_frame *wh;
    478 	uint8_t subtype;
    479 	uint8_t *frm, *efrm;
    480 
    481 	wh = mtod(m, struct ieee80211_frame *);
    482 
    483 	if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_MGT)
    484 		return;
    485 
    486 	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
    487 
    488 	if (subtype != IEEE80211_FC0_SUBTYPE_BEACON &&
    489 	    subtype != IEEE80211_FC0_SUBTYPE_PROBE_RESP)
    490 		return;
    491 
    492 	frm = (uint8_t *)(wh + 1);
    493 	efrm = mtod(m, uint8_t *) + m->m_len;
    494 
    495 	frm += 12;      /* skip tstamp, bintval and capinfo fields */
    496 	while (frm < efrm) {
    497 		if (*frm == IEEE80211_ELEMID_DSPARMS) {
    498 #if IEEE80211_CHAN_MAX < 255
    499 			if (frm[2] <= IEEE80211_CHAN_MAX)
    500 #endif
    501 				ic->ic_curchan = &ic->ic_channels[frm[2]];
    502 		}
    503 		frm += frm[1] + 2;
    504 	}
    505 }
    506 
    507 /*
    508  * Firmware parser.
    509  */
    510 
    511 int
    512 iwm_store_cscheme(struct iwm_softc *sc, uint8_t *data, size_t dlen)
    513 {
    514 	struct iwm_fw_cscheme_list *l = (void *)data;
    515 
    516 	if (dlen < sizeof(*l) ||
    517 	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
    518 		return EINVAL;
    519 
    520 	/* we don't actually store anything for now, always use s/w crypto */
    521 
    522 	return 0;
    523 }
    524 
    525 int
    526 iwm_firmware_store_section(struct iwm_softc *sc,
    527 	enum iwm_ucode_type type, uint8_t *data, size_t dlen)
    528 {
    529 	struct iwm_fw_sects *fws;
    530 	struct iwm_fw_onesect *fwone;
    531 
    532 	if (type >= IWM_UCODE_TYPE_MAX)
    533 		return EINVAL;
    534 	if (dlen < sizeof(uint32_t))
    535 		return EINVAL;
    536 
    537 	fws = &sc->sc_fw.fw_sects[type];
    538 	if (fws->fw_count >= IWM_UCODE_SECT_MAX)
    539 		return EINVAL;
    540 
    541 	fwone = &fws->fw_sect[fws->fw_count];
    542 
    543 	/* first 32bit are device load offset */
    544 	memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));
    545 
    546 	/* rest is data */
    547 	fwone->fws_data = data + sizeof(uint32_t);
    548 	fwone->fws_len = dlen - sizeof(uint32_t);
    549 
    550 	/* for freeing the buffer during driver unload */
    551 	fwone->fws_alloc = data;
    552 	fwone->fws_allocsize = dlen;
    553 
    554 	fws->fw_count++;
    555 	fws->fw_totlen += fwone->fws_len;
    556 
    557 	return 0;
    558 }
    559 
/* iwlwifi: iwl-drv.c */
/*
 * Wire layout of an IWM_UCODE_TLV_DEF_CALIB section: the ucode image
 * type it applies to (little-endian on the wire, see
 * iwm_set_default_calib()) followed by the calibration control words.
 */
struct iwm_tlv_calib_data {
	uint32_t ucode_type;		/* le32; one of IWM_UCODE_TYPE_* */
	struct iwm_tlv_calib_ctrl calib;
} __packed;
    565 
    566 int
    567 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
    568 {
    569 	const struct iwm_tlv_calib_data *def_calib = data;
    570 	uint32_t ucode_type = le32toh(def_calib->ucode_type);
    571 
    572 	if (ucode_type >= IWM_UCODE_TYPE_MAX) {
    573 		DPRINTF(("%s: Wrong ucode_type %u for default "
    574 		    "calibration.\n", DEVNAME(sc), ucode_type));
    575 		return EINVAL;
    576 	}
    577 
    578 	sc->sc_default_calib[ucode_type].flow_trigger =
    579 	    def_calib->calib.flow_trigger;
    580 	sc->sc_default_calib[ucode_type].event_trigger =
    581 	    def_calib->calib.event_trigger;
    582 
    583 	return 0;
    584 }
    585 
    586 int
    587 iwm_read_firmware(struct iwm_softc *sc)
    588 {
    589 	struct iwm_fw_info *fw = &sc->sc_fw;
    590         struct iwm_tlv_ucode_header *uhdr;
    591         struct iwm_ucode_tlv tlv;
    592 	enum iwm_ucode_tlv_type tlv_type;
    593 	uint8_t *data;
    594 	int error, status;
    595 	size_t len;
    596 
    597 	if (fw->fw_status == IWM_FW_STATUS_NONE) {
    598 		fw->fw_status = IWM_FW_STATUS_INPROGRESS;
    599 	} else {
    600 		while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
    601 			tsleep(&sc->sc_fw, 0, "iwmfwp", 0);
    602 	}
    603 	status = fw->fw_status;
    604 
    605 	if (status == IWM_FW_STATUS_DONE)
    606 		return 0;
    607 
    608 	/*
    609 	 * Load firmware into driver memory.
    610 	 * fw_rawdata and fw_rawsize will be set.
    611 	 */
    612 	error = iwm_firmload(sc);
    613 	if (error != 0) {
    614 		aprint_error_dev(sc->sc_dev,
    615 		    "could not read firmware %s (error %d)\n",
    616 		    sc->sc_fwname, error);
    617 		goto out;
    618 	}
    619 
    620 	/*
    621 	 * Parse firmware contents
    622 	 */
    623 
    624 	uhdr = (void *)fw->fw_rawdata;
    625 	if (*(uint32_t *)fw->fw_rawdata != 0
    626 	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
    627 		aprint_error_dev(sc->sc_dev, "invalid firmware %s\n",
    628 		    sc->sc_fwname);
    629 		error = EINVAL;
    630 		goto out;
    631 	}
    632 
    633 	sc->sc_fwver = le32toh(uhdr->ver);
    634 	data = uhdr->data;
    635 	len = fw->fw_rawsize - sizeof(*uhdr);
    636 
    637 	while (len >= sizeof(tlv)) {
    638 		size_t tlv_len;
    639 		void *tlv_data;
    640 
    641 		memcpy(&tlv, data, sizeof(tlv));
    642 		tlv_len = le32toh(tlv.length);
    643 		tlv_type = le32toh(tlv.type);
    644 
    645 		len -= sizeof(tlv);
    646 		data += sizeof(tlv);
    647 		tlv_data = data;
    648 
    649 		if (len < tlv_len) {
    650 			aprint_error_dev(sc->sc_dev,
    651 			    "firmware too short: %zu bytes\n", len);
    652 			error = EINVAL;
    653 			goto parse_out;
    654 		}
    655 
    656 		switch ((int)tlv_type) {
    657 		case IWM_UCODE_TLV_PROBE_MAX_LEN:
    658 			if (tlv_len < sizeof(uint32_t)) {
    659 				error = EINVAL;
    660 				goto parse_out;
    661 			}
    662 			sc->sc_capa_max_probe_len
    663 			    = le32toh(*(uint32_t *)tlv_data);
    664 			/* limit it to something sensible */
    665 			if (sc->sc_capa_max_probe_len > (1<<16)) {
    666 				DPRINTF(("%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
    667 				    "ridiculous\n", DEVNAME(sc)));
    668 				error = EINVAL;
    669 				goto parse_out;
    670 			}
    671 			break;
    672 		case IWM_UCODE_TLV_PAN:
    673 			if (tlv_len) {
    674 				error = EINVAL;
    675 				goto parse_out;
    676 			}
    677 			sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
    678 			break;
    679 		case IWM_UCODE_TLV_FLAGS:
    680 			if (tlv_len < sizeof(uint32_t)) {
    681 				error = EINVAL;
    682 				goto parse_out;
    683 			}
    684 			/*
    685 			 * Apparently there can be many flags, but Linux driver
    686 			 * parses only the first one, and so do we.
    687 			 *
    688 			 * XXX: why does this override IWM_UCODE_TLV_PAN?
    689 			 * Intentional or a bug?  Observations from
    690 			 * current firmware file:
    691 			 *  1) TLV_PAN is parsed first
    692 			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
    693 			 * ==> this resets TLV_PAN to itself... hnnnk
    694 			 */
    695 			sc->sc_capaflags = le32toh(*(uint32_t *)tlv_data);
    696 			break;
    697 		case IWM_UCODE_TLV_CSCHEME:
    698 			if ((error = iwm_store_cscheme(sc,
    699 			    tlv_data, tlv_len)) != 0)
    700 				goto parse_out;
    701 			break;
    702 		case IWM_UCODE_TLV_NUM_OF_CPU:
    703 			if (tlv_len != sizeof(uint32_t)) {
    704 				error = EINVAL;
    705 				goto parse_out;
    706 			}
    707 			if (le32toh(*(uint32_t*)tlv_data) != 1) {
    708 				DPRINTF(("%s: driver supports "
    709 				    "only TLV_NUM_OF_CPU == 1", DEVNAME(sc)));
    710 				error = EINVAL;
    711 				goto parse_out;
    712 			}
    713 			break;
    714 		case IWM_UCODE_TLV_SEC_RT:
    715 			if ((error = iwm_firmware_store_section(sc,
    716 			    IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len)) != 0)
    717 				goto parse_out;
    718 			break;
    719 		case IWM_UCODE_TLV_SEC_INIT:
    720 			if ((error = iwm_firmware_store_section(sc,
    721 			    IWM_UCODE_TYPE_INIT, tlv_data, tlv_len)) != 0)
    722 				goto parse_out;
    723 			break;
    724 		case IWM_UCODE_TLV_SEC_WOWLAN:
    725 			if ((error = iwm_firmware_store_section(sc,
    726 			    IWM_UCODE_TYPE_WOW, tlv_data, tlv_len)) != 0)
    727 				goto parse_out;
    728 			break;
    729 		case IWM_UCODE_TLV_DEF_CALIB:
    730 			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
    731 				error = EINVAL;
    732 				goto parse_out;
    733 			}
    734 			if ((error = iwm_set_default_calib(sc, tlv_data)) != 0)
    735 				goto parse_out;
    736 			break;
    737 		case IWM_UCODE_TLV_PHY_SKU:
    738 			if (tlv_len != sizeof(uint32_t)) {
    739 				error = EINVAL;
    740 				goto parse_out;
    741 			}
    742 			sc->sc_fw_phy_config = le32toh(*(uint32_t *)tlv_data);
    743 			break;
    744 
    745 		case IWM_UCODE_TLV_API_CHANGES_SET:
    746 		case IWM_UCODE_TLV_ENABLED_CAPABILITIES:
    747 			/* ignore, not used by current driver */
    748 			break;
    749 
    750 		default:
    751 			DPRINTF(("%s: unknown firmware section %d, abort\n",
    752 			    DEVNAME(sc), tlv_type));
    753 			error = EINVAL;
    754 			goto parse_out;
    755 		}
    756 
    757 		len -= roundup(tlv_len, 4);
    758 		data += roundup(tlv_len, 4);
    759 	}
    760 
    761 	KASSERT(error == 0);
    762 
    763  parse_out:
    764 	if (error) {
    765 		aprint_error_dev(sc->sc_dev,
    766 		    "firmware parse error, section type %d\n", tlv_type);
    767 	}
    768 
    769 	if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
    770 		aprint_error_dev(sc->sc_dev,
    771 		    "device uses unsupported power ops\n");
    772 		error = ENOTSUP;
    773 	}
    774 
    775  out:
    776 	if (error)
    777 		fw->fw_status = IWM_FW_STATUS_NONE;
    778 	else
    779 		fw->fw_status = IWM_FW_STATUS_DONE;
    780 	wakeup(&sc->sc_fw);
    781 
    782 	if (error) {
    783 		kmem_free(fw->fw_rawdata, fw->fw_rawsize);
    784 		fw->fw_rawdata = NULL;
    785 	}
    786 	return error;
    787 }
    788 
    789 /*
    790  * basic device access
    791  */
    792 
uint32_t
iwm_read_prph(struct iwm_softc *sc, uint32_t addr)
{
	/*
	 * Read a periphery (prph) register: post the 20-bit address
	 * together with the access-enable bits (3 << 24), order the
	 * address write before the data read with a bus barrier, then
	 * fetch the data register.  Callers in this file take the NIC
	 * lock (iwm_nic_lock()) around prph accesses.
	 */
	IWM_WRITE(sc,
	    IWM_HBUS_TARG_PRPH_RADDR, ((addr & 0x000fffff) | (3 << 24)));
	IWM_BARRIER_READ_WRITE(sc);
	return IWM_READ(sc, IWM_HBUS_TARG_PRPH_RDAT);
}
    801 
void
iwm_write_prph(struct iwm_softc *sc, uint32_t addr, uint32_t val)
{
	/*
	 * Write a periphery (prph) register: post the target address
	 * with the access-enable bits, barrier, then write the data
	 * register.  Callers in this file take the NIC lock around
	 * prph accesses.
	 */
	IWM_WRITE(sc,
	    IWM_HBUS_TARG_PRPH_WADDR, ((addr & 0x000fffff) | (3 << 24)));
	IWM_BARRIER_WRITE(sc);
	IWM_WRITE(sc, IWM_HBUS_TARG_PRPH_WDAT, val);
}
    810 
    811 /* iwlwifi: pcie/trans.c */
    812 int
    813 iwm_read_mem(struct iwm_softc *sc, uint32_t addr, void *buf, int dwords)
    814 {
    815 	int offs, ret = 0;
    816 	uint32_t *vals = buf;
    817 
    818 	if (iwm_nic_lock(sc)) {
    819 		IWM_WRITE(sc, IWM_HBUS_TARG_MEM_RADDR, addr);
    820 		for (offs = 0; offs < dwords; offs++)
    821 			vals[offs] = IWM_READ(sc, IWM_HBUS_TARG_MEM_RDAT);
    822 		iwm_nic_unlock(sc);
    823 	} else {
    824 		ret = EBUSY;
    825 	}
    826 	return ret;
    827 }
    828 
    829 /* iwlwifi: pcie/trans.c */
    830 int
    831 iwm_write_mem(struct iwm_softc *sc, uint32_t addr, const void *buf, int dwords)
    832 {
    833 	int offs;
    834 	const uint32_t *vals = buf;
    835 
    836 	if (iwm_nic_lock(sc)) {
    837 		IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WADDR, addr);
    838 		/* WADDR auto-increments */
    839 		for (offs = 0; offs < dwords; offs++) {
    840 			uint32_t val = vals ? vals[offs] : 0;
    841 			IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WDAT, val);
    842 		}
    843 		iwm_nic_unlock(sc);
    844 	} else {
    845 		DPRINTF(("%s: write_mem failed\n", DEVNAME(sc)));
    846 		return EBUSY;
    847 	}
    848 	return 0;
    849 }
    850 
    851 int
    852 iwm_write_mem32(struct iwm_softc *sc, uint32_t addr, uint32_t val)
    853 {
    854 	return iwm_write_mem(sc, addr, &val, 1);
    855 }
    856 
    857 int
    858 iwm_poll_bit(struct iwm_softc *sc, int reg,
    859 	uint32_t bits, uint32_t mask, int timo)
    860 {
    861 	for (;;) {
    862 		if ((IWM_READ(sc, reg) & mask) == (bits & mask)) {
    863 			return 1;
    864 		}
    865 		if (timo < 10) {
    866 			return 0;
    867 		}
    868 		timo -= 10;
    869 		DELAY(10);
    870 	}
    871 }
    872 
int
iwm_nic_lock(struct iwm_softc *sc)
{
	int rv = 0;

	/*
	 * Request MAC access and poll (budget 15000, 10us ticks) for
	 * the access-enable state.  The poll mask also includes
	 * GOING_TO_SLEEP, presumably so a MAC headed to sleep is not
	 * reported as ready — TODO confirm against CSR documentation.
	 * Returns 1 when access was granted, 0 otherwise.
	 */
	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	if (iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
	     | IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP, 15000)) {
	    	rv = 1;
	} else {
		/* jolt: on failure, kick the device with a forced NMI */
		IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_FORCE_NMI);
	}

	return rv;
}
    893 
void
iwm_nic_unlock(struct iwm_softc *sc)
{
	/* Drop the MAC access request taken by iwm_nic_lock(). */
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}
    900 
void
iwm_set_bits_mask_prph(struct iwm_softc *sc,
	uint32_t reg, uint32_t bits, uint32_t mask)
{
	uint32_t val;

	/*
	 * Read-modify-write a prph register: keep only the bits in
	 * "mask", then OR in "bits".
	 * XXX: no error path? — if the NIC lock cannot be taken the
	 * update is silently dropped.
	 */
	if (iwm_nic_lock(sc)) {
		val = iwm_read_prph(sc, reg) & mask;
		val |= bits;
		iwm_write_prph(sc, reg, val);
		iwm_nic_unlock(sc);
	}
}
    915 
void
iwm_set_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
{
	/* OR "bits" into a prph register, preserving all other bits. */
	iwm_set_bits_mask_prph(sc, reg, bits, ~0);
}
    921 
void
iwm_clear_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
{
	/* Clear "bits" in a prph register: keep everything but them. */
	iwm_set_bits_mask_prph(sc, reg, 0, ~bits);
}
    927 
    928 /*
    929  * DMA resource routines
    930  */
    931 
    932 int
    933 iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
    934     bus_size_t size, bus_size_t alignment)
    935 {
    936 	int nsegs, error;
    937 	void *va;
    938 
    939 	dma->tag = tag;
    940 	dma->size = size;
    941 
    942 	error = bus_dmamap_create(tag, size, 1, size, 0, BUS_DMA_NOWAIT,
    943 	    &dma->map);
    944 	if (error != 0)
    945 		goto fail;
    946 
    947 	error = bus_dmamem_alloc(tag, size, alignment, 0, &dma->seg, 1, &nsegs,
    948 	    BUS_DMA_NOWAIT);
    949 	if (error != 0)
    950 		goto fail;
    951 
    952 	error = bus_dmamem_map(tag, &dma->seg, 1, size, &va,
    953 	    BUS_DMA_NOWAIT);
    954 	if (error != 0)
    955 		goto fail;
    956 	dma->vaddr = va;
    957 
    958 	error = bus_dmamap_load(tag, dma->map, dma->vaddr, size, NULL,
    959 	    BUS_DMA_NOWAIT);
    960 	if (error != 0)
    961 		goto fail;
    962 
    963 	memset(dma->vaddr, 0, size);
    964 	bus_dmamap_sync(tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE);
    965 	dma->paddr = dma->map->dm_segs[0].ds_addr;
    966 
    967 	return 0;
    968 
    969 fail:	iwm_dma_contig_free(dma);
    970 	return error;
    971 }
    972 
void
iwm_dma_contig_free(struct iwm_dma_info *dma)
{
	/*
	 * Tear down a DMA area set up by iwm_dma_contig_alloc().
	 * Safe on a partially initialized area: unload/unmap/free run
	 * only when the memory was mapped (vaddr != NULL), and the map
	 * is destroyed only when it was created.  Note this assumes
	 * the segment exists iff vaddr is set.
	 */
	if (dma->map != NULL) {
		if (dma->vaddr != NULL) {
			bus_dmamap_sync(dma->tag, dma->map, 0, dma->size,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(dma->tag, dma->map);
			bus_dmamem_unmap(dma->tag, dma->vaddr, dma->size);
			bus_dmamem_free(dma->tag, &dma->seg, 1);
			dma->vaddr = NULL;
		}
		bus_dmamap_destroy(dma->tag, dma->map);
		dma->map = NULL;
	}
}
    989 
    990 /* fwmem is used to load firmware onto the card */
int
iwm_alloc_fwmem(struct iwm_softc *sc)
{
	/* Must be aligned on a 16-byte boundary. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
	    sc->sc_fwdmasegsz, 16);
}
    998 
void
iwm_free_fwmem(struct iwm_softc *sc)
{
	/* Release the firmware staging DMA area. */
	iwm_dma_contig_free(&sc->fw_dma);
}
   1004 
   1005 /* tx scheduler rings.  not used? */
   1006 int
   1007 iwm_alloc_sched(struct iwm_softc *sc)
   1008 {
   1009 	int rv;
   1010 
   1011 	/* TX scheduler rings must be aligned on a 1KB boundary. */
   1012 	rv = iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
   1013 	    __arraycount(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
   1014 	return rv;
   1015 }
   1016 
void
iwm_free_sched(struct iwm_softc *sc)
{
	/* Release the TX scheduler DMA area. */
	iwm_dma_contig_free(&sc->sched_dma);
}
   1022 
   1023 /* keep-warm page is used internally by the card.  see iwl-fh.h for more info */
int
iwm_alloc_kw(struct iwm_softc *sc)
{
	/* One 4KB "keep warm" page, 4KB-aligned. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
}
   1029 
void
iwm_free_kw(struct iwm_softc *sc)
{
	/* Release the "keep warm" page. */
	iwm_dma_contig_free(&sc->kw_dma);
}
   1035 
   1036 /* interrupt cause table */
int
iwm_alloc_ict(struct iwm_softc *sc)
{
	/* ICT table, aligned so its base fits the shifted register. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
	    IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
}
   1043 
void
iwm_free_ict(struct iwm_softc *sc)
{
	/* Release the interrupt cause table. */
	iwm_dma_contig_free(&sc->ict_dma);
}
   1049 
int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	bus_size_t size;
	int i, error;

	/*
	 * Set up one RX ring: descriptor area, status area, and one
	 * DMA map + mbuf per slot (filled via iwm_rx_addbuf()).
	 * On any failure everything acquired so far is released via
	 * iwm_free_rx_ring().
	 */
	ring->cur = 0;

	/* Allocate RX descriptors (256-byte aligned). */
	/* Each descriptor is one 32-bit receive-buffer pointer word. */
	size = IWM_RX_RING_COUNT * sizeof(uint32_t);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate RX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/* Allocate RX status area (16-byte aligned). */
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
	    sizeof(*ring->stat), 16);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate RX status DMA memory\n");
		goto fail;
	}
	ring->stat = ring->stat_dma.vaddr;

	/*
	 * Allocate and map RX buffers.
	 */
	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];

		memset(data, 0, sizeof(*data));
		error = bus_dmamap_create(sc->sc_dmat, IWM_RBUF_SIZE, 1,
		    IWM_RBUF_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &data->map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create RX buf DMA map\n");
			goto fail;
		}

		/* Attach an mbuf and publish its address to slot i. */
		if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
			goto fail;
		}
	}
	return 0;

fail:	iwm_free_rx_ring(sc, ring);
	return error;
}
   1103 
void
iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	int ntries;

	/*
	 * Stop the RX DMA channel and wait (up to 1000 * 10us) for it
	 * to report idle, then rewind the ring's software index.
	 * If the NIC lock is unavailable the hardware step is skipped.
	 */
	if (iwm_nic_lock(sc)) {
		IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
		for (ntries = 0; ntries < 1000; ntries++) {
			if (IWM_READ(sc, IWM_FH_MEM_RSSR_RX_STATUS_REG) &
			    IWM_FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE)
				break;
			DELAY(10);
		}
		iwm_nic_unlock(sc);
	}
	ring->cur = 0;
}
   1121 
   1122 void
   1123 iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
   1124 {
   1125 	int i;
   1126 
   1127 	iwm_dma_contig_free(&ring->desc_dma);
   1128 	iwm_dma_contig_free(&ring->stat_dma);
   1129 
   1130 	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
   1131 		struct iwm_rx_data *data = &ring->data[i];
   1132 
   1133 		if (data->m != NULL) {
   1134 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
   1135 			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
   1136 			bus_dmamap_unload(sc->sc_dmat, data->map);
   1137 			m_freem(data->m);
   1138 		}
   1139 		if (data->map != NULL)
   1140 			bus_dmamap_destroy(sc->sc_dmat, data->map);
   1141 	}
   1142 }
   1143 
int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	int i, error;

	/*
	 * Set up TX ring "qid": the TFD descriptor area always, plus a
	 * command buffer area and per-slot DMA maps for the low queues
	 * (qid <= IWM_MVM_CMD_QUEUE).  Pre-computes the per-slot
	 * command and scratch bus addresses inside the command area.
	 */
	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate TX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/*
	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
	 * to allocate commands space for other rings.
	 */
	if (qid > IWM_MVM_CMD_QUEUE)
		return 0;

	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate TX cmd DMA memory\n");
		goto fail;
	}
	ring->cmd = ring->cmd_dma.vaddr;

	/* Walk the command area, one iwm_device_cmd per slot. */
	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		data->cmd_paddr = paddr;
		/* scratch lives just past the command header. */
		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
		    + offsetof(struct iwm_tx_cmd, scratch);
		paddr += sizeof(struct iwm_device_cmd);

		error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    IWM_NUM_OF_TBS, MCLBYTES, 0, BUS_DMA_NOWAIT, &data->map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create TX buf DMA map\n");
			goto fail;
		}
	}
	/* Sanity: the walk must end exactly at the area's end. */
	KASSERT(paddr == ring->cmd_dma.paddr + size);
	return 0;

fail:	iwm_free_tx_ring(sc, ring);
	return error;
}
   1204 
void
iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	/*
	 * Drop every queued frame, zero the descriptors, and rewind
	 * the ring's software state.  The DMA areas themselves are
	 * kept (contrast iwm_free_tx_ring()).
	 */
	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	/* Clear TX descriptors. */
	memset(ring->desc, 0, ring->desc_dma.size);
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0,
	    ring->desc_dma.size, BUS_DMASYNC_PREWRITE);
	/* This queue can no longer be full. */
	sc->qfullmsk &= ~(1 << ring->qid);
	ring->queued = 0;
	ring->cur = 0;
}
   1229 
   1230 void
   1231 iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
   1232 {
   1233 	int i;
   1234 
   1235 	iwm_dma_contig_free(&ring->desc_dma);
   1236 	iwm_dma_contig_free(&ring->cmd_dma);
   1237 
   1238 	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
   1239 		struct iwm_tx_data *data = &ring->data[i];
   1240 
   1241 		if (data->m != NULL) {
   1242 			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
   1243 			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   1244 			bus_dmamap_unload(sc->sc_dmat, data->map);
   1245 			m_freem(data->m);
   1246 		}
   1247 		if (data->map != NULL)
   1248 			bus_dmamap_destroy(sc->sc_dmat, data->map);
   1249 	}
   1250 }
   1251 
   1252 /*
   1253  * High-level hardware frobbing routines
   1254  */
   1255 
void
iwm_enable_rfkill_int(struct iwm_softc *sc)
{
	/* Mask everything except RF-kill state-change interrupts. */
	sc->sc_intmask = IWM_CSR_INT_BIT_RF_KILL;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
   1262 
   1263 int
   1264 iwm_check_rfkill(struct iwm_softc *sc)
   1265 {
   1266 	uint32_t v;
   1267 	int s;
   1268 	int rv;
   1269 
   1270 	s = splnet();
   1271 
   1272 	/*
   1273 	 * "documentation" is not really helpful here:
   1274 	 *  27:	HW_RF_KILL_SW
   1275 	 *	Indicates state of (platform's) hardware RF-Kill switch
   1276 	 *
   1277 	 * But apparently when it's off, it's on ...
   1278 	 */
   1279 	v = IWM_READ(sc, IWM_CSR_GP_CNTRL);
   1280 	rv = (v & IWM_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) == 0;
   1281 	if (rv) {
   1282 		sc->sc_flags |= IWM_FLAG_RFKILL;
   1283 	} else {
   1284 		sc->sc_flags &= ~IWM_FLAG_RFKILL;
   1285 	}
   1286 
   1287 	splx(s);
   1288 	return rv;
   1289 }
   1290 
void
iwm_enable_interrupts(struct iwm_softc *sc)
{
	/* Unmask the full default interrupt set and remember it. */
	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
   1297 
void
iwm_restore_interrupts(struct iwm_softc *sc)
{
	/* Re-apply the interrupt mask last stashed in sc_intmask. */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
   1303 
void
iwm_disable_interrupts(struct iwm_softc *sc)
{
	int s = splnet();

	/* disable interrupts */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	/* acknowledge all interrupts, both CSR and FH causes */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);

	splx(s);
}
   1318 
void
iwm_ict_reset(struct iwm_softc *sc)
{
	/*
	 * (Re)initialize the interrupt cause table: clear it, point
	 * the hardware at it, flag the driver to use ICT mode, then
	 * re-enable interrupts.  Interrupts are held off throughout.
	 */
	iwm_disable_interrupts(sc);

	/* Reset ICT table. */
	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT table (4KB aligned). */
	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
	    IWM_CSR_DRAM_INT_TBL_ENABLE
	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWM_FLAG_USE_ICT;

	/* Re-enable interrupts, acknowledging any stale ones first. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);
}
   1341 
#define IWM_HW_READY_TIMEOUT 50
/*
 * Tell the device the driver is ready and poll for the NIC_READY
 * handshake bit.  Returns nonzero once the bit is observed set.
 */
int
iwm_set_hw_ready(struct iwm_softc *sc)
{
	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

        return iwm_poll_bit(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
	    IWM_HW_READY_TIMEOUT);
}
#undef IWM_HW_READY_TIMEOUT
   1355 
int
iwm_prepare_card_hw(struct iwm_softc *sc)
{
	int rv = 0;
	int t = 0;

	/*
	 * Handshake with the hardware until it reports ready: try
	 * once, then assert PREPARE and retry every 200 DELAY units
	 * for a total budget of 150000.  Returns 0 when ready,
	 * ETIMEDOUT otherwise.
	 */
	if (!iwm_set_hw_ready(sc))
		goto out;

	/* If HW is not ready, prepare the conditions to check again */
	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_PREPARE);

	do {
		if (iwm_set_hw_ready(sc))
			goto out;
		DELAY(200);
		t += 200;
	} while (t < 150000);

	rv = ETIMEDOUT;

 out:
	return rv;
}
   1381 
void
iwm_apm_config(struct iwm_softc *sc)
{
	pcireg_t reg;

	/*
	 * Mirror the PCIe ASPM L1 setting into the device's GIO
	 * register: L0S enabled when L1 is active, disabled otherwise.
	 * The comments below preserve a wry note about the Linux
	 * driver logging the opposite of what one might expect.
	 */
	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
	    sc->sc_cap_off + PCIE_LCSR);
	if (reg & PCIE_LCSR_ASPM_L1) {
		/* Um the Linux driver prints "Disabling L0S for this one ... */
		IWM_SETBITS(sc, IWM_CSR_GIO_REG,
		    IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
	} else {
		/* ... and "Enabling" here */
		IWM_CLRBITS(sc, IWM_CSR_GIO_REG,
		    IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
	}
}
   1399 
   1400 /*
   1401  * Start up NIC's basic functionality after it has been reset
   1402  * (e.g. after platform boot, or shutdown via iwm_pcie_apm_stop())
   1403  * NOTE:  This does not load uCode nor start the embedded processor
   1404  */
   1405 int
   1406 iwm_apm_init(struct iwm_softc *sc)
   1407 {
   1408 	int error = 0;
   1409 
   1410 	DPRINTF(("iwm apm start\n"));
   1411 
   1412 	/* Disable L0S exit timer (platform NMI Work/Around) */
   1413 	IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
   1414 	    IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
   1415 
   1416 	/*
   1417 	 * Disable L0s without affecting L1;
   1418 	 *  don't wait for ICH L0s (ICH bug W/A)
   1419 	 */
   1420 	IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
   1421 	    IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
   1422 
   1423 	/* Set FH wait threshold to maximum (HW error during stress W/A) */
   1424 	IWM_SETBITS(sc, IWM_CSR_DBG_HPET_MEM_REG, IWM_CSR_DBG_HPET_MEM_REG_VAL);
   1425 
   1426 	/*
   1427 	 * Enable HAP INTA (interrupt from management bus) to
   1428 	 * wake device's PCI Express link L1a -> L0s
   1429 	 */
   1430 	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
   1431 	    IWM_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
   1432 
   1433 	iwm_apm_config(sc);
   1434 
   1435 #if 0 /* not for 7k */
   1436 	/* Configure analog phase-lock-loop before activating to D0A */
   1437 	if (trans->cfg->base_params->pll_cfg_val)
   1438 		IWM_SETBITS(trans, IWM_CSR_ANA_PLL_CFG,
   1439 		    trans->cfg->base_params->pll_cfg_val);
   1440 #endif
   1441 
   1442 	/*
   1443 	 * Set "initialization complete" bit to move adapter from
   1444 	 * D0U* --> D0A* (powered-up active) state.
   1445 	 */
   1446 	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL, IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
   1447 
   1448 	/*
   1449 	 * Wait for clock stabilization; once stabilized, access to
   1450 	 * device-internal resources is supported, e.g. iwm_write_prph()
   1451 	 * and accesses to uCode SRAM.
   1452 	 */
   1453 	if (!iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
   1454 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
   1455 	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
   1456 		aprint_error_dev(sc->sc_dev,
   1457 		    "timeout waiting for clock stabilization\n");
   1458 		goto out;
   1459 	}
   1460 
   1461 	/*
   1462 	 * This is a bit of an abuse - This is needed for 7260 / 3160
   1463 	 * only check host_interrupt_operation_mode even if this is
   1464 	 * not related to host_interrupt_operation_mode.
   1465 	 *
   1466 	 * Enable the oscillator to count wake up time for L1 exit. This
   1467 	 * consumes slightly more power (100uA) - but allows to be sure
   1468 	 * that we wake up from L1 on time.
   1469 	 *
   1470 	 * This looks weird: read twice the same register, discard the
   1471 	 * value, set a bit, and yet again, read that same register
   1472 	 * just to discard the value. But that's the way the hardware
   1473 	 * seems to like it.
   1474 	 */
   1475 	iwm_read_prph(sc, IWM_OSC_CLK);
   1476 	iwm_read_prph(sc, IWM_OSC_CLK);
   1477 	iwm_set_bits_prph(sc, IWM_OSC_CLK, IWM_OSC_CLK_FORCE_CONTROL);
   1478 	iwm_read_prph(sc, IWM_OSC_CLK);
   1479 	iwm_read_prph(sc, IWM_OSC_CLK);
   1480 
   1481 	/*
   1482 	 * Enable DMA clock and wait for it to stabilize.
   1483 	 *
   1484 	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
   1485 	 * do not disable clocks.  This preserves any hardware bits already
   1486 	 * set by default in "CLK_CTRL_REG" after reset.
   1487 	 */
   1488 	iwm_write_prph(sc, IWM_APMG_CLK_EN_REG, IWM_APMG_CLK_VAL_DMA_CLK_RQT);
   1489 	//kpause("iwmapm", 0, mstohz(20), NULL);
   1490 	DELAY(20);
   1491 
   1492 	/* Disable L1-Active */
   1493 	iwm_set_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
   1494 	    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
   1495 
   1496 	/* Clear the interrupt in APMG if the NIC is in RFKILL */
   1497 	iwm_write_prph(sc, IWM_APMG_RTC_INT_STT_REG,
   1498 	    IWM_APMG_RTC_INT_STT_RFKILL);
   1499 
   1500  out:
   1501 	if (error)
   1502 		aprint_error_dev(sc->sc_dev, "apm init error %d\n", error);
   1503 	return error;
   1504 }
   1505 
   1506 /* iwlwifi/pcie/trans.c */
void
iwm_apm_stop(struct iwm_softc *sc)
{
	/* stop device's busmaster DMA activity */
	IWM_SETBITS(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_STOP_MASTER);

	/* Wait (budget 100, 10us ticks) for the master to disable. */
	if (!iwm_poll_bit(sc, IWM_CSR_RESET,
	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED,
	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100))
		aprint_error_dev(sc->sc_dev, "timeout waiting for master\n");
	DPRINTF(("iwm apm stop\n"));
}
   1519 
   1520 /* iwlwifi pcie/trans.c */
int
iwm_start_hw(struct iwm_softc *sc)
{
	int error;

	/*
	 * Cold-start sequence: handshake with the hardware, reset the
	 * whole device, run APM init, then arm the RF-kill interrupt
	 * and sample the current RF-kill state.
	 */
	if ((error = iwm_prepare_card_hw(sc)) != 0)
		return error;

        /* Reset the entire device */
	IWM_WRITE(sc, IWM_CSR_RESET,
	    IWM_CSR_RESET_REG_FLAG_SW_RESET |
	    IWM_CSR_RESET_REG_FLAG_NEVO_RESET);
	DELAY(10);

	if ((error = iwm_apm_init(sc)) != 0)
		return error;

	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);

	return 0;
}
   1543 
   1544 /* iwlwifi pcie/trans.c */
   1545 
void
iwm_stop_device(struct iwm_softc *sc)
{
	int chnl, ntries;
	int qid;

	/*
	 * Full shutdown: quiesce interrupts, the TX scheduler, and
	 * every DMA channel; reset the rings; power down the APM and
	 * on-board processor.  RF-kill notification stays armed so a
	 * switch flip is still seen while the device is down.
	 */

	/* tell the device to stop sending interrupts */
	iwm_disable_interrupts(sc);

	/* device going down, Stop using ICT table */
	sc->sc_flags &= ~IWM_FLAG_USE_ICT;

	/* stop tx and rx.  tx and rx bits, as usual, are from if_iwn */

	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Stop all DMA channels and wait (200 * 20us each) for idle. */
	if (iwm_nic_lock(sc)) {
		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
			IWM_WRITE(sc,
			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
			for (ntries = 0; ntries < 200; ntries++) {
				uint32_t r;

				r = IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG);
				if (r & IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(
				    chnl))
					break;
				DELAY(20);
			}
		}
		iwm_nic_unlock(sc);
	}

	/* Stop RX ring. */
	iwm_reset_rx_ring(sc, &sc->rxq);

	/* Reset all TX rings. */
	for (qid = 0; qid < __arraycount(sc->txq); qid++)
		iwm_reset_tx_ring(sc, &sc->txq[qid]);

	/*
	 * Power-down device's busmaster DMA clocks
	 */
	iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG, IWM_APMG_CLK_VAL_DMA_CLK_RQT);
	DELAY(5);

	/* Make sure (redundant) we've released our request to stay awake */
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwm_apm_stop(sc);

	/*
	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * Clean again the interrupt here.
	 */
	iwm_disable_interrupts(sc);
	/* stop and reset the on-board processor */
	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_NEVO_RESET);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);
}
   1614 
   1615 /* iwlwifi pcie/trans.c (always main power) */
void
iwm_set_pwr(struct iwm_softc *sc)
{
	/* Select VMAIN as the (always main) power source. */
	iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
	    IWM_APMG_PS_CTRL_VAL_PWR_SRC_VMAIN, ~IWM_APMG_PS_CTRL_MSK_PWR_SRC);
}
   1622 
   1623 /* iwlwifi: mvm/ops.c */
void
iwm_mvm_nic_config(struct iwm_softc *sc)
{
	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
	uint32_t reg_val = 0;

	/*
	 * Program the HW_IF_CONFIG register from the firmware's PHY
	 * configuration word (sc_fw_phy_config, set from the PHY_SKU
	 * TLV) and the hardware revision, then apply the early-power-
	 * off workaround.
	 */
	radio_cfg_type = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
	radio_cfg_step = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
	radio_cfg_dash = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
	    IWM_FW_PHY_CFG_RADIO_DASH_POS;

	/* SKU control */
	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

	/* radio configuration */
	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);

        DPRINTF(("Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
                       radio_cfg_step, radio_cfg_dash));

	/*
	 * W/A : NIC is stuck in a reset state after Early PCIe power off
	 * (PCIe power is lost before PERST# is asserted), causing ME FW
	 * to lose ownership and not being able to obtain it back.
	 */
	iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
	    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
	    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
}
   1662 
int
iwm_nic_rx_init(struct iwm_softc *sc)
{
	/*
	 * Program the RX DMA engine: stop it, point it at the ring's
	 * descriptor and status areas, enable it, set interrupt
	 * coalescing, and seed the write pointer.  Returns EBUSY when
	 * the NIC lock cannot be taken.
	 */
	if (!iwm_nic_lock(sc))
		return EBUSY;

	/*
	 * Initialize RX ring.  This is from the iwn driver.
	 */
	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));

	/* stop DMA */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Set physical address of RX ring (256-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);

	/* Set physical address of RX status (16-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);

	/* Enable RX. */
	/*
	 * Note: Linux driver also sets this:
	 *  (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
	 *
	 * It causes weird behavior.  YMMV.
	 */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL		|
	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY		|  /* HW bug */
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL	|
	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K		|
	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);

	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
	IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);

	/*
	 * Thus sayeth el jefe (iwlwifi) via a comment:
	 *
	 * This value should initially be 0 (before preparing any RBs),
	 * should be 8 after preparing the first 8 RBs (for example).
	 */
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);

	iwm_nic_unlock(sc);

	return 0;
}
   1718 
int
iwm_nic_tx_init(struct iwm_softc *sc)
{
	int qid;

	/*
	 * Program the TX side: park the scheduler, set the "keep warm"
	 * page address, and point each hardware queue at its ring's
	 * descriptor area.  Returns EBUSY when the NIC lock cannot be
	 * taken.
	 */
	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Deactivate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Set physical address of "keep warm" page (16-byte aligned). */
	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);

	/* Initialize TX rings. */
	for (qid = 0; qid < __arraycount(sc->txq); qid++) {
		struct iwm_tx_ring *txq = &sc->txq[qid];

		/* Set physical address of TX ring (256-byte aligned). */
		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
		    txq->desc_dma.paddr >> 8);
		DPRINTF(("loading ring %d descriptors (%p) at %lx\n",
		    qid, txq->desc, txq->desc_dma.paddr >> 8));
	}
	iwm_nic_unlock(sc);

	return 0;
}
   1747 
   1748 int
   1749 iwm_nic_init(struct iwm_softc *sc)
   1750 {
   1751 	int error;
   1752 
   1753 	iwm_apm_init(sc);
   1754 	iwm_set_pwr(sc);
   1755 
   1756 	iwm_mvm_nic_config(sc);
   1757 
   1758 	if ((error = iwm_nic_rx_init(sc)) != 0)
   1759 		return error;
   1760 
   1761 	/*
   1762 	 * Ditto for TX, from iwn
   1763 	 */
   1764 	if ((error = iwm_nic_tx_init(sc)) != 0)
   1765 		return error;
   1766 
   1767 	DPRINTF(("shadow registers enabled\n"));
   1768 	IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
   1769 
   1770         return 0;
   1771 }
   1772 
/*
 * Hardware TX FIFO numbers.  BK/BE/VI/VO take 0-3; 4 is not used
 * here, hence the explicit = 5 for the multicast FIFO.
 */
enum iwm_mvm_tx_fifo {
	IWM_MVM_TX_FIFO_BK = 0,
	IWM_MVM_TX_FIFO_BE,
	IWM_MVM_TX_FIFO_VI,
	IWM_MVM_TX_FIFO_VO,
	IWM_MVM_TX_FIFO_MCAST = 5,
};
   1780 
/* Map an access-category index to its hardware TX FIFO. */
const uint8_t iwm_mvm_ac_to_tx_fifo[] = {
        IWM_MVM_TX_FIFO_VO,
        IWM_MVM_TX_FIFO_VI,
        IWM_MVM_TX_FIFO_BE,
        IWM_MVM_TX_FIFO_BK,
};
   1787 
/*
 * Activate TX queue 'qid' in the scheduler and bind it to hardware
 * FIFO 'fifo'.  The queue is deactivated first, its read/write
 * pointers are reset, its scheduler context (window size and frame
 * limit) is written to SRAM, and it is finally marked active.
 * NB: failure to take the NIC lock is only logged, not reported to
 * the caller (see the XXX below).
 */
void
iwm_enable_txq(struct iwm_softc *sc, int qid, int fifo)
{
	if (!iwm_nic_lock(sc)) {
		DPRINTF(("%s: cannot enable txq %d\n", DEVNAME(sc), qid));
		return; /* XXX return EBUSY */
	}

	/* unactivate before configuration */
	iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
	    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
	    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));

	if (qid != IWM_MVM_CMD_QUEUE) {
		/* Select queue chaining for all non-command queues. */
		iwm_set_bits_prph(sc, IWM_SCD_QUEUECHAIN_SEL, (1 << qid));
	}

	/* Take the queue out of aggregation mode. */
	iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));

	/* Reset write and read pointers to the start of the ring. */
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
	iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);

	/* Clear the first word of the queue's scheduler context. */
	iwm_write_mem32(sc, sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
	/* Set scheduler window size and frame limit. */
	iwm_write_mem32(sc,
	    sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
	    sizeof(uint32_t),
	    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
	    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
	    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
	    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

	/* Mark the queue active and attach it to the requested FIFO. */
	iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
	    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
	    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
	    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
	    IWM_SCD_QUEUE_STTS_REG_MSK);

	iwm_nic_unlock(sc);

	DPRINTF(("enabled txq %d FIFO %d\n", qid, fifo));
}
   1830 
/*
 * Setup performed after the firmware's "alive" notification:
 * verify the scheduler SRAM base the firmware reported, reset the
 * ICT interrupt table, clear the TX scheduler context in SRAM,
 * enable the command queue and all FH TX DMA channels, and allow
 * PCIe L1-Active.  Returns EBUSY if the NIC lock cannot be taken,
 * EINVAL on scheduler address mismatch, or the error from writing
 * SRAM.
 */
int
iwm_post_alive(struct iwm_softc *sc)
{
	int nwords;
	int error, chnl;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Firmware and driver must agree on the scheduler SRAM base. */
	if (sc->sched_base != iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR)) {
		DPRINTF(("%s: sched addr mismatch", DEVNAME(sc)));
		error = EINVAL;
		goto out;
	}

	iwm_ict_reset(sc);

	/* Clear TX scheduler state in SRAM. */
	nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND)
	    / sizeof(uint32_t);
	error = iwm_write_mem(sc,
	    sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
	    NULL, nwords);
	if (error)
		goto out;

	/* Set physical address of TX scheduler rings (1KB aligned). */
	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);

	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);

	/* enable command channel */
	iwm_enable_txq(sc, IWM_MVM_CMD_QUEUE, 7);

	/* Activate all TX queues in the scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);

	/* Enable DMA channels. */
	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
	}

	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

        /* Enable L1-Active */
	iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
	    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

 out:
 	iwm_nic_unlock(sc);
	return error;
}
   1886 
   1887 /*
   1888  * PHY db
   1889  * iwlwifi/iwl-phy-db.c
   1890  */
   1891 
   1892 /*
   1893  * BEGIN iwl-phy-db.c
   1894  */
   1895 
/*
 * PHY DB section types, as exchanged with the firmware in
 * IWM_PHY_DB_CMD and calibration result notifications.
 * IWM_PHY_DB_MAX is a sentinel, not a real section type.
 */
enum iwm_phy_db_section_type {
	IWM_PHY_DB_CFG = 1,
	IWM_PHY_DB_CALIB_NCH,
	IWM_PHY_DB_UNUSED,
	IWM_PHY_DB_CALIB_CHG_PAPD,
	IWM_PHY_DB_CALIB_CHG_TXP,
	IWM_PHY_DB_MAX
};
   1904 
#define IWM_PHY_DB_CMD 0x6c /* TEMP API - The actual is 0x8c */

/*
 * phy db - configure operational ucode
 * Host command header sent ahead of each PHY DB section payload.
 */
struct iwm_phy_db_cmd {
	uint16_t type;		/* enum iwm_phy_db_section_type, LE */
	uint16_t length;	/* payload length in bytes, LE */
	uint8_t data[];		/* flexible payload, 'length' bytes */
} __packed;

/* for parsing of tx power channel group data that comes from the firmware*/
struct iwm_phy_db_chg_txp {
	uint32_t space;
	uint16_t max_channel_idx;	/* highest channel index covered, LE */
} __packed;

/*
 * phy db - Receive phy db chunk after calibrations
 * Layout of the calibration result notification from the firmware.
 */
struct iwm_calib_res_notif_phy_db {
	uint16_t type;		/* enum iwm_phy_db_section_type, LE */
	uint16_t length;	/* payload length in bytes, LE */
	uint8_t data[];		/* flexible payload, 'length' bytes */
} __packed;
   1930 
   1931 /*
   1932  * get phy db section: returns a pointer to a phy db section specified by
   1933  * type and channel group id.
   1934  */
   1935 static struct iwm_phy_db_entry *
   1936 iwm_phy_db_get_section(struct iwm_softc *sc,
   1937 	enum iwm_phy_db_section_type type, uint16_t chg_id)
   1938 {
   1939 	struct iwm_phy_db *phy_db = &sc->sc_phy_db;
   1940 
   1941 	if (type >= IWM_PHY_DB_MAX)
   1942 		return NULL;
   1943 
   1944 	switch (type) {
   1945 	case IWM_PHY_DB_CFG:
   1946 		return &phy_db->cfg;
   1947 	case IWM_PHY_DB_CALIB_NCH:
   1948 		return &phy_db->calib_nch;
   1949 	case IWM_PHY_DB_CALIB_CHG_PAPD:
   1950 		if (chg_id >= IWM_NUM_PAPD_CH_GROUPS)
   1951 			return NULL;
   1952 		return &phy_db->calib_ch_group_papd[chg_id];
   1953 	case IWM_PHY_DB_CALIB_CHG_TXP:
   1954 		if (chg_id >= IWM_NUM_TXP_CH_GROUPS)
   1955 			return NULL;
   1956 		return &phy_db->calib_ch_group_txp[chg_id];
   1957 	default:
   1958 		return NULL;
   1959 	}
   1960 	return NULL;
   1961 }
   1962 
/*
 * Store one PHY DB section received from the firmware (in a
 * calibration result notification) into the driver's PHY DB,
 * replacing any previous contents of that section.  Returns EINVAL
 * for an unknown section/group, ENOMEM if the copy cannot be
 * allocated (KM_NOSLEEP: may be called from interrupt context).
 */
static int
iwm_phy_db_set_section(struct iwm_softc *sc,
	struct iwm_calib_res_notif_phy_db *phy_db_notif)
{
	enum iwm_phy_db_section_type type = le16toh(phy_db_notif->type);
	uint16_t size  = le16toh(phy_db_notif->length);
	struct iwm_phy_db_entry *entry;
	uint16_t chg_id = 0;

	/*
	 * Channel-group sections carry their group id in the first 16
	 * bits of the payload.  NOTE(review): assumes length >= 2 for
	 * these types -- confirm the firmware guarantees this.
	 */
	if (type == IWM_PHY_DB_CALIB_CHG_PAPD ||
	    type == IWM_PHY_DB_CALIB_CHG_TXP)
		chg_id = le16toh(*(uint16_t *)phy_db_notif->data);

	entry = iwm_phy_db_get_section(sc, type, chg_id);
	if (!entry)
		return EINVAL;

	/* Drop any previously stored copy before replacing it. */
	if (entry->data)
		kmem_free(entry->data, entry->size);
	entry->data = kmem_alloc(size, KM_NOSLEEP);
	if (!entry->data) {
		entry->size = 0;
		return ENOMEM;
	}
	memcpy(entry->data, phy_db_notif->data, size);
	entry->size = size;

	DPRINTFN(10, ("%s(%d): [PHYDB]SET: Type %d , Size: %d, data: %p\n",
	    __func__, __LINE__, type, size, entry->data));

	return 0;
}
   1995 
/*
 * Check whether a channel number is one the PHY DB code knows about:
 * 2.4GHz channels up to 14, and the 5GHz channels 36-64 and 100-140
 * (multiples of four) plus 145-165 (4n+1).
 * NOTE(review): values 0..14 all pass the first test, so channel 0
 * is accepted here too; callers rely on later range checks --
 * confirm before tightening.
 */
int
iwm_is_valid_channel(uint16_t ch_id)
{
	if (ch_id <= 14)
		return 1;
	if (ch_id >= 36 && ch_id <= 64)
		return (ch_id % 4) == 0;
	if (ch_id >= 100 && ch_id <= 140)
		return (ch_id % 4) == 0;
	if (ch_id >= 145 && ch_id <= 165)
		return (ch_id % 4) == 1;
	return 0;
}
   2006 
/*
 * Map a channel number to its index in the PHY DB channel tables,
 * or 0xff for a channel iwm_is_valid_channel() rejects.  Indices
 * run 0..13 for channels 1-14 and continue contiguously across the
 * three 5GHz sub-bands.
 */
uint8_t
iwm_ch_id_to_ch_index(uint16_t ch_id)
{
	uint8_t index = 0xff;

	if (iwm_is_valid_channel(ch_id)) {
		if (ch_id <= 14)
			index = ch_id - 1;		/* 1-14  -> 0-13  */
		else if (ch_id <= 64)
			index = (ch_id + 20) / 4;	/* 36-64 -> 14-21 */
		else if (ch_id <= 140)
			index = (ch_id - 12) / 4;	/* 100-140 -> 22-32 */
		else
			index = (ch_id - 13) / 4;	/* 145-165 -> 33-38 */
	}

	return index;
}
   2021 
   2022 
/*
 * Map a channel number to its PAPD calibration channel group
 * (0: 2.4GHz, 1: 36-64, 2: 100-140, 3: the rest), or 0xff for a
 * channel iwm_is_valid_channel() rejects.
 */
uint16_t
iwm_channel_id_to_papd(uint16_t ch_id)
{
	uint16_t group;

	if (!iwm_is_valid_channel(ch_id))
		return 0xff;

	if (ch_id >= 1 && ch_id <= 14)
		group = 0;
	else if (ch_id >= 36 && ch_id <= 64)
		group = 1;
	else if (ch_id >= 100 && ch_id <= 140)
		group = 2;
	else
		group = 3;

	return group;
}
   2037 
   2038 uint16_t
   2039 iwm_channel_id_to_txp(struct iwm_softc *sc, uint16_t ch_id)
   2040 {
   2041 	struct iwm_phy_db *phy_db = &sc->sc_phy_db;
   2042 	struct iwm_phy_db_chg_txp *txp_chg;
   2043 	int i;
   2044 	uint8_t ch_index = iwm_ch_id_to_ch_index(ch_id);
   2045 
   2046 	if (ch_index == 0xff)
   2047 		return 0xff;
   2048 
   2049 	for (i = 0; i < IWM_NUM_TXP_CH_GROUPS; i++) {
   2050 		txp_chg = (void *)phy_db->calib_ch_group_txp[i].data;
   2051 		if (!txp_chg)
   2052 			return 0xff;
   2053 		/*
   2054 		 * Looking for the first channel group that its max channel is
   2055 		 * higher then wanted channel.
   2056 		 */
   2057 		if (le16toh(txp_chg->max_channel_idx) >= ch_index)
   2058 			return i;
   2059 	}
   2060 	return 0xff;
   2061 }
   2062 
   2063 int
   2064 iwm_phy_db_get_section_data(struct iwm_softc *sc,
   2065 	uint32_t type, uint8_t **data, uint16_t *size, uint16_t ch_id)
   2066 {
   2067 	struct iwm_phy_db_entry *entry;
   2068 	uint16_t ch_group_id = 0;
   2069 
   2070 	/* find wanted channel group */
   2071 	if (type == IWM_PHY_DB_CALIB_CHG_PAPD)
   2072 		ch_group_id = iwm_channel_id_to_papd(ch_id);
   2073 	else if (type == IWM_PHY_DB_CALIB_CHG_TXP)
   2074 		ch_group_id = iwm_channel_id_to_txp(sc, ch_id);
   2075 
   2076 	entry = iwm_phy_db_get_section(sc, type, ch_group_id);
   2077 	if (!entry)
   2078 		return EINVAL;
   2079 
   2080 	*data = entry->data;
   2081 	*size = entry->size;
   2082 
   2083 	DPRINTFN(10, ("%s(%d): [PHYDB] GET: Type %d , Size: %d\n",
   2084 		       __func__, __LINE__, type, *size));
   2085 
   2086 	return 0;
   2087 }
   2088 
   2089 int
   2090 iwm_send_phy_db_cmd(struct iwm_softc *sc, uint16_t type,
   2091 	uint16_t length, void *data)
   2092 {
   2093 	struct iwm_phy_db_cmd phy_db_cmd;
   2094 	struct iwm_host_cmd cmd = {
   2095 		.id = IWM_PHY_DB_CMD,
   2096 		.flags = IWM_CMD_SYNC,
   2097 	};
   2098 
   2099 	DPRINTFN(10, ("Sending PHY-DB hcmd of type %d, of length %d\n", type, length));
   2100 
   2101 	/* Set phy db cmd variables */
   2102 	phy_db_cmd.type = le16toh(type);
   2103 	phy_db_cmd.length = le16toh(length);
   2104 
   2105 	/* Set hcmd variables */
   2106 	cmd.data[0] = &phy_db_cmd;
   2107 	cmd.len[0] = sizeof(struct iwm_phy_db_cmd);
   2108 	cmd.data[1] = data;
   2109 	cmd.len[1] = length;
   2110 	cmd.dataflags[1] = IWM_HCMD_DFL_NOCOPY;
   2111 
   2112 	return iwm_send_cmd(sc, &cmd);
   2113 }
   2114 
   2115 static int
   2116 iwm_phy_db_send_all_channel_groups(struct iwm_softc *sc,
   2117 	enum iwm_phy_db_section_type type, uint8_t max_ch_groups)
   2118 {
   2119 	uint16_t i;
   2120 	int err;
   2121 	struct iwm_phy_db_entry *entry;
   2122 
   2123 	/* Send all the channel-specific groups to operational fw */
   2124 	for (i = 0; i < max_ch_groups; i++) {
   2125 		entry = iwm_phy_db_get_section(sc, type, i);
   2126 		if (!entry)
   2127 			return EINVAL;
   2128 
   2129 		if (!entry->size)
   2130 			continue;
   2131 
   2132 		/* Send the requested PHY DB section */
   2133 		err = iwm_send_phy_db_cmd(sc, type, entry->size, entry->data);
   2134 		if (err) {
   2135 			DPRINTF(("%s: Can't SEND phy_db section %d (%d), "
   2136 			    "err %d\n", DEVNAME(sc), type, i, err));
   2137 			return err;
   2138 		}
   2139 
   2140 		DPRINTFN(10, ("Sent PHY_DB HCMD, type = %d num = %d\n", type, i));
   2141 	}
   2142 
   2143 	return 0;
   2144 }
   2145 
/*
 * Upload the complete PHY DB to the runtime (operational) firmware
 * image: the configuration section, the non-channel-specific
 * calibration section, then the per-channel-group PAPD and TX
 * power sections.  Returns the first error encountered, 0 when
 * everything was sent.
 */
int
iwm_send_phy_db_data(struct iwm_softc *sc)
{
	uint8_t *data = NULL;
	uint16_t size = 0;
	int err;

	DPRINTF(("Sending phy db data and configuration to runtime image\n"));

	/* Send PHY DB CFG section */
	err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CFG, &data, &size, 0);
	if (err) {
		DPRINTF(("%s: Cannot get Phy DB cfg section, %d\n",
		    DEVNAME(sc), err));
		return err;
	}

	err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CFG, size, data);
	if (err) {
		DPRINTF(("%s: Cannot send HCMD of Phy DB cfg section, %d\n",
		    DEVNAME(sc), err));
		return err;
	}

	err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CALIB_NCH,
	    &data, &size, 0);
	if (err) {
		DPRINTF(("%s: Cannot get Phy DB non specific channel section, "
		    "%d\n", DEVNAME(sc), err));
		return err;
	}

	err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CALIB_NCH, size, data);
	if (err) {
		DPRINTF(("%s: Cannot send HCMD of Phy DB non specific channel "
		    "sect, %d\n", DEVNAME(sc), err));
		return err;
	}

	/* Send all the channel-specific PAPD data */
	err = iwm_phy_db_send_all_channel_groups(sc,
	    IWM_PHY_DB_CALIB_CHG_PAPD, IWM_NUM_PAPD_CH_GROUPS);
	if (err) {
		DPRINTF(("%s: Cannot send channel specific PAPD groups, %d\n",
		    DEVNAME(sc), err));
		return err;
	}

	/* Send all the TXP channel specific data */
	err = iwm_phy_db_send_all_channel_groups(sc,
	    IWM_PHY_DB_CALIB_CHG_TXP, IWM_NUM_TXP_CH_GROUPS);
	if (err) {
		DPRINTF(("%s: Cannot send channel specific TX power groups, "
		    "%d\n", DEVNAME(sc), err));
		return err;
	}

	DPRINTF(("Finished sending phy db non channel data\n"));
	return 0;
}
   2206 
   2207 /*
   2208  * END iwl-phy-db.c
   2209  */
   2210 
   2211 /*
   2212  * BEGIN iwlwifi/mvm/time-event.c
   2213  */
   2214 
   2215 /*
   2216  * For the high priority TE use a time event type that has similar priority to
   2217  * the FW's action scan priority.
   2218  */
#define IWM_MVM_ROC_TE_TYPE_NORMAL IWM_TE_P2P_DEVICE_DISCOVERABLE
#define IWM_MVM_ROC_TE_TYPE_MGMT_TX IWM_TE_P2P_CLIENT_ASSOC

/* used to convert from time event API v2 to v1 */
/* Mask of the v2 policy bits that encode the v1 dependency policy. */
#define IWM_TE_V2_DEP_POLICY_MSK (IWM_TE_V2_DEP_OTHER | IWM_TE_V2_DEP_TSF |\
			     IWM_TE_V2_EVENT_SOCIOPATHIC)
/* Extract the host-notification bits from an LE v2 policy word. */
static inline uint16_t
iwm_te_v2_get_notify(uint16_t policy)
{
	return le16toh(policy) & IWM_TE_V2_NOTIF_MSK;
}
   2230 
/* Extract the dependency-policy bits from an LE v2 policy word. */
static inline uint16_t
iwm_te_v2_get_dep_policy(uint16_t policy)
{
	return (le16toh(policy) & IWM_TE_V2_DEP_POLICY_MSK) >>
		IWM_TE_V2_PLACEMENT_POS;
}
   2237 
/* Extract the absence bit from an LE v2 policy word. */
static inline uint16_t
iwm_te_v2_get_absence(uint16_t policy)
{
	return (le16toh(policy) & IWM_TE_V2_ABSENCE) >> IWM_TE_V2_ABSENCE_POS;
}
   2243 
/*
 * Convert a v2 time event command to the older v1 layout for
 * firmware without IWM_UCODE_TLV_FLAGS_TIME_EVENT_API_V2.  Most
 * fields copy straight across (already little-endian); the packed
 * v2 policy word is split back into the separate v1
 * dep_policy/is_present/notify fields.
 */
void
iwm_mvm_te_v2_to_v1(const struct iwm_time_event_cmd_v2 *cmd_v2,
	struct iwm_time_event_cmd_v1 *cmd_v1)
{
	cmd_v1->id_and_color = cmd_v2->id_and_color;
	cmd_v1->action = cmd_v2->action;
	cmd_v1->id = cmd_v2->id;
	cmd_v1->apply_time = cmd_v2->apply_time;
	cmd_v1->max_delay = cmd_v2->max_delay;
	cmd_v1->depends_on = cmd_v2->depends_on;
	cmd_v1->interval = cmd_v2->interval;
	cmd_v1->duration = cmd_v2->duration;
	/* v1 and v2 use different "endless repeat" encodings. */
	if (cmd_v2->repeat == IWM_TE_V2_REPEAT_ENDLESS)
		cmd_v1->repeat = htole32(IWM_TE_V1_REPEAT_ENDLESS);
	else
		cmd_v1->repeat = htole32(cmd_v2->repeat);
	cmd_v1->max_frags = htole32(cmd_v2->max_frags);
	cmd_v1->interval_reciprocal = 0; /* unused */

	cmd_v1->dep_policy = htole32(iwm_te_v2_get_dep_policy(cmd_v2->policy));
	cmd_v1->is_present = htole32(!iwm_te_v2_get_absence(cmd_v2->policy));
	cmd_v1->notify = htole32(iwm_te_v2_get_notify(cmd_v2->policy));
}
   2267 
/*
 * Send a time event command to the firmware, converting it to the
 * v1 layout first when the firmware does not speak the v2 API.
 */
int
iwm_mvm_send_time_event_cmd(struct iwm_softc *sc,
	const struct iwm_time_event_cmd_v2 *cmd)
{
	struct iwm_time_event_cmd_v1 cmd_v1;

	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_TIME_EVENT_API_V2)
		return iwm_mvm_send_cmd_pdu(sc, IWM_TIME_EVENT_CMD,
		    IWM_CMD_SYNC, sizeof(*cmd), cmd);

	iwm_mvm_te_v2_to_v1(cmd, &cmd_v1);
	return iwm_mvm_send_cmd_pdu(sc, IWM_TIME_EVENT_CMD, IWM_CMD_SYNC,
	    sizeof(cmd_v1), &cmd_v1);
}
   2282 
   2283 int
   2284 iwm_mvm_time_event_send_add(struct iwm_softc *sc, struct iwm_node *in,
   2285 	void *te_data, struct iwm_time_event_cmd_v2 *te_cmd)
   2286 {
   2287 	int ret;
   2288 
   2289 	DPRINTF(("Add new TE, duration %d TU\n", le32toh(te_cmd->duration)));
   2290 
   2291 	ret = iwm_mvm_send_time_event_cmd(sc, te_cmd);
   2292 	if (ret) {
   2293 		DPRINTF(("%s: Couldn't send IWM_TIME_EVENT_CMD: %d\n",
   2294 		    DEVNAME(sc), ret));
   2295 	}
   2296 
   2297 	return ret;
   2298 }
   2299 
/*
 * Ask the firmware to protect the session with node 'in': schedule
 * an IWM_TE_BSS_STA_AGGRESSIVE_ASSOC time event of 'duration' TU,
 * anchored at the current device time, with at most 'max_delay' TU
 * of scheduling delay.  Host notifications are requested for both
 * start and end of the event.
 * NOTE(review): 'min_duration' is accepted but not used.
 */
void
iwm_mvm_protect_session(struct iwm_softc *sc, struct iwm_node *in,
	uint32_t duration, uint32_t min_duration, uint32_t max_delay)
{
	struct iwm_time_event_cmd_v2 time_cmd;

	memset(&time_cmd, 0, sizeof(time_cmd));

	time_cmd.action = htole32(IWM_FW_CTXT_ACTION_ADD);
	time_cmd.id_and_color =
	    htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
	time_cmd.id = htole32(IWM_TE_BSS_STA_AGGRESSIVE_ASSOC);

	/* Anchor the event at the device's current timestamp. */
	time_cmd.apply_time = htole32(iwm_read_prph(sc,
	    IWM_DEVICE_SYSTEM_TIME_REG));

	time_cmd.max_frags = IWM_TE_V2_FRAG_NONE;
	time_cmd.max_delay = htole32(max_delay);
	/* TODO: why do we need to interval = bi if it is not periodic? */
	time_cmd.interval = htole32(1);
	time_cmd.duration = htole32(duration);
	time_cmd.repeat = 1;
	time_cmd.policy
	    = htole32(IWM_TE_V2_NOTIF_HOST_EVENT_START |
	        IWM_TE_V2_NOTIF_HOST_EVENT_END);

	iwm_mvm_time_event_send_add(sc, in, /*te_data*/NULL, &time_cmd);
}
   2328 
   2329 /*
   2330  * END iwlwifi/mvm/time-event.c
   2331  */
   2332 
   2333 /*
   2334  * NVM read access and content parsing.  We do not support
   2335  * external NVM or writing NVM.
   2336  * iwlwifi/mvm/nvm.c
   2337  */
   2338 
/* list of NVM sections we are allowed/need to read */
const int nvm_to_read[] = {
	IWM_NVM_SECTION_TYPE_HW,
	IWM_NVM_SECTION_TYPE_SW,
	IWM_NVM_SECTION_TYPE_CALIBRATION,
	IWM_NVM_SECTION_TYPE_PRODUCTION,
};

/* Default NVM size to read (per-chunk request size, in bytes) */
#define IWM_NVM_DEFAULT_CHUNK_SIZE (2*1024)
/* Upper bound on a single section's size, in bytes. */
#define IWM_MAX_NVM_SECTION_SIZE 7000

/* Opcodes for the IWM_NVM_ACCESS_CMD host command. */
#define IWM_NVM_WRITE_OPCODE 1
#define IWM_NVM_READ_OPCODE 0
   2353 
   2354 int
   2355 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
   2356 	uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
   2357 {
   2358 	offset = 0;
   2359 	struct iwm_nvm_access_cmd nvm_access_cmd = {
   2360 		.offset = htole16(offset),
   2361 		.length = htole16(length),
   2362 		.type = htole16(section),
   2363 		.op_code = IWM_NVM_READ_OPCODE,
   2364 	};
   2365 	struct iwm_nvm_access_resp *nvm_resp;
   2366 	struct iwm_rx_packet *pkt;
   2367 	struct iwm_host_cmd cmd = {
   2368 		.id = IWM_NVM_ACCESS_CMD,
   2369 		.flags = IWM_CMD_SYNC | IWM_CMD_WANT_SKB |
   2370 		    IWM_CMD_SEND_IN_RFKILL,
   2371 		.data = { &nvm_access_cmd, },
   2372 	};
   2373 	int ret, bytes_read, offset_read;
   2374 	uint8_t *resp_data;
   2375 
   2376 	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
   2377 
   2378 	ret = iwm_send_cmd(sc, &cmd);
   2379 	if (ret)
   2380 		return ret;
   2381 
   2382 	pkt = cmd.resp_pkt;
   2383 	if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
   2384 		DPRINTF(("%s: Bad return from IWM_NVM_ACCES_COMMAND (0x%08X)\n",
   2385 		    DEVNAME(sc), pkt->hdr.flags));
   2386 		ret = EIO;
   2387 		goto exit;
   2388 	}
   2389 
   2390 	/* Extract NVM response */
   2391 	nvm_resp = (void *)pkt->data;
   2392 
   2393 	ret = le16toh(nvm_resp->status);
   2394 	bytes_read = le16toh(nvm_resp->length);
   2395 	offset_read = le16toh(nvm_resp->offset);
   2396 	resp_data = nvm_resp->data;
   2397 	if (ret) {
   2398 		DPRINTF(("%s: NVM access command failed with status %d\n",
   2399 		    DEVNAME(sc), ret));
   2400 		ret = EINVAL;
   2401 		goto exit;
   2402 	}
   2403 
   2404 	if (offset_read != offset) {
   2405 		DPRINTF(("%s: NVM ACCESS response with invalid offset %d\n",
   2406 		    DEVNAME(sc), offset_read));
   2407 		ret = EINVAL;
   2408 		goto exit;
   2409 	}
   2410 
   2411 	memcpy(data + offset, resp_data, bytes_read);
   2412 	*len = bytes_read;
   2413 
   2414  exit:
   2415 	iwm_free_resp(sc, &cmd);
   2416 	return ret;
   2417 }
   2418 
   2419 /*
   2420  * Reads an NVM section completely.
   2421  * NICs prior to 7000 family doesn't have a real NVM, but just read
   2422  * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
   2423  * by uCode, we need to manually check in this case that we don't
   2424  * overflow and try to read more than the EEPROM size.
   2425  * For 7000 family NICs, we supply the maximal size we can read, and
   2426  * the uCode fills the response with as much data as we can,
   2427  * without overflowing, so no check is needed.
   2428  */
int
iwm_nvm_read_section(struct iwm_softc *sc,
	uint16_t section, uint8_t *data, uint16_t *len)
{
	uint16_t length, seglen;
	int error;

	/* Set nvm section read length */
	length = seglen = IWM_NVM_DEFAULT_CHUNK_SIZE;
	*len = 0;

	/*
	 * Read the NVM until exhausted (reading less than requested).
	 * A chunk shorter than 'length' marks the end of the section.
	 * NOTE(review): 'data' must be large enough for a whole
	 * section (callers pass an IWM_OTP_LOW_IMAGE_SIZE buffer) --
	 * confirm for any new caller.
	 */
	while (seglen == length) {
		error = iwm_nvm_read_chunk(sc,
		    section, *len, length, data, &seglen);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "Cannot read NVM from section %d offset %d, "
			    "length %d\n", section, *len, length);
			return error;
		}
		/* Advance past the bytes just read. */
		*len += seglen;
	}

	DPRINTFN(4, ("NVM section %d read completed\n", section));
	return 0;
}
   2456 
   2457 /*
   2458  * BEGIN IWM_NVM_PARSE
   2459  */
   2460 
   2461 /* iwlwifi/iwl-nvm-parse.c */
   2462 
   2463 /* NVM offsets (in words) definitions */
enum wkp_nvm_offsets {
	/* NVM HW-Section offset (in words) definitions */
	IWM_HW_ADDR = 0x15,

/* NVM SW-Section offset (in words) definitions */
	IWM_NVM_SW_SECTION = 0x1C0,
	IWM_NVM_VERSION = 0,
	IWM_RADIO_CFG = 1,
	IWM_SKU = 2,
	IWM_N_HW_ADDRS = 3,
	/* Channel flag words live at absolute word offset 0x1E0. */
	IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,

/* NVM calibration section offset (in words) definitions */
	IWM_NVM_CALIB_SECTION = 0x2B8,
	IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
};
   2480 
   2481 /* SKU Capabilities (actual values from NVM definition) */
/* Bits of the IWM_SKU word decoded in iwm_parse_nvm_data(). */
enum nvm_sku_bits {
	IWM_NVM_SKU_CAP_BAND_24GHZ	= (1 << 0),
	IWM_NVM_SKU_CAP_BAND_52GHZ	= (1 << 1),
	IWM_NVM_SKU_CAP_11N_ENABLE	= (1 << 2),
	IWM_NVM_SKU_CAP_11AC_ENABLE	= (1 << 3),
};
   2488 
   2489 /* radio config bits (actual values from NVM definition) */
/*
 * Field extractors for the IWM_RADIO_CFG NVM word.  The argument is
 * parenthesized so the macros stay safe for any expression (macro
 * hygiene fix; previously 'x' was used unparenthesized).
 */
#define IWM_NVM_RF_CFG_DASH_MSK(x)   ((x) & 0x3)         /* bits 0-1   */
#define IWM_NVM_RF_CFG_STEP_MSK(x)   (((x) >> 2)  & 0x3) /* bits 2-3   */
#define IWM_NVM_RF_CFG_TYPE_MSK(x)   (((x) >> 4)  & 0x3) /* bits 4-5   */
#define IWM_NVM_RF_CFG_PNUM_MSK(x)   (((x) >> 6)  & 0x3) /* bits 6-7   */
#define IWM_NVM_RF_CFG_TX_ANT_MSK(x) (((x) >> 8)  & 0xF) /* bits 8-11  */
#define IWM_NVM_RF_CFG_RX_ANT_MSK(x) (((x) >> 12) & 0xF) /* bits 12-15 */

/* Default maximum TX power. */
#define DEFAULT_MAX_TX_POWER 16
   2498 
   2499 /**
   2500  * enum iwm_nvm_channel_flags - channel flags in NVM
   2501  * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
   2502  * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
   2503  * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
   2504  * @IWM_NVM_CHANNEL_RADAR: radar detection required
   2505  * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
   2506  * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
   2507  * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
   2508  * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
   2509  * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
   2510  */
enum iwm_nvm_channel_flags {
	IWM_NVM_CHANNEL_VALID = (1 << 0),
	IWM_NVM_CHANNEL_IBSS = (1 << 1),
	/* NB: bits 2, 5 and 6 are not defined here */
	IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
	IWM_NVM_CHANNEL_RADAR = (1 << 4),
	IWM_NVM_CHANNEL_DFS = (1 << 7),
	IWM_NVM_CHANNEL_WIDE = (1 << 8),
	IWM_NVM_CHANNEL_40MHZ = (1 << 9),
	IWM_NVM_CHANNEL_80MHZ = (1 << 10),
	IWM_NVM_CHANNEL_160MHZ = (1 << 11),
};
   2522 
/*
 * Populate ic->ic_channels from the NVM channel flag words.  The
 * iwm_nvm_channels table lists 2GHz channels first; every 5GHz
 * entry is invalidated when the SKU does not enable the 5GHz band.
 * Channels without the ACTIVE flag are marked passive-scan only.
 */
void
iwm_init_channel_map(struct iwm_softc *sc, const uint16_t * const nvm_ch_flags)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_nvm_data *data = &sc->sc_nvm;
	int ch_idx;
	struct ieee80211_channel *channel;
	uint16_t ch_flags;
	int is_5ghz;
	int flags, hw_value;

	for (ch_idx = 0; ch_idx < __arraycount(iwm_nvm_channels); ch_idx++) {
		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);

		/* Force 5GHz channels invalid if the SKU lacks 5GHz. */
		if (ch_idx >= IWM_NUM_2GHZ_CHANNELS &&
		    !data->sku_cap_band_52GHz_enable)
			ch_flags &= ~IWM_NVM_CHANNEL_VALID;

		if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
			DPRINTF(("Ch. %d Flags %x [%sGHz] - No traffic\n",
			    iwm_nvm_channels[ch_idx],
			    ch_flags,
			    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
			    "5.2" : "2.4"));
			continue;
		}

		hw_value = iwm_nvm_channels[ch_idx];
		channel = &ic->ic_channels[hw_value];

		is_5ghz = ch_idx >= IWM_NUM_2GHZ_CHANNELS;
		if (!is_5ghz) {
			flags = IEEE80211_CHAN_2GHZ;
			channel->ic_flags
			    = IEEE80211_CHAN_CCK
			    | IEEE80211_CHAN_OFDM
			    | IEEE80211_CHAN_DYN
			    | IEEE80211_CHAN_2GHZ;
		} else {
			flags = IEEE80211_CHAN_5GHZ;
			channel->ic_flags =
			    IEEE80211_CHAN_A;
		}
		channel->ic_freq = ieee80211_ieee2mhz(hw_value, flags);

		/* Channels not flagged ACTIVE allow passive scan only. */
		if (!(ch_flags & IWM_NVM_CHANNEL_ACTIVE))
			channel->ic_flags |= IEEE80211_CHAN_PASSIVE;
	}
}
   2572 
/*
 * Parse the relevant NVM sections into sc->sc_nvm: NVM version,
 * radio configuration, SKU capabilities, antenna masks, crystal
 * calibration, MAC address and the channel map.  Returns EINVAL if
 * the NVM reports no valid TX or RX antennas.
 */
int
iwm_parse_nvm_data(struct iwm_softc *sc,
	const uint16_t *nvm_hw, const uint16_t *nvm_sw,
	const uint16_t *nvm_calib, uint8_t tx_chains, uint8_t rx_chains)
{
	struct iwm_nvm_data *data = &sc->sc_nvm;
	uint8_t hw_addr[ETHER_ADDR_LEN];
	uint16_t radio_cfg, sku;

	data->nvm_version = le16_to_cpup(nvm_sw + IWM_NVM_VERSION);

	/* Decode the radio configuration word into its subfields. */
	radio_cfg = le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
	data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
	data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
	data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
	data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
	data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK(radio_cfg);
	data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK(radio_cfg);

	sku = le16_to_cpup(nvm_sw + IWM_SKU);
	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
#ifndef IWM_NO_5GHZ
	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
#else
	data->sku_cap_band_52GHz_enable = 0;
#endif
	/* 11n is forced off regardless of the SKU capability bit. */
	data->sku_cap_11n_enable = 0;

	if (!data->valid_tx_ant || !data->valid_rx_ant) {
		DPRINTF(("%s: invalid antennas (0x%x, 0x%x)\n",
			    DEVNAME(sc), data->valid_tx_ant,
			    data->valid_rx_ant));
		return EINVAL;
	}

	data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);

	/*
	 * NOTE(review): stored without le16toh(), unlike the other
	 * fields above -- confirm consumers expect raw LE words.
	 */
	data->xtal_calib[0] = *(nvm_calib + IWM_XTAL_CALIB);
	data->xtal_calib[1] = *(nvm_calib + IWM_XTAL_CALIB + 1);

	/* The byte order is little endian 16 bit, meaning 214365 */
	memcpy(hw_addr, nvm_hw + IWM_HW_ADDR, ETHER_ADDR_LEN);
	data->hw_addr[0] = hw_addr[1];
	data->hw_addr[1] = hw_addr[0];
	data->hw_addr[2] = hw_addr[3];
	data->hw_addr[3] = hw_addr[2];
	data->hw_addr[4] = hw_addr[5];
	data->hw_addr[5] = hw_addr[4];

	iwm_init_channel_map(sc, &nvm_sw[IWM_NVM_CHANNELS]);
	data->calib_version = 255;   /* TODO:
					this value will prevent some checks from
					failing, we need to check if this
					field is still needed, and if it does,
					where is it in the NVM */

	return 0;
}
   2631 
   2632 /*
   2633  * END NVM PARSE
   2634  */
   2635 
/*
 * One NVM section as read from the firmware: 'length' bytes at
 * 'data' (a driver-owned copy; see iwm_nvm_init()).
 */
struct iwm_nvm_section {
        uint16_t length;
        const uint8_t *data;
};
   2640 
/*
 * Extract the valid TX/RX antenna masks from the firmware PHY
 * configuration word.  The 'sc' argument is parenthesized so the
 * macros stay safe for any pointer-valued expression (macro
 * hygiene fix; previously used unparenthesized).
 */
#define IWM_FW_VALID_TX_ANT(sc) \
    (((sc)->sc_fw_phy_config & IWM_FW_PHY_CFG_TX_CHAIN) \
    >> IWM_FW_PHY_CFG_TX_CHAIN_POS)
#define IWM_FW_VALID_RX_ANT(sc) \
    (((sc)->sc_fw_phy_config & IWM_FW_PHY_CFG_RX_CHAIN) \
    >> IWM_FW_PHY_CFG_RX_CHAIN_POS)
   2647 
/*
 * Glue from the raw section table to iwm_parse_nvm_data().  The SW
 * and HW sections are required and NULL-checked; returns ENOENT if
 * either is missing.
 * NOTE(review): the CALIBRATION section pointer is dereferenced by
 * iwm_parse_nvm_data() but not checked here -- relies on the caller
 * (iwm_nvm_init) having read it successfully; confirm.
 */
static int
iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
{
	const uint16_t *hw, *sw, *calib;

	/* Checking for required sections */
	if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
	    !sections[IWM_NVM_SECTION_TYPE_HW].data) {
		DPRINTF(("%s: Can't parse empty NVM sections\n", DEVNAME(sc)));
		return ENOENT;
	}

	hw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_HW].data;
	sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
	calib = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
	return iwm_parse_nvm_data(sc, hw, sw, calib,
	    IWM_FW_VALID_TX_ANT(sc), IWM_FW_VALID_RX_ANT(sc));
}
   2666 
   2667 int
   2668 iwm_nvm_init(struct iwm_softc *sc)
   2669 {
   2670 	struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
   2671 	int i, section, error;
   2672 	uint16_t len;
   2673 	uint8_t *nvm_buffer, *temp;
   2674 
   2675 	/* Read From FW NVM */
   2676 	DPRINTF(("Read NVM\n"));
   2677 
   2678 	/* TODO: find correct NVM max size for a section */
   2679 	nvm_buffer = kmem_alloc(IWM_OTP_LOW_IMAGE_SIZE, KM_SLEEP);
   2680 	for (i = 0; i < __arraycount(nvm_to_read); i++) {
   2681 		section = nvm_to_read[i];
   2682 		KASSERT(section <= __arraycount(nvm_sections));
   2683 
   2684 		error = iwm_nvm_read_section(sc, section, nvm_buffer, &len);
   2685 		if (error)
   2686 			break;
   2687 
   2688 		temp = kmem_alloc(len, KM_SLEEP);
   2689 		memcpy(temp, nvm_buffer, len);
   2690 		nvm_sections[section].data = temp;
   2691 		nvm_sections[section].length = len;
   2692 	}
   2693 	kmem_free(nvm_buffer, IWM_OTP_LOW_IMAGE_SIZE);
   2694 	if (error)
   2695 		return error;
   2696 
   2697 	return iwm_parse_nvm_sections(sc, nvm_sections);
   2698 }
   2699 
   2700 /*
   2701  * Firmware loading gunk.  This is kind of a weird hybrid between the
   2702  * iwn driver and the Linux iwlwifi driver.
   2703  */
   2704 
/*
 * Upload one firmware chunk to device SRAM at dst_addr using the
 * service DMA channel, then sleep until the "chunk done" interrupt
 * (which sets sc_fw_chunk_done and wakes &sc->sc_fw) or a 1s timeout.
 *
 * Returns 0 on success, EBUSY if the NIC could not be locked, or the
 * tsleep() error (e.g. EWOULDBLOCK on timeout).
 */
int
iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
	const uint8_t *section, uint32_t byte_cnt)
{
	struct iwm_dma_info *dma = &sc->fw_dma;
	int error;

	/* Copy firmware section into pre-allocated DMA-safe memory. */
	memcpy(dma->vaddr, section, byte_cnt);
	bus_dmamap_sync(sc->sc_dmat,
	    dma->map, 0, byte_cnt, BUS_DMASYNC_PREWRITE);

	if (!iwm_nic_lock(sc))
		return EBUSY;

	sc->sc_fw_chunk_done = 0;

	/* Pause the channel, then program source/destination/length. */
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
	    dst_addr);
	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
	    dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
	    (iwm_get_dma_hi_addr(dma->paddr)
	      << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
	/* Kick off the transfer. */
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE    |
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

	iwm_nic_unlock(sc);

	/* wait 1s for this segment to load */
	while (!sc->sc_fw_chunk_done)
		if ((error = tsleep(&sc->sc_fw, 0, "iwmfw", hz)) != 0)
			break;

        return error;
}
   2749 
   2750 int
   2751 iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
   2752 {
   2753 	struct iwm_fw_sects *fws;
   2754 	int error, i, w;
   2755 	void *data;
   2756 	uint32_t dlen;
   2757 	uint32_t offset;
   2758 
   2759 	sc->sc_uc.uc_intr = 0;
   2760 
   2761 	fws = &sc->sc_fw.fw_sects[ucode_type];
   2762 	for (i = 0; i < fws->fw_count; i++) {
   2763 		data = fws->fw_sect[i].fws_data;
   2764 		dlen = fws->fw_sect[i].fws_len;
   2765 		offset = fws->fw_sect[i].fws_devoff;
   2766 		DPRINTF(("LOAD FIRMWARE type %d offset %u len %d\n",
   2767 		    ucode_type, offset, dlen));
   2768 		error = iwm_firmware_load_chunk(sc, offset, data, dlen);
   2769 		if (error) {
   2770 			DPRINTF(("iwm_firmware_load_chunk() chunk %u of %u returned error %02d\n", i, fws->fw_count, error));
   2771 			return error;
   2772 		}
   2773 	}
   2774 
   2775 	/* wait for the firmware to load */
   2776 	IWM_WRITE(sc, IWM_CSR_RESET, 0);
   2777 
   2778 	for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++) {
   2779 		error = tsleep(&sc->sc_uc, 0, "iwmuc", hz/10);
   2780 	}
   2781 
   2782 	return error;
   2783 }
   2784 
   2785 /* iwlwifi: pcie/trans.c */
   2786 int
   2787 iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
   2788 {
   2789 	int error;
   2790 
   2791 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
   2792 
   2793 	if ((error = iwm_nic_init(sc)) != 0) {
   2794 		aprint_error_dev(sc->sc_dev, "Unable to init nic\n");
   2795 		return error;
   2796 	}
   2797 
   2798 	/* make sure rfkill handshake bits are cleared */
   2799 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
   2800 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
   2801 	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
   2802 
   2803 	/* clear (again), then enable host interrupts */
   2804 	IWM_WRITE(sc, IWM_CSR_INT, ~0);
   2805 	iwm_enable_interrupts(sc);
   2806 
   2807 	/* really make sure rfkill handshake bits are cleared */
   2808 	/* maybe we should write a few times more?  just to make sure */
   2809 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
   2810 	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
   2811 
   2812 	/* Load the given image to the HW */
   2813 	return iwm_load_firmware(sc, ucode_type);
   2814 }
   2815 
/*
 * Post-load hook run once the firmware image is up; simply delegates to
 * iwm_post_alive().  The sched_base argument is unused here.
 */
int
iwm_fw_alive(struct iwm_softc *sc, uint32_t sched_base)
{
	return iwm_post_alive(sc);
}
   2821 
   2822 int
   2823 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
   2824 {
   2825 	struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
   2826 		.valid = htole32(valid_tx_ant),
   2827 	};
   2828 
   2829 	return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
   2830 	    IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
   2831 }
   2832 
   2833 /* iwlwifi: mvm/fw.c */
   2834 int
   2835 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
   2836 {
   2837 	struct iwm_phy_cfg_cmd phy_cfg_cmd;
   2838 	enum iwm_ucode_type ucode_type = sc->sc_uc_current;
   2839 
   2840 	/* Set parameters */
   2841 	phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
   2842 	phy_cfg_cmd.calib_control.event_trigger =
   2843 	    sc->sc_default_calib[ucode_type].event_trigger;
   2844 	phy_cfg_cmd.calib_control.flow_trigger =
   2845 	    sc->sc_default_calib[ucode_type].flow_trigger;
   2846 
   2847 	DPRINTFN(10, ("Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg));
   2848 	return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
   2849 	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
   2850 }
   2851 
   2852 int
   2853 iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
   2854 	enum iwm_ucode_type ucode_type)
   2855 {
   2856 	enum iwm_ucode_type old_type = sc->sc_uc_current;
   2857 	int error;
   2858 
   2859 	if ((error = iwm_read_firmware(sc)) != 0)
   2860 		return error;
   2861 
   2862 	sc->sc_uc_current = ucode_type;
   2863         error = iwm_start_fw(sc, ucode_type);
   2864 	if (error) {
   2865 		sc->sc_uc_current = old_type;
   2866 		return error;
   2867 	}
   2868 
   2869 	return iwm_fw_alive(sc, sc->sched_base);
   2870 }
   2871 
   2872 /*
   2873  * mvm misc bits
   2874  */
   2875 
   2876 /*
   2877  * follows iwlwifi/fw.c
   2878  */
/*
 * Run the INIT ucode image.  With justnvm != 0, only the NVM is read
 * (MAC address and scan command sizing) and the function returns.
 * Otherwise the antenna and PHY configuration commands are sent and we
 * wait for the firmware's init-complete notification (which sets
 * sc_init_complete and wakes &sc->sc_init_complete).
 *
 * Returns 0 on success, EPERM if rfkill is active, or an errno.
 */
int
iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
{
	int error;

	/* do not operate with rfkill switch turned on */
	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
		aprint_error_dev(sc->sc_dev,
		    "radio is disabled by hardware switch\n");
		return EPERM;
	}

	/* Cleared here; set by the init-complete notification handler. */
	sc->sc_init_complete = 0;
        if ((error = iwm_mvm_load_ucode_wait_alive(sc,
	    IWM_UCODE_TYPE_INIT)) != 0)
		return error;

	if (justnvm) {
		if ((error = iwm_nvm_init(sc)) != 0) {
			aprint_error_dev(sc->sc_dev, "failed to read nvm\n");
			return error;
		}
		/* MAC address comes from the NVM HW section. */
		memcpy(&sc->sc_ic.ic_myaddr,
		    &sc->sc_nvm.hw_addr, ETHER_ADDR_LEN);

		/*
		 * Preallocate the scan command buffer: header plus the
		 * largest probe request plus all channel entries.
		 */
		sc->sc_scan_cmd_len = sizeof(struct iwm_scan_cmd)
		    + sc->sc_capa_max_probe_len
		    + IWM_MAX_NUM_SCAN_CHANNELS
		    * sizeof(struct iwm_scan_channel);
		sc->sc_scan_cmd = kmem_alloc(sc->sc_scan_cmd_len, KM_SLEEP);

		return 0;
	}

	/* Send TX valid antennas before triggering calibrations */
	if ((error = iwm_send_tx_ant_cfg(sc, IWM_FW_VALID_TX_ANT(sc))) != 0)
		return error;

	/*
	* Send phy configurations command to init uCode
	* to start the 16.0 uCode init image internal calibrations.
	*/
	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0 ) {
		DPRINTF(("%s: failed to run internal calibration: %d\n",
		    DEVNAME(sc), error));
		return error;
	}

	/*
	 * Nothing to do but wait for the init complete notification
	 * from the firmware
	 */
	while (!sc->sc_init_complete)
		if ((error = tsleep(&sc->sc_init_complete,
		    0, "iwminit", 2*hz)) != 0)
			break;

	return error;
}
   2938 
   2939 /*
   2940  * receive side
   2941  */
   2942 
   2943 /* (re)stock rx ring, called at init-time and at runtime */
   2944 int
   2945 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
   2946 {
   2947 	struct iwm_rx_ring *ring = &sc->rxq;
   2948 	struct iwm_rx_data *data = &ring->data[idx];
   2949 	struct mbuf *m;
   2950 	int error;
   2951 	int fatal = 0;
   2952 
   2953 	m = m_gethdr(M_DONTWAIT, MT_DATA);
   2954 	if (m == NULL)
   2955 		return ENOBUFS;
   2956 
   2957 	if (size <= MCLBYTES) {
   2958 		MCLGET(m, M_DONTWAIT);
   2959 	} else {
   2960 		MEXTMALLOC(m, IWM_RBUF_SIZE, M_DONTWAIT);
   2961 	}
   2962 	if ((m->m_flags & M_EXT) == 0) {
   2963 		m_freem(m);
   2964 		return ENOBUFS;
   2965 	}
   2966 
   2967 	if (data->m != NULL) {
   2968 		bus_dmamap_unload(sc->sc_dmat, data->map);
   2969 		fatal = 1;
   2970 	}
   2971 
   2972 	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
   2973 	if ((error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
   2974 	    BUS_DMA_READ|BUS_DMA_NOWAIT)) != 0) {
   2975 		/* XXX */
   2976 		if (fatal)
   2977 			panic("iwm: could not load RX mbuf");
   2978 		m_freem(m);
   2979 		return error;
   2980 	}
   2981 	data->m = m;
   2982 	bus_dmamap_sync(sc->sc_dmat, data->map, 0, size, BUS_DMASYNC_PREREAD);
   2983 
   2984         /* Update RX descriptor. */
   2985 	ring->desc[idx] = htole32(data->map->dm_segs[0].ds_addr >> 8);
   2986 	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
   2987 	    idx * sizeof(uint32_t), sizeof(uint32_t), BUS_DMASYNC_PREWRITE);
   2988 
   2989 	return 0;
   2990 }
   2991 
   2992 /* iwlwifi: mvm/rx.c */
   2993 #define IWM_RSSI_OFFSET 50
   2994 int
   2995 iwm_mvm_calc_rssi(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
   2996 {
   2997 	int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
   2998 	uint32_t agc_a, agc_b;
   2999 	uint32_t val;
   3000 
   3001 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_AGC_IDX]);
   3002 	agc_a = (val & IWM_OFDM_AGC_A_MSK) >> IWM_OFDM_AGC_A_POS;
   3003 	agc_b = (val & IWM_OFDM_AGC_B_MSK) >> IWM_OFDM_AGC_B_POS;
   3004 
   3005 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_RSSI_AB_IDX]);
   3006 	rssi_a = (val & IWM_OFDM_RSSI_INBAND_A_MSK) >> IWM_OFDM_RSSI_A_POS;
   3007 	rssi_b = (val & IWM_OFDM_RSSI_INBAND_B_MSK) >> IWM_OFDM_RSSI_B_POS;
   3008 
   3009 	/*
   3010 	 * dBm = rssi dB - agc dB - constant.
   3011 	 * Higher AGC (higher radio gain) means lower signal.
   3012 	 */
   3013 	rssi_a_dbm = rssi_a - IWM_RSSI_OFFSET - agc_a;
   3014 	rssi_b_dbm = rssi_b - IWM_RSSI_OFFSET - agc_b;
   3015 	max_rssi_dbm = MAX(rssi_a_dbm, rssi_b_dbm);
   3016 
   3017 	DPRINTF(("Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
   3018 	    rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b));
   3019 
   3020 	return max_rssi_dbm;
   3021 }
   3022 
   3023 /* iwlwifi: mvm/rx.c */
   3024 /*
   3025  * iwm_mvm_get_signal_strength - use new rx PHY INFO API
   3026  * values are reported by the fw as positive values - need to negate
   3027  * to obtain their dBM.  Account for missing antennas by replacing 0
   3028  * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
   3029  */
   3030 int
   3031 iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
   3032 {
   3033 	int energy_a, energy_b, energy_c, max_energy;
   3034 	uint32_t val;
   3035 
   3036 	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
   3037 	energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
   3038 	    IWM_RX_INFO_ENERGY_ANT_A_POS;
   3039 	energy_a = energy_a ? -energy_a : -256;
   3040 	energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
   3041 	    IWM_RX_INFO_ENERGY_ANT_B_POS;
   3042 	energy_b = energy_b ? -energy_b : -256;
   3043 	energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
   3044 	    IWM_RX_INFO_ENERGY_ANT_C_POS;
   3045 	energy_c = energy_c ? -energy_c : -256;
   3046 	max_energy = MAX(energy_a, energy_b);
   3047 	max_energy = MAX(max_energy, energy_c);
   3048 
   3049 	DPRINTFN(12, ("energy In A %d B %d C %d , and max %d\n",
   3050 	    energy_a, energy_b, energy_c, max_energy));
   3051 
   3052 	return max_energy;
   3053 }
   3054 
/*
 * Handle a PHY-info notification: cache the PHY statistics so the
 * subsequent MPDU notification (iwm_mvm_rx_rx_mpdu) can use them.
 */
void
iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc,
	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
{
	struct iwm_rx_phy_info *phy_info = (void *)pkt->data;

	DPRINTFN(20, ("received PHY stats\n"));
	/* Make the DMA'd payload visible to the CPU before copying. */
	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
	    sizeof(*phy_info), BUS_DMASYNC_POSTREAD);

	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
}
   3067 
   3068 /*
   3069  * Retrieve the average noise (in dBm) among receivers.
   3070  */
   3071 int
   3072 iwm_get_noise(const struct iwm_mvm_statistics_rx_non_phy *stats)
   3073 {
   3074 	int i, total, nbant, noise;
   3075 
   3076 	total = nbant = noise = 0;
   3077 	for (i = 0; i < 3; i++) {
   3078 		noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
   3079 		if (noise) {
   3080 			total += noise;
   3081 			nbant++;
   3082 		}
   3083 	}
   3084 
   3085 	/* There should be at least one antenna but check anyway. */
   3086 	return (nbant == 0) ? -127 : (total / nbant) - 107;
   3087 }
   3088 
   3089 /*
   3090  * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
   3091  *
   3092  * Handles the actual data of the Rx packet from the fw
   3093  */
void
iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc,
	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_frame *wh;
	struct ieee80211_node *ni;
	struct ieee80211_channel *c = NULL;
	struct mbuf *m;
	struct iwm_rx_phy_info *phy_info;
	struct iwm_rx_mpdu_res_start *rx_res;
	int device_timestamp;
	uint32_t len;
	uint32_t rx_pkt_status;
	int rssi;

	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
	    BUS_DMASYNC_POSTREAD);

	/* PHY info was cached by the preceding PHY notification. */
	phy_info = &sc->sc_last_phy_info;
	rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
	wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
	len = le16toh(rx_res->byte_count);
	/* Status word trails the frame payload. */
	rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));

	/* Hand the RX buffer itself up; point the mbuf at the frame. */
	m = data->m;
	m->m_data = pkt->data + sizeof(*rx_res);
	m->m_pkthdr.len = m->m_len = len;

	if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
		DPRINTF(("dsp size out of range [0,20]: %d\n",
		    phy_info->cfg_phy_cnt));
		return;
	}

	/* Drop frames that failed CRC or overran the RX FIFO. */
	if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
	    !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
		DPRINTF(("Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status));
		return; /* drop */
	}

	device_timestamp = le32toh(phy_info->system_timestamp);

	/* Signal strength API depends on the firmware's capabilities. */
	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_RX_ENERGY_API) {
		rssi = iwm_mvm_get_signal_strength(sc, phy_info);
	} else {
		rssi = iwm_mvm_calc_rssi(sc, phy_info);
	}
	rssi = -rssi;

	if (ic->ic_state == IEEE80211_S_SCAN)
		iwm_fix_channel(ic, m);

	/* replenish ring for the buffer we're going to feed to the sharks */
	if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0)
		return;

	m->m_pkthdr.rcvif = IC2IFP(ic);

	/* When scanning 5GHz, override the node's channel (bounds-checked). */
	if (sc->sc_scanband == IEEE80211_CHAN_5GHZ) {
		if (le32toh(phy_info->channel) < __arraycount(ic->ic_channels))
			c = &ic->ic_channels[le32toh(phy_info->channel)];
	}

	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
	if (c)
		ni->ni_chan = c;

	if (sc->sc_drvbpf != NULL) {
		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;

		tap->wr_flags = 0;
		if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
		/*
		 * NOTE(review): phy_info->channel indexes ic_channels here
		 * without the bounds check applied above -- presumably the
		 * firmware only reports valid channel numbers; confirm.
		 */
		tap->wr_chan_freq =
		    htole16(ic->ic_channels[phy_info->channel].ic_freq);
		tap->wr_chan_flags =
		    htole16(ic->ic_channels[phy_info->channel].ic_flags);
		tap->wr_dbm_antsignal = (int8_t)rssi;
		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
		tap->wr_tsft = phy_info->system_timestamp;
		/* Map the firmware rate code to a radiotap rate (500kb/s units). */
		switch (phy_info->rate) {
		/* CCK rates. */
		case  10: tap->wr_rate =   2; break;
		case  20: tap->wr_rate =   4; break;
		case  55: tap->wr_rate =  11; break;
		case 110: tap->wr_rate =  22; break;
		/* OFDM rates. */
		case 0xd: tap->wr_rate =  12; break;
		case 0xf: tap->wr_rate =  18; break;
		case 0x5: tap->wr_rate =  24; break;
		case 0x7: tap->wr_rate =  36; break;
		case 0x9: tap->wr_rate =  48; break;
		case 0xb: tap->wr_rate =  72; break;
		case 0x1: tap->wr_rate =  96; break;
		case 0x3: tap->wr_rate = 108; break;
		/* Unknown rate: should not happen. */
		default:  tap->wr_rate =   0;
		}

		bpf_mtap2(sc->sc_drvbpf, tap, sc->sc_rxtap_len, m);
	}
	ieee80211_input(ic, m, ni, rssi, device_timestamp);
	ieee80211_free_node(ni);
}
   3199 
   3200 void
   3201 iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
   3202 	struct iwm_node *in)
   3203 {
   3204 	struct ieee80211com *ic = &sc->sc_ic;
   3205 	struct ifnet *ifp = IC2IFP(ic);
   3206 	struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
   3207 	int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
   3208 	int failack = tx_resp->failure_frame;
   3209 
   3210 	KASSERT(tx_resp->frame_count == 1);
   3211 
   3212 	/* Update rate control statistics. */
   3213 	in->in_amn.amn_txcnt++;
   3214 	if (failack > 0) {
   3215 		in->in_amn.amn_retrycnt++;
   3216 	}
   3217 
   3218 	if (status != IWM_TX_STATUS_SUCCESS &&
   3219 	    status != IWM_TX_STATUS_DIRECT_DONE)
   3220 		ifp->if_oerrors++;
   3221 	else
   3222 		ifp->if_opackets++;
   3223 }
   3224 
/*
 * Handle a TX-command completion notification: locate the originating
 * TX ring slot from the packet header's (qid, idx), update statistics,
 * release the slot's DMA map/mbuf/node, and restart transmission if the
 * ring drained below the low-water mark.
 */
void
iwm_mvm_rx_tx_cmd(struct iwm_softc *sc,
	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = IC2IFP(ic);
	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
	int idx = cmd_hdr->idx;
	int qid = cmd_hdr->qid;
	struct iwm_tx_ring *ring = &sc->txq[qid];
	struct iwm_tx_data *txd = &ring->data[idx];
	struct iwm_node *in = txd->in;

	/* Guard against duplicate completions for the same slot. */
	if (txd->done) {
		DPRINTF(("%s: got tx interrupt that's already been handled!\n",
		    DEVNAME(sc)));
		return;
	}

	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
	    BUS_DMASYNC_POSTREAD);

	/* A completion arrived, so the watchdog can stand down. */
	sc->sc_tx_timer = 0;

	iwm_mvm_rx_tx_cmd_single(sc, pkt, in);

	/* Unmap and free mbuf. */
	bus_dmamap_sync(sc->sc_dmat, txd->map, 0, txd->map->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, txd->map);
	m_freem(txd->m);

	DPRINTFN(8, ("free txd %p, in %p\n", txd, txd->in));
	KASSERT(txd->done == 0);
	txd->done = 1;
	KASSERT(txd->in);

	txd->m = NULL;
	txd->in = NULL;
	/* Drop the node reference taken when the frame was queued. */
	ieee80211_free_node(&in->in_ni);

	/* Ring drained below the low-water mark: allow output again. */
	if (--ring->queued < IWM_TX_RING_LOMARK) {
		sc->qfullmsk &= ~(1 << ring->qid);
		if (sc->qfullmsk == 0 && (ifp->if_flags & IFF_OACTIVE)) {
			ifp->if_flags &= ~IFF_OACTIVE;
			/*
			 * Well, we're in interrupt context, but then again
			 * I guess net80211 does all sorts of stunts in
			 * interrupt context, so maybe this is no biggie.
			 */
			(*ifp->if_start)(ifp);
		}
	}
}
   3279 
   3280 /*
   3281  * BEGIN iwlwifi/mvm/binding.c
   3282  */
   3283 
/*
 * Send an IWM_BINDING_CONTEXT_CMD that binds the node's MAC context to
 * its PHY context.  'action' is an IWM_FW_CTXT_ACTION_* value.
 *
 * Returns 0 on success, the send error, or EIO if the firmware reported
 * a non-zero status for the command.
 */
int
iwm_mvm_binding_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action)
{
	struct iwm_binding_cmd cmd;
	struct iwm_mvm_phy_ctxt *phyctxt = in->in_phyctxt;
	int i, ret;
	uint32_t status;

	memset(&cmd, 0, sizeof(cmd));

	/* The binding is identified by the PHY context's (id, color). */
	cmd.id_and_color
	    = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
	cmd.action = htole32(action);
	cmd.phy = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));

	/* One bound MAC; mark the remaining slots invalid. */
	cmd.macs[0] = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
	for (i = 1; i < IWM_MAX_MACS_IN_BINDING; i++)
		cmd.macs[i] = htole32(IWM_FW_CTXT_INVALID);

	status = 0;
	ret = iwm_mvm_send_cmd_pdu_status(sc, IWM_BINDING_CONTEXT_CMD,
	    sizeof(cmd), &cmd, &status);
	if (ret) {
		DPRINTF(("%s: Failed to send binding (action:%d): %d\n",
		    DEVNAME(sc), action, ret));
		return ret;
	}

	if (status) {
		DPRINTF(("%s: Binding command failed: %u\n", DEVNAME(sc),
		    status));
		ret = EIO;
	}

	return ret;
}
   3320 
/*
 * NOTE(review): the 'add' argument is ignored -- the binding command is
 * always sent with IWM_FW_CTXT_ACTION_ADD.  Presumably only adding a
 * binding is supported at this driver revision; confirm before relying
 * on this function for removal.
 */
int
iwm_mvm_binding_update(struct iwm_softc *sc, struct iwm_node *in, int add)
{
	return iwm_mvm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD);
}
   3326 
/*
 * Add a MAC/PHY binding for the node (convenience wrapper around
 * iwm_mvm_binding_update with the ADD action).
 */
int
iwm_mvm_binding_add_vif(struct iwm_softc *sc, struct iwm_node *in)
{
	return iwm_mvm_binding_update(sc, in, IWM_FW_CTXT_ACTION_ADD);
}
   3332 
   3333 /*
   3334  * END iwlwifi/mvm/binding.c
   3335  */
   3336 
   3337 /*
   3338  * BEGIN iwlwifi/mvm/phy-ctxt.c
   3339  */
   3340 
   3341 /*
   3342  * Construct the generic fields of the PHY context command
   3343  */
   3344 void
   3345 iwm_mvm_phy_ctxt_cmd_hdr(struct iwm_softc *sc, struct iwm_mvm_phy_ctxt *ctxt,
   3346 	struct iwm_phy_context_cmd *cmd, uint32_t action, uint32_t apply_time)
   3347 {
   3348 	memset(cmd, 0, sizeof(struct iwm_phy_context_cmd));
   3349 
   3350 	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(ctxt->id,
   3351 	    ctxt->color));
   3352 	cmd->action = htole32(action);
   3353 	cmd->apply_time = htole32(apply_time);
   3354 }
   3355 
   3356 /*
   3357  * Add the phy configuration to the PHY context command
   3358  */
   3359 void
   3360 iwm_mvm_phy_ctxt_cmd_data(struct iwm_softc *sc,
   3361 	struct iwm_phy_context_cmd *cmd, struct ieee80211_channel *chan,
   3362 	uint8_t chains_static, uint8_t chains_dynamic)
   3363 {
   3364 	struct ieee80211com *ic = &sc->sc_ic;
   3365 	uint8_t active_cnt, idle_cnt;
   3366 
   3367 	cmd->ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
   3368 	    IWM_PHY_BAND_24 : IWM_PHY_BAND_5;
   3369 
   3370 	cmd->ci.channel = ieee80211_chan2ieee(ic, chan);
   3371 	cmd->ci.width = IWM_PHY_VHT_CHANNEL_MODE20;
   3372 	cmd->ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
   3373 
   3374 	/* Set rx the chains */
   3375 	idle_cnt = chains_static;
   3376 	active_cnt = chains_dynamic;
   3377 
   3378 	cmd->rxchain_info = htole32(IWM_FW_VALID_RX_ANT(sc) <<
   3379 					IWM_PHY_RX_CHAIN_VALID_POS);
   3380 	cmd->rxchain_info |= htole32(idle_cnt << IWM_PHY_RX_CHAIN_CNT_POS);
   3381 	cmd->rxchain_info |= htole32(active_cnt <<
   3382 	    IWM_PHY_RX_CHAIN_MIMO_CNT_POS);
   3383 
   3384 	cmd->txchain_info = htole32(IWM_FW_VALID_TX_ANT(sc));
   3385 }
   3386 
   3387 /*
   3388  * Send a command
   3389  * only if something in the configuration changed: in case that this is the
   3390  * first time that the phy configuration is applied or in case that the phy
   3391  * configuration changed from the previous apply.
   3392  */
/*
 * Build and send a complete IWM_PHY_CONTEXT_CMD (header + data) with
 * the given action/apply time for the context's current channel and
 * chain configuration.  Returns 0 or the send error.
 */
int
iwm_mvm_phy_ctxt_apply(struct iwm_softc *sc,
	struct iwm_mvm_phy_ctxt *ctxt,
	uint8_t chains_static, uint8_t chains_dynamic,
	uint32_t action, uint32_t apply_time)
{
	struct iwm_phy_context_cmd cmd;
	int ret;

	/* Set the command header fields */
	iwm_mvm_phy_ctxt_cmd_hdr(sc, ctxt, &cmd, action, apply_time);

	/* Set the command data */
	iwm_mvm_phy_ctxt_cmd_data(sc, &cmd, ctxt->channel,
	    chains_static, chains_dynamic);

	ret = iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONTEXT_CMD, IWM_CMD_SYNC,
	    sizeof(struct iwm_phy_context_cmd), &cmd);
	if (ret) {
		DPRINTF(("PHY ctxt cmd error. ret=%d\n", ret));
	}
	return ret;
}
   3416 
   3417 /*
   3418  * Send a command to add a PHY context based on the current HW configuration.
   3419  */
/*
 * Send a command to add a PHY context based on the current HW
 * configuration; records 'chan' as the context's channel first.
 */
int
iwm_mvm_phy_ctxt_add(struct iwm_softc *sc, struct iwm_mvm_phy_ctxt *ctxt,
	struct ieee80211_channel *chan,
	uint8_t chains_static, uint8_t chains_dynamic)
{
	ctxt->channel = chan;
	return iwm_mvm_phy_ctxt_apply(sc, ctxt,
	    chains_static, chains_dynamic, IWM_FW_CTXT_ACTION_ADD, 0);
}
   3429 
   3430 /*
   3431  * Send a command to modify the PHY context based on the current HW
   3432  * configuration. Note that the function does not check that the configuration
   3433  * changed.
   3434  */
/*
 * Send a MODIFY command for an existing PHY context, updating its
 * channel to 'chan'.  No change detection is performed; the command is
 * always sent.
 */
int
iwm_mvm_phy_ctxt_changed(struct iwm_softc *sc,
	struct iwm_mvm_phy_ctxt *ctxt, struct ieee80211_channel *chan,
	uint8_t chains_static, uint8_t chains_dynamic)
{
	ctxt->channel = chan;
	return iwm_mvm_phy_ctxt_apply(sc, ctxt,
	    chains_static, chains_dynamic, IWM_FW_CTXT_ACTION_MODIFY, 0);
}
   3444 
   3445 /*
   3446  * END iwlwifi/mvm/phy-ctxt.c
   3447  */
   3448 
   3449 /*
   3450  * transmit side
   3451  */
   3452 
   3453 /*
   3454  * Send a command to the firmware.  We try to implement the Linux
   3455  * driver interface for the routine.
   3456  * mostly from if_iwn (iwn_cmd()).
   3457  *
   3458  * For now, we always copy the first part and map the second one (if it exists).
   3459  */
/*
 * Queue a host command on the command TX ring and kick the hardware.
 * Small payloads are copied into the ring's pre-mapped command slot;
 * payloads larger than the slot's data area get their own mbuf with
 * external storage, DMA-mapped ad hoc.
 *
 * Synchronous commands sleep on the TFD until the response interrupt
 * wakes them (or until timeout/signal); IWM_CMD_WANT_SKB callers
 * additionally claim sc_cmd_resp via sc_wantresp and must later release
 * it with iwm_free_resp().  Returns 0 or an errno (EBUSY, ENXIO,
 * ENOMEM, EINVAL, or a tsleep error).
 */
int
iwm_send_cmd(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
{
	struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
	struct iwm_tfd *desc;
	struct iwm_tx_data *data;
	struct iwm_device_cmd *cmd;
	struct mbuf *m;
	bus_addr_t paddr;
	uint32_t addr_lo;
	int error, i, paylen, off, s;
	int code;
	int async, wantresp;

	code = hcmd->id;
	async = hcmd->flags & IWM_CMD_ASYNC;
	wantresp = hcmd->flags & IWM_CMD_WANT_SKB;

	/* Total payload size across all data segments. */
	for (i = 0, paylen = 0; i < __arraycount(hcmd->len); i++) {
		paylen += hcmd->len[i];
	}

	/* if the command wants an answer, busy sc_cmd_resp */
	if (wantresp) {
		KASSERT(!async);
		/* -1 means free; wait until the previous claimant releases. */
		while (sc->sc_wantresp != -1)
			tsleep(&sc->sc_wantresp, 0, "iwmcmdsl", 0);
		sc->sc_wantresp = ring->qid << 16 | ring->cur;
		DPRINTFN(12, ("wantresp is %x\n", sc->sc_wantresp));
	}

	/*
	 * Is the hardware still available?  (after e.g. above wait).
	 */
	s = splnet();
	if (sc->sc_flags & IWM_FLAG_STOPPED) {
		error = ENXIO;
		goto out;
	}

	desc = &ring->desc[ring->cur];
	data = &ring->data[ring->cur];

	if (paylen > sizeof(cmd->data)) {
		/* Command is too large */
		if (sizeof(cmd->hdr) + paylen > IWM_RBUF_SIZE) {
			error = EINVAL;
			goto out;
		}
		/* Build the command in a dedicated, DMA-mapped mbuf. */
		m = m_gethdr(M_DONTWAIT, MT_DATA);
		if (m == NULL) {
			error = ENOMEM;
			goto out;
		}
		MEXTMALLOC(m, IWM_RBUF_SIZE, M_DONTWAIT);
		if (!(m->m_flags & M_EXT)) {
			m_freem(m);
			error = ENOMEM;
			goto out;
		}
		cmd = mtod(m, struct iwm_device_cmd *);
		/*
		 * NOTE(review): only hcmd->len[0] bytes are handed to
		 * bus_dmamap_load, yet all segments are copied into the
		 * buffer below -- presumably large commands use a single
		 * segment here; confirm.
		 */
		error = bus_dmamap_load(sc->sc_dmat, data->map, cmd,
		    hcmd->len[0], NULL, BUS_DMA_NOWAIT | BUS_DMA_WRITE);
		if (error != 0) {
			m_freem(m);
			goto out;
		}
		data->m = m;
		paddr = data->map->dm_segs[0].ds_addr;
	} else {
		/* Small command: use the ring's pre-mapped command slot. */
		cmd = &ring->cmd[ring->cur];
		paddr = data->cmd_paddr;
	}

	cmd->hdr.code = code;
	cmd->hdr.flags = 0;
	cmd->hdr.qid = ring->qid;
	cmd->hdr.idx = ring->cur;

	/* Concatenate all payload segments into the command body. */
	for (i = 0, off = 0; i < __arraycount(hcmd->data); i++) {
		if (hcmd->len[i] == 0)
			continue;
		memcpy(cmd->data + off, hcmd->data[i], hcmd->len[i]);
		off += hcmd->len[i];
	}
	KASSERT(off == paylen);

	/* lo field is not aligned */
	addr_lo = htole32((uint32_t)paddr);
	memcpy(&desc->tbs[0].lo, &addr_lo, sizeof(uint32_t));
	desc->tbs[0].hi_n_len  = htole16(iwm_get_dma_hi_addr(paddr)
	    | ((sizeof(cmd->hdr) + paylen) << 4));
	desc->num_tbs = 1;

	DPRINTFN(8, ("iwm_send_cmd 0x%x size=%lu %s\n",
	    code, hcmd->len[0] + hcmd->len[1] + sizeof(cmd->hdr),
	    async ? " (async)" : ""));

	/* Flush the command body and descriptor before the doorbell write. */
	if (hcmd->len[0] > sizeof(cmd->data)) {
		bus_dmamap_sync(sc->sc_dmat, data->map, 0, hcmd->len[0],
		    BUS_DMASYNC_PREWRITE);
	} else {
		bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
		    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
		    hcmd->len[0] + 4, BUS_DMASYNC_PREWRITE);
	}
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
	    sizeof (*desc), BUS_DMASYNC_PREWRITE);

	/* Wake the device's MAC clock before touching the ring pointer. */
	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	if (!iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
	    (IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
	     IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000)) {
		DPRINTF(("%s: acquiring device failed\n", DEVNAME(sc)));
		error = EBUSY;
		goto out;
	}

#if 0
	iwm_update_sched(sc, ring->qid, ring->cur, 0, 0);
#endif
	DPRINTF(("sending command 0x%x qid %d, idx %d\n",
	    code, ring->qid, ring->cur));

	/* Kick command ring. */
	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	if (!async) {
		/* m..m-mmyy-mmyyyy-mym-ym m-my generation */
		int generation = sc->sc_generation;
		error = tsleep(desc, PCATCH, "iwmcmd", hz);
		if (error == 0) {
			/* if hardware is no longer up, return error */
			if (generation != sc->sc_generation) {
				error = ENXIO;
			} else {
				hcmd->resp_pkt = (void *)sc->sc_cmd_resp;
			}
		}
	}
 out:
	/* On failure, release the sc_cmd_resp claim taken above. */
	if (wantresp && error != 0) {
		iwm_free_resp(sc, hcmd);
	}
	splx(s);

	return error;
}
   3612 
   3613 /* iwlwifi: mvm/utils.c */
   3614 int
   3615 iwm_mvm_send_cmd_pdu(struct iwm_softc *sc, uint8_t id,
   3616 	uint32_t flags, uint16_t len, const void *data)
   3617 {
   3618 	struct iwm_host_cmd cmd = {
   3619 		.id = id,
   3620 		.len = { len, },
   3621 		.data = { data, },
   3622 		.flags = flags,
   3623 	};
   3624 
   3625 	return iwm_send_cmd(sc, &cmd);
   3626 }
   3627 
   3628 /* iwlwifi: mvm/utils.c */
   3629 int
   3630 iwm_mvm_send_cmd_status(struct iwm_softc *sc,
   3631 	struct iwm_host_cmd *cmd, uint32_t *status)
   3632 {
   3633 	struct iwm_rx_packet *pkt;
   3634 	struct iwm_cmd_response *resp;
   3635 	int error, resp_len;
   3636 
   3637 	//lockdep_assert_held(&mvm->mutex);
   3638 
   3639 	KASSERT((cmd->flags & IWM_CMD_WANT_SKB) == 0);
   3640 	cmd->flags |= IWM_CMD_SYNC | IWM_CMD_WANT_SKB;
   3641 
   3642 	if ((error = iwm_send_cmd(sc, cmd)) != 0)
   3643 		return error;
   3644 	pkt = cmd->resp_pkt;
   3645 
   3646 	/* Can happen if RFKILL is asserted */
   3647 	if (!pkt) {
   3648 		error = 0;
   3649 		goto out_free_resp;
   3650 	}
   3651 
   3652 	if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
   3653 		error = EIO;
   3654 		goto out_free_resp;
   3655 	}
   3656 
   3657 	resp_len = iwm_rx_packet_payload_len(pkt);
   3658 	if (resp_len != sizeof(*resp)) {
   3659 		error = EIO;
   3660 		goto out_free_resp;
   3661 	}
   3662 
   3663 	resp = (void *)pkt->data;
   3664 	*status = le32toh(resp->status);
   3665  out_free_resp:
   3666 	iwm_free_resp(sc, cmd);
   3667 	return error;
   3668 }
   3669 
   3670 /* iwlwifi/mvm/utils.c */
   3671 int
   3672 iwm_mvm_send_cmd_pdu_status(struct iwm_softc *sc, uint8_t id,
   3673 	uint16_t len, const void *data, uint32_t *status)
   3674 {
   3675 	struct iwm_host_cmd cmd = {
   3676 		.id = id,
   3677 		.len = { len, },
   3678 		.data = { data, },
   3679 	};
   3680 
   3681 	return iwm_mvm_send_cmd_status(sc, &cmd, status);
   3682 }
   3683 
/*
 * Release the (single, shared) synchronous-command response buffer and
 * wake up any other thread queued in iwm_send_cmd() waiting to use it.
 * Must only be called while a SYNC+WANT_SKB command holds the buffer.
 */
void
iwm_free_resp(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
{
	/* Buffer must currently be owned (sc_wantresp records the slot). */
	KASSERT(sc->sc_wantresp != -1);
	KASSERT((hcmd->flags & (IWM_CMD_WANT_SKB|IWM_CMD_SYNC))
	    == (IWM_CMD_WANT_SKB|IWM_CMD_SYNC));
	/* Mark the buffer free, then wake waiters sleeping on sc_wantresp. */
	sc->sc_wantresp = -1;
	wakeup(&sc->sc_wantresp);
}
   3693 
   3694 /*
   3695  * Process a "command done" firmware notification.  This is where we wakeup
   3696  * processes waiting for a synchronous command completion.
   3697  * from if_iwn
   3698  */
   3699 void
   3700 iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
   3701 {
   3702 	struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
   3703 	struct iwm_tx_data *data;
   3704 
   3705 	if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
   3706 		return;	/* Not a command ack. */
   3707 	}
   3708 
   3709 	data = &ring->data[pkt->hdr.idx];
   3710 
   3711 	/* If the command was mapped in an mbuf, free it. */
   3712 	if (data->m != NULL) {
   3713 		bus_dmamap_sync(sc->sc_dmat, data->map, 0,
   3714 		    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
   3715 		bus_dmamap_unload(sc->sc_dmat, data->map);
   3716 		m_freem(data->m);
   3717 		data->m = NULL;
   3718 	}
   3719 	wakeup(&ring->desc[pkt->hdr.idx]);
   3720 }
   3721 
#if 0
/*
 * necessary only for block ack mode
 *
 * Mirror the byte count of the frame at (qid, idx) into the firmware's
 * TX scheduler byte-count table, duplicating low entries past the end
 * of the queue as the hardware requires.
 *
 * BUG FIX: the bus_dmamap_sync() offset computations referenced an
 * undeclared identifier `w`; this would not compile if the #if 0 were
 * ever removed.  Declare `w` as a pointer to the table slot and store
 * through it, so the sync offsets are derived from the slot actually
 * written.
 */
void
iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
	uint16_t len)
{
	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
	uint16_t *w, w_val;

	scd_bc_tbl = sc->sched_dma.vaddr;

	len += 8; /* magic numbers came naturally from paris */
	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
		len = roundup(len, 4) / 4;

	/* Scheduler entry: station id in the top nibble, length below. */
	w_val = htole16(sta_id << 12 | len);
	w = &scd_bc_tbl[qid].tfd_offset[idx];

	/* Update TX scheduler. */
	*w = w_val;
	bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
	    (char *)(void *)w - (char *)(void *)sc->sched_dma.vaddr,
	    sizeof(uint16_t), BUS_DMASYNC_PREWRITE);

	/* The hardware reads wrapped entries from a duplicated tail area. */
	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
		*(w + IWM_TFD_QUEUE_SIZE_MAX) = w_val;
		bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
		    (char *)(void *)(w + IWM_TFD_QUEUE_SIZE_MAX) -
		    (char *)(void *)sc->sched_dma.vaddr,
		    sizeof (uint16_t), BUS_DMASYNC_PREWRITE);
	}
}
#endif
   3757 
   3758 /*
   3759  * Fill in various bit for management frames, and leave them
   3760  * unfilled for data frames (firmware takes care of that).
   3761  * Return the selected TX rate.
   3762  */
   3763 const struct iwm_rate *
   3764 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
   3765 	struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
   3766 {
   3767 	const struct iwm_rate *rinfo;
   3768 	int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
   3769 	int ridx, rate_flags;
   3770 	int nrates = in->in_ni.ni_rates.rs_nrates;
   3771 
   3772 	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
   3773 	tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
   3774 
   3775 	/* for data frames, use RS table */
   3776 	if (type == IEEE80211_FC0_TYPE_DATA) {
   3777 		if (sc->sc_fixed_ridx != -1) {
   3778 			tx->initial_rate_index = sc->sc_fixed_ridx;
   3779 		} else {
   3780 			tx->initial_rate_index = (nrates-1) - in->in_ni.ni_txrate;
   3781 		}
   3782                 tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
   3783 		DPRINTFN(12, ("start with txrate %d\n", tx->initial_rate_index));
   3784 		return &iwm_rates[tx->initial_rate_index];
   3785 	}
   3786 
   3787 	/* for non-data, use the lowest supported rate */
   3788 	ridx = in->in_ridx[0];
   3789 	rinfo = &iwm_rates[ridx];
   3790 
   3791 	rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
   3792 	if (IWM_RIDX_IS_CCK(ridx))
   3793 		rate_flags |= IWM_RATE_MCS_CCK_MSK;
   3794 	tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
   3795 
   3796 	return rinfo;
   3797 }
   3798 
#define TB0_SIZE 16
/*
 * Queue an outgoing frame on TX ring `ac': build the firmware TX command
 * (header copied into the command, payload DMA-mapped from the mbuf),
 * fill the TFD descriptor, sync all DMA areas and advance the ring's
 * write pointer.  Returns 0 on success or an errno; consumes the mbuf
 * on error.
 */
int
iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (void *)ni;
	struct iwm_tx_ring *ring;
	struct iwm_tx_data *data;
	struct iwm_tfd *desc;
	struct iwm_device_cmd *cmd;
	struct iwm_tx_cmd *tx;
	struct ieee80211_frame *wh;
	struct ieee80211_key *k = NULL;
	struct mbuf *m1;
	const struct iwm_rate *rinfo;
	uint32_t flags;
	u_int hdrlen;
	bus_dma_segment_t *seg;
	uint8_t tid, type;
	int i, totlen, error, pad;
	int hdrlen2;

	wh = mtod(m, struct ieee80211_frame *);
	hdrlen = ieee80211_anyhdrsize(wh);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;

	/* Sanity cross-check of the computed 802.11 header length. */
	hdrlen2 = (ieee80211_has_qos(wh)) ?
	    sizeof (struct ieee80211_qosframe) :
	    sizeof (struct ieee80211_frame);

	if (hdrlen != hdrlen2)
		DPRINTF(("%s: hdrlen error (%d != %d)\n",
		    DEVNAME(sc), hdrlen, hdrlen2));

	/* No QoS/aggregation support yet: everything goes out on TID 0. */
	tid = 0;

	ring = &sc->txq[ac];
	desc = &ring->desc[ring->cur];
	memset(desc, 0, sizeof(*desc));
	data = &ring->data[ring->cur];

	/* Fill out iwm_tx_cmd to send to the firmware */
	cmd = &ring->cmd[ring->cur];
	cmd->hdr.code = IWM_TX_CMD;
	cmd->hdr.flags = 0;
	cmd->hdr.qid = ring->qid;
	cmd->hdr.idx = ring->cur;

	tx = (void *)cmd->data;
	memset(tx, 0, sizeof(*tx));

	/* Select TX rate and fill rate-related command fields. */
	rinfo = iwm_tx_fill_cmd(sc, in, wh, tx);

	/* Radiotap capture of the outgoing frame, if a listener is attached. */
	if (sc->sc_drvbpf != NULL) {
		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;

		tap->wt_flags = 0;
		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
		tap->wt_rate = rinfo->rate;
		tap->wt_hwqueue = ac;
		if (wh->i_fc[1] & IEEE80211_FC1_WEP)
			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;

		bpf_mtap2(sc->sc_drvbpf, tap, sc->sc_txtap_len, m);
	}

	/* Encrypt the frame if need be. */
	if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
		k = ieee80211_crypto_encap(ic, ni, m);
		if (k == NULL) {
			m_freem(m);
			return ENOBUFS;
		}
		/* Packet header may have moved, reset our local pointer. */
		wh = mtod(m, struct ieee80211_frame *);
	}
	totlen = m->m_pkthdr.len;

	flags = 0;
	/* Unicast frames expect an ACK from the peer. */
	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_ACK;
	}

	/* RTS/CTS protection for long non-data unicast frames. */
	if (type != IEEE80211_FC0_TYPE_DATA
	    && (totlen + IEEE80211_CRC_LEN > ic->ic_rtsthreshold)
	    && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
	}

	/* Multicast and non-data frames go via the auxiliary station. */
	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
	    type != IEEE80211_FC0_TYPE_DATA)
		tx->sta_id = sc->sc_aux_sta.sta_id;
	else
		tx->sta_id = IWM_STATION_ID;

	if (type == IEEE80211_FC0_TYPE_MGT) {
		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

		/* Association frames get a slightly longer PM timeout. */
		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
			tx->pm_frame_timeout = htole16(3);
		else
			tx->pm_frame_timeout = htole16(2);
	} else {
		tx->pm_frame_timeout = htole16(0);
	}

        if (hdrlen & 3) {
                /* First segment length must be a multiple of 4. */
                flags |= IWM_TX_CMD_FLG_MH_PAD;
                pad = 4 - (hdrlen & 3);
        } else
                pad = 0;

	tx->driver_txop = 0;
	tx->next_frame_len = 0;

	tx->len = htole16(totlen);
	tx->tid_tspec = tid;
	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);

	/* Set physical address of "scratch area". */
	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);

	/* Copy 802.11 header in TX command. */
	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);

	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;

	tx->sec_ctl = 0;
	tx->tx_flags |= htole32(flags);

	/* Trim 802.11 header. */
	m_adj(m, hdrlen);

	error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
	    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
	if (error != 0) {
		if (error != EFBIG) {
			aprint_error_dev(sc->sc_dev,
			    "can't map mbuf (error %d)\n", error);
			m_freem(m);
			return error;
		}
		/* Too many DMA segments, linearize mbuf. */
		MGETHDR(m1, M_DONTWAIT, MT_DATA);
		if (m1 == NULL) {
			m_freem(m);
			return ENOBUFS;
		}
		if (m->m_pkthdr.len > MHLEN) {
			MCLGET(m1, M_DONTWAIT);
			if (!(m1->m_flags & M_EXT)) {
				m_freem(m);
				m_freem(m1);
				return ENOBUFS;
			}
		}
		m_copydata(m, 0, m->m_pkthdr.len, mtod(m1, void *));
		m1->m_pkthdr.len = m1->m_len = m->m_pkthdr.len;
		m_freem(m);
		m = m1;

		/* Retry the mapping with the single contiguous mbuf. */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
		    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "can't map mbuf (error %d)\n", error);
			m_freem(m);
			return error;
		}
	}
	data->m = m;
	data->in = in;
	data->done = 0;

	DPRINTFN(8, ("sending txd %p, in %p\n", data, data->in));
	KASSERT(data->in != NULL);

	DPRINTFN(8, ("sending data: qid=%d idx=%d len=%d nsegs=%d\n",
	    ring->qid, ring->cur, totlen, data->map->dm_nsegs));

	/* Fill TX descriptor. */
	desc->num_tbs = 2 + data->map->dm_nsegs;

	/* TB0/TB1 cover the command header + TX command + 802.11 header. */
	desc->tbs[0].lo = htole32(data->cmd_paddr);
	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    (TB0_SIZE << 4);
	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
	      + hdrlen + pad - TB0_SIZE) << 4);

	/* Other DMA segments are for data payload. */
	seg = data->map->dm_segs;
	for (i = 0; i < data->map->dm_nsegs; i++, seg++) {
		desc->tbs[i+2].lo = htole32(seg->ds_addr);
		desc->tbs[i+2].hi_n_len = \
		    htole16(iwm_get_dma_hi_addr(seg->ds_addr))
		    | ((seg->ds_len) << 4);
	}

	/* Flush payload, command and descriptor before the doorbell write. */
	bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
	    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
	    sizeof (*cmd), BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
	    sizeof (*desc), BUS_DMASYNC_PREWRITE);

#if 0
	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
#endif

	/* Kick TX ring. */
	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	/* Mark TX ring as full if we reach a certain threshold. */
	if (++ring->queued > IWM_TX_RING_HIMARK) {
		sc->qfullmsk |= 1 << ring->qid;
	}

	return 0;
}
   4027 
#if 0
/* not necessary? */
/*
 * Ask the firmware to flush the TX FIFOs selected by tfd_msk,
 * synchronously or asynchronously depending on `sync'.
 */
int
iwm_mvm_flush_tx_path(struct iwm_softc *sc, int tfd_msk, int sync)
{
	struct iwm_tx_path_flush_cmd flush_cmd;
	int ret;

	memset(&flush_cmd, 0, sizeof(flush_cmd));
	flush_cmd.queues_ctl = htole32(tfd_msk);
	flush_cmd.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH);

	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH,
	    sync ? IWM_CMD_SYNC : IWM_CMD_ASYNC,
	    sizeof(flush_cmd), &flush_cmd);
	if (ret)
		aprint_error_dev(sc->sc_dev, "Flushing tx queue failed: %d\n",
		    ret);
	return ret;
}
#endif
   4048 
   4049 
   4050 /*
   4051  * BEGIN mvm/power.c
   4052  */
   4053 
   4054 #define IWM_POWER_KEEP_ALIVE_PERIOD_SEC    25
   4055 
   4056 int
   4057 iwm_mvm_beacon_filter_send_cmd(struct iwm_softc *sc,
   4058 	struct iwm_beacon_filter_cmd *cmd)
   4059 {
   4060 	int ret;
   4061 
   4062 	ret = iwm_mvm_send_cmd_pdu(sc, IWM_REPLY_BEACON_FILTERING_CMD,
   4063 	    IWM_CMD_SYNC, sizeof(struct iwm_beacon_filter_cmd), cmd);
   4064 
   4065 	if (!ret) {
   4066 		DPRINTF(("ba_enable_beacon_abort is: %d\n",
   4067 		    le32toh(cmd->ba_enable_beacon_abort)));
   4068 		DPRINTF(("ba_escape_timer is: %d\n",
   4069 		    le32toh(cmd->ba_escape_timer)));
   4070 		DPRINTF(("bf_debug_flag is: %d\n",
   4071 		    le32toh(cmd->bf_debug_flag)));
   4072 		DPRINTF(("bf_enable_beacon_filter is: %d\n",
   4073 		    le32toh(cmd->bf_enable_beacon_filter)));
   4074 		DPRINTF(("bf_energy_delta is: %d\n",
   4075 		    le32toh(cmd->bf_energy_delta)));
   4076 		DPRINTF(("bf_escape_timer is: %d\n",
   4077 		    le32toh(cmd->bf_escape_timer)));
   4078 		DPRINTF(("bf_roaming_energy_delta is: %d\n",
   4079 		    le32toh(cmd->bf_roaming_energy_delta)));
   4080 		DPRINTF(("bf_roaming_state is: %d\n",
   4081 		    le32toh(cmd->bf_roaming_state)));
   4082 		DPRINTF(("bf_temp_threshold is: %d\n",
   4083 		    le32toh(cmd->bf_temp_threshold)));
   4084 		DPRINTF(("bf_temp_fast_filter is: %d\n",
   4085 		    le32toh(cmd->bf_temp_fast_filter)));
   4086 		DPRINTF(("bf_temp_slow_filter is: %d\n",
   4087 		    le32toh(cmd->bf_temp_slow_filter)));
   4088 	}
   4089 	return ret;
   4090 }
   4091 
/*
 * Fold connection-quality-monitoring parameters into a beacon filter
 * command.  Currently this only propagates the driver's beacon-abort
 * enable state; `in' is unused but kept for parity with iwlwifi.
 */
void
iwm_mvm_beacon_filter_set_cqm_params(struct iwm_softc *sc,
	struct iwm_node *in, struct iwm_beacon_filter_cmd *cmd)
{
	cmd->ba_enable_beacon_abort = htole32(sc->sc_bf.ba_enabled);
}
   4098 
   4099 int
   4100 iwm_mvm_update_beacon_abort(struct iwm_softc *sc, struct iwm_node *in,
   4101 	int enable)
   4102 {
   4103 	struct iwm_beacon_filter_cmd cmd = {
   4104 		IWM_BF_CMD_CONFIG_DEFAULTS,
   4105 		.bf_enable_beacon_filter = htole32(1),
   4106 		.ba_enable_beacon_abort = htole32(enable),
   4107 	};
   4108 
   4109 	if (!sc->sc_bf.bf_enabled)
   4110 		return 0;
   4111 
   4112 	sc->sc_bf.ba_enabled = enable;
   4113 	iwm_mvm_beacon_filter_set_cqm_params(sc, in, &cmd);
   4114 	return iwm_mvm_beacon_filter_send_cmd(sc, &cmd);
   4115 }
   4116 
/*
 * Debug-log the contents of a MAC power command.  This driver always
 * runs with power management disabled (CAM scheme), so the function
 * returns after the "Disable power management" message; reaching the
 * code below it indicates a programming error, hence the KASSERT(0).
 */
void
iwm_mvm_power_log(struct iwm_softc *sc, struct iwm_mac_power_cmd *cmd)
{
	DPRINTF(("Sending power table command on mac id 0x%X for "
	    "power level %d, flags = 0x%X\n",
	    cmd->id_and_color, IWM_POWER_SCHEME_CAM, le16toh(cmd->flags)));
	DPRINTF(("Keep alive = %u sec\n", le16toh(cmd->keep_alive_seconds)));

	if (!(cmd->flags & htole16(IWM_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK))) {
		DPRINTF(("Disable power management\n"));
		return;
	}
	/* Power management enabled: unsupported by this driver. */
	KASSERT(0);

#if 0
	DPRINTF(mvm, "Rx timeout = %u usec\n",
			le32_to_cpu(cmd->rx_data_timeout));
	DPRINTF(mvm, "Tx timeout = %u usec\n",
			le32_to_cpu(cmd->tx_data_timeout));
	if (cmd->flags & cpu_to_le16(IWM_POWER_FLAGS_SKIP_OVER_DTIM_MSK))
		DPRINTF(mvm, "DTIM periods to skip = %u\n",
				cmd->skip_dtim_periods);
	if (cmd->flags & cpu_to_le16(IWM_POWER_FLAGS_LPRX_ENA_MSK))
		DPRINTF(mvm, "LP RX RSSI threshold = %u\n",
				cmd->lprx_rssi_threshold);
	if (cmd->flags & cpu_to_le16(IWM_POWER_FLAGS_ADVANCE_PM_ENA_MSK)) {
		DPRINTF(mvm, "uAPSD enabled\n");
		DPRINTF(mvm, "Rx timeout (uAPSD) = %u usec\n",
				le32_to_cpu(cmd->rx_data_timeout_uapsd));
		DPRINTF(mvm, "Tx timeout (uAPSD) = %u usec\n",
				le32_to_cpu(cmd->tx_data_timeout_uapsd));
		DPRINTF(mvm, "QNDP TID = %d\n", cmd->qndp_tid);
		DPRINTF(mvm, "ACs flags = 0x%x\n", cmd->uapsd_ac_flags);
		DPRINTF(mvm, "Max SP = %d\n", cmd->uapsd_max_sp);
	}
#endif
}
   4154 
   4155 void
   4156 iwm_mvm_power_build_cmd(struct iwm_softc *sc, struct iwm_node *in,
   4157 	struct iwm_mac_power_cmd *cmd)
   4158 {
   4159 	struct ieee80211com *ic = &sc->sc_ic;
   4160 	struct ieee80211_node *ni = &in->in_ni;
   4161 	int dtimper, dtimper_msec;
   4162 	int keep_alive;
   4163 
   4164 	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
   4165 	    in->in_color));
   4166 	dtimper = ic->ic_dtim_period ?: 1;
   4167 
   4168 	/*
   4169 	 * Regardless of power management state the driver must set
   4170 	 * keep alive period. FW will use it for sending keep alive NDPs
   4171 	 * immediately after association. Check that keep alive period
   4172 	 * is at least 3 * DTIM
   4173 	 */
   4174 	dtimper_msec = dtimper * ni->ni_intval;
   4175 	keep_alive
   4176 	    = MAX(3 * dtimper_msec, 1000 * IWM_POWER_KEEP_ALIVE_PERIOD_SEC);
   4177 	keep_alive = roundup(keep_alive, 1000) / 1000;
   4178 	cmd->keep_alive_seconds = htole16(keep_alive);
   4179 }
   4180 
   4181 int
   4182 iwm_mvm_power_mac_update_mode(struct iwm_softc *sc, struct iwm_node *in)
   4183 {
   4184 	int ret;
   4185 	int ba_enable;
   4186 	struct iwm_mac_power_cmd cmd;
   4187 
   4188 	memset(&cmd, 0, sizeof(cmd));
   4189 
   4190 	iwm_mvm_power_build_cmd(sc, in, &cmd);
   4191 	iwm_mvm_power_log(sc, &cmd);
   4192 
   4193 	if ((ret = iwm_mvm_send_cmd_pdu(sc, IWM_MAC_PM_POWER_TABLE,
   4194 	    IWM_CMD_SYNC, sizeof(cmd), &cmd)) != 0)
   4195 		return ret;
   4196 
   4197 	ba_enable = !!(cmd.flags &
   4198 	    htole16(IWM_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
   4199 	return iwm_mvm_update_beacon_abort(sc, in, ba_enable);
   4200 }
   4201 
   4202 int
   4203 iwm_mvm_power_update_device(struct iwm_softc *sc)
   4204 {
   4205 	struct iwm_device_power_cmd cmd = {
   4206 		.flags = htole16(IWM_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK),
   4207 	};
   4208 
   4209 	if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
   4210 		return 0;
   4211 
   4212 	cmd.flags |= htole16(IWM_DEVICE_POWER_FLAGS_CAM_MSK);
   4213 	DPRINTF(("Sending device power command with flags = 0x%X\n", cmd.flags));
   4214 
   4215 	return iwm_mvm_send_cmd_pdu(sc,
   4216 	    IWM_POWER_TABLE_CMD, IWM_CMD_SYNC, sizeof(cmd), &cmd);
   4217 }
   4218 
   4219 int
   4220 iwm_mvm_enable_beacon_filter(struct iwm_softc *sc, struct iwm_node *in)
   4221 {
   4222 	struct iwm_beacon_filter_cmd cmd = {
   4223 		IWM_BF_CMD_CONFIG_DEFAULTS,
   4224 		.bf_enable_beacon_filter = htole32(1),
   4225 	};
   4226 	int ret;
   4227 
   4228 	iwm_mvm_beacon_filter_set_cqm_params(sc, in, &cmd);
   4229 	ret = iwm_mvm_beacon_filter_send_cmd(sc, &cmd);
   4230 
   4231 	if (ret == 0)
   4232 		sc->sc_bf.bf_enabled = 1;
   4233 
   4234 	return ret;
   4235 }
   4236 
   4237 int
   4238 iwm_mvm_disable_beacon_filter(struct iwm_softc *sc, struct iwm_node *in)
   4239 {
   4240 	struct iwm_beacon_filter_cmd cmd;
   4241 	int ret;
   4242 
   4243 	memset(&cmd, 0, sizeof(cmd));
   4244 	if ((sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_BF_UPDATED) == 0)
   4245 		return 0;
   4246 
   4247 	ret = iwm_mvm_beacon_filter_send_cmd(sc, &cmd);
   4248 	if (ret == 0)
   4249 		sc->sc_bf.bf_enabled = 0;
   4250 
   4251 	return ret;
   4252 }
   4253 
#if 0
/*
 * Re-push the current beacon filter parameters, but only when
 * filtering is already enabled.
 */
int
iwm_mvm_update_beacon_filter(struct iwm_softc *sc, struct iwm_node *in)
{
	if (sc->sc_bf.bf_enabled == 0)
		return 0;

	return iwm_mvm_enable_beacon_filter(sc, in);
}
#endif
   4264 
   4265 /*
   4266  * END mvm/power.c
   4267  */
   4268 
   4269 /*
   4270  * BEGIN mvm/sta.c
   4271  */
   4272 
/*
 * Convert a v6 ADD_STA command to the older v5 wire layout for
 * firmware that lacks the STA_KEY_CMD capability: copy the fields the
 * two versions share, leaving the rest of v5 zeroed.  v6-only fields
 * are silently dropped.
 */
void
iwm_mvm_add_sta_cmd_v6_to_v5(struct iwm_mvm_add_sta_cmd_v6 *cmd_v6,
	struct iwm_mvm_add_sta_cmd_v5 *cmd_v5)
{
	memset(cmd_v5, 0, sizeof(*cmd_v5));

	cmd_v5->add_modify = cmd_v6->add_modify;
	cmd_v5->tid_disable_tx = cmd_v6->tid_disable_tx;
	cmd_v5->mac_id_n_color = cmd_v6->mac_id_n_color;
	memcpy(cmd_v5->addr, cmd_v6->addr, ETHER_ADDR_LEN);
	cmd_v5->sta_id = cmd_v6->sta_id;
	cmd_v5->modify_mask = cmd_v6->modify_mask;
	cmd_v5->station_flags = cmd_v6->station_flags;
	cmd_v5->station_flags_msk = cmd_v6->station_flags_msk;
	cmd_v5->add_immediate_ba_tid = cmd_v6->add_immediate_ba_tid;
	cmd_v5->remove_immediate_ba_tid = cmd_v6->remove_immediate_ba_tid;
	cmd_v5->add_immediate_ba_ssn = cmd_v6->add_immediate_ba_ssn;
	cmd_v5->sleep_tx_count = cmd_v6->sleep_tx_count;
	cmd_v5->sleep_state_flags = cmd_v6->sleep_state_flags;
	cmd_v5->assoc_id = cmd_v6->assoc_id;
	cmd_v5->beamform_flags = cmd_v6->beamform_flags;
	cmd_v5->tfd_queue_msk = cmd_v6->tfd_queue_msk;
}
   4296 
   4297 int
   4298 iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *sc,
   4299 	struct iwm_mvm_add_sta_cmd_v6 *cmd, int *status)
   4300 {
   4301 	struct iwm_mvm_add_sta_cmd_v5 cmd_v5;
   4302 
   4303 	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_STA_KEY_CMD) {
   4304 		return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA,
   4305 		    sizeof(*cmd), cmd, status);
   4306 	}
   4307 
   4308 	iwm_mvm_add_sta_cmd_v6_to_v5(cmd, &cmd_v5);
   4309 
   4310 	return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(cmd_v5),
   4311 	    &cmd_v5, status);
   4312 }
   4313 
   4314 /* send station add/update command to firmware */
   4315 int
   4316 iwm_mvm_sta_send_to_fw(struct iwm_softc *sc, struct iwm_node *in, int update)
   4317 {
   4318 	struct iwm_mvm_add_sta_cmd_v6 add_sta_cmd;
   4319 	int ret;
   4320 	uint32_t status;
   4321 
   4322 	memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
   4323 
   4324 	add_sta_cmd.sta_id = IWM_STATION_ID;
   4325 	add_sta_cmd.mac_id_n_color
   4326 	    = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
   4327 	if (!update) {
   4328 		add_sta_cmd.tfd_queue_msk = htole32(0xf);
   4329 		IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
   4330 	}
   4331 	add_sta_cmd.add_modify = update ? 1 : 0;
   4332 	add_sta_cmd.station_flags_msk
   4333 	    |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
   4334 
   4335 	status = IWM_ADD_STA_SUCCESS;
   4336 	ret = iwm_mvm_send_add_sta_cmd_status(sc, &add_sta_cmd, &status);
   4337 	if (ret)
   4338 		return ret;
   4339 
   4340 	switch (status) {
   4341 	case IWM_ADD_STA_SUCCESS:
   4342 		break;
   4343 	default:
   4344 		ret = EIO;
   4345 		DPRINTF(("IWM_ADD_STA failed\n"));
   4346 		break;
   4347 	}
   4348 
   4349 	return ret;
   4350 }
   4351 
/*
 * Register the (single) peer station with the firmware for the first
 * time (update == 0).
 */
int
iwm_mvm_add_sta(struct iwm_softc *sc, struct iwm_node *in)
{
	return iwm_mvm_sta_send_to_fw(sc, in, 0);
}
   4363 
/*
 * Modify an already-registered firmware station (update == 1).
 */
int
iwm_mvm_update_sta(struct iwm_softc *sc, struct iwm_node *in)
{
	return iwm_mvm_sta_send_to_fw(sc, in, 1);
}
   4369 
   4370 int
   4371 iwm_mvm_add_int_sta_common(struct iwm_softc *sc, struct iwm_int_sta *sta,
   4372 	const uint8_t *addr, uint16_t mac_id, uint16_t color)
   4373 {
   4374 	struct iwm_mvm_add_sta_cmd_v6 cmd;
   4375 	int ret;
   4376 	uint32_t status;
   4377 
   4378 	memset(&cmd, 0, sizeof(cmd));
   4379 	cmd.sta_id = sta->sta_id;
   4380 	cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(mac_id, color));
   4381 
   4382 	cmd.tfd_queue_msk = htole32(sta->tfd_queue_msk);
   4383 
   4384 	if (addr)
   4385 		memcpy(cmd.addr, addr, ETHER_ADDR_LEN);
   4386 
   4387 	ret = iwm_mvm_send_add_sta_cmd_status(sc, &cmd, &status);
   4388 	if (ret)
   4389 		return ret;
   4390 
   4391 	switch (status) {
   4392 	case IWM_ADD_STA_SUCCESS:
   4393 		DPRINTF(("Internal station added.\n"));
   4394 		return 0;
   4395 	default:
   4396 		DPRINTF(("%s: Add internal station failed, status=0x%x\n",
   4397 		    DEVNAME(sc), status));
   4398 		ret = EIO;
   4399 		break;
   4400 	}
   4401 	return ret;
   4402 }
   4403 
   4404 int
   4405 iwm_mvm_add_aux_sta(struct iwm_softc *sc)
   4406 {
   4407 	int ret;
   4408 
   4409 	sc->sc_aux_sta.sta_id = 3;
   4410 	sc->sc_aux_sta.tfd_queue_msk = 0;
   4411 
   4412 	ret = iwm_mvm_add_int_sta_common(sc,
   4413 	    &sc->sc_aux_sta, NULL, IWM_MAC_INDEX_AUX, 0);
   4414 
   4415 	if (ret)
   4416 		memset(&sc->sc_aux_sta, 0, sizeof(sc->sc_aux_sta));
   4417 	return ret;
   4418 }
   4419 
   4420 /*
   4421  * END mvm/sta.c
   4422  */
   4423 
   4424 /*
   4425  * BEGIN mvm/scan.c
   4426  */
   4427 
   4428 #define IWM_PLCP_QUIET_THRESH 1
   4429 #define IWM_ACTIVE_QUIET_TIME 10
   4430 #define LONG_OUT_TIME_PERIOD 600
   4431 #define SHORT_OUT_TIME_PERIOD 200
   4432 #define SUSPEND_TIME_PERIOD 100
   4433 
/*
 * Build the RX chain selection word for scan commands: force all
 * antennas the firmware reports valid, in both MIMO and non-MIMO
 * selection fields, with the driver-force bit set.  Returned value is
 * little-endian, ready for the command.
 */
uint16_t
iwm_mvm_scan_rx_chain(struct iwm_softc *sc)
{
	uint16_t rx_chain;
	uint8_t rx_ant;

	rx_ant = IWM_FW_VALID_RX_ANT(sc);
	rx_chain = rx_ant << IWM_PHY_RX_CHAIN_VALID_POS;
	rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
	rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_SEL_POS;
	rx_chain |= 0x1 << IWM_PHY_RX_CHAIN_DRIVER_FORCE_POS;
	return htole16(rx_chain);
}
   4447 
   4448 #define ieee80211_tu_to_usec(a) (1024*(a))
   4449 
   4450 uint32_t
   4451 iwm_mvm_scan_max_out_time(struct iwm_softc *sc, uint32_t flags, int is_assoc)
   4452 {
   4453 	if (!is_assoc)
   4454 		return 0;
   4455 	if (flags & 0x1)
   4456 		return htole32(ieee80211_tu_to_usec(SHORT_OUT_TIME_PERIOD));
   4457 	return htole32(ieee80211_tu_to_usec(LONG_OUT_TIME_PERIOD));
   4458 }
   4459 
/*
 * Time (usec, little-endian) to spend back on the operating channel
 * between off-channel scan bursts; zero (no suspend) when we are not
 * associated.
 */
uint32_t
iwm_mvm_scan_suspend_time(struct iwm_softc *sc, int is_assoc)
{
	if (!is_assoc)
		return 0;
	return htole32(ieee80211_tu_to_usec(SUSPEND_TIME_PERIOD));
}
   4467 
   4468 uint32_t
   4469 iwm_mvm_scan_rxon_flags(struct iwm_softc *sc, int flags)
   4470 {
   4471 	if (flags & IEEE80211_CHAN_2GHZ)
   4472 		return htole32(IWM_PHY_BAND_24);
   4473 	else
   4474 		return htole32(IWM_PHY_BAND_5);
   4475 }
   4476 
   4477 uint32_t
   4478 iwm_mvm_scan_rate_n_flags(struct iwm_softc *sc, int flags, int no_cck)
   4479 {
   4480 	uint32_t tx_ant;
   4481 	int i, ind;
   4482 
   4483 	for (i = 0, ind = sc->sc_scan_last_antenna;
   4484 	    i < IWM_RATE_MCS_ANT_NUM; i++) {
   4485 		ind = (ind + 1) % IWM_RATE_MCS_ANT_NUM;
   4486 		if (IWM_FW_VALID_TX_ANT(sc) & (1 << ind)) {
   4487 			sc->sc_scan_last_antenna = ind;
   4488 			break;
   4489 		}
   4490 	}
   4491 	tx_ant = (1 << sc->sc_scan_last_antenna) << IWM_RATE_MCS_ANT_POS;
   4492 
   4493 	if ((flags & IEEE80211_CHAN_2GHZ) && !no_cck)
   4494 		return htole32(IWM_RATE_1M_PLCP | IWM_RATE_MCS_CCK_MSK |
   4495 				   tx_ant);
   4496 	else
   4497 		return htole32(IWM_RATE_6M_PLCP | tx_ant);
   4498 }
   4499 
   4500 /*
   4501  * If req->n_ssids > 0, it means we should do an active scan.
   4502  * In case of active scan w/o directed scan, we receive a zero-length SSID
   4503  * just to notify that this scan is active and not passive.
   4504  * In order to notify the FW of the number of SSIDs we wish to scan (including
   4505  * the zero-length one), we need to set the corresponding bits in chan->type,
   4506  * one for each SSID, and set the active bit (first). If the first SSID is
   4507  * already included in the probe template, so we need to set only
   4508  * req->n_ssids - 1 bits in addition to the first bit.
   4509  */
   4510 uint16_t
   4511 iwm_mvm_get_active_dwell(struct iwm_softc *sc, int flags, int n_ssids)
   4512 {
   4513 	if (flags & IEEE80211_CHAN_2GHZ)
   4514 		return 30  + 3 * (n_ssids + 1);
   4515 	return 20  + 2 * (n_ssids + 1);
   4516 }
   4517 
   4518 uint16_t
   4519 iwm_mvm_get_passive_dwell(struct iwm_softc *sc, int flags)
   4520 {
   4521 	return (flags & IEEE80211_CHAN_2GHZ) ? 100 + 20 : 100 + 10;
   4522 }
   4523 
/*
 * Append one iwm_scan_channel entry, immediately after the embedded TX
 * command payload, for every ic channel whose flags match `flags'.
 * Returns the number of entries written.  `type' carries one bit per
 * SSID to probe (plus the wildcard bit unless basic_ssid); the active
 * bits are stripped again on passive-only channels.
 */
int
iwm_mvm_scan_fill_channels(struct iwm_softc *sc, struct iwm_scan_cmd *cmd,
	int flags, int n_ssids, int basic_ssid)
{
	struct ieee80211com *ic = &sc->sc_ic;
	uint16_t passive_dwell = iwm_mvm_get_passive_dwell(sc, flags);
	uint16_t active_dwell = iwm_mvm_get_active_dwell(sc, flags, n_ssids);
	/* Channel array begins right after the variable-length TX command. */
	struct iwm_scan_channel *chan = (struct iwm_scan_channel *)
		(cmd->data + le16toh(cmd->tx_cmd.len));
	int type = (1 << n_ssids) - 1;
	struct ieee80211_channel *c;
	int nchan;

	if (!basic_ssid)
		type |= (1 << n_ssids);

	/* NOTE(review): iteration starts at ic_channels[1] — presumably
	 * because net80211 does not use slot 0; confirm against ic setup. */
	for (nchan = 0, c = &ic->ic_channels[1];
	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX];
	    c++) {
		/* Channel must have every requested flag set. */
		if ((c->ic_flags & flags) != flags)
			continue;

		chan->channel = htole16(ieee80211_mhz2ieee(c->ic_freq, flags));
		chan->type = htole32(type);
		if (c->ic_flags & IEEE80211_CHAN_PASSIVE)
			chan->type &= htole32(~IWM_SCAN_CHANNEL_TYPE_ACTIVE);
		chan->active_dwell = htole16(active_dwell);
		chan->passive_dwell = htole16(passive_dwell);
		chan->iteration_count = htole16(1);
		chan++;
		nchan++;
	}
	if (nchan == 0)
		DPRINTF(("%s: NO CHANNEL!\n", DEVNAME(sc)));
	return nchan;
}
   4560 
   4561 /*
   4562  * Fill in probe request with the following parameters:
   4563  * TA is our vif HW address, which mac80211 ensures we have.
   4564  * Packet is broadcasted, so this is both SA and DA.
   4565  * The probe request IE is made out of two: first comes the most prioritized
   4566  * SSID if a directed scan is requested. Second comes whatever extra
   4567  * information was given to us as the scan request IE.
   4568  */
/*
 * Build a broadcast probe request frame into "frame", limited to "left"
 * bytes.  Returns the number of bytes written, or 0 if the mandatory
 * parts do not fit.  Extra IEs that do not fit are silently dropped.
 */
uint16_t
iwm_mvm_fill_probe_req(struct iwm_softc *sc, struct ieee80211_frame *frame,
	const uint8_t *ta, int n_ssids, const uint8_t *ssid, int ssid_len,
	const uint8_t *ie, int ie_len, int left)
{
	int len = 0;
	uint8_t *pos = NULL;

	/* Make sure there is enough space for the probe request,
	 * two mandatory IEs and the data */
	left -= sizeof(*frame);
	if (left < 0)
		return 0;

	/* 802.11 management header: probe request from "ta" to broadcast. */
	frame->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
	frame->i_fc[1] = IEEE80211_FC1_DIR_NODS;
	IEEE80211_ADDR_COPY(frame->i_addr1, etherbroadcastaddr);
	memcpy(frame->i_addr2, ta, ETHER_ADDR_LEN);
	IEEE80211_ADDR_COPY(frame->i_addr3, etherbroadcastaddr);

	len += sizeof(*frame);
	CTASSERT(sizeof(*frame) == 24);

	/* for passive scans, no need to fill anything */
	if (n_ssids == 0)
		return (uint16_t)len;

	/* points to the payload of the request */
	pos = (uint8_t *)frame + sizeof(*frame);

	/* fill in our SSID IE */
	left -= ssid_len + 2;
	if (left < 0)
		return 0;
	*pos++ = IEEE80211_ELEMID_SSID;
	*pos++ = ssid_len;
	if (ssid && ssid_len) { /* ssid_len may be == 0 even if ssid is valid */
		memcpy(pos, ssid, ssid_len);
		pos += ssid_len;
	}

	len += ssid_len + 2;

	/* Optional extra IEs: drop them entirely if they don't fit. */
	if (left < ie_len)
		return len;

	if (ie && ie_len) {
		memcpy(pos, ie, ie_len);
		len += ie_len;
	}

	return (uint16_t)len;
}
   4623 
/*
 * Build and send an IWM_SCAN_REQUEST_CMD to the firmware for the
 * band(s) selected by "flags".  n_ssids > 0 requests an active scan
 * for "ssid"; n_ssids == 0 requests a passive scan.  The command is
 * assembled in sc->sc_scan_cmd: fixed header, then the probe request
 * frame, then one entry per channel.  Returns 0 on success or EIO if
 * the firmware rejected the scan (typically no time-event slot free).
 */
int
iwm_mvm_scan_request(struct iwm_softc *sc, int flags,
	int n_ssids, uint8_t *ssid, int ssid_len)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_host_cmd hcmd = {
		.id = IWM_SCAN_REQUEST_CMD,
		.len = { 0, },
		.data = { sc->sc_scan_cmd, },
		.flags = IWM_CMD_SYNC,
		.dataflags = { IWM_HCMD_DFL_NOCOPY, },
	};
	struct iwm_scan_cmd *cmd = sc->sc_scan_cmd;
	int is_assoc = 0;
	int ret;
	uint32_t status;
	int basic_ssid = !(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_NO_BASIC_SSID);

	//lockdep_assert_held(&mvm->mutex);

	/* Remember which band(s) this scan covers; cleared on failure/end. */
	sc->sc_scanband = flags & (IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_5GHZ);

	DPRINTF(("Handling ieee80211 scan request\n"));
	memset(cmd, 0, sc->sc_scan_cmd_len);

	cmd->quiet_time = htole16(IWM_ACTIVE_QUIET_TIME);
	cmd->quiet_plcp_th = htole16(IWM_PLCP_QUIET_THRESH);
	cmd->rxchain_sel_flags = iwm_mvm_scan_rx_chain(sc);
	cmd->max_out_time = iwm_mvm_scan_max_out_time(sc, 0, is_assoc);
	cmd->suspend_time = iwm_mvm_scan_suspend_time(sc, is_assoc);
	cmd->rxon_flags = iwm_mvm_scan_rxon_flags(sc, flags);
	cmd->filter_flags = htole32(IWM_MAC_FILTER_ACCEPT_GRP |
	    IWM_MAC_FILTER_IN_BEACON);

	cmd->type = htole32(IWM_SCAN_TYPE_FORCED);
	cmd->repeats = htole32(1);

	/*
	 * If the user asked for passive scan, don't change to active scan if
	 * you see any activity on the channel - remain passive.
	 */
	if (n_ssids > 0) {
		cmd->passive2active = htole16(1);
		cmd->scan_flags |= IWM_SCAN_FLAGS_PASSIVE2ACTIVE;
#if 0
		if (basic_ssid) {
			ssid = req->ssids[0].ssid;
			ssid_len = req->ssids[0].ssid_len;
		}
#endif
	} else {
		cmd->passive2active = 0;
		cmd->scan_flags &= ~IWM_SCAN_FLAGS_PASSIVE2ACTIVE;
	}

	/* TX parameters for the probe request frames. */
	cmd->tx_cmd.tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
	    IWM_TX_CMD_FLG_BT_DIS);
	cmd->tx_cmd.sta_id = sc->sc_aux_sta.sta_id;
	cmd->tx_cmd.life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
	cmd->tx_cmd.rate_n_flags = iwm_mvm_scan_rate_n_flags(sc, flags, 1/*XXX*/);

	/* The probe request must be written before the channel list. */
	cmd->tx_cmd.len = htole16(iwm_mvm_fill_probe_req(sc,
			    (struct ieee80211_frame *)cmd->data,
			    ic->ic_myaddr, n_ssids, ssid, ssid_len,
			    NULL, 0, sc->sc_capa_max_probe_len));

	cmd->channel_count
	    = iwm_mvm_scan_fill_channels(sc, cmd, flags, n_ssids, basic_ssid);

	/* Total command length: header + probe request + channel entries. */
	cmd->len = htole16(sizeof(struct iwm_scan_cmd) +
		le16toh(cmd->tx_cmd.len) +
		(cmd->channel_count * sizeof(struct iwm_scan_channel)));
	hcmd.len[0] = le16toh(cmd->len);

	status = IWM_SCAN_RESPONSE_OK;
	ret = iwm_mvm_send_cmd_status(sc, &hcmd, &status);
	if (!ret && status == IWM_SCAN_RESPONSE_OK) {
		DPRINTF(("Scan request was sent successfully\n"));
	} else {
		/*
		 * If the scan failed, it usually means that the FW was unable
		 * to allocate the time events. Warn on it, but maybe we
		 * should try to send the command again with different params.
		 */
		sc->sc_scanband = 0;
		ret = EIO;
	}
	return ret;
}
   4713 
   4714 /*
   4715  * END mvm/scan.c
   4716  */
   4717 
   4718 /*
   4719  * BEGIN mvm/mac-ctxt.c
   4720  */
   4721 
   4722 void
   4723 iwm_mvm_ack_rates(struct iwm_softc *sc, struct iwm_node *in,
   4724 	int *cck_rates, int *ofdm_rates)
   4725 {
   4726 	int lowest_present_ofdm = 100;
   4727 	int lowest_present_cck = 100;
   4728 	uint8_t cck = 0;
   4729 	uint8_t ofdm = 0;
   4730 	int i;
   4731 
   4732 	for (i = 0; i <= IWM_LAST_CCK_RATE; i++) {
   4733 		cck |= (1 << i);
   4734 		if (lowest_present_cck > i)
   4735 			lowest_present_cck = i;
   4736 	}
   4737 	for (i = IWM_FIRST_OFDM_RATE; i <= IWM_LAST_NON_HT_RATE; i++) {
   4738 		int adj = i - IWM_FIRST_OFDM_RATE;
   4739 		ofdm |= (1 << adj);
   4740 		if (lowest_present_cck > adj)
   4741 			lowest_present_cck = adj;
   4742 	}
   4743 
   4744 	/*
   4745 	 * Now we've got the basic rates as bitmaps in the ofdm and cck
   4746 	 * variables. This isn't sufficient though, as there might not
   4747 	 * be all the right rates in the bitmap. E.g. if the only basic
   4748 	 * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
   4749 	 * and 6 Mbps because the 802.11-2007 standard says in 9.6:
   4750 	 *
   4751 	 *    [...] a STA responding to a received frame shall transmit
   4752 	 *    its Control Response frame [...] at the highest rate in the
   4753 	 *    BSSBasicRateSet parameter that is less than or equal to the
   4754 	 *    rate of the immediately previous frame in the frame exchange
   4755 	 *    sequence ([...]) and that is of the same modulation class
   4756 	 *    ([...]) as the received frame. If no rate contained in the
   4757 	 *    BSSBasicRateSet parameter meets these conditions, then the
   4758 	 *    control frame sent in response to a received frame shall be
   4759 	 *    transmitted at the highest mandatory rate of the PHY that is
   4760 	 *    less than or equal to the rate of the received frame, and
   4761 	 *    that is of the same modulation class as the received frame.
   4762 	 *
   4763 	 * As a consequence, we need to add all mandatory rates that are
   4764 	 * lower than all of the basic rates to these bitmaps.
   4765 	 */
   4766 
   4767 	if (IWM_RATE_24M_INDEX < lowest_present_ofdm)
   4768 		ofdm |= IWM_RATE_BIT_MSK(24) >> IWM_FIRST_OFDM_RATE;
   4769 	if (IWM_RATE_12M_INDEX < lowest_present_ofdm)
   4770 		ofdm |= IWM_RATE_BIT_MSK(12) >> IWM_FIRST_OFDM_RATE;
   4771 	/* 6M already there or needed so always add */
   4772 	ofdm |= IWM_RATE_BIT_MSK(6) >> IWM_FIRST_OFDM_RATE;
   4773 
   4774 	/*
   4775 	 * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
   4776 	 * Note, however:
   4777 	 *  - if no CCK rates are basic, it must be ERP since there must
   4778 	 *    be some basic rates at all, so they're OFDM => ERP PHY
   4779 	 *    (or we're in 5 GHz, and the cck bitmap will never be used)
   4780 	 *  - if 11M is a basic rate, it must be ERP as well, so add 5.5M
   4781 	 *  - if 5.5M is basic, 1M and 2M are mandatory
   4782 	 *  - if 2M is basic, 1M is mandatory
   4783 	 *  - if 1M is basic, that's the only valid ACK rate.
   4784 	 * As a consequence, it's not as complicated as it sounds, just add
   4785 	 * any lower rates to the ACK rate bitmap.
   4786 	 */
   4787 	if (IWM_RATE_11M_INDEX < lowest_present_cck)
   4788 		cck |= IWM_RATE_BIT_MSK(11) >> IWM_FIRST_CCK_RATE;
   4789 	if (IWM_RATE_5M_INDEX < lowest_present_cck)
   4790 		cck |= IWM_RATE_BIT_MSK(5) >> IWM_FIRST_CCK_RATE;
   4791 	if (IWM_RATE_2M_INDEX < lowest_present_cck)
   4792 		cck |= IWM_RATE_BIT_MSK(2) >> IWM_FIRST_CCK_RATE;
   4793 	/* 1M already there or needed so always add */
   4794 	cck |= IWM_RATE_BIT_MSK(1) >> IWM_FIRST_CCK_RATE;
   4795 
   4796 	*cck_rates = cck;
   4797 	*ofdm_rates = ofdm;
   4798 }
   4799 
/*
 * Fill the fields of a MAC context command that are common to all MAC
 * types: id/color, action, MAC and BSSID addresses, ACK rate bitmaps,
 * preamble/slot flags, per-AC EDCA parameters and filter flags.
 */
void
iwm_mvm_mac_ctxt_cmd_common(struct iwm_softc *sc, struct iwm_node *in,
	struct iwm_mac_ctx_cmd *cmd, uint32_t action)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = ic->ic_bss;
	int cck_ack_rates, ofdm_ack_rates;
	int i;

	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
	    in->in_color));
	cmd->action = htole32(action);

	/* This driver only operates as a BSS station. */
	cmd->mac_type = htole32(IWM_FW_MAC_TYPE_BSS_STA);
	cmd->tsf_id = htole32(in->in_tsfid);

	IEEE80211_ADDR_COPY(cmd->node_addr, ic->ic_myaddr);
	/* Only advertise a BSSID once we are associated. */
	if (in->in_assoc) {
		IEEE80211_ADDR_COPY(cmd->bssid_addr, ni->ni_bssid);
	} else {
		memset(cmd->bssid_addr, 0, sizeof(cmd->bssid_addr));
	}
	iwm_mvm_ack_rates(sc, in, &cck_ack_rates, &ofdm_ack_rates);
	cmd->cck_rates = htole32(cck_ack_rates);
	cmd->ofdm_rates = htole32(ofdm_ack_rates);

	cmd->cck_short_preamble
	    = htole32((ic->ic_flags & IEEE80211_F_SHPREAMBLE)
	      ? IWM_MAC_FLG_SHORT_PREAMBLE : 0);
	cmd->short_slot
	    = htole32((ic->ic_flags & IEEE80211_F_SHSLOT)
	      ? IWM_MAC_FLG_SHORT_SLOT : 0);

	/*
	 * Fixed EDCA parameters for each access category.
	 * NOTE(review): iterates IWM_AC_NUM+1 times — presumably cmd->ac[]
	 * has IWM_AC_NUM+1 entries (one per TX FIFO); confirm against the
	 * firmware structure definition.
	 */
	for (i = 0; i < IWM_AC_NUM+1; i++) {
		int txf = i;

		cmd->ac[txf].cw_min = htole16(0x0f);
		cmd->ac[txf].cw_max = htole16(0x3f);
		cmd->ac[txf].aifsn = 1;
		cmd->ac[txf].fifos_mask = (1 << txf);
		cmd->ac[txf].edca_txop = 0;
	}

	cmd->protection_flags |= htole32(IWM_MAC_PROT_FLG_TGG_PROTECT);
	cmd->protection_flags |= htole32(IWM_MAC_PROT_FLG_SELF_CTS_EN);

	cmd->filter_flags = htole32(IWM_MAC_FILTER_ACCEPT_GRP);
}
   4848 
   4849 int
   4850 iwm_mvm_mac_ctxt_send_cmd(struct iwm_softc *sc, struct iwm_mac_ctx_cmd *cmd)
   4851 {
   4852 	int ret = iwm_mvm_send_cmd_pdu(sc, IWM_MAC_CONTEXT_CMD, IWM_CMD_SYNC,
   4853 				       sizeof(*cmd), cmd);
   4854 	if (ret)
   4855 		DPRINTF(("%s: Failed to send MAC context (action:%d): %d\n",
   4856 		    DEVNAME(sc), le32toh(cmd->action), ret));
   4857 	return ret;
   4858 }
   4859 
   4860 /*
   4861  * Fill the specific data for mac context of type station or p2p client
   4862  */
/*
 * Fill the station-specific part of a MAC context command.  If we are
 * associated (and not forced off), tell the firmware when the next
 * DTIM TBTT will occur so it can synchronize power management; the
 * beacon interval and DTIM interval are always filled in.
 */
void
iwm_mvm_mac_ctxt_cmd_fill_sta(struct iwm_softc *sc, struct iwm_node *in,
	struct iwm_mac_data_sta *ctxt_sta, int force_assoc_off)
{
	struct ieee80211_node *ni = &in->in_ni;
	unsigned dtim_period, dtim_count;

	dtim_period = ni->ni_dtim_period;
	dtim_count = ni->ni_dtim_count;

	/* We need the dtim_period to set the MAC as associated */
	if (in->in_assoc && dtim_period && !force_assoc_off) {
		uint64_t tsf;
		uint32_t dtim_offs;

		/*
		 * The DTIM count counts down, so when it is N that means N
		 * more beacon intervals happen until the DTIM TBTT. Therefore
		 * add this to the current time. If that ends up being in the
		 * future, the firmware will handle it.
		 *
		 * Also note that the system_timestamp (which we get here as
		 * "sync_device_ts") and TSF timestamp aren't at exactly the
		 * same offset in the frame -- the TSF is at the first symbol
		 * of the TSF, the system timestamp is at signal acquisition
		 * time. This means there's an offset between them of at most
		 * a few hundred microseconds (24 * 8 bits + PLCP time gives
		 * 384us in the longest case), this is currently not relevant
		 * as the firmware wakes up around 2ms before the TBTT.
		 */
		dtim_offs = dtim_count * ni->ni_intval;
		/* convert TU to usecs */
		dtim_offs *= 1024;

		tsf = ni->ni_tstamp.tsf;

		ctxt_sta->dtim_tsf = htole64(tsf + dtim_offs);
		ctxt_sta->dtim_time = htole64(ni->ni_rstamp + dtim_offs);

		DPRINTF(("DTIM TBTT is 0x%llx/0x%x, offset %d\n",
		    (long long)le64toh(ctxt_sta->dtim_tsf),
		    le32toh(ctxt_sta->dtim_time), dtim_offs));

		ctxt_sta->is_assoc = htole32(1);
	} else {
		ctxt_sta->is_assoc = htole32(0);
	}

	/* Beacon/DTIM intervals plus firmware-style reciprocals. */
	ctxt_sta->bi = htole32(ni->ni_intval);
	ctxt_sta->bi_reciprocal = htole32(iwm_mvm_reciprocal(ni->ni_intval));
	ctxt_sta->dtim_interval = htole32(ni->ni_intval * dtim_period);
	ctxt_sta->dtim_reciprocal =
	    htole32(iwm_mvm_reciprocal(ni->ni_intval * dtim_period));

	/* 10 = CONN_MAX_LISTEN_INTERVAL */
	ctxt_sta->listen_interval = htole32(10);
	ctxt_sta->assoc_id = htole32(ni->ni_associd);
}
   4921 
   4922 int
   4923 iwm_mvm_mac_ctxt_cmd_station(struct iwm_softc *sc, struct iwm_node *in,
   4924 	uint32_t action)
   4925 {
   4926 	struct iwm_mac_ctx_cmd cmd;
   4927 
   4928 	memset(&cmd, 0, sizeof(cmd));
   4929 
   4930 	/* Fill the common data for all mac context types */
   4931 	iwm_mvm_mac_ctxt_cmd_common(sc, in, &cmd, action);
   4932 
   4933 	if (in->in_assoc)
   4934 		cmd.filter_flags |= htole32(IWM_MAC_FILTER_IN_BEACON);
   4935 	else
   4936 		cmd.filter_flags &= ~htole32(IWM_MAC_FILTER_IN_BEACON);
   4937 
   4938 	/* Fill the data specific for station mode */
   4939 	iwm_mvm_mac_ctxt_cmd_fill_sta(sc, in,
   4940 	    &cmd.sta, action == IWM_FW_CTXT_ACTION_ADD);
   4941 
   4942 	return iwm_mvm_mac_ctxt_send_cmd(sc, &cmd);
   4943 }
   4944 
   4945 int
   4946 iwm_mvm_mac_ctx_send(struct iwm_softc *sc, struct iwm_node *in, uint32_t action)
   4947 {
   4948 	return iwm_mvm_mac_ctxt_cmd_station(sc, in, action);
   4949 }
   4950 
   4951 int
   4952 iwm_mvm_mac_ctxt_add(struct iwm_softc *sc, struct iwm_node *in)
   4953 {
   4954 	int ret;
   4955 
   4956 	ret = iwm_mvm_mac_ctx_send(sc, in, IWM_FW_CTXT_ACTION_ADD);
   4957 	if (ret)
   4958 		return ret;
   4959 
   4960 	return 0;
   4961 }
   4962 
   4963 int
   4964 iwm_mvm_mac_ctxt_changed(struct iwm_softc *sc, struct iwm_node *in)
   4965 {
   4966 	return iwm_mvm_mac_ctx_send(sc, in, IWM_FW_CTXT_ACTION_MODIFY);
   4967 }
   4968 
#if 0
/*
 * Tell the firmware to tear down an uploaded MAC context.
 * Compiled out: iwm_release() resets the whole device instead (see the
 * comment there).  NOTE(review): "print(...)" below is not a kernel
 * function and would not compile if this block were re-enabled; it
 * should presumably be aprint_error_dev() or DPRINTF().
 */
int
iwm_mvm_mac_ctxt_remove(struct iwm_softc *sc, struct iwm_node *in)
{
	struct iwm_mac_ctx_cmd cmd;
	int ret;

	if (!in->in_uploaded) {
		print("%s: attempt to remove !uploaded node %p", DEVNAME(sc), in);
		return EIO;
	}

	memset(&cmd, 0, sizeof(cmd));

	cmd.id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
	    in->in_color));
	cmd.action = htole32(IWM_FW_CTXT_ACTION_REMOVE);

	ret = iwm_mvm_send_cmd_pdu(sc,
	    IWM_MAC_CONTEXT_CMD, IWM_CMD_SYNC, sizeof(cmd), &cmd);
	if (ret) {
		aprint_error_dev(sc->sc_dev,
		    "Failed to remove MAC context: %d\n", ret);
		return ret;
	}
	in->in_uploaded = 0;

	return 0;
}
#endif
   4999 
/* Consecutive missed beacons (since last RX) before we report a miss. */
#define IWM_MVM_MISSED_BEACONS_THRESHOLD 8

/*
 * Handle a missed-beacons notification from the firmware; if too many
 * consecutive beacons were missed since the last RX, tell the 802.11
 * layer so it can probe or roam.
 */
static void
iwm_mvm_rx_missed_beacons_notif(struct iwm_softc *sc,
	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
{
	struct iwm_missed_beacons_notif *mb = (void *)pkt->data;

	DPRINTF(("missed bcn mac_id=%u, consecutive=%u (%u, %u, %u)\n",
	    le32toh(mb->mac_id),
	    le32toh(mb->consec_missed_beacons),
	    le32toh(mb->consec_missed_beacons_since_last_rx),
	    le32toh(mb->num_recvd_beacons),
	    le32toh(mb->num_expected_beacons)));

	/*
	 * TODO: the threshold should be adjusted based on latency conditions,
	 * and/or in case of a CS flow on one of the other AP vifs.
	 */
	if (le32toh(mb->consec_missed_beacons_since_last_rx) >
	    IWM_MVM_MISSED_BEACONS_THRESHOLD)
		ieee80211_beacon_miss(&sc->sc_ic);
}
   5023 
   5024 /*
   5025  * END mvm/mac-ctxt.c
   5026  */
   5027 
   5028 /*
   5029  * BEGIN mvm/quota.c
   5030  */
   5031 
/*
 * Send an IWM_TIME_QUOTA_CMD dividing the firmware's scheduling
 * session (IWM_MVM_MAX_QUOTA fragments) equally among all bindings
 * that need air time.  With a single interface, "in" (or NULL) is the
 * only candidate, so at most one binding gets the whole quota.
 * Returns the error from the command layer.
 */
int
iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_node *in)
{
	struct iwm_time_quota_cmd cmd;
	int i, idx, ret, num_active_macs, quota, quota_rem;
	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
	int n_ifs[IWM_MAX_BINDINGS] = {0, };
	uint16_t id;

	memset(&cmd, 0, sizeof(cmd));

	/* currently, PHY ID == binding ID */
	if (in) {
		id = in->in_phyctxt->id;
		KASSERT(id < IWM_MAX_BINDINGS);
		colors[id] = in->in_phyctxt->color;

		if (1)
			n_ifs[id] = 1;
	}

	/*
	 * The FW's scheduling session consists of
	 * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
	 * equally between all the bindings that require quota
	 */
	num_active_macs = 0;
	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
		num_active_macs += n_ifs[i];
	}

	quota = 0;
	quota_rem = 0;
	if (num_active_macs) {
		quota = IWM_MVM_MAX_QUOTA / num_active_macs;
		quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
	}

	/* Emit one quota record per binding that has a valid color. */
	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
		if (colors[i] < 0)
			continue;

		cmd.quotas[idx].id_and_color =
			htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));

		if (n_ifs[i] <= 0) {
			cmd.quotas[idx].quota = htole32(0);
			cmd.quotas[idx].max_duration = htole32(0);
		} else {
			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
			cmd.quotas[idx].max_duration = htole32(0);
		}
		idx++;
	}

	/* Give the remainder of the session to the first binding */
	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);

	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
	    sizeof(cmd), &cmd);
	if (ret)
		DPRINTF(("%s: Failed to send quota: %d\n", DEVNAME(sc), ret));
	return ret;
}
   5097 
   5098 /*
   5099  * END mvm/quota.c
   5100  */
   5101 
   5102 /*
   5103  * aieee80211 routines
   5104  */
   5105 
   5106 /*
   5107  * Change to AUTH state in 80211 state machine.  Roughly matches what
   5108  * Linux does in bss_info_changed().
   5109  */
   5110 int
   5111 iwm_auth(struct iwm_softc *sc)
   5112 {
   5113 	struct ieee80211com *ic = &sc->sc_ic;
   5114 	struct iwm_node *in = (void *)ic->ic_bss;
   5115 	uint32_t duration;
   5116 	uint32_t min_duration;
   5117 	int error;
   5118 
   5119 	in->in_assoc = 0;
   5120 	if ((error = iwm_mvm_mac_ctxt_add(sc, in)) != 0) {
   5121 		DPRINTF(("%s: failed to add MAC\n", DEVNAME(sc)));
   5122 		return error;
   5123 	}
   5124 
   5125 	if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
   5126 	    in->in_ni.ni_chan, 1, 1)) != 0) {
   5127 		DPRINTF(("%s: failed add phy ctxt\n", DEVNAME(sc)));
   5128 		return error;
   5129 	}
   5130 	in->in_phyctxt = &sc->sc_phyctxt[0];
   5131 
   5132 	if ((error = iwm_mvm_binding_add_vif(sc, in)) != 0) {
   5133 		DPRINTF(("%s: binding cmd\n", DEVNAME(sc)));
   5134 		return error;
   5135 	}
   5136 
   5137 	if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
   5138 		DPRINTF(("%s: failed to add MAC\n", DEVNAME(sc)));
   5139 		return error;
   5140 	}
   5141 
   5142 	/* a bit superfluous? */
   5143 	while (sc->sc_auth_prot)
   5144 		tsleep(&sc->sc_auth_prot, 0, "iwmauth", 0);
   5145 	sc->sc_auth_prot = 1;
   5146 
   5147 	duration = min(IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS,
   5148 	    200 + in->in_ni.ni_intval);
   5149 	min_duration = min(IWM_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS,
   5150 	    100 + in->in_ni.ni_intval);
   5151 	iwm_mvm_protect_session(sc, in, duration, min_duration, 500);
   5152 
   5153 	while (sc->sc_auth_prot != 2) {
   5154 		/*
   5155 		 * well, meh, but if the kernel is sleeping for half a
   5156 		 * second, we have bigger problems
   5157 		 */
   5158 		if (sc->sc_auth_prot == 0) {
   5159 			DPRINTF(("%s: missed auth window!\n", DEVNAME(sc)));
   5160 			return ETIMEDOUT;
   5161 		} else if (sc->sc_auth_prot == -1) {
   5162 			DPRINTF(("%s: no time event, denied!\n", DEVNAME(sc)));
   5163 			sc->sc_auth_prot = 0;
   5164 			return EAUTH;
   5165 		}
   5166 		tsleep(&sc->sc_auth_prot, 0, "iwmau2", 0);
   5167 	}
   5168 
   5169 	return 0;
   5170 }
   5171 
   5172 int
   5173 iwm_assoc(struct iwm_softc *sc)
   5174 {
   5175 	struct ieee80211com *ic = &sc->sc_ic;
   5176 	struct iwm_node *in = (void *)ic->ic_bss;
   5177 	int error;
   5178 
   5179 	if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
   5180 		DPRINTF(("%s: failed to update STA\n", DEVNAME(sc)));
   5181 		return error;
   5182 	}
   5183 
   5184 	in->in_assoc = 1;
   5185 	if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
   5186 		DPRINTF(("%s: failed to update MAC\n", DEVNAME(sc)));
   5187 		return error;
   5188 	}
   5189 
   5190 	return 0;
   5191 }
   5192 
/*
 * Leave the RUN state by resetting and re-initializing the whole
 * device; see the comment below for why the orderly teardown path
 * is not used.  Always returns 0.
 */
int
iwm_release(struct iwm_softc *sc, struct iwm_node *in)
{
	/*
	 * Ok, so *technically* the proper set of calls for going
	 * from RUN back to SCAN is:
	 *
	 * iwm_mvm_power_mac_disable(sc, in);
	 * iwm_mvm_mac_ctxt_changed(sc, in);
	 * iwm_mvm_rm_sta(sc, in);
	 * iwm_mvm_update_quotas(sc, NULL);
	 * iwm_mvm_mac_ctxt_changed(sc, in);
	 * iwm_mvm_binding_remove_vif(sc, in);
	 * iwm_mvm_mac_ctxt_remove(sc, in);
	 *
	 * However, that freezes the device no matter which permutations
	 * and modifications are attempted.  Obviously, this driver is missing
	 * something since it works in the Linux driver, but figuring out what
	 * is missing is a little more complicated.  Now, since we're going
	 * back to nothing anyway, we'll just do a complete device reset.
	 */
	//iwm_mvm_flush_tx_path(sc, 0xf, 1);
	iwm_stop_device(sc);
	iwm_init_hw(sc);
	if (in)
		in->in_assoc = 0;
	return 0;

#if 0
	/*
	 * The orderly teardown path, kept for reference; unreachable.
	 * NOTE(review): iwm_mvm_rm_sta() is called twice below —
	 * presumably a leftover; resolve before re-enabling this path.
	 */
	int error;

	iwm_mvm_power_mac_disable(sc, in);

	if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
		aprint_error_dev(sc->sc_dev, "mac ctxt change fail 1 %d\n",
		    error);
		return error;
	}

	if ((error = iwm_mvm_rm_sta(sc, in)) != 0) {
		aprint_error_dev(sc->sc_dev, "sta remove fail %d\n", error);
		return error;
	}
	error = iwm_mvm_rm_sta(sc, in);
	in->in_assoc = 0;
	iwm_mvm_update_quotas(sc, NULL);
	if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
		aprint_error_dev(sc->sc_dev, "mac ctxt change fail 2 %d\n",
		    error);
		return error;
	}
	iwm_mvm_binding_remove_vif(sc, in);

	iwm_mvm_mac_ctxt_remove(sc, in);

	return error;
#endif
}
   5252 
   5253 
   5254 static struct ieee80211_node *
   5255 iwm_node_alloc(struct ieee80211_node_table *nt)
   5256 {
   5257 
   5258 	return kmem_zalloc(sizeof (struct iwm_node), KM_NOSLEEP | M_ZERO);
   5259 }
   5260 
   5261 void
   5262 iwm_calib_timeout(void *arg)
   5263 {
   5264 	struct iwm_softc *sc = arg;
   5265 	struct ieee80211com *ic = &sc->sc_ic;
   5266 	int s;
   5267 
   5268 	s = splnet();
   5269 	if (ic->ic_fixed_rate == -1
   5270 	    && ic->ic_opmode == IEEE80211_M_STA
   5271 	    && ic->ic_bss) {
   5272 		struct iwm_node *in = (void *)ic->ic_bss;
   5273 		ieee80211_amrr_choose(&sc->sc_amrr, &in->in_ni, &in->in_amn);
   5274 	}
   5275 	splx(s);
   5276 
   5277 	callout_schedule(&sc->sc_calib_to, hz/2);
   5278 }
   5279 
/*
 * Build the firmware link-quality (rate selection) command for a node:
 * map the node's 802.11 rate set to hardware rate indices, then fill
 * the LQ rate table highest-rate-first, cycling the TX antennas, and
 * initialize AMRR state.  The command itself is sent by the caller.
 */
void
iwm_setrates(struct iwm_node *in)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211com *ic = ni->ni_ic;
	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
	struct iwm_lq_cmd *lq = &in->in_lq;
	int nrates = ni->ni_rates.rs_nrates;
	int i, ridx, tab = 0;
	int txant = 0;

	if (nrates > __arraycount(lq->rs_table)) {
		DPRINTF(("%s: node supports %d rates, driver handles only "
		    "%zu\n", DEVNAME(sc), nrates, __arraycount(lq->rs_table)));
		return;
	}

	/* first figure out which rates we should support */
	memset(&in->in_ridx, -1, sizeof(in->in_ridx));
	for (i = 0; i < nrates; i++) {
		int rate = ni->ni_rates.rs_rates[i] & IEEE80211_RATE_VAL;

		/* Map 802.11 rate to HW rate index. */
		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
			if (iwm_rates[ridx].rate == rate)
				break;
		if (ridx > IWM_RIDX_MAX)
			/* Unknown rate: in_ridx[i] stays -1 from the memset. */
			DPRINTF(("%s: WARNING: device rate for %d not found!\n",
			    DEVNAME(sc), rate));
		else
			in->in_ridx[i] = ridx;
	}

	/* then construct a lq_cmd based on those */
	memset(lq, 0, sizeof(*lq));
	lq->sta_id = IWM_STATION_ID;

	/*
	 * are these used? (we don't do SISO or MIMO)
	 * need to set them to non-zero, though, or we get an error.
	 */
	lq->single_stream_ant_msk = 1;
	lq->dual_stream_ant_msk = 1;

	/*
	 * Build the actual rate selection table.
	 * The lowest bits are the rates.  Additionally,
	 * CCK needs bit 9 to be set.  The rest of the bits
	 * we add to the table select the tx antenna
	 * Note that we add the rates in the highest rate first
	 * (opposite of ni_rates).
	 */
	for (i = 0; i < nrates; i++) {
		int nextant;

		/* Rotate through the valid TX antennas, one per entry. */
		if (txant == 0)
			txant = IWM_FW_VALID_TX_ANT(sc);
		nextant = 1<<(ffs(txant)-1);
		txant &= ~nextant;

		ridx = in->in_ridx[(nrates-1)-i];
		tab = iwm_rates[ridx].plcp;
		tab |= nextant << IWM_RATE_MCS_ANT_POS;
		if (IWM_RIDX_IS_CCK(ridx))
			tab |= IWM_RATE_MCS_CCK_MSK;
		DPRINTFN(2, ("station rate %d %x\n", i, tab));
		lq->rs_table[i] = htole32(tab);
	}
	/* then fill the rest with the lowest possible rate */
	for (i = nrates; i < __arraycount(lq->rs_table); i++) {
		KASSERT(tab != 0);
		lq->rs_table[i] = htole32(tab);
	}

	/* init amrr */
	ieee80211_amrr_node_init(&sc->sc_amrr, &in->in_amn);
	ni->ni_txrate = nrates-1;
}
   5358 
   5359 int
   5360 iwm_media_change(struct ifnet *ifp)
   5361 {
   5362 	struct iwm_softc *sc = ifp->if_softc;
   5363 	struct ieee80211com *ic = &sc->sc_ic;
   5364 	uint8_t rate, ridx;
   5365 	int error;
   5366 
   5367 	error = ieee80211_media_change(ifp);
   5368 	if (error != ENETRESET)
   5369 		return error;
   5370 
   5371 	if (ic->ic_fixed_rate != -1) {
   5372 		rate = ic->ic_sup_rates[ic->ic_curmode].
   5373 		    rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL;
   5374 		/* Map 802.11 rate to HW rate index. */
   5375 		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
   5376 			if (iwm_rates[ridx].rate == rate)
   5377 				break;
   5378 		sc->sc_fixed_ridx = ridx;
   5379 	}
   5380 
   5381 	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
   5382 	    (IFF_UP | IFF_RUNNING)) {
   5383 		iwm_stop(ifp, 0);
   5384 		error = iwm_init(ifp);
   5385 	}
   5386 	return error;
   5387 }
   5388 
/*
 * Worker for iwm_newstate(): perform the requested 802.11 state
 * transition in thread (workqueue) context.  The request structure
 * was allocated by iwm_newstate() and is freed here.
 */
void
iwm_newstate_cb(void *wk)
{
	struct iwm_newstate_state *iwmns = (void *)wk;
	struct ieee80211com *ic = iwmns->ns_ic;
	enum ieee80211_state nstate = iwmns->ns_nstate;
	int generation = iwmns->ns_generation;
	struct iwm_node *in;
	int arg = iwmns->ns_arg;
	struct ifnet *ifp = IC2IFP(ic);
	struct iwm_softc *sc = ifp->if_softc;
	int error;

	/* Request snapshotted above; release it before doing any work. */
	kmem_free(iwmns, sizeof(*iwmns));

	DPRINTF(("Prepare to switch state %d->%d\n", ic->ic_state, nstate));
	/*
	 * sc_generation is bumped by iwm_init()/iwm_stop(); a mismatch
	 * means this request is stale.  Only an INIT transition is still
	 * forwarded, so the net80211 layer sees the shutdown.
	 */
	if (sc->sc_generation != generation) {
		DPRINTF(("newstate_cb: someone pulled the plug meanwhile\n"));
		if (nstate == IEEE80211_S_INIT) {
			DPRINTF(("newstate_cb: nstate == IEEE80211_S_INIT: calling sc_newstate()\n"));
			sc->sc_newstate(ic, nstate, arg);
		}
		return;
	}

	DPRINTF(("switching state %d->%d\n", ic->ic_state, nstate));

	/* disable beacon filtering if we're hopping out of RUN */
	if (ic->ic_state == IEEE80211_S_RUN && nstate != ic->ic_state) {
		iwm_mvm_disable_beacon_filter(sc, (void *)ic->ic_bss);

		if (((in = (void *)ic->ic_bss) != NULL))
			in->in_assoc = 0;
		/* Tears down the station/binding; card is reinitialized. */
		iwm_release(sc, NULL);

		/*
		 * It's impossible to directly go RUN->SCAN. If we iwm_release()
		 * above then the card will be completely reinitialized,
		 * so the driver must do everything necessary to bring the card
		 * from INIT to SCAN.
		 *
		 * Additionally, upon receiving deauth frame from AP,
		 * OpenBSD 802.11 stack puts the driver in IEEE80211_S_AUTH
		 * state. This will also fail with this driver, so bring the FSM
		 * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
		 */
		if (nstate == IEEE80211_S_SCAN ||
		    nstate == IEEE80211_S_AUTH ||
		    nstate == IEEE80211_S_ASSOC) {
			DPRINTF(("Force transition to INIT; MGT=%d\n", arg));
			sc->sc_newstate(ic, IEEE80211_S_INIT, arg);
			DPRINTF(("Going INIT->SCAN\n"));
			nstate = IEEE80211_S_SCAN;
		}
	}

	switch (nstate) {
	case IEEE80211_S_INIT:
		sc->sc_scanband = 0;
		break;

	case IEEE80211_S_SCAN:
		/* Nonzero sc_scanband means a scan is already in flight. */
		if (sc->sc_scanband)
			break;

		if ((error = iwm_mvm_scan_request(sc, IEEE80211_CHAN_2GHZ,
		    ic->ic_des_esslen != 0,
		    ic->ic_des_essid, ic->ic_des_esslen)) != 0) {
                        DPRINTF(("%s: could not initiate scan\n", DEVNAME(sc)));
			return;
		}
		/*
		 * Scan completion arrives asynchronously (see
		 * iwm_endscan_cb); update ic_state here and do NOT call
		 * sc_newstate below.
		 */
		ic->ic_state = nstate;
		return;

	case IEEE80211_S_AUTH:
		if ((error = iwm_auth(sc)) != 0) {
			DPRINTF(("%s: could not move to auth state: %d\n",
			    DEVNAME(sc), error));
			return;
		}

		break;

	case IEEE80211_S_ASSOC:
		if ((error = iwm_assoc(sc)) != 0) {
			DPRINTF(("%s: failed to associate: %d\n", DEVNAME(sc),
			    error));
			return;
		}
		break;

	case IEEE80211_S_RUN: {
		/* Push link-quality (rate) settings to the firmware. */
		struct iwm_host_cmd cmd = {
			.id = IWM_LQ_CMD,
			.len = { sizeof(in->in_lq), },
			.flags = IWM_CMD_SYNC,
		};

		in = (struct iwm_node *)ic->ic_bss;
		iwm_mvm_power_mac_update_mode(sc, in);
		iwm_mvm_enable_beacon_filter(sc, in);
		iwm_mvm_update_quotas(sc, in);
		iwm_setrates(in);

		cmd.data[0] = &in->in_lq;
		if ((error = iwm_send_cmd(sc, &cmd)) != 0) {
			DPRINTF(("%s: IWM_LQ_CMD failed\n", DEVNAME(sc)));
		}

		/* Kick off periodic calibration while in RUN. */
		callout_schedule(&sc->sc_calib_to, hz/2);

		break; }

	default:
		break;
	}

	/* Let the net80211 layer complete the transition. */
	sc->sc_newstate(ic, nstate, arg);
}
   5508 
   5509 int
   5510 iwm_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
   5511 {
   5512 	struct iwm_newstate_state *iwmns;
   5513 	struct ifnet *ifp = IC2IFP(ic);
   5514 	struct iwm_softc *sc = ifp->if_softc;
   5515 
   5516 	callout_stop(&sc->sc_calib_to);
   5517 
   5518 	iwmns = kmem_alloc(sizeof(*iwmns), KM_NOSLEEP);
   5519 	if (!iwmns) {
   5520 		DPRINTF(("%s: allocating state cb mem failed\n", DEVNAME(sc)));
   5521 		return ENOMEM;
   5522 	}
   5523 
   5524 	iwmns->ns_ic = ic;
   5525 	iwmns->ns_nstate = nstate;
   5526 	iwmns->ns_arg = arg;
   5527 	iwmns->ns_generation = sc->sc_generation;
   5528 
   5529 	workqueue_enqueue(sc->sc_nswq, &iwmns->ns_wk, NULL);
   5530 
   5531 	return 0;
   5532 }
   5533 
/*
 * Workqueue callback run when the firmware reports scan completion.
 * After the 2GHz pass, a 5GHz pass is started (unless IWM_NO_5GHZ);
 * when all passes are done, the scan is handed back to net80211.
 */
void
iwm_endscan_cb(void *arg)
{
	struct iwm_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	int done;

	DPRINTF(("scan ended\n"));

	if (sc->sc_scanband == IEEE80211_CHAN_2GHZ) {
#ifndef IWM_NO_5GHZ
		int error;
		done = 0;
		/* Chain the 5GHz pass; failure ends the scan early. */
		if ((error = iwm_mvm_scan_request(sc,
		    IEEE80211_CHAN_5GHZ, ic->ic_des_esslen != 0,
		    ic->ic_des_essid, ic->ic_des_esslen)) != 0) {
			DPRINTF(("%s: could not initiate scan\n", DEVNAME(sc)));
			done = 1;
		}
#else
		done = 1;
#endif
	} else {
		done = 1;
	}

	if (done) {
		/*
		 * sc_scanband == 0 means the scan state was already torn
		 * down (e.g. by iwm_stop); cancel instead of completing.
		 */
		if (!sc->sc_scanband) {
			ieee80211_cancel_scan(ic);
		} else {
			ieee80211_end_scan(ic);
		}
		sc->sc_scanband = 0;
	}
}
   5569 
/*
 * Full hardware bring-up: run the INIT firmware image, restart the
 * device, load the regular image, then configure antennas, PHY,
 * stations, PHY contexts, power and TX queues.  The sequence order
 * follows the firmware's expectations; do not reorder.
 */
int
iwm_init_hw(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	int error, i, qid;

	if ((error = iwm_preinit(sc)) != 0)
		return error;

	if ((error = iwm_start_hw(sc)) != 0)
		return error;

	/* Boot the INIT ucode image and run calibration. */
	if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
		return error;
	}

	/*
	 * should stop and start HW since that INIT
	 * image just loaded
	 */
	iwm_stop_device(sc);
	if ((error = iwm_start_hw(sc)) != 0) {
		aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
		return error;
	}

	/* omstart, this time with the regular firmware */
	error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_REGULAR);
	if (error) {
		aprint_error_dev(sc->sc_dev, "could not load firmware\n");
		goto error;
	}

        if ((error = iwm_send_tx_ant_cfg(sc, IWM_FW_VALID_TX_ANT(sc))) != 0)
                goto error;

        /* Send phy db control command and then phy db calibration*/
        if ((error = iwm_send_phy_db_data(sc)) != 0)
                goto error;

        if ((error = iwm_send_phy_cfg_cmd(sc)) != 0)
                goto error;

	/* Add auxiliary station for scanning */
	if ((error = iwm_mvm_add_aux_sta(sc)) != 0)
		goto error;

	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
		/*
		 * The channel used here isn't relevant as it's
		 * going to be overwritten in the other flows.
		 * For now use the first channel we have.
		 */
		if ((error = iwm_mvm_phy_ctxt_add(sc,
		    &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
			goto error;
	}

        error = iwm_mvm_power_update_device(sc);
        if (error)
                goto error;

	/* Mark TX rings as active. */
	for (qid = 0; qid < 4; qid++) {
		iwm_enable_txq(sc, qid, qid);
	}

	return 0;

 error:
	/* Any failure after firmware load: power the device back down. */
	iwm_stop_device(sc);
	return error;
}
   5643 
   5644 /*
   5645  * ifnet interfaces
   5646  */
   5647 
   5648 int
   5649 iwm_init(struct ifnet *ifp)
   5650 {
   5651 	struct iwm_softc *sc = ifp->if_softc;
   5652 	int error;
   5653 
   5654 	if (sc->sc_flags & IWM_FLAG_HW_INITED) {
   5655 		return 0;
   5656 	}
   5657 	sc->sc_generation++;
   5658 	sc->sc_flags &= ~IWM_FLAG_STOPPED;
   5659 
   5660 	if ((error = iwm_init_hw(sc)) != 0) {
   5661 		iwm_stop(ifp, 1);
   5662 		return error;
   5663 	}
   5664 
   5665 	/*
   5666  	 * Ok, firmware loaded and we are jogging
   5667 	 */
   5668 
   5669 	ifp->if_flags &= ~IFF_OACTIVE;
   5670 	ifp->if_flags |= IFF_RUNNING;
   5671 
   5672 	ieee80211_begin_scan(&sc->sc_ic, 0);
   5673 	sc->sc_flags |= IWM_FLAG_HW_INITED;
   5674 
   5675 	return 0;
   5676 }
   5677 
   5678 /*
   5679  * Dequeue packets from sendq and call send.
   5680  * mostly from iwn
   5681  */
/*
 * Dequeue packets from sendq and call send.
 * mostly from iwn
 *
 * ifnet start hook: drain the management queue and if_snd, encapsulate
 * each frame and hand it to iwm_tx().  On any per-packet failure the
 * mbuf/node are released and the loop continues with the next packet.
 */
void
iwm_start(struct ifnet *ifp)
{
	struct iwm_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni;
        struct ether_header *eh;
	struct mbuf *m;
	int ac;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		/* why isn't this done per-queue? */
		if (sc->qfullmsk != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* need to send management frames even if we're not RUNning */
		IF_DEQUEUE(&ic->ic_mgtq, m);
		if (m) {
			/* net80211 stashes the node pointer in rcvif */
			ni = (void *)m->m_pkthdr.rcvif;
			ac = 0;
			goto sendit;
		}
		if (ic->ic_state != IEEE80211_S_RUN) {
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (!m)
			break;
                if (m->m_len < sizeof (*eh) &&
                    (m = m_pullup(m, sizeof (*eh))) == NULL) {
                        ifp->if_oerrors++;
                        continue;
                }
		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp, m);

		eh = mtod(m, struct ether_header *);
		ni = ieee80211_find_txnode(ic, eh->ether_dhost);
		if (ni == NULL) {
			m_freem(m);
			ifp->if_oerrors++;
			continue;
		}
		/* classify mbuf so we can find which tx ring to use */
		if (ieee80211_classify(ic, m, ni) != 0) {
			m_freem(m);
			ieee80211_free_node(ni);
			ifp->if_oerrors++;
			continue;
		}

		/* No QoS encapsulation for EAPOL frames. */
		ac = (eh->ether_type != htons(ETHERTYPE_PAE)) ?
		    M_WME_GETAC(m) : WME_AC_BE;

		/* ieee80211_encap() consumes the mbuf on failure. */
		if ((m = ieee80211_encap(ic, m, ni)) == NULL) {
			ieee80211_free_node(ni);
			ifp->if_oerrors++;
			continue;
		}

 sendit:
		if (ic->ic_rawbpf != NULL)
			bpf_mtap3(ic->ic_rawbpf, m);
		if (iwm_tx(sc, m, ni, ac) != 0) {
			ieee80211_free_node(ni);
			ifp->if_oerrors++;
			continue;
		}

		/* Arm the TX watchdog (see iwm_watchdog). */
		if (ifp->if_flags & IFF_UP) {
			sc->sc_tx_timer = 15;
			ifp->if_timer = 1;
		}
	}

	return;
}
   5766 
/*
 * ifnet stop hook: tear the interface down.  Bumps sc_generation so
 * queued newstate work becomes stale, drops net80211 back to INIT,
 * and powers the device off.  'disable' is currently unused here.
 */
void
iwm_stop(struct ifnet *ifp, int disable)
{
	struct iwm_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;

	sc->sc_flags &= ~IWM_FLAG_HW_INITED;
	sc->sc_flags |= IWM_FLAG_STOPPED;
	sc->sc_generation++;
	sc->sc_scanband = 0;
	sc->sc_auth_prot = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	if (ic->ic_state != IEEE80211_S_INIT)
		ieee80211_new_state(ic, IEEE80211_S_INIT, -1);

	/* Disarm the TX watchdog before killing the hardware. */
	ifp->if_timer = sc->sc_tx_timer = 0;
	iwm_stop_device(sc);
}
   5786 
   5787 void
   5788 iwm_watchdog(struct ifnet *ifp)
   5789 {
   5790 	struct iwm_softc *sc = ifp->if_softc;
   5791 
   5792 	ifp->if_timer = 0;
   5793 	if (sc->sc_tx_timer > 0) {
   5794 		if (--sc->sc_tx_timer == 0) {
   5795                         aprint_error_dev(sc->sc_dev, "device timeout\n");
   5796 #ifdef IWM_DEBUG
   5797 			iwm_nic_error(sc);
   5798 #endif
   5799 			ifp->if_flags &= ~IFF_UP;
   5800 			iwm_stop(ifp, 1);
   5801 			ifp->if_oerrors++;
   5802 			return;
   5803 		}
   5804 		ifp->if_timer = 1;
   5805 	}
   5806 
   5807 	ieee80211_watchdog(&sc->sc_ic);
   5808 }
   5809 
/*
 * ifnet ioctl hook.  Handles address/flags/multicast changes itself
 * and defers everything else to net80211.  ENETRESET from any path
 * triggers a stop/init cycle when the interface is up and running.
 */
int
iwm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct iwm_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	const struct sockaddr *sa;
	int s, error = 0;

	/* Block network interrupts for the duration of the ioctl. */
	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		/* FALLTHROUGH */
	case SIOCSIFFLAGS:
		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
			break;
		if (ifp->if_flags & IFF_UP) {
			if (!(ifp->if_flags & IFF_RUNNING)) {
				if ((error = iwm_init(ifp)) != 0)
					ifp->if_flags &= ~IFF_UP;
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				iwm_stop(ifp, 1);
		}
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/*
		 * NOTE(review): SIOCADDMULTI is passed even for the
		 * SIOCDELMULTI case; both commands read ifr_addr so this
		 * looks harmless — confirm against ifreq_getaddr().
		 */
		sa = ifreq_getaddr(SIOCADDMULTI, (struct ifreq *)data);
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(sa, &sc->sc_ec) :
		    ether_delmulti(sa, &sc->sc_ec);

		/* Multicast filtering is not programmed; reset is a no-op. */
		if (error == ENETRESET)
			error = 0;
		break;

	default:
		error = ieee80211_ioctl(ic, cmd, data);
	}

	if (error == ENETRESET) {
		error = 0;
		if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
		    (IFF_UP | IFF_RUNNING)) {
			iwm_stop(ifp, 0);
			error = iwm_init(ifp);
		}
	}

	splx(s);
	return error;
}
   5865 
   5866 /*
   5867  * The interrupt side of things
   5868  */
   5869 
   5870 /*
   5871  * error dumping routines are from iwlwifi/mvm/utils.c
   5872  */
   5873 
   5874 /*
   5875  * Note: This structure is read from the device with IO accesses,
   5876  * and the reading already does the endian conversion. As it is
   5877  * read with uint32_t-sized accesses, any members with a different size
   5878  * need to be ordered correctly though!
   5879  */
/*
 * Layout mirrors the firmware's error event table in device SRAM
 * (see the note above about uint32_t-sized IO accesses); field order
 * and sizes must not be changed.
 */
struct iwm_error_event_table {
	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
	uint32_t error_id;		/* type of error */
	uint32_t pc;			/* program counter */
	uint32_t blink1;		/* branch link */
	uint32_t blink2;		/* branch link */
	uint32_t ilink1;		/* interrupt link */
	uint32_t ilink2;		/* interrupt link */
	uint32_t data1;		/* error-specific data */
	uint32_t data2;		/* error-specific data */
	uint32_t data3;		/* error-specific data */
	uint32_t bcon_time;		/* beacon timer */
	uint32_t tsf_low;		/* network timestamp function timer */
	uint32_t tsf_hi;		/* network timestamp function timer */
	uint32_t gp1;		/* GP1 timer register */
	uint32_t gp2;		/* GP2 timer register */
	uint32_t gp3;		/* GP3 timer register */
	uint32_t ucode_ver;		/* uCode version */
	uint32_t hw_ver;		/* HW Silicon version */
	uint32_t brd_ver;		/* HW board version */
	uint32_t log_pc;		/* log program counter */
	uint32_t frame_ptr;		/* frame pointer */
	uint32_t stack_ptr;		/* stack pointer */
	uint32_t hcmd;		/* last host command header */
	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
				 * rxtx_flag */
	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
				 * host_flag */
	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
				 * enc_flag */
	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
				 * time_flag */
	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
				 * wico interrupt */
	uint32_t isr_pref;		/* isr status register LMPM_NIC_PREF_STAT */
	uint32_t wait_event;		/* wait event() caller address */
	uint32_t l2p_control;	/* L2pControlField */
	uint32_t l2p_duration;	/* L2pDurationField */
	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
				 * (LMPM_PMG_SEL) */
	uint32_t u_timestamp;	/* indicate when the date and time of the
				 * compilation */
	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
} __packed;
   5926 
   5927 #define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
   5928 #define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
   5929 
   5930 struct {
   5931 	const char *name;
   5932 	uint8_t num;
   5933 } advanced_lookup[] = {
   5934 	{ "NMI_INTERRUPT_WDG", 0x34 },
   5935 	{ "SYSASSERT", 0x35 },
   5936 	{ "UCODE_VERSION_MISMATCH", 0x37 },
   5937 	{ "BAD_COMMAND", 0x38 },
   5938 	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
   5939 	{ "FATAL_ERROR", 0x3D },
   5940 	{ "NMI_TRM_HW_ERR", 0x46 },
   5941 	{ "NMI_INTERRUPT_TRM", 0x4C },
   5942 	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
   5943 	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
   5944 	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
   5945 	{ "NMI_INTERRUPT_HOST", 0x66 },
   5946 	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
   5947 	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
   5948 	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
   5949 	{ "ADVANCED_SYSASSERT", 0 },
   5950 };
   5951 
   5952 const char *
   5953 iwm_desc_lookup(uint32_t num)
   5954 {
   5955 	int i;
   5956 
   5957 	for (i = 0; i < __arraycount(advanced_lookup) - 1; i++)
   5958 		if (advanced_lookup[i].num == num)
   5959 			return advanced_lookup[i].name;
   5960 
   5961 	/* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
   5962 	return advanced_lookup[i].name;
   5963 }
   5964 
   5965 #ifdef IWM_DEBUG
   5966 /*
   5967  * Support for dumping the error log seemed like a good idea ...
   5968  * but it's mostly hex junk and the only sensible thing is the
   5969  * hw/ucode revision (which we know anyway).  Since it's here,
   5970  * I'll just leave it in, just in case e.g. the Intel guys want to
   5971  * help us decipher some "ADVANCED_SYSASSERT" later.
   5972  */
/*
 * Read the firmware's error event table out of device SRAM and dump
 * it to the console.  Called on SW_ERR interrupts and TX timeouts.
 */
void
iwm_nic_error(struct iwm_softc *sc)
{
	struct iwm_error_event_table table;
	uint32_t base;

	aprint_error_dev(sc->sc_dev, "dumping device error log\n");
	base = sc->sc_uc.uc_error_event_table;
	/* Sanity-check that the pointer falls inside device SRAM. */
	if (base < 0x800000 || base >= 0x80C000) {
		aprint_error_dev(sc->sc_dev,
		    "Not valid error log pointer 0x%08x\n", base);
		return;
	}

	/* iwm_read_mem() counts in 32-bit words, hence the division. */
	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t)) != 0) {
		aprint_error_dev(sc->sc_dev, "reading errlog failed\n");
		return;
	}

	if (!table.valid) {
		aprint_error_dev(sc->sc_dev, "errlog not found, skipping\n");
		return;
	}

	/*
	 * NOTE(review): this condition and the "count" wording mirror the
	 * Linux iwlwifi dump code; 'valid' doubles as an entry count there.
	 */
	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		aprint_error_dev(sc->sc_dev, "Start IWL Error Log Dump:\n");
		aprint_error_dev(sc->sc_dev, "Status: 0x%x, count: %d\n",
		    sc->sc_flags, table.valid);
	}

	aprint_error_dev(sc->sc_dev, "%08X | %-28s\n", table.error_id,
		iwm_desc_lookup(table.error_id));
	aprint_error_dev(sc->sc_dev, "%08X | uPc\n", table.pc);
	aprint_error_dev(sc->sc_dev, "%08X | branchlink1\n", table.blink1);
	aprint_error_dev(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
	aprint_error_dev(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
	aprint_error_dev(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
	aprint_error_dev(sc->sc_dev, "%08X | data1\n", table.data1);
	aprint_error_dev(sc->sc_dev, "%08X | data2\n", table.data2);
	aprint_error_dev(sc->sc_dev, "%08X | data3\n", table.data3);
	aprint_error_dev(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
	aprint_error_dev(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
	aprint_error_dev(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
	aprint_error_dev(sc->sc_dev, "%08X | time gp1\n", table.gp1);
	aprint_error_dev(sc->sc_dev, "%08X | time gp2\n", table.gp2);
	aprint_error_dev(sc->sc_dev, "%08X | time gp3\n", table.gp3);
	aprint_error_dev(sc->sc_dev, "%08X | uCode version\n", table.ucode_ver);
	aprint_error_dev(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
	aprint_error_dev(sc->sc_dev, "%08X | board version\n", table.brd_ver);
	aprint_error_dev(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
	aprint_error_dev(sc->sc_dev, "%08X | isr0\n", table.isr0);
	aprint_error_dev(sc->sc_dev, "%08X | isr1\n", table.isr1);
	aprint_error_dev(sc->sc_dev, "%08X | isr2\n", table.isr2);
	aprint_error_dev(sc->sc_dev, "%08X | isr3\n", table.isr3);
	aprint_error_dev(sc->sc_dev, "%08X | isr4\n", table.isr4);
	aprint_error_dev(sc->sc_dev, "%08X | isr_pref\n", table.isr_pref);
	aprint_error_dev(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
	aprint_error_dev(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
	aprint_error_dev(sc->sc_dev, "%08X | l2p_duration\n",
	    table.l2p_duration);
	aprint_error_dev(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
	aprint_error_dev(sc->sc_dev, "%08X | l2p_addr_match\n",
	    table.l2p_addr_match);
	aprint_error_dev(sc->sc_dev, "%08X | lmpm_pmg_sel\n",
	    table.lmpm_pmg_sel);
	aprint_error_dev(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
	aprint_error_dev(sc->sc_dev, "%08X | flow_handler\n",
	    table.flow_handler);
}
   6042 #endif
   6043 
/*
 * Post-read DMA-sync helpers for notification processing: each syncs
 * the payload following the RX packet header, then points the result
 * variable at that payload.  They expect 'sc' and 'data' in scope.
 *
 * Fix: SYNC_RESP_PTR previously synced sizeof(len) bytes — silently
 * relying on a variable named 'len' at the expansion site and syncing
 * only that variable's size (typically 4 bytes) instead of the length
 * actually requested via the _len_ argument.
 */
#define SYNC_RESP_STRUCT(_var_, _pkt_)					\
do {									\
	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
	    sizeof(*(_var_)), BUS_DMASYNC_POSTREAD);			\
	_var_ = (void *)((_pkt_)+1);					\
} while (/*CONSTCOND*/0)

#define SYNC_RESP_PTR(_ptr_, _len_, _pkt_)				\
do {									\
	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
	    (_len_), BUS_DMASYNC_POSTREAD);				\
	_ptr_ = (void *)((_pkt_)+1);					\
} while (/*CONSTCOND*/0)

/* Advance the RX ring cursor, wrapping at IWM_RX_RING_COUNT. */
#define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % IWM_RX_RING_COUNT);
   6059 
   6060 /*
   6061  * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
   6062  * Basic structure from if_iwn
   6063  */
/*
 * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
 * Basic structure from if_iwn
 *
 * Walk the RX ring from our cursor to the firmware's write pointer,
 * dispatching each notification/response, then hand the processed
 * entries back to the firmware.
 */
void
iwm_notif_intr(struct iwm_softc *sc)
{
	uint16_t hw;

	/* Pull the firmware's ring status (closed_rb_num) into view. */
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
	    0, sc->rxq.stat_dma.size, BUS_DMASYNC_POSTREAD);

	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
	while (sc->rxq.cur != hw) {
		struct iwm_rx_data *data = &sc->rxq.data[sc->rxq.cur];
		struct iwm_rx_packet *pkt;
		struct iwm_cmd_response *cresp;
		int qid, idx;

		/* Sync just the packet header first; payload is synced
		 * per-case via SYNC_RESP_* below. */
		bus_dmamap_sync(sc->sc_dmat, data->map, 0, sizeof(*pkt),
		    BUS_DMASYNC_POSTREAD);
		pkt = mtod(data->m, struct iwm_rx_packet *);

		qid = pkt->hdr.qid & ~0x80;
		idx = pkt->hdr.idx;

		DPRINTFN(12, ("rx packet qid=%d idx=%d flags=%x type=%x %d %d\n",
		    pkt->hdr.qid & ~0x80, pkt->hdr.idx, pkt->hdr.flags,
		    pkt->hdr.code, sc->rxq.cur, hw));

		/*
		 * randomly get these from the firmware, no idea why.
		 * they at least seem harmless, so just ignore them for now
		 */
		if (__predict_false((pkt->hdr.code == 0 && qid == 0 && idx == 0)
		    || pkt->len_n_flags == htole32(0x55550000))) {
			ADVANCE_RXQ(sc);
			continue;
		}

		switch (pkt->hdr.code) {
		case IWM_REPLY_RX_PHY_CMD:
			iwm_mvm_rx_rx_phy_cmd(sc, pkt, data);
			break;

		case IWM_REPLY_RX_MPDU_CMD:
			iwm_mvm_rx_rx_mpdu(sc, pkt, data);
			break;

		case IWM_TX_CMD:
			iwm_mvm_rx_tx_cmd(sc, pkt, data);
			break;

		case IWM_MISSED_BEACONS_NOTIFICATION:
			iwm_mvm_rx_missed_beacons_notif(sc, pkt, data);
			break;

		case IWM_MVM_ALIVE: {
			/* Firmware booted: record its table pointers and
			 * wake the thread sleeping in ucode load. */
			struct iwm_mvm_alive_resp *resp;
			SYNC_RESP_STRUCT(resp, pkt);

			sc->sc_uc.uc_error_event_table
			    = le32toh(resp->error_event_table_ptr);
			sc->sc_uc.uc_log_event_table
			    = le32toh(resp->log_event_table_ptr);
			sc->sched_base = le32toh(resp->scd_base_ptr);
			sc->sc_uc.uc_ok = resp->status == IWM_ALIVE_STATUS_OK;

			sc->sc_uc.uc_intr = 1;
			wakeup(&sc->sc_uc);
			break; }

		case IWM_CALIB_RES_NOTIF_PHY_DB: {
			struct iwm_calib_res_notif_phy_db *phy_db_notif;
			SYNC_RESP_STRUCT(phy_db_notif, pkt);

			iwm_phy_db_set_section(sc, phy_db_notif);

			break; }

		case IWM_STATISTICS_NOTIFICATION: {
			struct iwm_notif_statistics *stats;
			SYNC_RESP_STRUCT(stats, pkt);
			memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
			sc->sc_noise = iwm_get_noise(&stats->rx.general);
			break; }

		case IWM_NVM_ACCESS_CMD:
			/* Copy the response for the waiting command issuer
			 * (matched by queue id / index). */
			if (sc->sc_wantresp == ((qid << 16) | idx)) {
				bus_dmamap_sync(sc->sc_dmat, data->map, 0,
				    sizeof(sc->sc_cmd_resp),
				    BUS_DMASYNC_POSTREAD);
				memcpy(sc->sc_cmd_resp,
				    pkt, sizeof(sc->sc_cmd_resp));
			}
			break;

		/* Simple command acknowledgements: stash the generic
		 * response if someone is waiting on it. */
		case IWM_PHY_CONFIGURATION_CMD:
		case IWM_TX_ANT_CONFIGURATION_CMD:
		case IWM_ADD_STA:
		case IWM_MAC_CONTEXT_CMD:
		case IWM_REPLY_SF_CFG_CMD:
		case IWM_POWER_TABLE_CMD:
		case IWM_PHY_CONTEXT_CMD:
		case IWM_BINDING_CONTEXT_CMD:
		case IWM_TIME_EVENT_CMD:
		case IWM_SCAN_REQUEST_CMD:
		case IWM_REPLY_BEACON_FILTERING_CMD:
		case IWM_MAC_PM_POWER_TABLE:
		case IWM_TIME_QUOTA_CMD:
		case IWM_REMOVE_STA:
		case IWM_TXPATH_FLUSH:
		case IWM_LQ_CMD:
			SYNC_RESP_STRUCT(cresp, pkt);
			if (sc->sc_wantresp == ((qid << 16) | idx)) {
				memcpy(sc->sc_cmd_resp,
				    pkt, sizeof(*pkt)+sizeof(*cresp));
			}
			break;

		/* ignore */
		case 0x6c: /* IWM_PHY_DB_CMD, no idea why it's not in fw-api.h */
			break;

		case IWM_INIT_COMPLETE_NOTIF:
			sc->sc_init_complete = 1;
			wakeup(&sc->sc_init_complete);
			break;

		case IWM_SCAN_COMPLETE_NOTIFICATION: {
			/* Scan end is handled in thread context, see
			 * iwm_endscan_cb(). */
			struct iwm_scan_complete_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);

			workqueue_enqueue(sc->sc_eswq, &sc->sc_eswk, NULL);
			break; }

		case IWM_REPLY_ERROR: {
			struct iwm_error_resp *resp;
			SYNC_RESP_STRUCT(resp, pkt);

			aprint_error_dev(sc->sc_dev,
			    "firmware error 0x%x, cmd 0x%x\n",
			    le32toh(resp->error_type), resp->cmd_id);
			break; }

		case IWM_TIME_EVENT_NOTIFICATION: {
			/* Track auth/time-event protection state and wake
			 * anyone sleeping on it (see iwm_auth path). */
			struct iwm_time_event_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);

			if (notif->status) {
				if (le32toh(notif->action) &
				    IWM_TE_V2_NOTIF_HOST_EVENT_START)
					sc->sc_auth_prot = 2;
				else
					sc->sc_auth_prot = 0;
			} else {
				sc->sc_auth_prot = -1;
			}
			wakeup(&sc->sc_auth_prot);
			break; }

		default:
			aprint_error_dev(sc->sc_dev,
			    "frame %d/%d %x UNHANDLED (this should "
			    "not happen)\n", qid, idx, pkt->len_n_flags);
			break;
		}

		/*
		 * Why test bit 0x80?  The Linux driver:
		 *
		 * There is one exception:  uCode sets bit 15 when it
		 * originates the response/notification, i.e. when the
		 * response/notification is not a direct response to a
		 * command sent by the driver.  For example, uCode issues
		 * IWM_REPLY_RX when it sends a received frame to the driver;
		 * it is not a direct response to any driver command.
		 *
		 * Ok, so since when is 7 == 15?  Well, the Linux driver
		 * uses a slightly different format for pkt->hdr, and "qid"
		 * is actually the upper byte of a two-byte field.
		 */
		if (!(pkt->hdr.qid & (1 << 7))) {
			iwm_cmd_done(sc, pkt);
		}

		ADVANCE_RXQ(sc);
	}

	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/*
	 * Tell the firmware what we have processed.
	 * Seems like the hardware gets upset unless we align
	 * the write by 8??
	 */
	hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
}
   6260 
/*
 * PCI interrupt handler.  Reads the interrupt cause either from the
 * ICT table (if enabled) or from the CSR registers, then handles
 * firmware/hardware errors, firmware-load completion, rfkill, and RX
 * notifications.  Returns nonzero if the interrupt was ours.
 */
int
iwm_intr(void *arg)
{
	struct iwm_softc *sc = arg;
	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
	int handled = 0;
	int r1, r2, rv = 0;
	int isperiodic = 0;

	/* Mask all interrupts while we figure out the cause. */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
		uint32_t *ict = sc->ict_dma.vaddr;
		int tmp;

		/* NOTE(review): htole32() used on a value read from the
		 * device; equivalent to le32toh() on all supported
		 * machines, but the latter reads as intended. */
		tmp = htole32(ict[sc->ict_cur]);
		if (!tmp)
			goto out_ena;

		/*
		 * ok, there was something.  keep plowing until we have all.
		 */
		r1 = r2 = 0;
		while (tmp) {
			r1 |= tmp;
			/* Clear the consumed slot and advance. */
			ict[sc->ict_cur] = 0;
			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
			tmp = htole32(ict[sc->ict_cur]);
		}

		/* this is where the fun begins.  don't ask */
		if (r1 == 0xffffffff)
			r1 = 0;

		/* i am not expected to understand this */
		if (r1 & 0xc0000)
			r1 |= 0x8000;
		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
	} else {
		r1 = IWM_READ(sc, IWM_CSR_INT);
		/* "hardware gone" (where, fishing?) */
		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
			goto out;
		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
	}
	if (r1 == 0 && r2 == 0) {
		goto out_ena;
	}

	/* Acknowledge the causes we are about to service. */
	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);

	/* ignored */
	handled |= (r1 & (IWM_CSR_INT_BIT_ALIVE /*| IWM_CSR_INT_BIT_SCD*/));

	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
#ifdef IWM_DEBUG
		int i;

		iwm_nic_error(sc);

		/* Dump driver status (TX and RX rings) while we're here. */
		DPRINTF(("driver status:\n"));
		for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
			struct iwm_tx_ring *ring = &sc->txq[i];
			DPRINTF(("  tx ring %2d: qid=%-2d cur=%-3d "
			    "queued=%-3d\n",
			    i, ring->qid, ring->cur, ring->queued));
		}
		DPRINTF(("  rx ring: cur=%d\n", sc->rxq.cur));
		DPRINTF(("  802.11 state %d\n", sc->sc_ic.ic_state));
#endif

		/* Firmware asserted: take the interface down. */
		aprint_error_dev(sc->sc_dev, "fatal firmware error\n");
		ifp->if_flags &= ~IFF_UP;
		iwm_stop(ifp, 1);
		rv = 1;
		goto out;

	}

	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
		handled |= IWM_CSR_INT_BIT_HW_ERR;
		aprint_error_dev(sc->sc_dev,
		    "hardware error, stopping device\n");
		ifp->if_flags &= ~IFF_UP;
		iwm_stop(ifp, 1);
		rv = 1;
		goto out;
	}

	/* firmware chunk loaded */
	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
		handled |= IWM_CSR_INT_BIT_FH_TX;

		/* Wake the thread sleeping in the firmware-load path. */
		sc->sc_fw_chunk_done = 1;
		wakeup(&sc->sc_fw);
	}

	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
		handled |= IWM_CSR_INT_BIT_RF_KILL;
		if (iwm_check_rfkill(sc) && (ifp->if_flags & IFF_UP)) {
			DPRINTF(("%s: rfkill switch, disabling interface\n",
			    DEVNAME(sc)));
			ifp->if_flags &= ~IFF_UP;
			iwm_stop(ifp, 1);
		}
	}

	/*
	 * The Linux driver uses periodic interrupts to avoid races.
	 * We cargo-cult like it's going out of fashion.
	 */
	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
		handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
			IWM_WRITE_1(sc,
			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
		isperiodic = 1;
	}

	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
		handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);

		iwm_notif_intr(sc);

		/* enable periodic interrupt, see above */
		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
			    IWM_CSR_INT_PERIODIC_ENA);
	}

	if (__predict_false(r1 & ~handled))
		DPRINTF(("%s: unhandled interrupts: %x\n", DEVNAME(sc), r1));
	rv = 1;

 out_ena:
	/* Re-enable the interrupt mask we cleared on entry. */
	iwm_restore_interrupts(sc);
 out:
	return rv;
}
   6404 
   6405 /*
   6406  * Autoconf glue-sniffing
   6407  */
   6408 
/*
 * PCI product IDs this driver matches.  0x08b1 is presumably the Intel
 * Dual Band Wireless AC 7260 (the symbolic PCI_PRODUCT_INTEL_WL_7260_*
 * names below are disabled, likely pending a pcidevs regeneration) —
 * TODO(review): confirm and switch to the symbolic names when available.
 */
static const pci_product_id_t iwm_devices[] = {
	0x08b1,
#if 0
	PCI_PRODUCT_INTEL_WL_7260_1,
	PCI_PRODUCT_INTEL_WL_7260_2,
#endif
};
   6416 
   6417 static int
   6418 iwm_match(struct device *parent, cfdata_t match __unused, void *aux)
   6419 {
   6420 	struct pci_attach_args *pa = aux;
   6421 	size_t i;
   6422 
   6423 	if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_INTEL)
   6424 		return 0;
   6425 
   6426 	for (i = 0; i < __arraycount(iwm_devices); i++)
   6427 		if (PCI_PRODUCT(pa->pa_id) == iwm_devices[i])
   6428 			return 1;
   6429 
   6430 	return 0;
   6431 }
   6432 
   6433 int
   6434 iwm_preinit(struct iwm_softc *sc)
   6435 {
   6436 	int error;
   6437 
   6438 	if ((error = iwm_prepare_card_hw(sc)) != 0) {
   6439 		aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
   6440 		return error;
   6441 	}
   6442 
   6443 	if (sc->sc_flags & IWM_FLAG_ATTACHED)
   6444 		return 0;
   6445 
   6446 	if ((error = iwm_start_hw(sc)) != 0) {
   6447 		aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
   6448 		return error;
   6449 	}
   6450 
   6451 	error = iwm_run_init_mvm_ucode(sc, 1);
   6452 	iwm_stop_device(sc);
   6453 	return error;
   6454 }
   6455 
   6456 void
   6457 iwm_attach_hook(struct device *dev)
   6458 {
   6459 	struct iwm_softc *sc = device_private(dev);
   6460 	struct ieee80211com *ic = &sc->sc_ic;
   6461 	struct ifnet *ifp = &sc->sc_ec.ec_if;
   6462 
   6463 	KASSERT(!cold);
   6464 
   6465 	sc->sc_wantresp = -1;
   6466 
   6467 	if (iwm_preinit(sc) != 0)
   6468 		return;
   6469 
   6470 	sc->sc_flags |= IWM_FLAG_ATTACHED;
   6471 
   6472 	ic->ic_ifp = ifp;
   6473 	aprint_normal_dev(sc->sc_dev,
   6474 	    "hw rev: 0x%x, fw ver %d.%d (API ver %d), address %s\n",
   6475 	    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
   6476 	    IWM_UCODE_MAJOR(sc->sc_fwver),
   6477 	    IWM_UCODE_MINOR(sc->sc_fwver),
   6478 	    IWM_UCODE_API(sc->sc_fwver),
   6479 	    ether_sprintf(sc->sc_nvm.hw_addr));
   6480 
   6481 	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
   6482 	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
   6483 	ic->ic_state = IEEE80211_S_INIT;
   6484 
   6485 	/* Set device capabilities. */
   6486 	ic->ic_caps =
   6487 	    IEEE80211_C_WEP |		/* WEP */
   6488 	    IEEE80211_C_WPA |		/* 802.11i */
   6489 	    IEEE80211_C_SHSLOT |	/* short slot time supported */
   6490 	    IEEE80211_C_SHPREAMBLE;	/* short preamble supported */
   6491 
   6492 #ifndef IWM_NO_5GHZ
   6493 	ic->ic_sup_rates[IEEE80211_MODE_11A] = ieee80211_std_rateset_11a;
   6494 #endif
   6495 	ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b;
   6496 	ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g;
   6497 
   6498 	for (int i = 0; i < __arraycount(sc->sc_phyctxt); i++) {
   6499 		sc->sc_phyctxt[i].id = i;
   6500 	}
   6501 
   6502 	sc->sc_amrr.amrr_min_success_threshold =  1;
   6503 	sc->sc_amrr.amrr_max_success_threshold = 15;
   6504 
   6505 	/* IBSS channel undefined for now. */
   6506 	ic->ic_ibss_chan = &ic->ic_channels[1];
   6507 
   6508 #if 0
   6509 	/* Max RSSI */
   6510 	ic->ic_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
   6511 #endif
   6512 
   6513 	ifp->if_softc = sc;
   6514 	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
   6515 	ifp->if_init = iwm_init;
   6516 	ifp->if_stop = iwm_stop;
   6517 	ifp->if_ioctl = iwm_ioctl;
   6518 	ifp->if_start = iwm_start;
   6519 	ifp->if_watchdog = iwm_watchdog;
   6520 	IFQ_SET_READY(&ifp->if_snd);
   6521 	memcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
   6522 
   6523 	if_attach(ifp);
   6524 	ic->ic_debug = 0;
   6525 	ieee80211_ifattach(ic);
   6526 
   6527 	ic->ic_node_alloc = iwm_node_alloc;
   6528 
   6529 	/* Override 802.11 state transition machine. */
   6530 	sc->sc_newstate = ic->ic_newstate;
   6531 	ic->ic_newstate = iwm_newstate;
   6532 	ieee80211_media_init(ic, iwm_media_change, ieee80211_media_status);
   6533 	ieee80211_announce(ic);
   6534 
   6535 	iwm_radiotap_attach(sc);
   6536 	callout_init(&sc->sc_calib_to, 0);
   6537 	callout_setfunc(&sc->sc_calib_to, iwm_calib_timeout, sc);
   6538 	iwm_radiotap_attach(sc);
   6539 
   6540 	//task_set(&sc->init_task, iwm_init_task, sc);
   6541 }
   6542 
/*
 * Autoconf attach: map PCI resources, hook the interrupt, and allocate
 * the DMA-backed data structures (firmware buffer, keep-warm page, ICT
 * table, TX scheduler, TX/RX rings).  The firmware-dependent half of
 * attachment is deferred to iwm_attach_hook() via config_mountroot(),
 * because the firmware file cannot be read before root is mounted.
 * Error paths unwind the allocations in reverse order via the failN
 * labels at the bottom.
 */
void
iwm_attach(struct device *parent, struct device *self, void *aux)
{
	struct iwm_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	pci_intr_handle_t ih;
	pcireg_t reg, memtype;
	const char *intrstr;
	int error;
	int txq_i;

	sc->sc_dev = self;
	sc->sc_pct = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;
	sc->sc_dmat = pa->pa_dmat;

	pci_aprint_devinfo(pa, NULL);

	/*
	 * Get the offset of the PCI Express Capability Structure in PCI
	 * Configuration Space.
	 */
	/* pci_get_capability() returns non-zero on success, so
	 * error == 0 here means the capability was not found. */
	error = pci_get_capability(sc->sc_pct, sc->sc_pcitag,
	    PCI_CAP_PCIEXPRESS, &sc->sc_cap_off, NULL);
	if (error == 0) {
		aprint_error_dev(self,
		    "PCIe capability structure not found!\n");
		return;
	}

	/* Clear device-specific "PCI retry timeout" register (41h). */
	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);

	/* Enable bus-mastering and hardware bug workaround. */
	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
	reg |= PCI_COMMAND_MASTER_ENABLE;
	/* if !MSI */
	if (reg & PCI_COMMAND_INTERRUPT_DISABLE) {
		reg &= ~PCI_COMMAND_INTERRUPT_DISABLE;
	}
	pci_conf_write(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, reg);

	/* Map the device registers (BAR 0). */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START);
	error = pci_mapreg_map(pa, PCI_MAPREG_START, memtype, 0,
	    &sc->sc_st, &sc->sc_sh, NULL, &sc->sc_sz);
	if (error != 0) {
		aprint_error_dev(self, "can't map mem space\n");
		return;
	}

	/* Install interrupt handler. */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(self, "can't map interrupt\n");
		return;
	}

	char intrbuf[PCI_INTRSTR_LEN];
	intrstr = pci_intr_string(sc->sc_pct, ih, intrbuf, sizeof(intrbuf));
	sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET, iwm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(self, "can't establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(self, "interrupting at %s\n", intrstr);

	/* only one firmware possibility for now */
	sc->sc_fwname = IWM_FWNAME;
	sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;

	/*
	 * We now start fiddling with the hardware
	 */

	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
	if (iwm_prepare_card_hw(sc) != 0) {
		aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
		return;
	}

	/* Allocate DMA memory for firmware transfers. */
	if ((error = iwm_alloc_fwmem(sc)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate memory for firmware\n");
		return;
	}

	/* Allocate "Keep Warm" page. */
	if ((error = iwm_alloc_kw(sc)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate keep warm page\n");
		goto fail1;
	}

	/* We use ICT interrupts */
	if ((error = iwm_alloc_ict(sc)) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate ICT table\n");
		goto fail2;
	}

	/* Allocate TX scheduler "rings". */
	if ((error = iwm_alloc_sched(sc)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate TX scheduler rings\n");
		goto fail3;
	}

	/* Allocate TX rings */
	for (txq_i = 0; txq_i < __arraycount(sc->txq); txq_i++) {
		if ((error = iwm_alloc_tx_ring(sc,
		    &sc->txq[txq_i], txq_i)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate TX ring %d\n", txq_i);
			goto fail4;
		}
	}

	/* Allocate RX ring. */
	if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate RX ring\n");
		goto fail4;
	}

	/* Workqueues for deferred end-of-scan and 802.11 newstate work. */
	/* NOTE(review): workqueue_create() return values are ignored —
	 * confirm failure is acceptable here or add error handling. */
	workqueue_create(&sc->sc_eswq, "iwmes",
	    (void *)iwm_endscan_cb, sc, PRI_NONE, IPL_NET, 0);
	workqueue_create(&sc->sc_nswq, "iwmns",
	    (void *)iwm_newstate_cb, sc, PRI_NONE, IPL_NET, 0);

	/* Clear pending interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);

	/*
	 * We can't do normal attach before the file system is mounted
	 * because we cannot read the MAC address without loading the
	 * firmware from disk.  So we postpone until mountroot is done.
	 * Notably, this will require a full driver unload/load cycle
	 * (or reboot) in case the firmware is not present when the
	 * hook runs.
	 */
	config_mountroot(self, iwm_attach_hook);

	return;

	/* Free allocated memory if something failed during attachment. */
fail4:	while (--txq_i >= 0)
		iwm_free_tx_ring(sc, &sc->txq[txq_i]);
	iwm_free_sched(sc);
fail3:	if (sc->ict_dma.vaddr != NULL)
		iwm_free_ict(sc);
fail2:	iwm_free_kw(sc);
fail1:	iwm_free_fwmem(sc);
}
   6698 
   6699 /*
   6700  * Attach the interface to 802.11 radiotap.
   6701  */
   6702 void
   6703 iwm_radiotap_attach(struct iwm_softc *sc)
   6704 {
   6705 	struct ifnet *ifp = sc->sc_ic.ic_ifp;
   6706 
   6707 	bpf_attach2(ifp, DLT_IEEE802_11_RADIO,
   6708 	    sizeof (struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN,
   6709 	    &sc->sc_drvbpf);
   6710 
   6711 	sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
   6712 	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
   6713 	sc->sc_rxtap.wr_ihdr.it_present = htole32(IWM_RX_RADIOTAP_PRESENT);
   6714 
   6715 	sc->sc_txtap_len = sizeof sc->sc_txtapu;
   6716 	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
   6717 	sc->sc_txtap.wt_ihdr.it_present = htole32(IWM_TX_RADIOTAP_PRESENT);
   6718 }
   6719 
#if 0
/*
 * Disabled suspend/resume support carried over from OpenBSD.  These
 * functions still use the OpenBSD softc layout (sc_ic.ic_if, struct
 * device casts) and are not wired into NetBSD autoconf activation —
 * TODO(review): port and enable, or remove.
 */

/* Restart the interface after a power transition; serialized against
 * other init/stop activity via the IWM_FLAG_BUSY flag. */
void
iwm_init_task(void *arg1)
{
	struct iwm_softc *sc = arg1;
	struct ifnet *ifp = &sc->sc_ic.ic_if;
	int s;

	s = splnet();
	while (sc->sc_flags & IWM_FLAG_BUSY)
		tsleep(&sc->sc_flags, 0, "iwmpwr", 0);
	sc->sc_flags |= IWM_FLAG_BUSY;

	/* Stop, then re-init only if the interface was administratively up
	 * but not running. */
	iwm_stop(ifp, 0);
	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP)
		iwm_init(ifp);

	sc->sc_flags &= ~IWM_FLAG_BUSY;
	wakeup(&sc->sc_flags);
	splx(s);
}

/* Resume hook: redo the PCI quirk fixup lost across suspend, then
 * restart the interface. */
void
iwm_wakeup(struct iwm_softc *sc)
{
	pcireg_t reg;

	/* Clear device-specific "PCI retry timeout" register (41h). */
	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);

	iwm_init_task(sc);

}

/* Power-management activation entry point (OpenBSD-style DVACT_*). */
int
iwm_activate(struct device *self, int act)
{
	struct iwm_softc *sc = (struct iwm_softc *)self;
	struct ifnet *ifp = &sc->sc_ic.ic_if;

	switch (act) {
	case DVACT_SUSPEND:
		if (ifp->if_flags & IFF_RUNNING)
			iwm_stop(ifp, 0);
		break;
	case DVACT_WAKEUP:
		iwm_wakeup(sc);
		break;
	}

	return 0;
}
#endif
   6774 
/* Register the autoconf attachment; no detach or activate hooks yet. */
CFATTACH_DECL_NEW(iwm, sizeof(struct iwm_softc), iwm_match, iwm_attach,
	NULL, NULL);
   6777