/*	$NetBSD: if_iwm.c,v 1.62 2017/01/17 08:35:16 nonaka Exp $	*/
/*	OpenBSD: if_iwm.c,v 1.148 2016/11/19 21:07:08 stsp Exp	*/
#define IEEE80211_NO_HT
/*
 * Copyright (c) 2014, 2016 genua gmbh <info@genua.de>
 *   Author: Stefan Sperling <stsp@openbsd.org>
 * Copyright (c) 2014 Fixup Software Ltd.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*-
 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
 * which were used as the reference documentation for this implementation.
 *
 ***********************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_iwm.c,v 1.62 2017/01/17 08:35:16 nonaka Exp $");

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <sys/cpu.h>
#include <sys/bus.h>
#include <sys/workqueue.h>
#include <machine/endian.h>
#include <machine/intr.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>
#include <dev/firmload.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <netinet/in.h>
#include <netinet/ip.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_amrr.h>
#include <net80211/ieee80211_radiotap.h>

#define DEVNAME(_s)	device_xname((_s)->sc_dev)
#define IC2IFP(_ic_)	((_ic_)->ic_ifp)

#define le16_to_cpup(_a_)	(le16toh(*(const uint16_t *)(_a_)))
#define le32_to_cpup(_a_)	(le32toh(*(const uint32_t *)(_a_)))

#ifdef IWM_DEBUG
#define DPRINTF(x)	do { if (iwm_debug > 0) printf x; } while (0)
#define DPRINTFN(n, x)	do { if (iwm_debug >= (n)) printf x; } while (0)
int iwm_debug = 0;
#else
#define DPRINTF(x)	do { ; } while (0)
#define DPRINTFN(n, x)	do { ; } while (0)
#endif

#include <dev/pci/if_iwmreg.h>
#include <dev/pci/if_iwmvar.h>

static const uint8_t iwm_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};

static const uint8_t iwm_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};

#define IWM_NUM_2GHZ_CHANNELS	14

static const struct iwm_rate {
	uint8_t rate;
	uint8_t plcp;
	uint8_t ht_plcp;
} iwm_rates[] = {
		/* Legacy */		/* HT */
	{   2,	IWM_RATE_1M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{   4,	IWM_RATE_2M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{  11,	IWM_RATE_5M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{  22,	IWM_RATE_11M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{  12,	IWM_RATE_6M_PLCP,	IWM_RATE_HT_SISO_MCS_0_PLCP },
	{  18,	IWM_RATE_9M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{  24,	IWM_RATE_12M_PLCP,	IWM_RATE_HT_SISO_MCS_1_PLCP },
	{  36,	IWM_RATE_18M_PLCP,	IWM_RATE_HT_SISO_MCS_2_PLCP },
	{  48,	IWM_RATE_24M_PLCP,	IWM_RATE_HT_SISO_MCS_3_PLCP },
	{  72,	IWM_RATE_36M_PLCP,	IWM_RATE_HT_SISO_MCS_4_PLCP },
	{  96,	IWM_RATE_48M_PLCP,	IWM_RATE_HT_SISO_MCS_5_PLCP },
	{ 108,	IWM_RATE_54M_PLCP,	IWM_RATE_HT_SISO_MCS_6_PLCP },
	{ 128,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_SISO_MCS_7_PLCP },
};
#define IWM_RIDX_CCK	0
#define IWM_RIDX_OFDM	4
#define IWM_RIDX_MAX	(__arraycount(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_)	((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_)	((_i_) >= IWM_RIDX_OFDM)

#ifndef IEEE80211_NO_HT
/* Convert an MCS index into an iwm_rates[] index. */
static const int iwm_mcs2ridx[] = {
	IWM_RATE_MCS_0_INDEX,
	IWM_RATE_MCS_1_INDEX,
	IWM_RATE_MCS_2_INDEX,
	IWM_RATE_MCS_3_INDEX,
	IWM_RATE_MCS_4_INDEX,
	IWM_RATE_MCS_5_INDEX,
	IWM_RATE_MCS_6_INDEX,
	IWM_RATE_MCS_7_INDEX,
};
#endif

struct iwm_nvm_section {
	uint16_t length;
	uint8_t *data;
};

struct iwm_newstate_state {
	struct work ns_wk;
	enum ieee80211_state ns_nstate;
	int ns_arg;
	int ns_generation;
};

static int	iwm_store_cscheme(struct iwm_softc *, uint8_t *, size_t);
static int	iwm_firmware_store_section(struct iwm_softc *,
		    enum iwm_ucode_type, uint8_t *, size_t);
static int	iwm_set_default_calib(struct iwm_softc *, const void *);
static int	iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
static uint32_t	iwm_read_prph(struct iwm_softc *, uint32_t);
static void	iwm_write_prph(struct iwm_softc *, uint32_t, uint32_t);
#ifdef IWM_DEBUG
static int	iwm_read_mem(struct iwm_softc *, uint32_t, void *, int);
#endif
static int	iwm_write_mem(struct iwm_softc *, uint32_t, const void *, int);
static int	iwm_write_mem32(struct iwm_softc *, uint32_t, uint32_t);
static int	iwm_poll_bit(struct iwm_softc *, int, uint32_t, uint32_t, int);
static int	iwm_nic_lock(struct iwm_softc *);
static void	iwm_nic_unlock(struct iwm_softc *);
static void	iwm_set_bits_mask_prph(struct iwm_softc *, uint32_t, uint32_t,
		    uint32_t);
static void	iwm_set_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
static void	iwm_clear_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
static int	iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *,
		    bus_size_t, bus_size_t);
static void	iwm_dma_contig_free(struct iwm_dma_info *);
static int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static void	iwm_disable_rx_dma(struct iwm_softc *);
static void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
		    int);
static void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
static void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
static void	iwm_enable_rfkill_int(struct iwm_softc *);
static int	iwm_check_rfkill(struct iwm_softc *);
static void	iwm_enable_interrupts(struct iwm_softc *);
static void	iwm_restore_interrupts(struct iwm_softc *);
static void	iwm_disable_interrupts(struct iwm_softc *);
static void	iwm_ict_reset(struct iwm_softc *);
static int	iwm_set_hw_ready(struct iwm_softc *);
static int	iwm_prepare_card_hw(struct iwm_softc *);
static void	iwm_apm_config(struct iwm_softc *);
static int	iwm_apm_init(struct iwm_softc *);
static void	iwm_apm_stop(struct iwm_softc *);
static int	iwm_allow_mcast(struct iwm_softc *);
static int	iwm_start_hw(struct iwm_softc *);
static void	iwm_stop_device(struct iwm_softc *);
static void	iwm_nic_config(struct iwm_softc *);
static int	iwm_nic_rx_init(struct iwm_softc *);
static int	iwm_nic_tx_init(struct iwm_softc *);
static int	iwm_nic_init(struct iwm_softc *);
static int	iwm_enable_txq(struct iwm_softc *, int, int, int);
static int	iwm_post_alive(struct iwm_softc *);
static struct iwm_phy_db_entry *
		iwm_phy_db_get_section(struct iwm_softc *,
		    enum iwm_phy_db_section_type, uint16_t);
static int	iwm_phy_db_set_section(struct iwm_softc *,
		    struct iwm_calib_res_notif_phy_db *, uint16_t);
static int	iwm_is_valid_channel(uint16_t);
static uint8_t	iwm_ch_id_to_ch_index(uint16_t);
static uint16_t	iwm_channel_id_to_papd(uint16_t);
static uint16_t	iwm_channel_id_to_txp(struct iwm_softc *, uint16_t);
static int	iwm_phy_db_get_section_data(struct iwm_softc *, uint32_t,
		    uint8_t **, uint16_t *, uint16_t);
static int	iwm_send_phy_db_cmd(struct iwm_softc *, uint16_t, uint16_t,
		    void *);
static int	iwm_phy_db_send_all_channel_groups(struct iwm_softc *,
		    enum iwm_phy_db_section_type, uint8_t);
static int	iwm_send_phy_db_data(struct iwm_softc *);
static void	iwm_te_v2_to_v1(const struct iwm_time_event_cmd_v2 *,
		    struct iwm_time_event_cmd_v1 *);
static int	iwm_send_time_event_cmd(struct iwm_softc *,
		    const struct iwm_time_event_cmd_v2 *);
static void	iwm_protect_session(struct iwm_softc *, struct iwm_node *,
		    uint32_t, uint32_t);
static int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
		    uint16_t, uint8_t *, uint16_t *);
static int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
		    uint16_t *, size_t);
static void	iwm_init_channel_map(struct iwm_softc *, const uint16_t * const,
		    const uint8_t *, size_t);
#ifndef IEEE80211_NO_HT
static void	iwm_setup_ht_rates(struct iwm_softc *);
static void	iwm_htprot_task(void *);
static void	iwm_update_htprot(struct ieee80211com *,
		    struct ieee80211_node *);
static int	iwm_ampdu_rx_start(struct ieee80211com *,
		    struct ieee80211_node *, uint8_t);
static void	iwm_ampdu_rx_stop(struct ieee80211com *,
		    struct ieee80211_node *, uint8_t);
static void	iwm_sta_rx_agg(struct iwm_softc *, struct ieee80211_node *,
		    uint8_t, uint16_t, int);
#ifdef notyet
static int	iwm_ampdu_tx_start(struct ieee80211com *,
		    struct ieee80211_node *, uint8_t);
static void	iwm_ampdu_tx_stop(struct ieee80211com *,
		    struct ieee80211_node *, uint8_t);
#endif
static void	iwm_ba_task(void *);
#endif

static int	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
		    const uint16_t *, const uint16_t *, const uint16_t *,
		    const uint16_t *, const uint16_t *);
static void	iwm_set_hw_address_8000(struct iwm_softc *,
		    struct iwm_nvm_data *, const uint16_t *, const uint16_t *);
static int	iwm_parse_nvm_sections(struct iwm_softc *,
		    struct iwm_nvm_section *);
static int	iwm_nvm_init(struct iwm_softc *);
static int	iwm_firmware_load_sect(struct iwm_softc *, uint32_t,
		    const uint8_t *, uint32_t);
static int	iwm_firmware_load_chunk(struct iwm_softc *, uint32_t,
		    const uint8_t *, uint32_t);
static int	iwm_load_firmware_7000(struct iwm_softc *, enum iwm_ucode_type);
static int	iwm_load_cpu_sections_8000(struct iwm_softc *,
		    struct iwm_fw_sects *, int, int *);
static int	iwm_load_firmware_8000(struct iwm_softc *, enum iwm_ucode_type);
static int	iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
static int	iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
static int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
static int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
static int	iwm_load_ucode_wait_alive(struct iwm_softc *,
		    enum iwm_ucode_type);
static int	iwm_run_init_mvm_ucode(struct iwm_softc *, int);
static int	iwm_rx_addbuf(struct iwm_softc *, int, int);
static int	iwm_calc_rssi(struct iwm_softc *, struct iwm_rx_phy_info *);
static int	iwm_get_signal_strength(struct iwm_softc *,
		    struct iwm_rx_phy_info *);
static void	iwm_rx_rx_phy_cmd(struct iwm_softc *,
		    struct iwm_rx_packet *, struct iwm_rx_data *);
static int	iwm_get_noise(const struct iwm_statistics_rx_non_phy *);
static void	iwm_rx_rx_mpdu(struct iwm_softc *, struct iwm_rx_packet *,
		    struct iwm_rx_data *);
static void	iwm_rx_tx_cmd_single(struct iwm_softc *,
		    struct iwm_rx_packet *, struct iwm_node *);
static void	iwm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
		    struct iwm_rx_data *);
static int	iwm_binding_cmd(struct iwm_softc *, struct iwm_node *,
		    uint32_t);
#if 0
static int	iwm_binding_update(struct iwm_softc *, struct iwm_node *, int);
static int	iwm_binding_add_vif(struct iwm_softc *, struct iwm_node *);
#endif
static void	iwm_phy_ctxt_cmd_hdr(struct iwm_softc *, struct iwm_phy_ctxt *,
		    struct iwm_phy_context_cmd *, uint32_t, uint32_t);
static void	iwm_phy_ctxt_cmd_data(struct iwm_softc *,
		    struct iwm_phy_context_cmd *, struct ieee80211_channel *,
		    uint8_t, uint8_t);
static int	iwm_phy_ctxt_cmd(struct iwm_softc *, struct iwm_phy_ctxt *,
		    uint8_t, uint8_t, uint32_t, uint32_t);
static int	iwm_send_cmd(struct iwm_softc *, struct iwm_host_cmd *);
static int	iwm_send_cmd_pdu(struct iwm_softc *, uint32_t, uint32_t,
		    uint16_t, const void *);
static int	iwm_send_cmd_status(struct iwm_softc *, struct iwm_host_cmd *,
		    uint32_t *);
static int	iwm_send_cmd_pdu_status(struct iwm_softc *, uint32_t, uint16_t,
		    const void *, uint32_t *);
static void	iwm_free_resp(struct iwm_softc *, struct iwm_host_cmd *);
static void	iwm_cmd_done(struct iwm_softc *, int qid, int idx);
#if 0
static void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
		    uint16_t);
#endif
static const struct iwm_rate *
		iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
		    struct ieee80211_frame *, struct iwm_tx_cmd *);
static int	iwm_tx(struct iwm_softc *, struct mbuf *,
		    struct ieee80211_node *, int);
static void	iwm_led_enable(struct iwm_softc *);
static void	iwm_led_disable(struct iwm_softc *);
static int	iwm_led_is_enabled(struct iwm_softc *);
static void	iwm_led_blink_timeout(void *);
static void	iwm_led_blink_start(struct iwm_softc *);
static void	iwm_led_blink_stop(struct iwm_softc *);
static int	iwm_beacon_filter_send_cmd(struct iwm_softc *,
		    struct iwm_beacon_filter_cmd *);
static void	iwm_beacon_filter_set_cqm_params(struct iwm_softc *,
		    struct iwm_node *, struct iwm_beacon_filter_cmd *);
static int	iwm_update_beacon_abort(struct iwm_softc *, struct iwm_node *,
		    int);
static void	iwm_power_build_cmd(struct iwm_softc *, struct iwm_node *,
		    struct iwm_mac_power_cmd *);
static int	iwm_power_mac_update_mode(struct iwm_softc *,
		    struct iwm_node *);
static int	iwm_power_update_device(struct iwm_softc *);
#ifdef notyet
static int	iwm_enable_beacon_filter(struct iwm_softc *, struct iwm_node *);
#endif
static int	iwm_disable_beacon_filter(struct iwm_softc *);
static int	iwm_add_sta_cmd(struct iwm_softc *, struct iwm_node *, int);
static int	iwm_add_aux_sta(struct iwm_softc *);
static uint16_t	iwm_scan_rx_chain(struct iwm_softc *);
static uint32_t	iwm_scan_rate_n_flags(struct iwm_softc *, int, int);
#ifdef notyet
static uint16_t	iwm_get_active_dwell(struct iwm_softc *, int, int);
static uint16_t	iwm_get_passive_dwell(struct iwm_softc *, int);
#endif
static uint8_t	iwm_lmac_scan_fill_channels(struct iwm_softc *,
		    struct iwm_scan_channel_cfg_lmac *, int);
static int	iwm_fill_probe_req(struct iwm_softc *,
		    struct iwm_scan_probe_req *);
static int	iwm_lmac_scan(struct iwm_softc *);
static int	iwm_config_umac_scan(struct iwm_softc *);
static int	iwm_umac_scan(struct iwm_softc *);
static uint8_t	iwm_ridx2rate(struct ieee80211_rateset *, int);
static void	iwm_ack_rates(struct iwm_softc *, struct iwm_node *, int *,
		    int *);
static void	iwm_mac_ctxt_cmd_common(struct iwm_softc *, struct iwm_node *,
		    struct iwm_mac_ctx_cmd *, uint32_t, int);
static void	iwm_mac_ctxt_cmd_fill_sta(struct iwm_softc *, struct iwm_node *,
		    struct iwm_mac_data_sta *, int);
static int	iwm_mac_ctxt_cmd(struct iwm_softc *, struct iwm_node *,
		    uint32_t, int);
static int	iwm_update_quotas(struct iwm_softc *, struct iwm_node *);
static int	iwm_auth(struct iwm_softc *);
static int	iwm_assoc(struct iwm_softc *);
static void	iwm_calib_timeout(void *);
#ifndef IEEE80211_NO_HT
static void	iwm_setrates_task(void *);
static int	iwm_setrates(struct iwm_node *);
#endif
static int	iwm_media_change(struct ifnet *);
static void	iwm_newstate_cb(struct work *, void *);
static int	iwm_newstate(struct ieee80211com *, enum ieee80211_state, int);
static void	iwm_endscan(struct iwm_softc *);
static void	iwm_fill_sf_command(struct iwm_softc *, struct iwm_sf_cfg_cmd *,
		    struct ieee80211_node *);
static int	iwm_sf_config(struct iwm_softc *, int);
static int	iwm_send_bt_init_conf(struct iwm_softc *);
static int	iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
static void	iwm_tt_tx_backoff(struct iwm_softc *, uint32_t);
static int	iwm_init_hw(struct iwm_softc *);
static int	iwm_init(struct ifnet *);
static void	iwm_start(struct ifnet *);
static void	iwm_stop(struct ifnet *, int);
static void	iwm_watchdog(struct ifnet *);
static int	iwm_ioctl(struct ifnet *, u_long, void *);
#ifdef IWM_DEBUG
static const char *iwm_desc_lookup(uint32_t);
static void	iwm_nic_error(struct iwm_softc *);
static void	iwm_nic_umac_error(struct iwm_softc *);
#endif
static void	iwm_notif_intr(struct iwm_softc *);
static int	iwm_intr(void *);
static void	iwm_softintr(void *);
static int	iwm_preinit(struct iwm_softc *);
static void	iwm_attach_hook(device_t);
static void	iwm_attach(device_t, device_t, void *);
#if 0
static void	iwm_init_task(void *);
static int	iwm_activate(device_t, enum devact);
static void	iwm_wakeup(struct iwm_softc *);
#endif
static void	iwm_radiotap_attach(struct iwm_softc *);
static int	iwm_sysctl_fw_loaded_handler(SYSCTLFN_PROTO);

static int iwm_sysctl_root_num;
static int iwm_lar_disable;

#ifndef IWM_DEFAULT_MCC
#define IWM_DEFAULT_MCC "ZZ"
#endif
static char iwm_default_mcc[3] = IWM_DEFAULT_MCC;

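/*
 * Load the firmware image via firmload(9) into fw->fw_rawdata.  The
 * raw image stays cached (IWM_FLAG_FW_LOADED) until an error forces
 * a reload; TLV parsing happens later, in iwm_read_firmware().
 */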
static int
iwm_firmload(struct iwm_softc *sc)
{
	struct iwm_fw_info *fw = &sc->sc_fw;
	firmware_handle_t fwh;
	int err;

	if (ISSET(sc->sc_flags, IWM_FLAG_FW_LOADED))
		return 0;

	/* Open firmware image. */
	err = firmware_open("if_iwm", sc->sc_fwname, &fwh);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not get firmware handle %s\n", sc->sc_fwname);
		return err;
	}

	if (fw->fw_rawdata != NULL && fw->fw_rawsize > 0) {
		kmem_free(fw->fw_rawdata, fw->fw_rawsize);
		fw->fw_rawdata = NULL;
	}

	fw->fw_rawsize = firmware_get_size(fwh);
	/*
	 * Well, this is how the Linux driver checks it ....
	 */
	if (fw->fw_rawsize < sizeof(uint32_t)) {
		aprint_error_dev(sc->sc_dev,
		    "firmware too short: %zd bytes\n", fw->fw_rawsize);
		err = EINVAL;
		goto out;
	}

	/* Read the firmware. */
	fw->fw_rawdata = kmem_alloc(fw->fw_rawsize, KM_SLEEP);
	if (fw->fw_rawdata == NULL) {
		aprint_error_dev(sc->sc_dev,
		    "not enough memory to stock firmware %s\n", sc->sc_fwname);
		err = ENOMEM;
		goto out;
	}
	err = firmware_read(fwh, 0, fw->fw_rawdata, fw->fw_rawsize);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not read firmware %s\n", sc->sc_fwname);
		goto out;
	}

	SET(sc->sc_flags, IWM_FLAG_FW_LOADED);
 out:
	/* caller will release memory, if necessary */

	firmware_close(fwh);
	return err;
}

/*
 * Update ic_curchan from the channel recorded in the most recent RX
 * PHY info when a beacon or probe response arrives; otherwise we are
 * just maintaining the status quo.
 */
static void
iwm_fix_channel(struct iwm_softc *sc, struct mbuf *m)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_frame *wh;
	uint8_t subtype;

	wh = mtod(m, struct ieee80211_frame *);

	if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_MGT)
		return;

	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

	if (subtype != IEEE80211_FC0_SUBTYPE_BEACON &&
	    subtype != IEEE80211_FC0_SUBTYPE_PROBE_RESP)
		return;

	int chan = le32toh(sc->sc_last_phy_info.channel);
	if (chan < __arraycount(ic->ic_channels))
		ic->ic_curchan = &ic->ic_channels[chan];
}

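/*
 * Validate a crypto-scheme TLV.  Nothing is stored: this driver
 * always uses software crypto.
 */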
static int
iwm_store_cscheme(struct iwm_softc *sc, uint8_t *data, size_t dlen)
{
	struct iwm_fw_cscheme_list *l = (struct iwm_fw_cscheme_list *)data;

	if (dlen < sizeof(*l) ||
	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
		return EINVAL;

	/* we don't actually store anything for now, always use s/w crypto */

	return 0;
}

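/*
 * Record one firmware section for the given uCode type.  The first
 * 32 bits of a section hold the device load offset; the rest is the
 * payload, which continues to point into fw_rawdata.
 */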
static int
iwm_firmware_store_section(struct iwm_softc *sc, enum iwm_ucode_type type,
    uint8_t *data, size_t dlen)
{
	struct iwm_fw_sects *fws;
	struct iwm_fw_onesect *fwone;

	if (type >= IWM_UCODE_TYPE_MAX)
		return EINVAL;
	if (dlen < sizeof(uint32_t))
		return EINVAL;

	fws = &sc->sc_fw.fw_sects[type];
	if (fws->fw_count >= IWM_UCODE_SECT_MAX)
		return EINVAL;

	fwone = &fws->fw_sect[fws->fw_count];

	/* first 32bit are device load offset */
	memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));

	/* rest is data */
	fwone->fws_data = data + sizeof(uint32_t);
	fwone->fws_len = dlen - sizeof(uint32_t);

	/* for freeing the buffer during driver unload */
	fwone->fws_alloc = data;
	fwone->fws_allocsize = dlen;

	fws->fw_count++;
	fws->fw_totlen += fwone->fws_len;

	return 0;
}

struct iwm_tlv_calib_data {
	uint32_t ucode_type;
	struct iwm_tlv_calib_ctrl calib;
} __packed;

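/*
 * Record the default calibration flow/event triggers from an
 * IWM_UCODE_TLV_DEF_CALIB section, indexed by uCode type.
 */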
static int
iwm_set_default_calib(struct iwm_softc *sc, const void *data)
{
	const struct iwm_tlv_calib_data *def_calib = data;
	uint32_t ucode_type = le32toh(def_calib->ucode_type);

	if (ucode_type >= IWM_UCODE_TYPE_MAX) {
		DPRINTF(("%s: Wrong ucode_type %u for default calibration.\n",
		    DEVNAME(sc), ucode_type));
		return EINVAL;
	}

	sc->sc_default_calib[ucode_type].flow_trigger =
	    def_calib->calib.flow_trigger;
	sc->sc_default_calib[ucode_type].event_trigger =
	    def_calib->calib.event_trigger;

	return 0;
}

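/*
 * Parse the TLV-structured firmware image: store uCode sections with
 * iwm_firmware_store_section() and record capability/API flags, scan
 * limits, and the version string in the softc.  Concurrent callers
 * sleep until the first parse has completed.
 */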
static int
iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
{
	struct iwm_fw_info *fw = &sc->sc_fw;
	struct iwm_tlv_ucode_header *uhdr;
	struct iwm_ucode_tlv tlv;
	enum iwm_ucode_tlv_type tlv_type;
	uint8_t *data;
	int err, status;
	size_t len;

	if (ucode_type != IWM_UCODE_TYPE_INIT &&
	    fw->fw_status == IWM_FW_STATUS_DONE)
		return 0;

	if (fw->fw_status == IWM_FW_STATUS_NONE) {
		fw->fw_status = IWM_FW_STATUS_INPROGRESS;
	} else {
		while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
			tsleep(&sc->sc_fw, 0, "iwmfwp", 0);
	}
	status = fw->fw_status;

	if (status == IWM_FW_STATUS_DONE)
		return 0;

	err = iwm_firmload(sc);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not read firmware %s (error %d)\n",
		    sc->sc_fwname, err);
		goto out;
	}

	sc->sc_capaflags = 0;
	sc->sc_capa_n_scan_channels = IWM_MAX_NUM_SCAN_CHANNELS;
	memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
	memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));

	uhdr = (void *)fw->fw_rawdata;
	if (*(uint32_t *)fw->fw_rawdata != 0 ||
	    le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
		aprint_error_dev(sc->sc_dev, "invalid firmware %s\n",
		    sc->sc_fwname);
		err = EINVAL;
		goto out;
	}

	snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%d.%d (API ver %d)",
	    IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
	    IWM_UCODE_MINOR(le32toh(uhdr->ver)),
	    IWM_UCODE_API(le32toh(uhdr->ver)));
	data = uhdr->data;
	len = fw->fw_rawsize - sizeof(*uhdr);

	while (len >= sizeof(tlv)) {
		size_t tlv_len;
		void *tlv_data;

		memcpy(&tlv, data, sizeof(tlv));
		tlv_len = le32toh(tlv.length);
		tlv_type = le32toh(tlv.type);

		len -= sizeof(tlv);
		data += sizeof(tlv);
		tlv_data = data;

		if (len < tlv_len) {
			aprint_error_dev(sc->sc_dev,
			    "firmware too short: %zu bytes\n", len);
			err = EINVAL;
			goto parse_out;
		}

		switch (tlv_type) {
		case IWM_UCODE_TLV_PROBE_MAX_LEN:
			if (tlv_len < sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capa_max_probe_len =
			    le32toh(*(uint32_t *)tlv_data);
			/* limit it to something sensible */
			if (sc->sc_capa_max_probe_len >
			    IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
				err = EINVAL;
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_PAN:
			if (tlv_len) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
			break;
		case IWM_UCODE_TLV_FLAGS:
			if (tlv_len < sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			/*
			 * Apparently there can be many flags, but Linux driver
			 * parses only the first one, and so do we.
			 *
			 * XXX: why does this override IWM_UCODE_TLV_PAN?
			 * Intentional or a bug?  Observations from
			 * current firmware file:
			 *  1) TLV_PAN is parsed first
			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
			 * ==> this resets TLV_PAN to itself... hnnnk
			 */
			sc->sc_capaflags = le32toh(*(uint32_t *)tlv_data);
			break;
		case IWM_UCODE_TLV_CSCHEME:
			err = iwm_store_cscheme(sc, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_NUM_OF_CPU: {
			uint32_t num_cpu;
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			num_cpu = le32toh(*(uint32_t *)tlv_data);
			if (num_cpu < 1 || num_cpu > 2) {
				err = EINVAL;
				goto parse_out;
			}
			break;
		}
		case IWM_UCODE_TLV_SEC_RT:
			err = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_SEC_INIT:
			err = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_INIT, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_SEC_WOWLAN:
			err = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_WOW, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_DEF_CALIB:
			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
				err = EINVAL;
				goto parse_out;
			}
			err = iwm_set_default_calib(sc, tlv_data);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_PHY_SKU:
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_fw_phy_config = le32toh(*(uint32_t *)tlv_data);
			break;

		case IWM_UCODE_TLV_API_CHANGES_SET: {
			struct iwm_ucode_api *api;
			if (tlv_len != sizeof(*api)) {
				err = EINVAL;
				goto parse_out;
			}
			api = (struct iwm_ucode_api *)tlv_data;
			/* Flags may exceed 32 bits in future firmware. */
			if (le32toh(api->api_index) > 0) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_ucode_api = le32toh(api->api_flags);
			break;
		}

		case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
			struct iwm_ucode_capa *capa;
			int idx, i;
			if (tlv_len != sizeof(*capa)) {
				err = EINVAL;
				goto parse_out;
			}
			capa = (struct iwm_ucode_capa *)tlv_data;
			idx = le32toh(capa->api_index);
			if (idx >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
				err = EINVAL;
				goto parse_out;
			}
			for (i = 0; i < 32; i++) {
				if (!ISSET(le32toh(capa->api_capa), __BIT(i)))
					continue;
				setbit(sc->sc_enabled_capa, i + (32 * idx));
			}
			break;
		}

		case IWM_UCODE_TLV_FW_UNDOCUMENTED1:
		case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
		case IWM_UCODE_TLV_FW_GSCAN_CAPA:
			/* ignore, not used by current driver */
			break;

		case IWM_UCODE_TLV_SEC_RT_USNIFFER:
			err = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
			    tlv_len);
			if (err)
				goto parse_out;
			break;

		case IWM_UCODE_TLV_N_SCAN_CHANNELS:
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capa_n_scan_channels =
			    le32toh(*(uint32_t *)tlv_data);
			break;

		case IWM_UCODE_TLV_FW_VERSION:
			if (tlv_len != sizeof(uint32_t) * 3) {
				err = EINVAL;
				goto parse_out;
			}
			snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
			    "%d.%d.%d",
			    le32toh(((uint32_t *)tlv_data)[0]),
			    le32toh(((uint32_t *)tlv_data)[1]),
			    le32toh(((uint32_t *)tlv_data)[2]));
			break;

		default:
			DPRINTF(("%s: unknown firmware section %d, abort\n",
			    DEVNAME(sc), tlv_type));
			err = EINVAL;
			goto parse_out;
		}

		len -= roundup(tlv_len, 4);
		data += roundup(tlv_len, 4);
	}

	KASSERT(err == 0);

 parse_out:
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "firmware parse error, section type %d\n", tlv_type);
	}

	if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
		aprint_error_dev(sc->sc_dev,
		    "device uses unsupported power ops\n");
		err = ENOTSUP;
	}

 out:
	if (err)
		fw->fw_status = IWM_FW_STATUS_NONE;
	else
		fw->fw_status = IWM_FW_STATUS_DONE;
	wakeup(&sc->sc_fw);

	if (err && fw->fw_rawdata != NULL) {
		kmem_free(fw->fw_rawdata, fw->fw_rawsize);
		fw->fw_rawdata = NULL;
		CLR(sc->sc_flags, IWM_FLAG_FW_LOADED);
		/* don't touch fw->fw_status */
		memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
	}
	return err;
}

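/*
 * Indirect access to the device's periphery registers, via the HBUS
 * target address/data registers.  The NIC must be awake (see
 * iwm_nic_lock()) for these accesses to work reliably.
 */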
static uint32_t
iwm_read_prph(struct iwm_softc *sc, uint32_t addr)
{
	IWM_WRITE(sc,
	    IWM_HBUS_TARG_PRPH_RADDR, ((addr & 0x000fffff) | (3 << 24)));
	IWM_BARRIER_READ_WRITE(sc);
	return IWM_READ(sc, IWM_HBUS_TARG_PRPH_RDAT);
}

static void
iwm_write_prph(struct iwm_softc *sc, uint32_t addr, uint32_t val)
{
	IWM_WRITE(sc,
	    IWM_HBUS_TARG_PRPH_WADDR, ((addr & 0x000fffff) | (3 << 24)));
	IWM_BARRIER_WRITE(sc);
	IWM_WRITE(sc, IWM_HBUS_TARG_PRPH_WDAT, val);
}

#ifdef IWM_DEBUG
static int
iwm_read_mem(struct iwm_softc *sc, uint32_t addr, void *buf, int dwords)
{
	int offs;
	uint32_t *vals = buf;

	if (iwm_nic_lock(sc)) {
		IWM_WRITE(sc, IWM_HBUS_TARG_MEM_RADDR, addr);
		for (offs = 0; offs < dwords; offs++)
			vals[offs] = IWM_READ(sc, IWM_HBUS_TARG_MEM_RDAT);
		iwm_nic_unlock(sc);
		return 0;
	}
	return EBUSY;
}
#endif

static int
iwm_write_mem(struct iwm_softc *sc, uint32_t addr, const void *buf, int dwords)
{
	int offs;
	const uint32_t *vals = buf;

	if (iwm_nic_lock(sc)) {
		IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WADDR, addr);
		/* WADDR auto-increments */
		for (offs = 0; offs < dwords; offs++) {
			uint32_t val = vals ? vals[offs] : 0;
			IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WDAT, val);
		}
		iwm_nic_unlock(sc);
		return 0;
	}
	return EBUSY;
}

static int
iwm_write_mem32(struct iwm_softc *sc, uint32_t addr, uint32_t val)
{
	return iwm_write_mem(sc, addr, &val, 1);
}

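/*
 * Poll `reg' until (value & mask) == (bits & mask), in 10us steps for
 * at most `timo' microseconds.  Returns non-zero on success and zero
 * on timeout.
 */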
static int
iwm_poll_bit(struct iwm_softc *sc, int reg, uint32_t bits, uint32_t mask,
    int timo)
{
	for (;;) {
		if ((IWM_READ(sc, reg) & mask) == (bits & mask)) {
			return 1;
		}
		if (timo < 10) {
			return 0;
		}
		timo -= 10;
		DELAY(10);
	}
}

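/*
 * Request MAC access and wait for the clock to become ready.  Returns
 * non-zero when the NIC may be accessed; on failure the device is
 * reset via a forced NMI.  iwm_nic_unlock() drops the access request.
 */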
static int
iwm_nic_lock(struct iwm_softc *sc)
{
	int rv = 0;

	if (sc->sc_cmd_hold_nic_awake)
		return 1;

	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
		DELAY(2);

	if (iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
	    IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP, 15000)) {
		rv = 1;
	} else {
		aprint_error_dev(sc->sc_dev, "resetting device via NMI\n");
		IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_FORCE_NMI);
	}

	return rv;
}

static void
iwm_nic_unlock(struct iwm_softc *sc)
{

	if (sc->sc_cmd_hold_nic_awake)
		return;

	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}

static void
iwm_set_bits_mask_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits,
    uint32_t mask)
{
	uint32_t val;

	/* XXX: no error path? */
	if (iwm_nic_lock(sc)) {
		val = iwm_read_prph(sc, reg) & mask;
		val |= bits;
		iwm_write_prph(sc, reg, val);
		iwm_nic_unlock(sc);
	}
}

static void
iwm_set_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
{
	iwm_set_bits_mask_prph(sc, reg, bits, ~0);
}

static void
iwm_clear_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
{
	iwm_set_bits_mask_prph(sc, reg, 0, ~bits);
}

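/*
 * Allocate a physically contiguous, zeroed DMA buffer of `size' bytes
 * with the requested alignment and map it into kernel virtual address
 * space.
 */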
static int
iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
    bus_size_t size, bus_size_t alignment)
{
	int nsegs, err;
	void *va;

	dma->tag = tag;
	dma->size = size;

	err = bus_dmamap_create(tag, size, 1, size, 0, BUS_DMA_NOWAIT,
	    &dma->map);
	if (err)
		goto fail;

	err = bus_dmamem_alloc(tag, size, alignment, 0, &dma->seg, 1, &nsegs,
	    BUS_DMA_NOWAIT);
	if (err)
		goto fail;

	err = bus_dmamem_map(tag, &dma->seg, 1, size, &va, BUS_DMA_NOWAIT);
	if (err)
		goto fail;
	dma->vaddr = va;

	err = bus_dmamap_load(tag, dma->map, dma->vaddr, size, NULL,
	    BUS_DMA_NOWAIT);
	if (err)
		goto fail;

	memset(dma->vaddr, 0, size);
	bus_dmamap_sync(tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE);
	dma->paddr = dma->map->dm_segs[0].ds_addr;

	return 0;

 fail:	iwm_dma_contig_free(dma);
	return err;
}

static void
iwm_dma_contig_free(struct iwm_dma_info *dma)
{
	if (dma->map != NULL) {
		if (dma->vaddr != NULL) {
			bus_dmamap_sync(dma->tag, dma->map, 0, dma->size,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(dma->tag, dma->map);
			bus_dmamem_unmap(dma->tag, dma->vaddr, dma->size);
			bus_dmamem_free(dma->tag, &dma->seg, 1);
			dma->vaddr = NULL;
		}
		bus_dmamap_destroy(dma->tag, dma->map);
		dma->map = NULL;
	}
}

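/*
 * Allocate the RX ring: the 256-byte aligned descriptor array, the
 * 16-byte aligned status area, and one DMA map plus receive buffer
 * per ring slot.
 */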
static int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	bus_size_t size;
	int i, err;

	ring->cur = 0;

	/* Allocate RX descriptors (256-byte aligned). */
	size = IWM_RX_RING_COUNT * sizeof(uint32_t);
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate RX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/* Allocate RX status area (16-byte aligned). */
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
	    sizeof(*ring->stat), 16);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate RX status DMA memory\n");
		goto fail;
	}
	ring->stat = ring->stat_dma.vaddr;

	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];

		memset(data, 0, sizeof(*data));
		err = bus_dmamap_create(sc->sc_dmat, IWM_RBUF_SIZE, 1,
		    IWM_RBUF_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &data->map);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could not create RX buf DMA map\n");
			goto fail;
		}

		err = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i);
		if (err)
			goto fail;
	}
	return 0;

 fail:	iwm_free_rx_ring(sc, ring);
	return err;
}

static void
iwm_disable_rx_dma(struct iwm_softc *sc)
{
	int ntries;

	if (iwm_nic_lock(sc)) {
		IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
		for (ntries = 0; ntries < 1000; ntries++) {
			if (IWM_READ(sc, IWM_FH_MEM_RSSR_RX_STATUS_REG) &
			    IWM_FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE)
				break;
			DELAY(10);
		}
		iwm_nic_unlock(sc);
	}
}

void
iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	ring->cur = 0;
	memset(ring->stat, 0, sizeof(*ring->stat));
	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
	    ring->stat_dma.size, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

static void
iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	int i;

	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->stat_dma);

	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(sc->sc_dmat, data->map);
			data->map = NULL;
		}
	}
}

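/*
 * Allocate a TX ring.  Every ring gets a 256-byte aligned descriptor
 * array; only rings up to and including the command queue also get
 * command buffers and per-slot DMA maps, since higher rings are
 * unused.
 */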
static int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	int i, err, nsegs;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWM_TX_RING_COUNT * sizeof(struct iwm_tfd);
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate TX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/*
	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
	 * to allocate commands space for other rings.
	 */
	if (qid > IWM_CMD_QUEUE)
		return 0;

	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate TX cmd DMA memory\n");
		goto fail;
	}
	ring->cmd = ring->cmd_dma.vaddr;

	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];
		size_t mapsize;

		data->cmd_paddr = paddr;
		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
		    + offsetof(struct iwm_tx_cmd, scratch);
		paddr += sizeof(struct iwm_device_cmd);

		/* FW commands may require more mapped space than packets. */
		if (qid == IWM_CMD_QUEUE) {
			mapsize = IWM_RBUF_SIZE;
			nsegs = 1;
		} else {
			mapsize = MCLBYTES;
			nsegs = IWM_NUM_OF_TBS - 2;
		}
		err = bus_dmamap_create(sc->sc_dmat, mapsize, nsegs, mapsize,
		    0, BUS_DMA_NOWAIT, &data->map);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could not create TX buf DMA map\n");
			goto fail;
		}
	}
	KASSERT(paddr == ring->cmd_dma.paddr + size);
	return 0;

 fail:	iwm_free_tx_ring(sc, ring);
	return err;
}

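/*
 * On NICs with the APMG wake-up workaround (apmg_wake_up_wa), the
 * driver must keep the NIC awake while host commands are in flight:
 * iwm_set_cmd_in_flight() asserts the MAC access request before a
 * command is issued, and iwm_clear_cmd_in_flight() releases it once
 * the command queue has drained.
 */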
static void
iwm_clear_cmd_in_flight(struct iwm_softc *sc)
{

	if (!sc->apmg_wake_up_wa)
		return;

	if (!sc->sc_cmd_hold_nic_awake) {
		aprint_error_dev(sc->sc_dev,
		    "cmd_hold_nic_awake not set\n");
		return;
	}

	sc->sc_cmd_hold_nic_awake = 0;
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}

static int
iwm_set_cmd_in_flight(struct iwm_softc *sc)
{
	int ret;

	/*
	 * wake up the NIC to make sure that the firmware will see the host
	 * command - we will let the NIC sleep once all the host commands
	 * returned. This needs to be done only on NICs that have
	 * apmg_wake_up_wa set.
	 */
	if (sc->apmg_wake_up_wa && !sc->sc_cmd_hold_nic_awake) {

		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
		    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

		ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
		    IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
		    (IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
		     IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP),
		    15000);
		if (ret == 0) {
			IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
			    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			aprint_error_dev(sc->sc_dev,
			    "failed to wake NIC for hcmd\n");
			return EIO;
		}
		sc->sc_cmd_hold_nic_awake = 1;
	}

	return 0;
}

static void
iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	/* Clear TX descriptors. */
	memset(ring->desc, 0, ring->desc_dma.size);
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0,
	    ring->desc_dma.size, BUS_DMASYNC_PREWRITE);
	sc->qfullmsk &= ~(1 << ring->qid);
	ring->queued = 0;
	ring->cur = 0;

	if (ring->qid == IWM_CMD_QUEUE && sc->sc_cmd_hold_nic_awake)
		iwm_clear_cmd_in_flight(sc);
}

static void
iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->cmd_dma);

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(sc->sc_dmat, data->map);
			data->map = NULL;
		}
	}
}

static void
iwm_enable_rfkill_int(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INT_BIT_RF_KILL;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}

static int
iwm_check_rfkill(struct iwm_softc *sc)
{
	uint32_t v;
	int s;
	int rv;

	s = splnet();

	/*
	 * "documentation" is not really helpful here:
	 *  27:	HW_RF_KILL_SW
	 *	Indicates state of (platform's) hardware RF-Kill switch
	 *
	 * But apparently when it's off, it's on ...
	 */
	v = IWM_READ(sc, IWM_CSR_GP_CNTRL);
	rv = (v & IWM_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) == 0;
	if (rv) {
		sc->sc_flags |= IWM_FLAG_RFKILL;
	} else {
		sc->sc_flags &= ~IWM_FLAG_RFKILL;
	}

	splx(s);
	return rv;
}

static void
iwm_enable_interrupts(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}

static void
iwm_restore_interrupts(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}

static void
iwm_disable_interrupts(struct iwm_softc *sc)
{
	int s = splnet();

	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	/* acknowledge all interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);

	splx(s);
}

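/*
 * Reset the interrupt cause table (ICT) and switch the driver to
 * ICT-mode interrupt delivery, pointing the device at the 4KB-aligned
 * ICT DMA area.
 */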
static void
iwm_ict_reset(struct iwm_softc *sc)
{
	iwm_disable_interrupts(sc);

	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
	bus_dmamap_sync(sc->sc_dmat, sc->ict_dma.map, 0, IWM_ICT_SIZE,
	    BUS_DMASYNC_PREWRITE);
	sc->ict_cur = 0;

	/* Set physical address of ICT (4KB aligned). */
	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
	    IWM_CSR_DRAM_INT_TBL_ENABLE |
	    IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK |
	    IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER |
	    sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWM_FLAG_USE_ICT;

	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);
}

#define IWM_HW_READY_TIMEOUT 50
static int
iwm_set_hw_ready(struct iwm_softc *sc)
{
	int ready;

	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	ready = iwm_poll_bit(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
	    IWM_HW_READY_TIMEOUT);
	if (ready)
		IWM_SETBITS(sc, IWM_CSR_MBOX_SET_REG,
		    IWM_CSR_MBOX_SET_REG_OS_ALIVE);

	return ready;
}
#undef IWM_HW_READY_TIMEOUT

static int
iwm_prepare_card_hw(struct iwm_softc *sc)
{
	int t = 0;

	if (iwm_set_hw_ready(sc))
		return 0;

	DELAY(100);

	/* If HW is not ready, prepare the conditions to check again */
	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_PREPARE);

	do {
		if (iwm_set_hw_ready(sc))
			return 0;
		DELAY(200);
		t += 200;
	} while (t < 150000);

	return ETIMEDOUT;
}

static void
iwm_apm_config(struct iwm_softc *sc)
{
	pcireg_t reg;

	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
	    sc->sc_cap_off + PCIE_LCSR);
	if (reg & PCIE_LCSR_ASPM_L1) {
		/* Um the Linux driver prints "Disabling L0S for this one ... */
		IWM_SETBITS(sc, IWM_CSR_GIO_REG,
		    IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
	} else {
		/* ... and "Enabling" here */
		IWM_CLRBITS(sc, IWM_CSR_GIO_REG,
		    IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
	}
}

/*
 * Start up NIC's basic functionality after it has been reset
 * e.g. after platform boot or shutdown.
 * NOTE: This does not load uCode nor start the embedded processor
 */
static int
iwm_apm_init(struct iwm_softc *sc)
{
	int err = 0;

	/* Disable L0S exit timer (platform NMI workaround) */
	if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000) {
		IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
		    IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
	}

	/*
	 * Disable L0s without affecting L1;
	 * don't wait for ICH L0s (ICH bug W/A)
	 */
	IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
	    IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	IWM_SETBITS(sc, IWM_CSR_DBG_HPET_MEM_REG, IWM_CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 */
	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	iwm_apm_config(sc);

#if 0 /* not for 7k/8k */
	/* Configure analog phase-lock-loop before activating to D0A */
	if (trans->cfg->base_params->pll_cfg_val)
		IWM_SETBITS(trans, IWM_CSR_ANA_PLL_CFG,
		    trans->cfg->base_params->pll_cfg_val);
#endif

	/*
	 * Set "initialization complete" bit to move adapter from
	 * D0U* --> D0A* (powered-up active) state.
	 */
	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL, IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * Wait for clock stabilization; once stabilized, access to
	 * device-internal resources is supported, e.g. iwm_write_prph()
	 * and accesses to uCode SRAM.
	 */
	if (!iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
		aprint_error_dev(sc->sc_dev,
		    "timeout waiting for clock stabilization\n");
		err = ETIMEDOUT;
		goto out;
	}

	if (sc->host_interrupt_operation_mode) {
		/*
		 * This is a bit of an abuse - This is needed for 7260 / 3160
		 * only check host_interrupt_operation_mode even if this is
		 * not related to host_interrupt_operation_mode.
		 *
		 * Enable the oscillator to count wake up time for L1 exit. This
		 * consumes slightly more power (100uA) - but allows to be sure
		 * that we wake up from L1 on time.
		 *
		 * This looks weird: read twice the same register, discard the
		 * value, set a bit, and yet again, read that same register
		 * just to discard the value. But that's the way the hardware
		 * seems to like it.
		 */
		iwm_read_prph(sc, IWM_OSC_CLK);
		iwm_read_prph(sc, IWM_OSC_CLK);
		iwm_set_bits_prph(sc, IWM_OSC_CLK, IWM_OSC_CLK_FORCE_CONTROL);
		iwm_read_prph(sc, IWM_OSC_CLK);
		iwm_read_prph(sc, IWM_OSC_CLK);
	}

	/*
	 * Enable DMA clock and wait for it to stabilize.
	 *
	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
	 * do not disable clocks. This preserves any hardware bits already
	 * set by default in "CLK_CTRL_REG" after reset.
	 */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
		iwm_write_prph(sc, IWM_APMG_CLK_EN_REG,
		    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
		DELAY(20);

		/* Disable L1-Active */
		iwm_set_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

		/* Clear the interrupt in APMG if the NIC is in RFKILL */
		iwm_write_prph(sc, IWM_APMG_RTC_INT_STT_REG,
		    IWM_APMG_RTC_INT_STT_RFKILL);
	}
 out:
	if (err)
		aprint_error_dev(sc->sc_dev, "apm init error %d\n", err);
	return err;
}

static void
iwm_apm_stop(struct iwm_softc *sc)
{
	/* stop device's busmaster DMA activity */
	IWM_SETBITS(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_STOP_MASTER);

	if (!iwm_poll_bit(sc, IWM_CSR_RESET,
	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED,
	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100))
		aprint_error_dev(sc->sc_dev, "timeout waiting for master\n");
	DPRINTF(("iwm apm stop\n"));
}

static int
iwm_start_hw(struct iwm_softc *sc)
{
	int err;

	err = iwm_prepare_card_hw(sc);
	if (err)
		return err;

	/* Reset the entire device */
	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
	DELAY(10);

	err = iwm_apm_init(sc);
	if (err)
		return err;

	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);

	return 0;
}

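/*
 * Stop the device: disable interrupts, halt the TX scheduler and all
 * DMA channels, reset the RX and TX rings, stop the APM, and reset
 * the on-board processor, leaving only the RF kill interrupt armed.
 */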
1706 static void
1707 iwm_stop_device(struct iwm_softc *sc)
1708 {
1709 int chnl, ntries;
1710 int qid;
1711
1712 iwm_disable_interrupts(sc);
1713 sc->sc_flags &= ~IWM_FLAG_USE_ICT;
1714
1715 /* Deactivate TX scheduler. */
1716 iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1717
1718 /* Stop all DMA channels. */
1719 if (iwm_nic_lock(sc)) {
1720 for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1721 IWM_WRITE(sc,
1722 IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
1723 for (ntries = 0; ntries < 200; ntries++) {
1724 uint32_t r;
1725
1726 r = IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG);
1727 if (r & IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(
1728 chnl))
1729 break;
1730 DELAY(20);
1731 }
1732 }
1733 iwm_nic_unlock(sc);
1734 }
1735 iwm_disable_rx_dma(sc);
1736
1737 iwm_reset_rx_ring(sc, &sc->rxq);
1738
1739 for (qid = 0; qid < __arraycount(sc->txq); qid++)
1740 iwm_reset_tx_ring(sc, &sc->txq[qid]);
1741
1742 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
1743 /* Power-down device's busmaster DMA clocks */
1744 if (iwm_nic_lock(sc)) {
1745 iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
1746 IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1747 DELAY(5);
1748 iwm_nic_unlock(sc);
1749 }
1750 }
1751
1752 /* Make sure (redundant) we've released our request to stay awake */
1753 IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1754 IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1755
1756 /* Stop the device, and put it in low power state */
1757 iwm_apm_stop(sc);
1758
1759 /*
1760 * Upon stop, the APM issues an interrupt if HW RF kill is set.
1761 * Clean again the interrupt here
1762 */
1763 iwm_disable_interrupts(sc);
1764
1765 /* Reset the on-board processor. */
1766 IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
1767
1768 /* Even though we stop the HW we still want the RF kill interrupt. */
1769 iwm_enable_rfkill_int(sc);
1770 iwm_check_rfkill(sc);
1771 }
1772
1773 static void
1774 iwm_nic_config(struct iwm_softc *sc)
1775 {
1776 uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
1777 uint32_t reg_val = 0;
1778
1779 radio_cfg_type = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
1780 IWM_FW_PHY_CFG_RADIO_TYPE_POS;
1781 radio_cfg_step = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
1782 IWM_FW_PHY_CFG_RADIO_STEP_POS;
1783 radio_cfg_dash = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
1784 IWM_FW_PHY_CFG_RADIO_DASH_POS;
1785
1786 reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
1787 IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
1788 reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
1789 IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
1790
1791 /* radio configuration */
1792 reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
1793 reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
1794 reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
1795
1796 IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);
1797
1798 DPRINTF(("Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
1799 radio_cfg_step, radio_cfg_dash));
1800
1801 /*
1802 	 * Workaround: the NIC is stuck in a reset state after early PCIe
1803 	 * power off (PCIe power is lost before PERST# is asserted), causing
1804 	 * the ME FW to lose ownership and become unable to obtain it back.
1805 */
1806 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
1807 iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
1808 IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
1809 ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
1810 }
1811 }
1812
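/*
 * Initialize the RX DMA engine: clear the ring pointers, point the
 * hardware at the RX descriptor ring (256-byte aligned, hence the
 * paddr >> 8) and the RX status area (16-byte aligned, paddr >> 4),
 * and enable the RX FIFO with 4KB receive buffers.
 */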
1813 static int
1814 iwm_nic_rx_init(struct iwm_softc *sc)
1815 {
1816 if (!iwm_nic_lock(sc))
1817 return EBUSY;
1818
1819 memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1820 bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
1821 0, sc->rxq.stat_dma.size,
1822 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1823
1824 iwm_disable_rx_dma(sc);
1825 IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
1826 IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
1827 IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
1828 IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
1829
1830 /* Set physical address of RX ring (256-byte aligned). */
1831 IWM_WRITE(sc,
1832 IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);
1833
1834 /* Set physical address of RX status (16-byte aligned). */
1835 IWM_WRITE(sc,
1836 IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);
1837
1838 /* Enable RX. */
1839 IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
1840 IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
1841 IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY | /* HW bug */
1842 IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
1843 IWM_FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
1844 IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
1845 (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
1846 IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);
1847
1848 IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
1849
1850 	/* Workaround for an interrupt coalescing bug in 7260 and 3160. */
1851 if (sc->host_interrupt_operation_mode)
1852 IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);
1853
1854 /*
1855 * This value should initially be 0 (before preparing any RBs),
1856 * and should be 8 after preparing the first 8 RBs (for example).
1857 */
1858 IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);
1859
1860 iwm_nic_unlock(sc);
1861
1862 return 0;
1863 }
1864
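/*
 * Initialize the TX DMA engine: point the hardware at the "keep warm"
 * page and at each TX ring's descriptor array, and put the scheduler
 * into auto-active mode.
 */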
1865 static int
1866 iwm_nic_tx_init(struct iwm_softc *sc)
1867 {
1868 int qid;
1869
1870 if (!iwm_nic_lock(sc))
1871 return EBUSY;
1872
1873 /* Deactivate TX scheduler. */
1874 iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1875
1876 /* Set physical address of "keep warm" page (16-byte aligned). */
1877 IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);
1878
1879 for (qid = 0; qid < __arraycount(sc->txq); qid++) {
1880 struct iwm_tx_ring *txq = &sc->txq[qid];
1881
1882 /* Set physical address of TX ring (256-byte aligned). */
1883 IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
1884 txq->desc_dma.paddr >> 8);
1885 DPRINTF(("loading ring %d descriptors (%p) at %"PRIxMAX"\n",
1886 qid, txq->desc, (uintmax_t)(txq->desc_dma.paddr >> 8)));
1887 }
1888
1889 iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);
1890
1891 iwm_nic_unlock(sc);
1892
1893 return 0;
1894 }
1895
1896 static int
1897 iwm_nic_init(struct iwm_softc *sc)
1898 {
1899 int err;
1900
1901 iwm_apm_init(sc);
1902 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
1903 iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
1904 IWM_APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
1905 ~IWM_APMG_PS_CTRL_MSK_PWR_SRC);
1906 }
1907
1908 iwm_nic_config(sc);
1909
1910 err = iwm_nic_rx_init(sc);
1911 if (err)
1912 return err;
1913
1914 err = iwm_nic_tx_init(sc);
1915 if (err)
1916 return err;
1917
1918 DPRINTF(("shadow registers enabled\n"));
1919 IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
1920
1921 return 0;
1922 }
1923
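/* TX FIFO assigned to each EDCA access category. */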
1924 static const uint8_t iwm_ac_to_tx_fifo[] = {
1925 IWM_TX_FIFO_VO,
1926 IWM_TX_FIFO_VI,
1927 IWM_TX_FIFO_BE,
1928 IWM_TX_FIFO_BK,
1929 };
1930
1931 static int
1932 iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
1933 {
1934 if (!iwm_nic_lock(sc)) {
1935 DPRINTF(("%s: cannot enable txq %d\n", DEVNAME(sc), qid));
1936 return EBUSY;
1937 }
1938
1939 IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
1940
1941 if (qid == IWM_CMD_QUEUE) {
1942 iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1943 (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
1944 | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
1945
1946 iwm_nic_unlock(sc);
1947
1948 iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));
1949
1950 if (!iwm_nic_lock(sc))
1951 return EBUSY;
1952 iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
1953 iwm_nic_unlock(sc);
1954
1955 iwm_write_mem32(sc,
1956 sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
1957
1958 /* Set scheduler window size and frame limit. */
1959 iwm_write_mem32(sc,
1960 sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
1961 sizeof(uint32_t),
1962 ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
1963 IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
1964 ((IWM_FRAME_LIMIT
1965 << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
1966 IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
1967
1968 if (!iwm_nic_lock(sc))
1969 return EBUSY;
1970 iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1971 (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
1972 (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
1973 (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
1974 IWM_SCD_QUEUE_STTS_REG_MSK);
1975 } else {
1976 struct iwm_scd_txq_cfg_cmd cmd;
1977 int err;
1978
1979 iwm_nic_unlock(sc);
1980
1981 memset(&cmd, 0, sizeof(cmd));
1982 cmd.scd_queue = qid;
1983 cmd.enable = 1;
1984 cmd.sta_id = sta_id;
1985 cmd.tx_fifo = fifo;
1986 cmd.aggregate = 0;
1987 cmd.window = IWM_FRAME_LIMIT;
1988
1989 err = iwm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, 0, sizeof(cmd),
1990 &cmd);
1991 if (err)
1992 return err;
1993
1994 if (!iwm_nic_lock(sc))
1995 return EBUSY;
1996 }
1997
1998 iwm_write_prph(sc, IWM_SCD_EN_CTRL,
1999 iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);
2000
2001 iwm_nic_unlock(sc);
2002
2003 DPRINTF(("enabled txq %d FIFO %d\n", qid, fifo));
2004
2005 return 0;
2006 }
2007
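/*
 * Finish bringing the device up once the firmware has reported alive:
 * verify the scheduler's SRAM base address, reset the ICT interrupt
 * table, clear the scheduler context in SRAM, enable the command
 * queue and all TX DMA channels, and activate the TX scheduler.
 */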
2008 static int
2009 iwm_post_alive(struct iwm_softc *sc)
2010 {
2011 int nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
2012 IWM_SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(uint32_t);
2013 int err, chnl;
2014 uint32_t base;
2015
2016 if (!iwm_nic_lock(sc))
2017 return EBUSY;
2018
2019 base = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
2020 if (sc->sched_base != base) {
2021 DPRINTF(("%s: sched addr mismatch: 0x%08x != 0x%08x\n",
2022 DEVNAME(sc), sc->sched_base, base));
2023 sc->sched_base = base;
2024 }
2025
2026 iwm_nic_unlock(sc);
2027
2028 iwm_ict_reset(sc);
2029
2030 /* Clear TX scheduler state in SRAM. */
2031 err = iwm_write_mem(sc,
2032 sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND, NULL, nwords);
2033 if (err)
2034 return err;
2035
2036 if (!iwm_nic_lock(sc))
2037 return EBUSY;
2038
2039 /* Set physical address of TX scheduler rings (1KB aligned). */
2040 iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);
2041
2042 iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);
2043
2044 iwm_nic_unlock(sc);
2045
2046 /* enable command channel */
2047 err = iwm_enable_txq(sc, 0 /* unused */, IWM_CMD_QUEUE, 7);
2048 if (err)
2049 return err;
2050
2051 if (!iwm_nic_lock(sc))
2052 return EBUSY;
2053
2054 /* Activate TX scheduler. */
2055 iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
2056
2057 /* Enable DMA channels. */
2058 for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
2059 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
2060 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
2061 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
2062 }
2063
2064 IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
2065 IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
2066
2067 /* Enable L1-Active */
2068 if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000) {
2069 iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
2070 IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
2071 }
2072
2073 iwm_nic_unlock(sc);
2074
2075 return 0;
2076 }
2077
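/*
 * PHY database handling.  Calibration results received from the INIT
 * firmware are stored in sc_phy_db and sent down again later to the
 * operational firmware.
 */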
2078 static struct iwm_phy_db_entry *
2079 iwm_phy_db_get_section(struct iwm_softc *sc, enum iwm_phy_db_section_type type,
2080 uint16_t chg_id)
2081 {
2082 struct iwm_phy_db *phy_db = &sc->sc_phy_db;
2083
2084 if (type >= IWM_PHY_DB_MAX)
2085 return NULL;
2086
2087 switch (type) {
2088 case IWM_PHY_DB_CFG:
2089 return &phy_db->cfg;
2090 case IWM_PHY_DB_CALIB_NCH:
2091 return &phy_db->calib_nch;
2092 case IWM_PHY_DB_CALIB_CHG_PAPD:
2093 if (chg_id >= IWM_NUM_PAPD_CH_GROUPS)
2094 return NULL;
2095 return &phy_db->calib_ch_group_papd[chg_id];
2096 case IWM_PHY_DB_CALIB_CHG_TXP:
2097 if (chg_id >= IWM_NUM_TXP_CH_GROUPS)
2098 return NULL;
2099 return &phy_db->calib_ch_group_txp[chg_id];
2100 default:
2101 return NULL;
2102 }
2103 return NULL;
2104 }
2105
2106 static int
2107 iwm_phy_db_set_section(struct iwm_softc *sc,
2108 struct iwm_calib_res_notif_phy_db *phy_db_notif, uint16_t size)
2109 {
2110 struct iwm_phy_db_entry *entry;
2111 enum iwm_phy_db_section_type type = le16toh(phy_db_notif->type);
2112 uint16_t chg_id = 0;
2113
2114 if (type == IWM_PHY_DB_CALIB_CHG_PAPD ||
2115 type == IWM_PHY_DB_CALIB_CHG_TXP)
2116 chg_id = le16toh(*(uint16_t *)phy_db_notif->data);
2117
2118 entry = iwm_phy_db_get_section(sc, type, chg_id);
2119 if (!entry)
2120 return EINVAL;
2121
2122 if (entry->data)
2123 kmem_intr_free(entry->data, entry->size);
2124 entry->data = kmem_intr_alloc(size, KM_NOSLEEP);
2125 if (!entry->data) {
2126 entry->size = 0;
2127 return ENOMEM;
2128 }
2129 memcpy(entry->data, phy_db_notif->data, size);
2130 entry->size = size;
2131
2132 DPRINTFN(10, ("%s(%d): [PHYDB]SET: Type %d, Size: %d, data: %p\n",
2133 __func__, __LINE__, type, size, entry->data));
2134
2135 return 0;
2136 }
2137
2138 static int
2139 iwm_is_valid_channel(uint16_t ch_id)
2140 {
2141 if (ch_id <= 14 ||
2142 (36 <= ch_id && ch_id <= 64 && ch_id % 4 == 0) ||
2143 (100 <= ch_id && ch_id <= 140 && ch_id % 4 == 0) ||
2144 (145 <= ch_id && ch_id <= 165 && ch_id % 4 == 1))
2145 return 1;
2146 return 0;
2147 }
2148
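/*
 * Map a channel number to a flat channel index.  With the arithmetic
 * below, valid channels 1-14 map to indices 0-13, channels 36-64 to
 * 14-21, channels 100-140 to 22-32, and channels 145-165 to 33-38.
 */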
2149 static uint8_t
2150 iwm_ch_id_to_ch_index(uint16_t ch_id)
2151 {
2152 if (!iwm_is_valid_channel(ch_id))
2153 return 0xff;
2154
2155 if (ch_id <= 14)
2156 return ch_id - 1;
2157 if (ch_id <= 64)
2158 return (ch_id + 20) / 4;
2159 if (ch_id <= 140)
2160 return (ch_id - 12) / 4;
2161 return (ch_id - 13) / 4;
2162 }
2163
2164
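/*
 * Map a channel to one of the IWM_NUM_PAPD_CH_GROUPS PAPD calibration
 * channel groups: 2GHz channels, low 5GHz, mid 5GHz and high 5GHz.
 */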
2165 static uint16_t
2166 iwm_channel_id_to_papd(uint16_t ch_id)
2167 {
2168 if (!iwm_is_valid_channel(ch_id))
2169 return 0xff;
2170
2171 if (1 <= ch_id && ch_id <= 14)
2172 return 0;
2173 if (36 <= ch_id && ch_id <= 64)
2174 return 1;
2175 if (100 <= ch_id && ch_id <= 140)
2176 return 2;
2177 return 3;
2178 }
2179
2180 static uint16_t
2181 iwm_channel_id_to_txp(struct iwm_softc *sc, uint16_t ch_id)
2182 {
2183 struct iwm_phy_db *phy_db = &sc->sc_phy_db;
2184 struct iwm_phy_db_chg_txp *txp_chg;
2185 int i;
2186 uint8_t ch_index = iwm_ch_id_to_ch_index(ch_id);
2187
2188 if (ch_index == 0xff)
2189 return 0xff;
2190
2191 for (i = 0; i < IWM_NUM_TXP_CH_GROUPS; i++) {
2192 txp_chg = (void *)phy_db->calib_ch_group_txp[i].data;
2193 if (!txp_chg)
2194 return 0xff;
2195 /*
2196 		 * Look for the first channel group whose maximum channel
2197 		 * index is greater than or equal to that of the requested channel.
2198 */
2199 if (le16toh(txp_chg->max_channel_idx) >= ch_index)
2200 return i;
2201 }
2202 return 0xff;
2203 }
2204
2205 static int
2206 iwm_phy_db_get_section_data(struct iwm_softc *sc, uint32_t type, uint8_t **data,
2207 uint16_t *size, uint16_t ch_id)
2208 {
2209 struct iwm_phy_db_entry *entry;
2210 uint16_t ch_group_id = 0;
2211
2212 if (type == IWM_PHY_DB_CALIB_CHG_PAPD)
2213 ch_group_id = iwm_channel_id_to_papd(ch_id);
2214 else if (type == IWM_PHY_DB_CALIB_CHG_TXP)
2215 ch_group_id = iwm_channel_id_to_txp(sc, ch_id);
2216
2217 entry = iwm_phy_db_get_section(sc, type, ch_group_id);
2218 if (!entry)
2219 return EINVAL;
2220
2221 *data = entry->data;
2222 *size = entry->size;
2223
2224 DPRINTFN(10, ("%s(%d): [PHYDB] GET: Type %d , Size: %d\n",
2225 __func__, __LINE__, type, *size));
2226
2227 return 0;
2228 }
2229
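/*
 * Send one PHY database section to the firmware as an asynchronous
 * IWM_PHY_DB_CMD, with the type/length header in the first fragment
 * and the section payload in the second.
 */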
2230 static int
2231 iwm_send_phy_db_cmd(struct iwm_softc *sc, uint16_t type, uint16_t length,
2232 void *data)
2233 {
2234 struct iwm_phy_db_cmd phy_db_cmd;
2235 struct iwm_host_cmd cmd = {
2236 .id = IWM_PHY_DB_CMD,
2237 .flags = IWM_CMD_ASYNC,
2238 };
2239
2240 DPRINTFN(10, ("Sending PHY-DB hcmd of type %d, of length %d\n",
2241 type, length));
2242
2243 	phy_db_cmd.type = htole16(type);
2244 	phy_db_cmd.length = htole16(length);
2245
2246 cmd.data[0] = &phy_db_cmd;
2247 cmd.len[0] = sizeof(struct iwm_phy_db_cmd);
2248 cmd.data[1] = data;
2249 cmd.len[1] = length;
2250
2251 return iwm_send_cmd(sc, &cmd);
2252 }
2253
2254 static int
2255 iwm_phy_db_send_all_channel_groups(struct iwm_softc *sc,
2256 enum iwm_phy_db_section_type type, uint8_t max_ch_groups)
2257 {
2258 uint16_t i;
2259 int err;
2260 struct iwm_phy_db_entry *entry;
2261
2262 /* Send all the channel-specific groups to operational fw */
2263 for (i = 0; i < max_ch_groups; i++) {
2264 entry = iwm_phy_db_get_section(sc, type, i);
2265 if (!entry)
2266 return EINVAL;
2267
2268 if (!entry->size)
2269 continue;
2270
2271 err = iwm_send_phy_db_cmd(sc, type, entry->size, entry->data);
2272 if (err) {
2273 DPRINTF(("%s: Can't SEND phy_db section %d (%d), "
2274 "err %d\n", DEVNAME(sc), type, i, err));
2275 return err;
2276 }
2277
2278 DPRINTFN(10, ("%s: Sent PHY_DB HCMD, type = %d num = %d\n",
2279 DEVNAME(sc), type, i));
2280
2281 DELAY(1000);
2282 }
2283
2284 return 0;
2285 }
2286
2287 static int
2288 iwm_send_phy_db_data(struct iwm_softc *sc)
2289 {
2290 uint8_t *data = NULL;
2291 uint16_t size = 0;
2292 int err;
2293
2294 err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CFG, &data, &size, 0);
2295 if (err)
2296 return err;
2297
2298 err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CFG, size, data);
2299 if (err)
2300 return err;
2301
2302 err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CALIB_NCH,
2303 &data, &size, 0);
2304 if (err)
2305 return err;
2306
2307 err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CALIB_NCH, size, data);
2308 if (err)
2309 return err;
2310
2311 err = iwm_phy_db_send_all_channel_groups(sc,
2312 IWM_PHY_DB_CALIB_CHG_PAPD, IWM_NUM_PAPD_CH_GROUPS);
2313 if (err)
2314 return err;
2315
2316 err = iwm_phy_db_send_all_channel_groups(sc,
2317 IWM_PHY_DB_CALIB_CHG_TXP, IWM_NUM_TXP_CH_GROUPS);
2318 if (err)
2319 return err;
2320
2321 return 0;
2322 }
2323
2324 /*
2325 * For the high priority TE use a time event type that has similar priority to
2326 * the FW's action scan priority.
2327 */
2328 #define IWM_ROC_TE_TYPE_NORMAL IWM_TE_P2P_DEVICE_DISCOVERABLE
2329 #define IWM_ROC_TE_TYPE_MGMT_TX IWM_TE_P2P_CLIENT_ASSOC
2330
2331 /* used to convert from time event API v2 to v1 */
2332 #define IWM_TE_V2_DEP_POLICY_MSK (IWM_TE_V2_DEP_OTHER | IWM_TE_V2_DEP_TSF |\
2333 IWM_TE_V2_EVENT_SOCIOPATHIC)
2334 static inline uint16_t
2335 iwm_te_v2_get_notify(uint16_t policy)
2336 {
2337 return le16toh(policy) & IWM_TE_V2_NOTIF_MSK;
2338 }
2339
2340 static inline uint16_t
2341 iwm_te_v2_get_dep_policy(uint16_t policy)
2342 {
2343 return (le16toh(policy) & IWM_TE_V2_DEP_POLICY_MSK) >>
2344 IWM_TE_V2_PLACEMENT_POS;
2345 }
2346
2347 static inline uint16_t
2348 iwm_te_v2_get_absence(uint16_t policy)
2349 {
2350 return (le16toh(policy) & IWM_TE_V2_ABSENCE) >> IWM_TE_V2_ABSENCE_POS;
2351 }
2352
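/*
 * Convert a version 2 time event command to the version 1 layout, for
 * firmware that does not advertise the time event API v2 capability.
 * The packed v2 policy field is split back into the separate v1
 * dep_policy, is_present and notify fields.
 */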
2353 static void
2354 iwm_te_v2_to_v1(const struct iwm_time_event_cmd_v2 *cmd_v2,
2355 struct iwm_time_event_cmd_v1 *cmd_v1)
2356 {
2357 cmd_v1->id_and_color = cmd_v2->id_and_color;
2358 cmd_v1->action = cmd_v2->action;
2359 cmd_v1->id = cmd_v2->id;
2360 cmd_v1->apply_time = cmd_v2->apply_time;
2361 cmd_v1->max_delay = cmd_v2->max_delay;
2362 cmd_v1->depends_on = cmd_v2->depends_on;
2363 cmd_v1->interval = cmd_v2->interval;
2364 cmd_v1->duration = cmd_v2->duration;
2365 if (cmd_v2->repeat == IWM_TE_V2_REPEAT_ENDLESS)
2366 cmd_v1->repeat = htole32(IWM_TE_V1_REPEAT_ENDLESS);
2367 else
2368 cmd_v1->repeat = htole32(cmd_v2->repeat);
2369 cmd_v1->max_frags = htole32(cmd_v2->max_frags);
2370 cmd_v1->interval_reciprocal = 0; /* unused */
2371
2372 cmd_v1->dep_policy = htole32(iwm_te_v2_get_dep_policy(cmd_v2->policy));
2373 cmd_v1->is_present = htole32(!iwm_te_v2_get_absence(cmd_v2->policy));
2374 cmd_v1->notify = htole32(iwm_te_v2_get_notify(cmd_v2->policy));
2375 }
2376
2377 static int
2378 iwm_send_time_event_cmd(struct iwm_softc *sc,
2379 const struct iwm_time_event_cmd_v2 *cmd)
2380 {
2381 struct iwm_time_event_cmd_v1 cmd_v1;
2382
2383 if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_TIME_EVENT_API_V2)
2384 return iwm_send_cmd_pdu(sc, IWM_TIME_EVENT_CMD, 0, sizeof(*cmd),
2385 cmd);
2386
2387 iwm_te_v2_to_v1(cmd, &cmd_v1);
2388 return iwm_send_cmd_pdu(sc, IWM_TIME_EVENT_CMD, 0, sizeof(cmd_v1),
2389 &cmd_v1);
2390 }
2391
2392 static void
2393 iwm_protect_session(struct iwm_softc *sc, struct iwm_node *in,
2394 uint32_t duration, uint32_t max_delay)
2395 {
2396 struct iwm_time_event_cmd_v2 time_cmd;
2397
2398 memset(&time_cmd, 0, sizeof(time_cmd));
2399
2400 time_cmd.action = htole32(IWM_FW_CTXT_ACTION_ADD);
2401 time_cmd.id_and_color =
2402 htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
2403 time_cmd.id = htole32(IWM_TE_BSS_STA_AGGRESSIVE_ASSOC);
2404
2405 time_cmd.apply_time = htole32(0);
2406
2407 time_cmd.max_frags = IWM_TE_V2_FRAG_NONE;
2408 time_cmd.max_delay = htole32(max_delay);
2409 	/* TODO: why do we need to set interval = beacon interval if it is not periodic? */
2410 time_cmd.interval = htole32(1);
2411 time_cmd.duration = htole32(duration);
2412 time_cmd.repeat = 1;
2413 time_cmd.policy
2414 = htole16(IWM_TE_V2_NOTIF_HOST_EVENT_START |
2415 IWM_TE_V2_NOTIF_HOST_EVENT_END |
2416 IWM_T2_V2_START_IMMEDIATELY);
2417
2418 iwm_send_time_event_cmd(sc, &time_cmd);
2419 }
2420
2421 /*
2422 * NVM read access and content parsing. We do not support
2423 * external NVM or writing NVM.
2424 */
2425
2426 /* list of NVM sections we are allowed/need to read */
2427 static const int iwm_nvm_to_read[] = {
2428 IWM_NVM_SECTION_TYPE_HW,
2429 IWM_NVM_SECTION_TYPE_SW,
2430 IWM_NVM_SECTION_TYPE_REGULATORY,
2431 IWM_NVM_SECTION_TYPE_CALIBRATION,
2432 IWM_NVM_SECTION_TYPE_PRODUCTION,
2433 IWM_NVM_SECTION_TYPE_HW_8000,
2434 IWM_NVM_SECTION_TYPE_MAC_OVERRIDE,
2435 IWM_NVM_SECTION_TYPE_PHY_SKU,
2436 };
2437
2438 /* Default NVM chunk size to read */
2439 #define IWM_NVM_DEFAULT_CHUNK_SIZE (2*1024)
2440 #define IWM_MAX_NVM_SECTION_SIZE_7000 (16 * 512 * sizeof(uint16_t)) /*16 KB*/
2441 #define IWM_MAX_NVM_SECTION_SIZE_8000 (32 * 512 * sizeof(uint16_t)) /*32 KB*/
2442
2443 #define IWM_NVM_WRITE_OPCODE 1
2444 #define IWM_NVM_READ_OPCODE 0
2445
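/*
 * Read a chunk of at most 'length' bytes from NVM section 'section',
 * starting at byte offset 'offset', using an IWM_NVM_ACCESS_CMD host
 * command.  The number of bytes actually returned by the firmware is
 * stored in *len.
 */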
2446 static int
2447 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section, uint16_t offset,
2448 uint16_t length, uint8_t *data, uint16_t *len)
2449 {
2451 struct iwm_nvm_access_cmd nvm_access_cmd = {
2452 .offset = htole16(offset),
2453 .length = htole16(length),
2454 .type = htole16(section),
2455 .op_code = IWM_NVM_READ_OPCODE,
2456 };
2457 struct iwm_nvm_access_resp *nvm_resp;
2458 struct iwm_rx_packet *pkt;
2459 struct iwm_host_cmd cmd = {
2460 .id = IWM_NVM_ACCESS_CMD,
2461 .flags = (IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL),
2462 .data = { &nvm_access_cmd, },
2463 };
2464 int err, offset_read;
2465 size_t bytes_read;
2466 uint8_t *resp_data;
2467
2468 cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
2469
2470 err = iwm_send_cmd(sc, &cmd);
2471 if (err) {
2472 DPRINTF(("%s: Could not send NVM_ACCESS command (error=%d)\n",
2473 DEVNAME(sc), err));
2474 return err;
2475 }
2476
2477 pkt = cmd.resp_pkt;
2478 if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
2479 err = EIO;
2480 goto exit;
2481 }
2482
2483 /* Extract NVM response */
2484 nvm_resp = (void *)pkt->data;
2485
2486 err = le16toh(nvm_resp->status);
2487 bytes_read = le16toh(nvm_resp->length);
2488 offset_read = le16toh(nvm_resp->offset);
2489 resp_data = nvm_resp->data;
2490 if (err) {
2491 err = EINVAL;
2492 goto exit;
2493 }
2494
2495 if (offset_read != offset) {
2496 err = EINVAL;
2497 goto exit;
2498 }
2499 if (bytes_read > length) {
2500 err = EINVAL;
2501 goto exit;
2502 }
2503
2504 memcpy(data + offset, resp_data, bytes_read);
2505 *len = bytes_read;
2506
2507 exit:
2508 iwm_free_resp(sc, &cmd);
2509 return err;
2510 }
2511
2512 /*
2513 * Reads an NVM section completely.
2514  * NICs prior to the 7000 family don't have a real NVM, but just read
2515  * section 0, which is the EEPROM. Because EEPROM reads are not bounded
2516  * by the uCode, we need to manually check in this case that we don't
2517  * overflow and try to read more than the EEPROM size.
2518 */
2519 static int
2520 iwm_nvm_read_section(struct iwm_softc *sc, uint16_t section, uint8_t *data,
2521 uint16_t *len, size_t max_len)
2522 {
2523 uint16_t chunklen, seglen;
2524 int err;
2525
2526 chunklen = seglen = IWM_NVM_DEFAULT_CHUNK_SIZE;
2527 *len = 0;
2528
2529 /* Read NVM chunks until exhausted (reading less than requested) */
2530 while (seglen == chunklen && *len < max_len) {
2531 err = iwm_nvm_read_chunk(sc, section, *len, chunklen, data,
2532 &seglen);
2533 if (err) {
2534 DPRINTF(("%s: Cannot read NVM from section %d "
2535 "offset %d, length %d\n",
2536 DEVNAME(sc), section, *len, chunklen));
2537 return err;
2538 }
2539 *len += seglen;
2540 }
2541
2542 DPRINTFN(4, ("NVM section %d read completed\n", section));
2543 return 0;
2544 }
2545
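/*
 * The usable TX (and, below, RX) antenna mask is the chain mask from
 * the firmware PHY configuration, restricted by the corresponding
 * antenna mask from the NVM if one is present.
 */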
2546 static uint8_t
2547 iwm_fw_valid_tx_ant(struct iwm_softc *sc)
2548 {
2549 uint8_t tx_ant;
2550
2551 tx_ant = ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_TX_CHAIN)
2552 >> IWM_FW_PHY_CFG_TX_CHAIN_POS);
2553
2554 if (sc->sc_nvm.valid_tx_ant)
2555 tx_ant &= sc->sc_nvm.valid_tx_ant;
2556
2557 return tx_ant;
2558 }
2559
2560 static uint8_t
2561 iwm_fw_valid_rx_ant(struct iwm_softc *sc)
2562 {
2563 uint8_t rx_ant;
2564
2565 rx_ant = ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RX_CHAIN)
2566 >> IWM_FW_PHY_CFG_RX_CHAIN_POS);
2567
2568 if (sc->sc_nvm.valid_rx_ant)
2569 rx_ant &= sc->sc_nvm.valid_rx_ant;
2570
2571 return rx_ant;
2572 }
2573
2574 static void
2575 iwm_init_channel_map(struct iwm_softc *sc, const uint16_t * const nvm_ch_flags,
2576 const uint8_t *nvm_channels, size_t nchan)
2577 {
2578 struct ieee80211com *ic = &sc->sc_ic;
2579 struct iwm_nvm_data *data = &sc->sc_nvm;
2580 int ch_idx;
2581 struct ieee80211_channel *channel;
2582 uint16_t ch_flags;
2583 int is_5ghz;
2584 int flags, hw_value;
2585
2586 for (ch_idx = 0; ch_idx < nchan; ch_idx++) {
2587 ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
2588
2589 if (ch_idx >= IWM_NUM_2GHZ_CHANNELS &&
2590 !data->sku_cap_band_52GHz_enable)
2591 ch_flags &= ~IWM_NVM_CHANNEL_VALID;
2592
2593 if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
2594 DPRINTF(("Ch. %d Flags %x [%sGHz] - No traffic\n",
2595 			    nvm_channels[ch_idx],
2596 ch_flags,
2597 (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
2598 "5.2" : "2.4"));
2599 continue;
2600 }
2601
2602 hw_value = nvm_channels[ch_idx];
2603 channel = &ic->ic_channels[hw_value];
2604
2605 is_5ghz = ch_idx >= IWM_NUM_2GHZ_CHANNELS;
2606 if (!is_5ghz) {
2607 flags = IEEE80211_CHAN_2GHZ;
2608 channel->ic_flags
2609 = IEEE80211_CHAN_CCK
2610 | IEEE80211_CHAN_OFDM
2611 | IEEE80211_CHAN_DYN
2612 | IEEE80211_CHAN_2GHZ;
2613 } else {
2614 flags = IEEE80211_CHAN_5GHZ;
2615 channel->ic_flags =
2616 IEEE80211_CHAN_A;
2617 }
2618 channel->ic_freq = ieee80211_ieee2mhz(hw_value, flags);
2619
2620 if (!(ch_flags & IWM_NVM_CHANNEL_ACTIVE))
2621 channel->ic_flags |= IEEE80211_CHAN_PASSIVE;
2622
2623 #ifndef IEEE80211_NO_HT
2624 if (data->sku_cap_11n_enable)
2625 channel->ic_flags |= IEEE80211_CHAN_HT;
2626 #endif
2627 }
2628 }
2629
2630 #ifndef IEEE80211_NO_HT
2631 static void
2632 iwm_setup_ht_rates(struct iwm_softc *sc)
2633 {
2634 struct ieee80211com *ic = &sc->sc_ic;
2635
2636 /* TX is supported with the same MCS as RX. */
2637 ic->ic_tx_mcs_set = IEEE80211_TX_MCS_SET_DEFINED;
2638
2639 ic->ic_sup_mcs[0] = 0xff; /* MCS 0-7 */
2640
2641 #ifdef notyet
2642 if (sc->sc_nvm.sku_cap_mimo_disable)
2643 return;
2644
2645 if (iwm_fw_valid_rx_ant(sc) > 1)
2646 ic->ic_sup_mcs[1] = 0xff; /* MCS 8-15 */
2647 if (iwm_fw_valid_rx_ant(sc) > 2)
2648 ic->ic_sup_mcs[2] = 0xff; /* MCS 16-23 */
2649 #endif
2650 }
2651
2652 #define IWM_MAX_RX_BA_SESSIONS 16
2653
2654 static void
2655 iwm_sta_rx_agg(struct iwm_softc *sc, struct ieee80211_node *ni, uint8_t tid,
2656 uint16_t ssn, int start)
2657 {
2658 struct ieee80211com *ic = &sc->sc_ic;
2659 struct iwm_add_sta_cmd_v7 cmd;
2660 struct iwm_node *in = (struct iwm_node *)ni;
2661 int err, s;
2662 uint32_t status;
2663
2664 if (start && sc->sc_rx_ba_sessions >= IWM_MAX_RX_BA_SESSIONS) {
2665 ieee80211_addba_req_refuse(ic, ni, tid);
2666 return;
2667 }
2668
2669 memset(&cmd, 0, sizeof(cmd));
2670
2671 cmd.sta_id = IWM_STATION_ID;
2672 cmd.mac_id_n_color
2673 = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
2674 cmd.add_modify = IWM_STA_MODE_MODIFY;
2675
2676 if (start) {
2677 cmd.add_immediate_ba_tid = (uint8_t)tid;
2678 cmd.add_immediate_ba_ssn = ssn;
2679 } else {
2680 cmd.remove_immediate_ba_tid = (uint8_t)tid;
2681 }
2682 cmd.modify_mask = start ? IWM_STA_MODIFY_ADD_BA_TID :
2683 IWM_STA_MODIFY_REMOVE_BA_TID;
2684
2685 status = IWM_ADD_STA_SUCCESS;
2686 err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(cmd), &cmd,
2687 &status);
2688
2689 s = splnet();
2690 if (err == 0 && status == IWM_ADD_STA_SUCCESS) {
2691 if (start) {
2692 sc->sc_rx_ba_sessions++;
2693 ieee80211_addba_req_accept(ic, ni, tid);
2694 } else if (sc->sc_rx_ba_sessions > 0)
2695 sc->sc_rx_ba_sessions--;
2696 } else if (start)
2697 ieee80211_addba_req_refuse(ic, ni, tid);
2698
2699 splx(s);
2700 }
2701
2702 static void
2703 iwm_htprot_task(void *arg)
2704 {
2705 struct iwm_softc *sc = arg;
2706 struct ieee80211com *ic = &sc->sc_ic;
2707 struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
2708 int err;
2709
2710 /* This call updates HT protection based on in->in_ni.ni_htop1. */
2711 err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 1);
2712 if (err)
2713 aprint_error_dev(sc->sc_dev,
2714 "could not change HT protection: error %d\n", err);
2715 }
2716
2717 /*
2718 * This function is called by upper layer when HT protection settings in
2719 * beacons have changed.
2720 */
2721 static void
2722 iwm_update_htprot(struct ieee80211com *ic, struct ieee80211_node *ni)
2723 {
2724 struct iwm_softc *sc = ic->ic_softc;
2725
2726 /* assumes that ni == ic->ic_bss */
2727 task_add(systq, &sc->htprot_task);
2728 }
2729
2730 static void
2731 iwm_ba_task(void *arg)
2732 {
2733 struct iwm_softc *sc = arg;
2734 struct ieee80211com *ic = &sc->sc_ic;
2735 struct ieee80211_node *ni = ic->ic_bss;
2736
2737 if (sc->ba_start)
2738 iwm_sta_rx_agg(sc, ni, sc->ba_tid, sc->ba_ssn, 1);
2739 else
2740 iwm_sta_rx_agg(sc, ni, sc->ba_tid, 0, 0);
2741 }
2742
2743 /*
2744 * This function is called by upper layer when an ADDBA request is received
2745 * from another STA and before the ADDBA response is sent.
2746 */
2747 static int
2748 iwm_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
2749 uint8_t tid)
2750 {
2751 struct ieee80211_rx_ba *ba = &ni->ni_rx_ba[tid];
2752 struct iwm_softc *sc = IC2IFP(ic)->if_softc;
2753
2754 if (sc->sc_rx_ba_sessions >= IWM_MAX_RX_BA_SESSIONS)
2755 return ENOSPC;
2756
2757 sc->ba_start = 1;
2758 sc->ba_tid = tid;
2759 sc->ba_ssn = htole16(ba->ba_winstart);
2760 task_add(systq, &sc->ba_task);
2761
2762 return EBUSY;
2763 }
2764
2765 /*
2766 * This function is called by upper layer on teardown of an HT-immediate
2767  * Block Ack agreement (e.g. upon receipt of a DELBA frame).
2768 */
2769 static void
2770 iwm_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
2771 uint8_t tid)
2772 {
2773 struct iwm_softc *sc = IC2IFP(ic)->if_softc;
2774
2775 sc->ba_start = 0;
2776 sc->ba_tid = tid;
2777 task_add(systq, &sc->ba_task);
2778 }
2779 #endif
2780
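/*
 * On 8000 family devices the MAC address is taken from the MAC
 * override NVM section if a valid, non-reserved address is found
 * there.  Otherwise it is reconstructed from the WFMP_MAC_ADDR
 * PRPH registers, whose bytes are stored in reversed order.
 */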
2781 static void
2782 iwm_set_hw_address_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
2783 const uint16_t *mac_override, const uint16_t *nvm_hw)
2784 {
2785 static const uint8_t reserved_mac[ETHER_ADDR_LEN] = {
2786 0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
2787 };
2788 static const u_int8_t etheranyaddr[ETHER_ADDR_LEN] = {
2789 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
2790 };
2791 const uint8_t *hw_addr;
2792
2793 if (mac_override) {
2794 hw_addr = (const uint8_t *)(mac_override +
2795 IWM_MAC_ADDRESS_OVERRIDE_8000);
2796
2797 /*
2798 		 * Store the MAC address from the MAO section.
2799 		 * No byte swapping is required in the MAO section.
2800 */
2801 memcpy(data->hw_addr, hw_addr, ETHER_ADDR_LEN);
2802
2803 /*
2804 * Force the use of the OTP MAC address in case of reserved MAC
2805 * address in the NVM, or if address is given but invalid.
2806 */
2807 if (memcmp(reserved_mac, hw_addr, ETHER_ADDR_LEN) != 0 &&
2808 (memcmp(etherbroadcastaddr, data->hw_addr,
2809 sizeof(etherbroadcastaddr)) != 0) &&
2810 (memcmp(etheranyaddr, data->hw_addr,
2811 sizeof(etheranyaddr)) != 0) &&
2812 !ETHER_IS_MULTICAST(data->hw_addr))
2813 return;
2814 }
2815
2816 if (nvm_hw) {
2817 /* Read the mac address from WFMP registers. */
2818 uint32_t mac_addr0 =
2819 htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
2820 uint32_t mac_addr1 =
2821 htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));
2822
2823 hw_addr = (const uint8_t *)&mac_addr0;
2824 data->hw_addr[0] = hw_addr[3];
2825 data->hw_addr[1] = hw_addr[2];
2826 data->hw_addr[2] = hw_addr[1];
2827 data->hw_addr[3] = hw_addr[0];
2828
2829 hw_addr = (const uint8_t *)&mac_addr1;
2830 data->hw_addr[4] = hw_addr[1];
2831 data->hw_addr[5] = hw_addr[0];
2832
2833 return;
2834 }
2835
2836 aprint_error_dev(sc->sc_dev, "mac address not found\n");
2837 memset(data->hw_addr, 0, sizeof(data->hw_addr));
2838 }
2839
2840 static int
2841 iwm_parse_nvm_data(struct iwm_softc *sc, const uint16_t *nvm_hw,
2842 const uint16_t *nvm_sw, const uint16_t *nvm_calib,
2843 const uint16_t *mac_override, const uint16_t *phy_sku,
2844 const uint16_t *regulatory)
2845 {
2846 struct iwm_nvm_data *data = &sc->sc_nvm;
2847 uint8_t hw_addr[ETHER_ADDR_LEN];
2848 uint32_t sku;
2849
2850 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
2851 uint16_t radio_cfg = le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
2852 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
2853 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
2854 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
2855 data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
2856
2857 data->nvm_version = le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
2858 sku = le16_to_cpup(nvm_sw + IWM_SKU);
2859 } else {
2860 uint32_t radio_cfg = le32_to_cpup(phy_sku + IWM_RADIO_CFG_8000);
2861 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
2862 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
2863 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
2864 data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK_8000(radio_cfg);
2865 data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
2866 data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
2867
2868 data->nvm_version = le32_to_cpup(nvm_sw + IWM_NVM_VERSION_8000);
2869 sku = le32_to_cpup(phy_sku + IWM_SKU_8000);
2870 }
2871
2872 data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
2873 data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
2874 data->sku_cap_11n_enable = sku & IWM_NVM_SKU_CAP_11N_ENABLE;
2875 data->sku_cap_mimo_disable = sku & IWM_NVM_SKU_CAP_MIMO_DISABLE;
2876
2877 data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2878
2879 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
2880 memcpy(hw_addr, nvm_hw + IWM_HW_ADDR, ETHER_ADDR_LEN);
2881 data->hw_addr[0] = hw_addr[1];
2882 data->hw_addr[1] = hw_addr[0];
2883 data->hw_addr[2] = hw_addr[3];
2884 data->hw_addr[3] = hw_addr[2];
2885 data->hw_addr[4] = hw_addr[5];
2886 data->hw_addr[5] = hw_addr[4];
2887 } else
2888 iwm_set_hw_address_8000(sc, data, mac_override, nvm_hw);
2889
2890 if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
2891 uint16_t lar_offset, lar_config;
2892 lar_offset = data->nvm_version < 0xE39 ?
2893 IWM_NVM_LAR_OFFSET_8000_OLD : IWM_NVM_LAR_OFFSET_8000;
2894 lar_config = le16_to_cpup(regulatory + lar_offset);
2895 data->lar_enabled = !!(lar_config & IWM_NVM_LAR_ENABLED_8000);
2896 }
2897
2898 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
2899 iwm_init_channel_map(sc, &nvm_sw[IWM_NVM_CHANNELS],
2900 iwm_nvm_channels, __arraycount(iwm_nvm_channels));
2901 else
2902 		iwm_init_channel_map(sc, &regulatory[IWM_NVM_CHANNELS_8000],
2903 iwm_nvm_channels_8000, __arraycount(iwm_nvm_channels_8000));
2904
2905 	data->calib_version = 255; /* TODO:
2906 				      this value will prevent some checks from
2907 				      failing; we need to check whether this
2908 				      field is still needed, and if so,
2909 				      where to find it in the NVM */
2910
2911 return 0;
2912 }
2913
2914 static int
2915 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
2916 {
2917 const uint16_t *hw, *sw, *calib, *mac_override = NULL, *phy_sku = NULL;
2918 const uint16_t *regulatory = NULL;
2919
2920 	/* Check for the required sections. */
2921 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
2922 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2923 !sections[IWM_NVM_SECTION_TYPE_HW].data) {
2924 return ENOENT;
2925 }
2926
2927 hw = (const uint16_t *) sections[IWM_NVM_SECTION_TYPE_HW].data;
2928 } else if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
2929 /* SW and REGULATORY sections are mandatory */
2930 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2931 !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
2932 return ENOENT;
2933 }
2934 /* MAC_OVERRIDE or at least HW section must exist */
2935 if (!sections[IWM_NVM_SECTION_TYPE_HW_8000].data &&
2936 !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
2937 return ENOENT;
2938 }
2939
2940 /* PHY_SKU section is mandatory in B0 */
2941 if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
2942 return ENOENT;
2943 }
2944
2945 regulatory = (const uint16_t *)
2946 sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
2947 hw = (const uint16_t *)
2948 sections[IWM_NVM_SECTION_TYPE_HW_8000].data;
2949 mac_override =
2950 (const uint16_t *)
2951 sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
2952 phy_sku = (const uint16_t *)
2953 sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
2954 } else {
2955 		panic("unknown device family %d", sc->sc_device_family);
2956 }
2957
2958 sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
2959 calib = (const uint16_t *)
2960 sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
2961
2962 return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
2963 phy_sku, regulatory);
2964 }
2965
2966 static int
2967 iwm_nvm_init(struct iwm_softc *sc)
2968 {
2969 struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
2970 int i, section, err;
2971 uint16_t len;
2972 uint8_t *buf;
2973 const size_t bufsz = (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) ?
2974 IWM_MAX_NVM_SECTION_SIZE_8000 : IWM_MAX_NVM_SECTION_SIZE_7000;
2975
2976 	/* Read the device NVM via the firmware. */
2977 DPRINTF(("Read NVM\n"));
2978
2979 memset(nvm_sections, 0, sizeof(nvm_sections));
2980
2981 buf = kmem_alloc(bufsz, KM_SLEEP);
2982 if (buf == NULL)
2983 return ENOMEM;
2984
2985 for (i = 0; i < __arraycount(iwm_nvm_to_read); i++) {
2986 section = iwm_nvm_to_read[i];
2987 		KASSERT(section < IWM_NVM_NUM_OF_SECTIONS);
2988
2989 err = iwm_nvm_read_section(sc, section, buf, &len, bufsz);
2990 if (err) {
2991 err = 0;
2992 continue;
2993 }
2994 nvm_sections[section].data = kmem_alloc(len, KM_SLEEP);
2995 if (nvm_sections[section].data == NULL) {
2996 err = ENOMEM;
2997 break;
2998 }
2999 memcpy(nvm_sections[section].data, buf, len);
3000 nvm_sections[section].length = len;
3001 }
3002 kmem_free(buf, bufsz);
3003 if (err == 0)
3004 err = iwm_parse_nvm_sections(sc, nvm_sections);
3005
3006 for (i = 0; i < IWM_NVM_NUM_OF_SECTIONS; i++) {
3007 if (nvm_sections[i].data != NULL)
3008 kmem_free(nvm_sections[i].data, nvm_sections[i].length);
3009 }
3010
3011 return err;
3012 }
3013
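/*
 * Load one firmware section into device memory, splitting it into
 * chunks of at most IWM_FH_MEM_TB_MAX_LENGTH bytes.  Chunks that fall
 * into the extended SRAM region are bracketed by setting and clearing
 * the LMPM_CHICK extended-address-space bit.
 */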
3014 static int
3015 iwm_firmware_load_sect(struct iwm_softc *sc, uint32_t dst_addr,
3016 const uint8_t *section, uint32_t byte_cnt)
3017 {
3018 int err = EINVAL;
3019 uint32_t chunk_sz, offset;
3020
3021 chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, byte_cnt);
3022
3023 for (offset = 0; offset < byte_cnt; offset += chunk_sz) {
3024 uint32_t addr, len;
3025 const uint8_t *data;
3026 bool is_extended = false;
3027
3028 addr = dst_addr + offset;
3029 len = MIN(chunk_sz, byte_cnt - offset);
3030 data = section + offset;
3031
3032 if (addr >= IWM_FW_MEM_EXTENDED_START &&
3033 addr <= IWM_FW_MEM_EXTENDED_END)
3034 is_extended = true;
3035
3036 if (is_extended)
3037 iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
3038 IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
3039
3040 err = iwm_firmware_load_chunk(sc, addr, data, len);
3041
3042 if (is_extended)
3043 iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
3044 IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
3045
3046 if (err)
3047 break;
3048 }
3049
3050 return err;
3051 }
3052
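/*
 * DMA one firmware chunk to the device: copy it into the preallocated
 * fw_dma buffer, program the service DMA channel with source address,
 * destination address and byte count, then sleep until the "chunk
 * done" interrupt arrives, with a five second timeout.
 */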
3053 static int
3054 iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
3055 const uint8_t *section, uint32_t byte_cnt)
3056 {
3057 struct iwm_dma_info *dma = &sc->fw_dma;
3058 int err;
3059
3060 /* Copy firmware chunk into pre-allocated DMA-safe memory. */
3061 memcpy(dma->vaddr, section, byte_cnt);
3062 bus_dmamap_sync(sc->sc_dmat, dma->map, 0, byte_cnt,
3063 BUS_DMASYNC_PREWRITE);
3064
3065 sc->sc_fw_chunk_done = 0;
3066
3067 if (!iwm_nic_lock(sc))
3068 return EBUSY;
3069
3070 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
3071 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
3072 IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
3073 dst_addr);
3074 IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
3075 dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
3076 IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
3077 (iwm_get_dma_hi_addr(dma->paddr)
3078 << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
3079 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
3080 1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
3081 1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
3082 IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
3083 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
3084 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
3085 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
3086 IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
3087
3088 iwm_nic_unlock(sc);
3089
3090 /* Wait for this segment to load. */
3091 err = 0;
3092 while (!sc->sc_fw_chunk_done) {
3093 err = tsleep(&sc->sc_fw, 0, "iwmfw", mstohz(5000));
3094 if (err)
3095 break;
3096 }
3097 if (!sc->sc_fw_chunk_done) {
3098 DPRINTF(("%s: fw chunk addr 0x%x len %d failed to load\n",
3099 DEVNAME(sc), dst_addr, byte_cnt));
3100 }
3101
3102 return err;
3103 }
3104
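/*
 * 7000 family firmware load: write each section of the selected uCode
 * image to its device offset (sections larger than sc_fwdmasegsz are
 * rejected with EFBIG), then clear the reset register.
 */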
3105 static int
3106 iwm_load_firmware_7000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3107 {
3108 struct iwm_fw_sects *fws;
3109 int err, i;
3110 void *data;
3111 uint32_t dlen;
3112 uint32_t offset;
3113
3114 fws = &sc->sc_fw.fw_sects[ucode_type];
3115 for (i = 0; i < fws->fw_count; i++) {
3116 data = fws->fw_sect[i].fws_data;
3117 dlen = fws->fw_sect[i].fws_len;
3118 offset = fws->fw_sect[i].fws_devoff;
3119 		if (dlen > sc->sc_fwdmasegsz)
3120 			err = EFBIG;
3121 		else
3122 err = iwm_firmware_load_sect(sc, offset, data, dlen);
3123 if (err) {
3124 DPRINTF(("%s: could not load firmware chunk %u of %u\n",
3125 DEVNAME(sc), i, fws->fw_count));
3126 return err;
3127 }
3128 }
3129
3130 IWM_WRITE(sc, IWM_CSR_RESET, 0);
3131
3132 return 0;
3133 }
3134
3135 static int
3136 iwm_load_cpu_sections_8000(struct iwm_softc *sc, struct iwm_fw_sects *fws,
3137 int cpu, int *first_ucode_section)
3138 {
3139 int shift_param;
3140 int i, err = 0, sec_num = 0x1;
3141 uint32_t val, last_read_idx = 0;
3142 void *data;
3143 uint32_t dlen;
3144 uint32_t offset;
3145
3146 if (cpu == 1) {
3147 shift_param = 0;
3148 *first_ucode_section = 0;
3149 } else {
3150 shift_param = 16;
3151 (*first_ucode_section)++;
3152 }
3153
3154 for (i = *first_ucode_section; i < IWM_UCODE_SECT_MAX; i++) {
3155 last_read_idx = i;
3156 data = fws->fw_sect[i].fws_data;
3157 dlen = fws->fw_sect[i].fws_len;
3158 offset = fws->fw_sect[i].fws_devoff;
3159
3160 /*
3161 		 * The CPU1_CPU2_SEPARATOR_SECTION delimiter separates the
3162 		 * CPU1 sections from the CPU2 sections.
3163 		 * The PAGING_SEPARATOR_SECTION delimiter separates the
3164 		 * CPU2 non-paged sections from the CPU2 paging section.
3165 */
3166 if (!data || offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
3167 offset == IWM_PAGING_SEPARATOR_SECTION)
3168 break;
3169
3170 		if (dlen > sc->sc_fwdmasegsz)
3171 			err = EFBIG;
3172 		else
3173 err = iwm_firmware_load_sect(sc, offset, data, dlen);
3174 if (err) {
3175 DPRINTF(("%s: could not load firmware chunk %d "
3176 "(error %d)\n", DEVNAME(sc), i, err));
3177 return err;
3178 }
3179
3180 /* Notify the ucode of the loaded section number and status */
3181 if (iwm_nic_lock(sc)) {
3182 val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
3183 val = val | (sec_num << shift_param);
3184 IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
3185 sec_num = (sec_num << 1) | 0x1;
3186 iwm_nic_unlock(sc);
3187
3188 /*
3189 * The firmware won't load correctly without this delay.
3190 */
3191 DELAY(8000);
3192 }
3193 }
3194
3195 *first_ucode_section = last_read_idx;
3196
3197 if (iwm_nic_lock(sc)) {
3198 if (cpu == 1)
3199 IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
3200 else
3201 IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
3202 iwm_nic_unlock(sc);
3203 }
3204
3205 return 0;
3206 }
3207
3208 static int
3209 iwm_load_firmware_8000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3210 {
3211 struct iwm_fw_sects *fws;
3212 int err = 0;
3213 int first_ucode_section;
3214
3215 fws = &sc->sc_fw.fw_sects[ucode_type];
3216
3217 /* configure the ucode to be ready to get the secured image */
3218 /* release CPU reset */
3219 if (iwm_nic_lock(sc)) {
3220 iwm_write_prph(sc, IWM_RELEASE_CPU_RESET,
3221 IWM_RELEASE_CPU_RESET_BIT);
3222 iwm_nic_unlock(sc);
3223 }
3224
3225 	/* Load the secured binary sections of CPU1 into the firmware. */
3226 err = iwm_load_cpu_sections_8000(sc, fws, 1, &first_ucode_section);
3227 if (err)
3228 return err;
3229
3230 	/* Load the binary sections of CPU2 into the firmware. */
3231 return iwm_load_cpu_sections_8000(sc, fws, 2, &first_ucode_section);
3232 }
3233
3234 static int
3235 iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3236 {
3237 int err, w;
3238
3239 sc->sc_uc.uc_intr = 0;
3240
3241 if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
3242 err = iwm_load_firmware_8000(sc, ucode_type);
3243 else
3244 err = iwm_load_firmware_7000(sc, ucode_type);
3245 if (err)
3246 return err;
3247
3248 /* wait for the firmware to load */
3249 for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++)
3250 err = tsleep(&sc->sc_uc, 0, "iwmuc", mstohz(100));
3251 if (err || !sc->sc_uc.uc_ok) {
3252 aprint_error_dev(sc->sc_dev,
3253 "could not load firmware (error %d, ok %d)\n",
3254 err, sc->sc_uc.uc_ok);
3255 if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
3256 aprint_error_dev(sc->sc_dev, "cpu1 status: 0x%x\n",
3257 iwm_read_prph(sc, IWM_SB_CPU_1_STATUS));
3258 aprint_error_dev(sc->sc_dev, "cpu2 status: 0x%x\n",
3259 iwm_read_prph(sc, IWM_SB_CPU_2_STATUS));
3260 }
3261 }
3262
3263 return err;
3264 }
3265
3266 static int
3267 iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3268 {
3269 int err;
3270
3271 IWM_WRITE(sc, IWM_CSR_INT, ~0);
3272
3273 err = iwm_nic_init(sc);
3274 if (err) {
3275 aprint_error_dev(sc->sc_dev, "Unable to init nic\n");
3276 return err;
3277 }
3278
3279 /* make sure rfkill handshake bits are cleared */
3280 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
3281 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
3282 IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
3283
3284 /* clear (again), then enable host interrupts */
3285 IWM_WRITE(sc, IWM_CSR_INT, ~0);
3286 iwm_enable_interrupts(sc);
3287
3288 /* really make sure rfkill handshake bits are cleared */
3289 /* maybe we should write a few times more? just to make sure */
3290 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
3291 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
3292
3293 return iwm_load_firmware(sc, ucode_type);
3294 }
3295
3296 static int
3297 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
3298 {
3299 struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
3300 .valid = htole32(valid_tx_ant),
3301 };
3302
3303 return iwm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD, 0,
3304 sizeof(tx_ant_cmd), &tx_ant_cmd);
3305 }
3306
3307 static int
3308 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
3309 {
3310 struct iwm_phy_cfg_cmd phy_cfg_cmd;
3311 enum iwm_ucode_type ucode_type = sc->sc_uc_current;
3312
3313 phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
3314 phy_cfg_cmd.calib_control.event_trigger =
3315 sc->sc_default_calib[ucode_type].event_trigger;
3316 phy_cfg_cmd.calib_control.flow_trigger =
3317 sc->sc_default_calib[ucode_type].flow_trigger;
3318
3319 DPRINTFN(10, ("Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg));
3320 return iwm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, 0,
3321 sizeof(phy_cfg_cmd), &phy_cfg_cmd);
3322 }
3323
3324 static int
3325 iwm_load_ucode_wait_alive(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3326 {
3327 enum iwm_ucode_type old_type = sc->sc_uc_current;
3328 int err;
3329
3330 err = iwm_read_firmware(sc, ucode_type);
3331 if (err)
3332 return err;
3333
3334 sc->sc_uc_current = ucode_type;
3335 err = iwm_start_fw(sc, ucode_type);
3336 if (err) {
3337 sc->sc_uc_current = old_type;
3338 return err;
3339 }
3340
3341 return iwm_post_alive(sc);
3342 }
3343
3344 static int
3345 iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
3346 {
3347 int err;
3348
3349 if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
3350 aprint_error_dev(sc->sc_dev,
3351 "radio is disabled by hardware switch\n");
3352 return EPERM;
3353 }
3354
3355 sc->sc_init_complete = 0;
3356 err = iwm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_INIT);
3357 if (err) {
3358 DPRINTF(("%s: failed to load init firmware\n", DEVNAME(sc)));
3359 return err;
3360 }
3361
3362 if (justnvm) {
3363 err = iwm_nvm_init(sc);
3364 if (err) {
3365 aprint_error_dev(sc->sc_dev, "failed to read nvm\n");
3366 return err;
3367 }
3368
3369 memcpy(&sc->sc_ic.ic_myaddr, &sc->sc_nvm.hw_addr,
3370 ETHER_ADDR_LEN);
3371 return 0;
3372 }
3373
3374 err = iwm_send_bt_init_conf(sc);
3375 if (err)
3376 return err;
3377
3378 err = iwm_sf_config(sc, IWM_SF_INIT_OFF);
3379 if (err)
3380 return err;
3381
3382 err = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc));
3383 if (err)
3384 return err;
3385
3386 /*
3387 	 * Send the PHY configuration command to the init uCode to start
3388 	 * the internal calibrations of the 16.0 uCode init image.
3389 */
3390 err = iwm_send_phy_cfg_cmd(sc);
3391 if (err)
3392 return err;
3393
3394 /*
3395 * Nothing to do but wait for the init complete notification
3396 * from the firmware
3397 */
3398 while (!sc->sc_init_complete) {
3399 err = tsleep(&sc->sc_init_complete, 0, "iwminit", mstohz(2000));
3400 if (err)
3401 break;
3402 }
3403
3404 return err;
3405 }
3406
3407 static int
3408 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
3409 {
3410 struct iwm_rx_ring *ring = &sc->rxq;
3411 struct iwm_rx_data *data = &ring->data[idx];
3412 struct mbuf *m;
3413 int err;
3414 int fatal = 0;
3415
3416 m = m_gethdr(M_DONTWAIT, MT_DATA);
3417 if (m == NULL)
3418 return ENOBUFS;
3419
3420 if (size <= MCLBYTES) {
3421 MCLGET(m, M_DONTWAIT);
3422 } else {
3423 MEXTMALLOC(m, IWM_RBUF_SIZE, M_DONTWAIT);
3424 }
3425 if ((m->m_flags & M_EXT) == 0) {
3426 m_freem(m);
3427 return ENOBUFS;
3428 }
3429
3430 if (data->m != NULL) {
3431 bus_dmamap_unload(sc->sc_dmat, data->map);
3432 fatal = 1;
3433 }
3434
3435 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3436 err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
3437 BUS_DMA_READ|BUS_DMA_NOWAIT);
3438 if (err) {
3439 /* XXX */
3440 if (fatal)
3441 panic("iwm: could not load RX mbuf");
3442 m_freem(m);
3443 return err;
3444 }
3445 data->m = m;
3446 bus_dmamap_sync(sc->sc_dmat, data->map, 0, size, BUS_DMASYNC_PREREAD);
3447
3448 /* Update RX descriptor. */
3449 ring->desc[idx] = htole32(data->map->dm_segs[0].ds_addr >> 8);
3450 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
3451 idx * sizeof(uint32_t), sizeof(uint32_t), BUS_DMASYNC_PREWRITE);
3452
3453 return 0;
3454 }
3455
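/*
 * Legacy RSSI calculation, used when the firmware does not advertise
 * IWM_UCODE_TLV_FLAGS_RX_ENERGY_API: the per-antenna dBm value is
 * computed as rssi - IWM_RSSI_OFFSET - agc, and the stronger of the
 * two antennas is reported.
 */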
3456 #define IWM_RSSI_OFFSET 50
3457 static int
3458 iwm_calc_rssi(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3459 {
3460 int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
3461 uint32_t agc_a, agc_b;
3462 uint32_t val;
3463
3464 val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_AGC_IDX]);
3465 agc_a = (val & IWM_OFDM_AGC_A_MSK) >> IWM_OFDM_AGC_A_POS;
3466 agc_b = (val & IWM_OFDM_AGC_B_MSK) >> IWM_OFDM_AGC_B_POS;
3467
3468 val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_RSSI_AB_IDX]);
3469 rssi_a = (val & IWM_OFDM_RSSI_INBAND_A_MSK) >> IWM_OFDM_RSSI_A_POS;
3470 rssi_b = (val & IWM_OFDM_RSSI_INBAND_B_MSK) >> IWM_OFDM_RSSI_B_POS;
3471
3472 /*
3473 * dBm = rssi dB - agc dB - constant.
3474 * Higher AGC (higher radio gain) means lower signal.
3475 */
3476 rssi_a_dbm = rssi_a - IWM_RSSI_OFFSET - agc_a;
3477 rssi_b_dbm = rssi_b - IWM_RSSI_OFFSET - agc_b;
3478 max_rssi_dbm = MAX(rssi_a_dbm, rssi_b_dbm);
3479
3480 DPRINTF(("Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
3481 rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b));
3482
3483 return max_rssi_dbm;
3484 }
3485
3486 /*
3487  * RSSI values are reported by the FW as positive values - need to negate
3488  * them to obtain their dBm. Account for missing antennas by replacing 0
3489  * values by -256dBm: practically 0 power and a non-feasible 8-bit value.
3490 */
3491 static int
3492 iwm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3493 {
3494 int energy_a, energy_b, energy_c, max_energy;
3495 uint32_t val;
3496
3497 val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
3498 energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3499 IWM_RX_INFO_ENERGY_ANT_A_POS;
3500 energy_a = energy_a ? -energy_a : -256;
3501 energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3502 IWM_RX_INFO_ENERGY_ANT_B_POS;
3503 energy_b = energy_b ? -energy_b : -256;
3504 energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3505 IWM_RX_INFO_ENERGY_ANT_C_POS;
3506 energy_c = energy_c ? -energy_c : -256;
3507 max_energy = MAX(energy_a, energy_b);
3508 max_energy = MAX(max_energy, energy_c);
3509
3510 DPRINTFN(12, ("energy In A %d B %d C %d, and max %d\n",
3511 energy_a, energy_b, energy_c, max_energy));
3512
3513 return max_energy;
3514 }
3515
3516 static void
3517 iwm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3518 struct iwm_rx_data *data)
3519 {
3520 struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
3521
3522 DPRINTFN(20, ("received PHY stats\n"));
3523 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
3524 sizeof(*phy_info), BUS_DMASYNC_POSTREAD);
3525
3526 memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3527 }
3528
3529 /*
3530 * Retrieve the average noise (in dBm) among receivers.
3531 */
3532 static int
3533 iwm_get_noise(const struct iwm_statistics_rx_non_phy *stats)
3534 {
3535 int i, total, nbant, noise;
3536
3537 total = nbant = noise = 0;
3538 for (i = 0; i < 3; i++) {
3539 noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
3540 if (noise) {
3541 total += noise;
3542 nbant++;
3543 }
3544 }
3545
3546 /* There should be at least one antenna but check anyway. */
3547 return (nbant == 0) ? -127 : (total / nbant) - 107;
3548 }
3549
3550 static void
3551 iwm_rx_rx_mpdu(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3552 struct iwm_rx_data *data)
3553 {
3554 struct ieee80211com *ic = &sc->sc_ic;
3555 struct ieee80211_frame *wh;
3556 struct ieee80211_node *ni;
3557 struct ieee80211_channel *c = NULL;
3558 struct mbuf *m;
3559 struct iwm_rx_phy_info *phy_info;
3560 struct iwm_rx_mpdu_res_start *rx_res;
3561 int device_timestamp;
3562 uint32_t len;
3563 uint32_t rx_pkt_status;
3564 int rssi;
3565 int s;
3566
3567 bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
3568 BUS_DMASYNC_POSTREAD);
3569
3570 phy_info = &sc->sc_last_phy_info;
3571 rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
3572 wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
3573 len = le16toh(rx_res->byte_count);
3574 rx_pkt_status = le32toh(*(uint32_t *)(pkt->data +
3575 sizeof(*rx_res) + len));
3576
3577 m = data->m;
3578 m->m_data = pkt->data + sizeof(*rx_res);
3579 m->m_pkthdr.len = m->m_len = len;
3580
3581 if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
3582 DPRINTF(("dsp size out of range [0,20]: %d\n",
3583 phy_info->cfg_phy_cnt));
3584 return;
3585 }
3586
3587 if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
3588 !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
3589 DPRINTF(("Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status));
3590 return; /* drop */
3591 }
3592
3593 device_timestamp = le32toh(phy_info->system_timestamp);
3594
3595 if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_RX_ENERGY_API) {
3596 rssi = iwm_get_signal_strength(sc, phy_info);
3597 } else {
3598 rssi = iwm_calc_rssi(sc, phy_info);
3599 }
3600 rssi = -rssi;
3601
3602 if (ic->ic_state == IEEE80211_S_SCAN)
3603 iwm_fix_channel(sc, m);
3604
3605 if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0)
3606 return;
3607
3608 m_set_rcvif(m, IC2IFP(ic));
3609
3610 if (le32toh(phy_info->channel) < __arraycount(ic->ic_channels))
3611 c = &ic->ic_channels[le32toh(phy_info->channel)];
3612
3613 s = splnet();
3614
3615 ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3616 if (c)
3617 ni->ni_chan = c;
3618
3619 if (__predict_false(sc->sc_drvbpf != NULL)) {
3620 struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3621
3622 tap->wr_flags = 0;
3623 if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3624 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3625 tap->wr_chan_freq =
3626 htole16(ic->ic_channels[phy_info->channel].ic_freq);
3627 tap->wr_chan_flags =
3628 htole16(ic->ic_channels[phy_info->channel].ic_flags);
3629 tap->wr_dbm_antsignal = (int8_t)rssi;
3630 tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3631 tap->wr_tsft = phy_info->system_timestamp;
3632 if (phy_info->phy_flags &
3633 htole16(IWM_RX_RES_PHY_FLAGS_OFDM_HT)) {
3634 uint8_t mcs = (phy_info->rate_n_flags &
3635 htole32(IWM_RATE_HT_MCS_RATE_CODE_MSK |
3636 IWM_RATE_HT_MCS_NSS_MSK));
3637 tap->wr_rate = (0x80 | mcs);
3638 } else {
3639 uint8_t rate = (phy_info->rate_n_flags &
3640 htole32(IWM_RATE_LEGACY_RATE_MSK));
3641 switch (rate) {
3642 /* CCK rates. */
3643 case 10: tap->wr_rate = 2; break;
3644 case 20: tap->wr_rate = 4; break;
3645 case 55: tap->wr_rate = 11; break;
3646 case 110: tap->wr_rate = 22; break;
3647 /* OFDM rates. */
3648 case 0xd: tap->wr_rate = 12; break;
3649 case 0xf: tap->wr_rate = 18; break;
3650 case 0x5: tap->wr_rate = 24; break;
3651 case 0x7: tap->wr_rate = 36; break;
3652 case 0x9: tap->wr_rate = 48; break;
3653 case 0xb: tap->wr_rate = 72; break;
3654 case 0x1: tap->wr_rate = 96; break;
3655 case 0x3: tap->wr_rate = 108; break;
3656 /* Unknown rate: should not happen. */
3657 default: tap->wr_rate = 0;
3658 }
3659 }
3660
3661 bpf_mtap2(sc->sc_drvbpf, tap, sc->sc_rxtap_len, m);
3662 }
3663 ieee80211_input(ic, m, ni, rssi, device_timestamp);
3664 ieee80211_free_node(ni);
3665
3666 splx(s);
3667 }
3668
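/*
 * Handle a TX completion for a single, non-aggregated frame: update
 * the AMRR rate control statistics and the interface output counters.
 */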
3669 static void
3670 iwm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3671 struct iwm_node *in)
3672 {
3673 struct ieee80211com *ic = &sc->sc_ic;
3674 struct ifnet *ifp = IC2IFP(ic);
3675 struct iwm_tx_resp *tx_resp = (void *)pkt->data;
3676 int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3677 int failack = tx_resp->failure_frame;
3678
3679 KASSERT(tx_resp->frame_count == 1);
3680
3681 /* Update rate control statistics. */
3682 in->in_amn.amn_txcnt++;
3683 if (failack > 0) {
3684 in->in_amn.amn_retrycnt++;
3685 }
3686
3687 if (status != IWM_TX_STATUS_SUCCESS &&
3688 status != IWM_TX_STATUS_DIRECT_DONE)
3689 ifp->if_oerrors++;
3690 else
3691 ifp->if_opackets++;
3692 }
3693
3694 static void
3695 iwm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3696 struct iwm_rx_data *data)
3697 {
3698 struct ieee80211com *ic = &sc->sc_ic;
3699 struct ifnet *ifp = IC2IFP(ic);
3700 struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
3701 int idx = cmd_hdr->idx;
3702 int qid = cmd_hdr->qid;
3703 struct iwm_tx_ring *ring = &sc->txq[qid];
3704 struct iwm_tx_data *txd = &ring->data[idx];
3705 struct iwm_node *in = txd->in;
3706 int s;
3707
3708 s = splnet();
3709
3710 if (txd->done) {
3711 DPRINTF(("%s: got tx interrupt that's already been handled!\n",
3712 DEVNAME(sc)));
3713 splx(s);
3714 return;
3715 }
3716
3717 bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
3718 BUS_DMASYNC_POSTREAD);
3719
3720 sc->sc_tx_timer = 0;
3721
3722 iwm_rx_tx_cmd_single(sc, pkt, in);
3723
3724 bus_dmamap_sync(sc->sc_dmat, txd->map, 0, txd->map->dm_mapsize,
3725 BUS_DMASYNC_POSTWRITE);
3726 bus_dmamap_unload(sc->sc_dmat, txd->map);
3727 m_freem(txd->m);
3728
3729 DPRINTFN(8, ("free txd %p, in %p\n", txd, txd->in));
3730 KASSERT(txd->done == 0);
3731 txd->done = 1;
3732 KASSERT(txd->in);
3733
3734 txd->m = NULL;
3735 txd->in = NULL;
3736 ieee80211_free_node(&in->in_ni);
3737
3738 if (--ring->queued < IWM_TX_RING_LOMARK) {
3739 sc->qfullmsk &= ~(1 << qid);
3740 if (sc->qfullmsk == 0 && (ifp->if_flags & IFF_OACTIVE)) {
3741 ifp->if_flags &= ~IFF_OACTIVE;
3742 if_start_lock(ifp);
3743 }
3744 }
3745
3746 splx(s);
3747 }
3748
3749 static int
3750 iwm_binding_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action)
3751 {
3752 struct iwm_binding_cmd cmd;
3753 struct iwm_phy_ctxt *phyctxt = in->in_phyctxt;
3754 int i, err;
3755 uint32_t status;
3756
3757 memset(&cmd, 0, sizeof(cmd));
3758
3759 cmd.id_and_color
3760 = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
3761 cmd.action = htole32(action);
3762 cmd.phy = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
3763
3764 cmd.macs[0] = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
3765 for (i = 1; i < IWM_MAX_MACS_IN_BINDING; i++)
3766 cmd.macs[i] = htole32(IWM_FW_CTXT_INVALID);
3767
3768 status = 0;
3769 err = iwm_send_cmd_pdu_status(sc, IWM_BINDING_CONTEXT_CMD,
3770 sizeof(cmd), &cmd, &status);
3771 if (err == 0 && status != 0)
3772 err = EIO;
3773
3774 return err;
3775 }
3776
3777 static void
3778 iwm_phy_ctxt_cmd_hdr(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
3779 struct iwm_phy_context_cmd *cmd, uint32_t action, uint32_t apply_time)
3780 {
3781 memset(cmd, 0, sizeof(struct iwm_phy_context_cmd));
3782
3783 cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(ctxt->id,
3784 ctxt->color));
3785 cmd->action = htole32(action);
3786 cmd->apply_time = htole32(apply_time);
3787 }
3788
3789 static void
3790 iwm_phy_ctxt_cmd_data(struct iwm_softc *sc, struct iwm_phy_context_cmd *cmd,
3791 struct ieee80211_channel *chan, uint8_t chains_static,
3792 uint8_t chains_dynamic)
3793 {
3794 struct ieee80211com *ic = &sc->sc_ic;
3795 uint8_t active_cnt, idle_cnt;
3796
3797 cmd->ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
3798 IWM_PHY_BAND_24 : IWM_PHY_BAND_5;
3799
3800 cmd->ci.channel = ieee80211_chan2ieee(ic, chan);
3801 cmd->ci.width = IWM_PHY_VHT_CHANNEL_MODE20;
3802 cmd->ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
3803
3804 /* Set the RX chains. */
3805 idle_cnt = chains_static;
3806 active_cnt = chains_dynamic;
3807
3808 cmd->rxchain_info = htole32(iwm_fw_valid_rx_ant(sc) <<
3809 IWM_PHY_RX_CHAIN_VALID_POS);
3810 cmd->rxchain_info |= htole32(idle_cnt << IWM_PHY_RX_CHAIN_CNT_POS);
3811 cmd->rxchain_info |= htole32(active_cnt <<
3812 IWM_PHY_RX_CHAIN_MIMO_CNT_POS);
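/*
 * For instance (a reading of the IWM_PHY_RX_CHAIN_* fields): with a
 * valid RX antenna mask of 0x3, one static and one dynamic chain,
 * rxchain_info holds 0x3 << VALID_POS | 1 << CNT_POS |
 * 1 << MIMO_CNT_POS.
 */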
3813
3814 cmd->txchain_info = htole32(iwm_fw_valid_tx_ant(sc));
3815 }
3816
3817 static int
3818 iwm_phy_ctxt_cmd(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
3819 uint8_t chains_static, uint8_t chains_dynamic, uint32_t action,
3820 uint32_t apply_time)
3821 {
3822 struct iwm_phy_context_cmd cmd;
3823
3824 iwm_phy_ctxt_cmd_hdr(sc, ctxt, &cmd, action, apply_time);
3825
3826 iwm_phy_ctxt_cmd_data(sc, &cmd, ctxt->channel,
3827 chains_static, chains_dynamic);
3828
3829 return iwm_send_cmd_pdu(sc, IWM_PHY_CONTEXT_CMD, 0,
3830 sizeof(struct iwm_phy_context_cmd), &cmd);
3831 }
3832
3833 static int
3834 iwm_send_cmd(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
3835 {
3836 struct iwm_tx_ring *ring = &sc->txq[IWM_CMD_QUEUE];
3837 struct iwm_tfd *desc;
3838 struct iwm_tx_data *txdata;
3839 struct iwm_device_cmd *cmd;
3840 struct mbuf *m;
3841 bus_addr_t paddr;
3842 uint32_t addr_lo;
3843 int err = 0, i, paylen, off, s;
3844 int code;
3845 int async, wantresp;
3846 int group_id;
3847 size_t hdrlen, datasz;
3848 uint8_t *data;
3849
3850 code = hcmd->id;
3851 async = hcmd->flags & IWM_CMD_ASYNC;
3852 wantresp = hcmd->flags & IWM_CMD_WANT_SKB;
3853
3854 for (i = 0, paylen = 0; i < __arraycount(hcmd->len); i++) {
3855 paylen += hcmd->len[i];
3856 }
3857
3858 /* If the command wants an answer, mark sc_cmd_resp busy. */
3859 if (wantresp) {
3860 KASSERT(!async);
3861 while (sc->sc_wantresp != IWM_CMD_RESP_IDLE)
3862 tsleep(&sc->sc_wantresp, 0, "iwmcmdsl", 0);
3863 sc->sc_wantresp = ring->qid << 16 | ring->cur;
3864 }
3865
3866 /*
3867 * Is the hardware still available? (e.g. after the wait above)
3868 */
3869 s = splnet();
3870 if (sc->sc_flags & IWM_FLAG_STOPPED) {
3871 err = ENXIO;
3872 goto out;
3873 }
3874
3875 desc = &ring->desc[ring->cur];
3876 txdata = &ring->data[ring->cur];
3877
3878 group_id = iwm_cmd_groupid(code);
3879 if (group_id != 0) {
3880 hdrlen = sizeof(cmd->hdr_wide);
3881 datasz = sizeof(cmd->data_wide);
3882 } else {
3883 hdrlen = sizeof(cmd->hdr);
3884 datasz = sizeof(cmd->data);
3885 }
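/*
 * Wide ("long format") command headers carry a group id, a 16-bit
 * length and a version byte, so they are larger than legacy headers
 * and leave less payload space in the pre-allocated command slot.
 */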
3886
3887 if (paylen > datasz) {
3888 /* Command is too large to fit in pre-allocated space. */
3889 size_t totlen = hdrlen + paylen;
3890 if (paylen > IWM_MAX_CMD_PAYLOAD_SIZE) {
3891 aprint_error_dev(sc->sc_dev,
3892 "firmware command too long (%zd bytes)\n", totlen);
3893 err = EINVAL;
3894 goto out;
3895 }
3896 m = m_gethdr(M_DONTWAIT, MT_DATA);
3897 if (m == NULL) {
3898 err = ENOMEM;
3899 goto out;
3900 }
3901 MEXTMALLOC(m, IWM_RBUF_SIZE, M_DONTWAIT);
3902 if (!(m->m_flags & M_EXT)) {
3903 aprint_error_dev(sc->sc_dev,
3904 "could not get fw cmd mbuf (%zd bytes)\n", totlen);
3905 m_freem(m);
3906 err = ENOMEM;
3907 goto out;
3908 }
3909 cmd = mtod(m, struct iwm_device_cmd *);
3910 err = bus_dmamap_load(sc->sc_dmat, txdata->map, cmd,
3911 totlen, NULL, BUS_DMA_NOWAIT | BUS_DMA_WRITE);
3912 if (err) {
3913 aprint_error_dev(sc->sc_dev,
3914 "could not load fw cmd mbuf (%zd bytes)\n", totlen);
3915 m_freem(m);
3916 goto out;
3917 }
3918 txdata->m = m;
3919 paddr = txdata->map->dm_segs[0].ds_addr;
3920 } else {
3921 cmd = &ring->cmd[ring->cur];
3922 paddr = txdata->cmd_paddr;
3923 }
3924
3925 if (group_id != 0) {
3926 cmd->hdr_wide.opcode = iwm_cmd_opcode(code);
3927 cmd->hdr_wide.group_id = group_id;
3928 cmd->hdr_wide.qid = ring->qid;
3929 cmd->hdr_wide.idx = ring->cur;
3930 cmd->hdr_wide.length = htole16(paylen);
3931 cmd->hdr_wide.version = iwm_cmd_version(code);
3932 data = cmd->data_wide;
3933 } else {
3934 cmd->hdr.code = code;
3935 cmd->hdr.flags = 0;
3936 cmd->hdr.qid = ring->qid;
3937 cmd->hdr.idx = ring->cur;
3938 data = cmd->data;
3939 }
3940
3941 for (i = 0, off = 0; i < __arraycount(hcmd->data); i++) {
3942 if (hcmd->len[i] == 0)
3943 continue;
3944 memcpy(data + off, hcmd->data[i], hcmd->len[i]);
3945 off += hcmd->len[i];
3946 }
3947 KASSERT(off == paylen);
3948
3949 /* lo field is not aligned */
3950 addr_lo = htole32((uint32_t)paddr);
3951 memcpy(&desc->tbs[0].lo, &addr_lo, sizeof(uint32_t));
3952 desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(paddr)
3953 | ((hdrlen + paylen) << 4));
3954 desc->num_tbs = 1;
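/*
 * A worked example of the hi_n_len packing above (a sketch; see
 * iwm_get_dma_hi_addr()): the low nibble holds DMA address bits
 * 32-35 and the upper 12 bits hold the byte count, so a 64-byte
 * command at physical address 0x123456780 yields
 * hi_n_len = htole16(0x1 | (64 << 4)) = htole16(0x401).
 */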
3955
3956 DPRINTFN(8, ("iwm_send_cmd 0x%x size=%zu %s\n",
3957 code, hdrlen + paylen, async ? " (async)" : ""));
3958
3959 if (paylen > datasz) {
3960 bus_dmamap_sync(sc->sc_dmat, txdata->map, 0, hdrlen + paylen,
3961 BUS_DMASYNC_PREWRITE);
3962 } else {
3963 bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
3964 (uint8_t *)cmd - (uint8_t *)ring->cmd, hdrlen + paylen,
3965 BUS_DMASYNC_PREWRITE);
3966 }
3967 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
3968 (uint8_t *)desc - (uint8_t *)ring->desc, sizeof(*desc),
3969 BUS_DMASYNC_PREWRITE);
3970
3971 err = iwm_set_cmd_in_flight(sc);
3972 if (err)
3973 goto out;
3974 ring->queued++;
3975
3976 #if 0
3977 iwm_update_sched(sc, ring->qid, ring->cur, 0, 0);
3978 #endif
3979 DPRINTF(("sending command 0x%x qid %d, idx %d\n",
3980 code, ring->qid, ring->cur));
3981
3982 /* Kick command ring. */
3983 ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
3984 IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3985
3986 if (!async) {
3987 int generation = sc->sc_generation;
3988 err = tsleep(desc, PCATCH, "iwmcmd", mstohz(1000));
3989 if (err == 0) {
3990 /* if hardware is no longer up, return error */
3991 if (generation != sc->sc_generation) {
3992 err = ENXIO;
3993 } else {
3994 hcmd->resp_pkt = (void *)sc->sc_cmd_resp;
3995 }
3996 }
3997 }
3998 out:
3999 if (wantresp && err) {
4000 iwm_free_resp(sc, hcmd);
4001 }
4002 splx(s);
4003
4004 return err;
4005 }
4006
4007 static int
4008 iwm_send_cmd_pdu(struct iwm_softc *sc, uint32_t id, uint32_t flags,
4009 uint16_t len, const void *data)
4010 {
4011 struct iwm_host_cmd cmd = {
4012 .id = id,
4013 .len = { len, },
4014 .data = { data, },
4015 .flags = flags,
4016 };
4017
4018 return iwm_send_cmd(sc, &cmd);
4019 }
4020
4021 static int
4022 iwm_send_cmd_status(struct iwm_softc *sc, struct iwm_host_cmd *cmd,
4023 uint32_t *status)
4024 {
4025 struct iwm_rx_packet *pkt;
4026 struct iwm_cmd_response *resp;
4027 int err, resp_len;
4028
4029 KASSERT((cmd->flags & IWM_CMD_WANT_SKB) == 0);
4030 cmd->flags |= IWM_CMD_WANT_SKB;
4031
4032 err = iwm_send_cmd(sc, cmd);
4033 if (err)
4034 return err;
4035 pkt = cmd->resp_pkt;
4036
4037 /* Can happen if RFKILL is asserted */
4038 if (!pkt) {
4039 err = 0;
4040 goto out_free_resp;
4041 }
4042
4043 if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
4044 err = EIO;
4045 goto out_free_resp;
4046 }
4047
4048 resp_len = iwm_rx_packet_payload_len(pkt);
4049 if (resp_len != sizeof(*resp)) {
4050 err = EIO;
4051 goto out_free_resp;
4052 }
4053
4054 resp = (void *)pkt->data;
4055 *status = le32toh(resp->status);
4056 out_free_resp:
4057 iwm_free_resp(sc, cmd);
4058 return err;
4059 }
4060
4061 static int
4062 iwm_send_cmd_pdu_status(struct iwm_softc *sc, uint32_t id, uint16_t len,
4063 const void *data, uint32_t *status)
4064 {
4065 struct iwm_host_cmd cmd = {
4066 .id = id,
4067 .len = { len, },
4068 .data = { data, },
4069 };
4070
4071 return iwm_send_cmd_status(sc, &cmd, status);
4072 }
4073
4074 static void
4075 iwm_free_resp(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
4076 {
4077 KASSERT(sc->sc_wantresp != IWM_CMD_RESP_IDLE);
4078 KASSERT((hcmd->flags & IWM_CMD_WANT_SKB) == IWM_CMD_WANT_SKB);
4079 sc->sc_wantresp = IWM_CMD_RESP_IDLE;
4080 wakeup(&sc->sc_wantresp);
4081 }
4082
4083 static void
4084 iwm_cmd_done(struct iwm_softc *sc, int qid, int idx)
4085 {
4086 struct iwm_tx_ring *ring = &sc->txq[IWM_CMD_QUEUE];
4087 struct iwm_tx_data *data;
4088 int s;
4089
4090 if (qid != IWM_CMD_QUEUE) {
4091 return; /* Not a command ack. */
4092 }
4093
4094 s = splnet();
4095
4096 data = &ring->data[idx];
4097
4098 if (data->m != NULL) {
4099 bus_dmamap_sync(sc->sc_dmat, data->map, 0,
4100 data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
4101 bus_dmamap_unload(sc->sc_dmat, data->map);
4102 m_freem(data->m);
4103 data->m = NULL;
4104 }
4105 wakeup(&ring->desc[idx]);
4106
4107 if (((idx + ring->queued) % IWM_TX_RING_COUNT) != ring->cur) {
4108 aprint_error_dev(sc->sc_dev,
4109 "Some HCMDs skipped?: idx=%d queued=%d cur=%d\n",
4110 idx, ring->queued, ring->cur);
4111 }
4112
4113 KASSERT(ring->queued > 0);
4114 if (--ring->queued == 0)
4115 iwm_clear_cmd_in_flight(sc);
4116
4117 splx(s);
4118 }
4119
4120 #if 0
4121 /*
4122 * necessary only for block ack mode
4123 */
4124 void
4125 iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
4126 uint16_t len)
4127 {
4128 struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
4129 uint16_t w_val, *w;
4130
4131 scd_bc_tbl = sc->sched_dma.vaddr;
4132
4133 len += 8; /* magic numbers came naturally from paris */
4134 if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
4135 len = roundup(len, 4) / 4;
4136
4137 w_val = htole16(sta_id << 12 | len);
4138
4139 /* Update TX scheduler. */
4140 w = &scd_bc_tbl[qid].tfd_offset[idx];
4141 *w = w_val;
4142 bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
4143 (char *)(void *)w - (char *)(void *)sc->sched_dma.vaddr, sizeof(uint16_t), BUS_DMASYNC_PREWRITE);
4144
4145 /* I really wonder what this is ?!? */
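/*
 * An educated guess from iwlwifi: the firmware reads the byte-count
 * table through a window that can wrap past the end of the queue,
 * so the first IWM_TFD_QUEUE_SIZE_BC_DUP entries are mirrored
 * beyond entry IWM_TFD_QUEUE_SIZE_MAX.
 */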
4146 if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
4147 scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
4148 bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
4149 (char *)(void *)(w + IWM_TFD_QUEUE_SIZE_MAX) -
4150 (char *)(void *)sc->sched_dma.vaddr,
4151 sizeof (uint16_t), BUS_DMASYNC_PREWRITE);
4152 }
4153 }
4154 #endif
4155
4156 /*
4157 * Fill in various bits for management frames, and leave them
4158 * unfilled for data frames (firmware takes care of that).
4159 * Return the selected TX rate.
4160 */
4161 static const struct iwm_rate *
4162 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
4163 struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
4164 {
4165 struct ieee80211com *ic = &sc->sc_ic;
4166 struct ieee80211_node *ni = &in->in_ni;
4167 const struct iwm_rate *rinfo;
4168 int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4169 int ridx, rate_flags, i, ind;
4170 int nrates = ni->ni_rates.rs_nrates;
4171
4172 tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
4173 tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
4174
4175 if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
4176 type != IEEE80211_FC0_TYPE_DATA) {
4177 /* for non-data, use the lowest supported rate */
4178 ridx = (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) ?
4179 IWM_RIDX_OFDM : IWM_RIDX_CCK;
4180 tx->data_retry_limit = IWM_MGMT_DFAULT_RETRY_LIMIT;
4181 #ifndef IEEE80211_NO_HT
4182 } else if (ic->ic_fixed_mcs != -1) {
4183 ridx = sc->sc_fixed_ridx;
4184 #endif
4185 } else if (ic->ic_fixed_rate != -1) {
4186 ridx = sc->sc_fixed_ridx;
4187 } else {
4188 /* for data frames, use RS table */
4189 tx->initial_rate_index = 0;
4190 tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
4191 DPRINTFN(12, ("start with txrate %d\n",
4192 tx->initial_rate_index));
4193 #ifndef IEEE80211_NO_HT
4194 if (ni->ni_flags & IEEE80211_NODE_HT) {
4195 ridx = iwm_mcs2ridx[ni->ni_txmcs];
4196 return &iwm_rates[ridx];
4197 }
4198 #endif
4199 ridx = (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) ?
4200 IWM_RIDX_OFDM : IWM_RIDX_CCK;
4201 for (i = 0; i < nrates; i++) {
4202 if (iwm_rates[i].rate == (ni->ni_txrate &
4203 IEEE80211_RATE_VAL)) {
4204 ridx = i;
4205 break;
4206 }
4207 }
4208 return &iwm_rates[ridx];
4209 }
4210
4211 rinfo = &iwm_rates[ridx];
4212 for (i = 0, ind = sc->sc_mgmt_last_antenna;
4213 i < IWM_RATE_MCS_ANT_NUM; i++) {
4214 ind = (ind + 1) % IWM_RATE_MCS_ANT_NUM;
4215 if (iwm_fw_valid_tx_ant(sc) & (1 << ind)) {
4216 sc->sc_mgmt_last_antenna = ind;
4217 break;
4218 }
4219 }
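/*
 * The loop above round-robins the management-frame TX antenna:
 * starting after the antenna used last, it picks the next one
 * present in the firmware's valid-TX-antenna mask, so with a mask
 * of 0x3, for instance, successive frames alternate between the
 * two antennas.
 */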
4220 rate_flags = (1 << sc->sc_mgmt_last_antenna) << IWM_RATE_MCS_ANT_POS;
4221 if (IWM_RIDX_IS_CCK(ridx))
4222 rate_flags |= IWM_RATE_MCS_CCK_MSK;
4223 #ifndef IEEE80211_NO_HT
4224 if ((ni->ni_flags & IEEE80211_NODE_HT) &&
4225 rinfo->ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP) {
4226 rate_flags |= IWM_RATE_MCS_HT_MSK;
4227 tx->rate_n_flags = htole32(rate_flags | rinfo->ht_plcp);
4228 } else
4229 #endif
4230 tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
4231
4232 return rinfo;
4233 }
4234
4235 #define TB0_SIZE 16
4236 static int
4237 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
4238 {
4239 struct ieee80211com *ic = &sc->sc_ic;
4240 struct iwm_node *in = (struct iwm_node *)ni;
4241 struct iwm_tx_ring *ring;
4242 struct iwm_tx_data *data;
4243 struct iwm_tfd *desc;
4244 struct iwm_device_cmd *cmd;
4245 struct iwm_tx_cmd *tx;
4246 struct ieee80211_frame *wh;
4247 struct ieee80211_key *k = NULL;
4248 struct mbuf *m1;
4249 const struct iwm_rate *rinfo;
4250 uint32_t flags;
4251 u_int hdrlen;
4252 bus_dma_segment_t *seg;
4253 uint8_t tid, type;
4254 int i, totlen, err, pad;
4255
4256 wh = mtod(m, struct ieee80211_frame *);
4257 hdrlen = ieee80211_anyhdrsize(wh);
4258 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4259
4260 tid = 0;
4261
4262 ring = &sc->txq[ac];
4263 desc = &ring->desc[ring->cur];
4264 memset(desc, 0, sizeof(*desc));
4265 data = &ring->data[ring->cur];
4266
4267 cmd = &ring->cmd[ring->cur];
4268 cmd->hdr.code = IWM_TX_CMD;
4269 cmd->hdr.flags = 0;
4270 cmd->hdr.qid = ring->qid;
4271 cmd->hdr.idx = ring->cur;
4272
4273 tx = (void *)cmd->data;
4274 memset(tx, 0, sizeof(*tx));
4275
4276 rinfo = iwm_tx_fill_cmd(sc, in, wh, tx);
4277
4278 if (__predict_false(sc->sc_drvbpf != NULL)) {
4279 struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
4280
4281 tap->wt_flags = 0;
4282 tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
4283 tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
4284 #ifndef IEEE80211_NO_HT
4285 if ((ni->ni_flags & IEEE80211_NODE_HT) &&
4286 !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
4287 type == IEEE80211_FC0_TYPE_DATA &&
4288 rinfo->plcp == IWM_RATE_INVM_PLCP) {
4289 tap->wt_rate = (0x80 | rinfo->ht_plcp);
4290 } else
4291 #endif
4292 tap->wt_rate = rinfo->rate;
4293 tap->wt_hwqueue = ac;
4294 if (wh->i_fc[1] & IEEE80211_FC1_WEP)
4295 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
4296
4297 bpf_mtap2(sc->sc_drvbpf, tap, sc->sc_txtap_len, m);
4298 }
4299
4300 /* Encrypt the frame if need be. */
4301 if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
4302 k = ieee80211_crypto_encap(ic, ni, m);
4303 if (k == NULL) {
4304 m_freem(m);
4305 return ENOBUFS;
4306 }
4307 /* Packet header may have moved, reset our local pointer. */
4308 wh = mtod(m, struct ieee80211_frame *);
4309 }
4310 totlen = m->m_pkthdr.len;
4311
4312 flags = 0;
4313 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
4314 flags |= IWM_TX_CMD_FLG_ACK;
4315 }
4316
4317 if (type == IEEE80211_FC0_TYPE_DATA &&
4318 !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
4319 (totlen + IEEE80211_CRC_LEN > ic->ic_rtsthreshold ||
4320 (ic->ic_flags & IEEE80211_F_USEPROT)))
4321 flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
4322
4323 if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
4324 type != IEEE80211_FC0_TYPE_DATA)
4325 tx->sta_id = IWM_AUX_STA_ID;
4326 else
4327 tx->sta_id = IWM_STATION_ID;
4328
4329 if (type == IEEE80211_FC0_TYPE_MGT) {
4330 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4331
4332 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
4333 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
4334 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
4335 else
4336 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
4337 } else {
4338 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
4339 }
4340
4341 if (hdrlen & 3) {
4342 /* First segment length must be a multiple of 4. */
4343 flags |= IWM_TX_CMD_FLG_MH_PAD;
4344 pad = 4 - (hdrlen & 3);
4345 } else
4346 pad = 0;
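/*
 * Example: a QoS data header is 26 bytes, so pad = 2 here, and
 * IWM_TX_CMD_FLG_MH_PAD (set above) presumably tells the firmware
 * that two pad bytes sit between header and payload.
 */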
4347
4348 tx->driver_txop = 0;
4349 tx->next_frame_len = 0;
4350
4351 tx->len = htole16(totlen);
4352 tx->tid_tspec = tid;
4353 tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
4354
4355 /* Set physical address of "scratch area". */
4356 tx->dram_lsb_ptr = htole32(data->scratch_paddr);
4357 tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
4358
4359 /* Copy 802.11 header in TX command. */
4360 memcpy(tx + 1, wh, hdrlen);
4361
4362 flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;
4363
4364 tx->sec_ctl = 0;
4365 tx->tx_flags |= htole32(flags);
4366
4367 /* Trim 802.11 header. */
4368 m_adj(m, hdrlen);
4369
4370 err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
4371 BUS_DMA_NOWAIT | BUS_DMA_WRITE);
4372 if (err) {
4373 if (err != EFBIG) {
4374 aprint_error_dev(sc->sc_dev,
4375 "can't map mbuf (error %d)\n", err);
4376 m_freem(m);
4377 return err;
4378 }
4379 /* Too many DMA segments, linearize mbuf. */
4380 MGETHDR(m1, M_DONTWAIT, MT_DATA);
4381 if (m1 == NULL) {
4382 m_freem(m);
4383 return ENOBUFS;
4384 }
4385 if (m->m_pkthdr.len > MHLEN) {
4386 MCLGET(m1, M_DONTWAIT);
4387 if (!(m1->m_flags & M_EXT)) {
4388 m_freem(m);
4389 m_freem(m1);
4390 return ENOBUFS;
4391 }
4392 }
4393 m_copydata(m, 0, m->m_pkthdr.len, mtod(m1, void *));
4394 m1->m_pkthdr.len = m1->m_len = m->m_pkthdr.len;
4395 m_freem(m);
4396 m = m1;
4397
4398 err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
4399 BUS_DMA_NOWAIT | BUS_DMA_WRITE);
4400 if (err) {
4401 aprint_error_dev(sc->sc_dev,
4402 "can't map mbuf (error %d)\n", err);
4403 m_freem(m);
4404 return err;
4405 }
4406 }
4407 data->m = m;
4408 data->in = in;
4409 data->done = 0;
4410
4411 DPRINTFN(8, ("sending txd %p, in %p\n", data, data->in));
4412 KASSERT(data->in != NULL);
4413
4414 DPRINTFN(8, ("sending data: qid=%d idx=%d len=%d nsegs=%d type=%d "
4415 "subtype=%x tx_flags=%08x init_rateidx=%08x rate_n_flags=%08x\n",
4416 ring->qid, ring->cur, totlen, data->map->dm_nsegs, type,
4417 (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) >> 4,
4418 le32toh(tx->tx_flags), le32toh(tx->initial_rate_index),
4419 le32toh(tx->rate_n_flags)));
4420
4421 /* Fill TX descriptor. */
4422 desc->num_tbs = 2 + data->map->dm_nsegs;
4423
4424 desc->tbs[0].lo = htole32(data->cmd_paddr);
4425 desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
4426 (TB0_SIZE << 4);
4427 desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
4428 desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
4429 ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
4430 + hdrlen + pad - TB0_SIZE) << 4);
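/*
 * TB0 always covers the first TB0_SIZE (16) bytes of the TX
 * command; TB1 covers the rest of the command structure plus the
 * copied (and padded) 802.11 header, hence the
 * sizeof(struct iwm_cmd_header) + sizeof(*tx) + hdrlen + pad
 * - TB0_SIZE length above.
 */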
4431
4432 /* Other DMA segments are for data payload. */
4433 seg = data->map->dm_segs;
4434 for (i = 0; i < data->map->dm_nsegs; i++, seg++) {
4435 desc->tbs[i+2].lo = htole32(seg->ds_addr);
4436 desc->tbs[i+2].hi_n_len =
4437 htole16(iwm_get_dma_hi_addr(seg->ds_addr))
4438 | ((seg->ds_len) << 4);
4439 }
4440
4441 bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize,
4442 BUS_DMASYNC_PREWRITE);
4443 bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
4444 (uint8_t *)cmd - (uint8_t *)ring->cmd, sizeof(*cmd),
4445 BUS_DMASYNC_PREWRITE);
4446 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
4447 (uint8_t *)desc - (uint8_t *)ring->desc, sizeof(*desc),
4448 BUS_DMASYNC_PREWRITE);
4449
4450 #if 0
4451 iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id,
4452 le16toh(tx->len));
4453 #endif
4454
4455 /* Kick TX ring. */
4456 ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
4457 IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
4458
4459 /* Mark TX ring as full if we reach a certain threshold. */
4460 if (++ring->queued > IWM_TX_RING_HIMARK) {
4461 sc->qfullmsk |= 1 << ring->qid;
4462 }
4463
4464 return 0;
4465 }
4466
4467 #if 0
4468 /* not necessary? */
4469 static int
4470 iwm_flush_tx_path(struct iwm_softc *sc, int tfd_msk, int sync)
4471 {
4472 struct iwm_tx_path_flush_cmd flush_cmd = {
4473 .queues_ctl = htole32(tfd_msk),
4474 .flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
4475 };
4476 int err;
4477
4478 err = iwm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, sync ? 0 : IWM_CMD_ASYNC,
4479 sizeof(flush_cmd), &flush_cmd);
4480 if (err)
4481 aprint_error_dev(sc->sc_dev, "Flushing tx queue failed: %d\n",
4482 err);
4483 return err;
4484 }
4485 #endif
4486
4487 static void
4488 iwm_led_enable(struct iwm_softc *sc)
4489 {
4490 IWM_WRITE(sc, IWM_CSR_LED_REG, IWM_CSR_LED_REG_TURN_ON);
4491 }
4492
4493 static void
4494 iwm_led_disable(struct iwm_softc *sc)
4495 {
4496 IWM_WRITE(sc, IWM_CSR_LED_REG, IWM_CSR_LED_REG_TURN_OFF);
4497 }
4498
4499 static int
4500 iwm_led_is_enabled(struct iwm_softc *sc)
4501 {
4502 return (IWM_READ(sc, IWM_CSR_LED_REG) == IWM_CSR_LED_REG_TURN_ON);
4503 }
4504
4505 static void
4506 iwm_led_blink_timeout(void *arg)
4507 {
4508 struct iwm_softc *sc = arg;
4509
4510 if (iwm_led_is_enabled(sc))
4511 iwm_led_disable(sc);
4512 else
4513 iwm_led_enable(sc);
4514
4515 callout_schedule(&sc->sc_led_blink_to, mstohz(200));
4516 }
4517
4518 static void
4519 iwm_led_blink_start(struct iwm_softc *sc)
4520 {
4521 callout_schedule(&sc->sc_led_blink_to, mstohz(200));
4522 }
4523
4524 static void
4525 iwm_led_blink_stop(struct iwm_softc *sc)
4526 {
4527 callout_stop(&sc->sc_led_blink_to);
4528 iwm_led_disable(sc);
4529 }
4530
4531 #define IWM_POWER_KEEP_ALIVE_PERIOD_SEC 25
4532
4533 static int
4534 iwm_beacon_filter_send_cmd(struct iwm_softc *sc,
4535 struct iwm_beacon_filter_cmd *cmd)
4536 {
4537 return iwm_send_cmd_pdu(sc, IWM_REPLY_BEACON_FILTERING_CMD,
4538 0, sizeof(struct iwm_beacon_filter_cmd), cmd);
4539 }
4540
4541 static void
4542 iwm_beacon_filter_set_cqm_params(struct iwm_softc *sc, struct iwm_node *in,
4543 struct iwm_beacon_filter_cmd *cmd)
4544 {
4545 cmd->ba_enable_beacon_abort = htole32(sc->sc_bf.ba_enabled);
4546 }
4547
4548 static int
4549 iwm_update_beacon_abort(struct iwm_softc *sc, struct iwm_node *in, int enable)
4550 {
4551 struct iwm_beacon_filter_cmd cmd = {
4552 IWM_BF_CMD_CONFIG_DEFAULTS,
4553 .bf_enable_beacon_filter = htole32(1),
4554 .ba_enable_beacon_abort = htole32(enable),
4555 };
4556
4557 if (!sc->sc_bf.bf_enabled)
4558 return 0;
4559
4560 sc->sc_bf.ba_enabled = enable;
4561 iwm_beacon_filter_set_cqm_params(sc, in, &cmd);
4562 return iwm_beacon_filter_send_cmd(sc, &cmd);
4563 }
4564
4565 static void
4566 iwm_power_build_cmd(struct iwm_softc *sc, struct iwm_node *in,
4567 struct iwm_mac_power_cmd *cmd)
4568 {
4569 struct ieee80211_node *ni = &in->in_ni;
4570 int dtim_period, dtim_msec, keep_alive;
4571
4572 cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
4573 in->in_color));
4574 if (ni->ni_dtim_period)
4575 dtim_period = ni->ni_dtim_period;
4576 else
4577 dtim_period = 1;
4578
4579 /*
4580 * Regardless of power management state, the driver must set the
4581 * keep alive period. The FW will use it for sending keep alive NDPs
4582 * immediately after association. Ensure that the keep alive period
4583 * is at least 3 * DTIM.
4584 */
4585 dtim_msec = dtim_period * ni->ni_intval;
4586 keep_alive = MAX(3 * dtim_msec, 1000 * IWM_POWER_KEEP_ALIVE_PERIOD_SEC);
4587 keep_alive = roundup(keep_alive, 1000) / 1000;
4588 cmd->keep_alive_seconds = htole16(keep_alive);
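/*
 * Worked example, assuming a beacon interval of 100 and DTIM
 * period 1: dtim_msec = 100, so keep_alive = MAX(300, 25000) =
 * 25000, which rounds up to 25 seconds; the 3 * DTIM lower bound
 * only wins for very long DTIM intervals.
 */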
4589
4590 #ifdef notyet
4591 cmd->flags = htole16(IWM_POWER_FLAGS_POWER_SAVE_ENA_MSK);
4592 cmd->rx_data_timeout = IWM_DEFAULT_PS_RX_DATA_TIMEOUT;
4593 cmd->tx_data_timeout = IWM_DEFAULT_PS_TX_DATA_TIMEOUT;
4594 #endif
4595 }
4596
4597 static int
4598 iwm_power_mac_update_mode(struct iwm_softc *sc, struct iwm_node *in)
4599 {
4600 int err;
4601 int ba_enable;
4602 struct iwm_mac_power_cmd cmd;
4603
4604 memset(&cmd, 0, sizeof(cmd));
4605
4606 iwm_power_build_cmd(sc, in, &cmd);
4607
4608 err = iwm_send_cmd_pdu(sc, IWM_MAC_PM_POWER_TABLE, 0,
4609 sizeof(cmd), &cmd);
4610 if (err)
4611 return err;
4612
4613 ba_enable = !!(cmd.flags &
4614 htole16(IWM_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
4615 return iwm_update_beacon_abort(sc, in, ba_enable);
4616 }
4617
4618 static int
4619 iwm_power_update_device(struct iwm_softc *sc)
4620 {
4621 struct iwm_device_power_cmd cmd = {
4622 #ifdef notyet
4623 .flags = htole16(IWM_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK),
4624 #else
4625 .flags = 0,
4626 #endif
4627 };
4628
4629 if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
4630 return 0;
4631
4632 cmd.flags |= htole16(IWM_DEVICE_POWER_FLAGS_CAM_MSK);
4633 DPRINTF(("Sending device power command with flags = 0x%X\n",
4634 cmd.flags));
4635
4636 return iwm_send_cmd_pdu(sc, IWM_POWER_TABLE_CMD, 0, sizeof(cmd), &cmd);
4637 }
4638
4639 #ifdef notyet
4640 static int
4641 iwm_enable_beacon_filter(struct iwm_softc *sc, struct iwm_node *in)
4642 {
4643 struct iwm_beacon_filter_cmd cmd = {
4644 IWM_BF_CMD_CONFIG_DEFAULTS,
4645 .bf_enable_beacon_filter = htole32(1),
4646 };
4647 int err;
4648
4649 iwm_beacon_filter_set_cqm_params(sc, in, &cmd);
4650 err = iwm_beacon_filter_send_cmd(sc, &cmd);
4651
4652 if (err == 0)
4653 sc->sc_bf.bf_enabled = 1;
4654
4655 return err;
4656 }
4657 #endif
4658
4659 static int
4660 iwm_disable_beacon_filter(struct iwm_softc *sc)
4661 {
4662 struct iwm_beacon_filter_cmd cmd;
4663 int err;
4664
4665 memset(&cmd, 0, sizeof(cmd));
4666 if ((sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_BF_UPDATED) == 0)
4667 return 0;
4668
4669 err = iwm_beacon_filter_send_cmd(sc, &cmd);
4670 if (err == 0)
4671 sc->sc_bf.bf_enabled = 0;
4672
4673 return err;
4674 }
4675
4676 static int
4677 iwm_add_sta_cmd(struct iwm_softc *sc, struct iwm_node *in, int update)
4678 {
4679 struct iwm_add_sta_cmd_v7 add_sta_cmd;
4680 int err;
4681 uint32_t status;
4682
4683 memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
4684
4685 add_sta_cmd.sta_id = IWM_STATION_ID;
4686 add_sta_cmd.mac_id_n_color
4687 = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
4688 if (!update) {
4689 int ac;
4690 for (ac = 0; ac < WME_NUM_AC; ac++) {
4691 add_sta_cmd.tfd_queue_msk |=
4692 htole32(__BIT(iwm_ac_to_tx_fifo[ac]));
4693 }
4694 IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
4695 }
4696 add_sta_cmd.add_modify = update ? 1 : 0;
4697 add_sta_cmd.station_flags_msk
4698 |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
4699 add_sta_cmd.tid_disable_tx = htole16(0xffff);
4700 if (update)
4701 add_sta_cmd.modify_mask |= (IWM_STA_MODIFY_TID_DISABLE_TX);
4702
4703 #ifndef IEEE80211_NO_HT
4704 if (in->in_ni.ni_flags & IEEE80211_NODE_HT) {
4705 add_sta_cmd.station_flags_msk
4706 |= htole32(IWM_STA_FLG_MAX_AGG_SIZE_MSK |
4707 IWM_STA_FLG_AGG_MPDU_DENS_MSK);
4708
4709 add_sta_cmd.station_flags
4710 |= htole32(IWM_STA_FLG_MAX_AGG_SIZE_64K);
4711 switch (ic->ic_ampdu_params & IEEE80211_AMPDU_PARAM_SS) {
4712 case IEEE80211_AMPDU_PARAM_SS_2:
4713 add_sta_cmd.station_flags
4714 |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_2US);
4715 break;
4716 case IEEE80211_AMPDU_PARAM_SS_4:
4717 add_sta_cmd.station_flags
4718 |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_4US);
4719 break;
4720 case IEEE80211_AMPDU_PARAM_SS_8:
4721 add_sta_cmd.station_flags
4722 |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_8US);
4723 break;
4724 case IEEE80211_AMPDU_PARAM_SS_16:
4725 add_sta_cmd.station_flags
4726 |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_16US);
4727 break;
4728 default:
4729 break;
4730 }
4731 }
4732 #endif
4733
4734 status = IWM_ADD_STA_SUCCESS;
4735 err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(add_sta_cmd),
4736 &add_sta_cmd, &status);
4737 if (err == 0 && status != IWM_ADD_STA_SUCCESS)
4738 err = EIO;
4739
4740 return err;
4741 }
4742
4743 static int
4744 iwm_add_aux_sta(struct iwm_softc *sc)
4745 {
4746 struct iwm_add_sta_cmd_v7 cmd;
4747 int err;
4748 uint32_t status;
4749
4750 err = iwm_enable_txq(sc, 0, IWM_AUX_QUEUE, IWM_TX_FIFO_MCAST);
4751 if (err)
4752 return err;
4753
4754 memset(&cmd, 0, sizeof(cmd));
4755 cmd.sta_id = IWM_AUX_STA_ID;
4756 cmd.mac_id_n_color =
4757 htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_MAC_INDEX_AUX, 0));
4758 cmd.tfd_queue_msk = htole32(1 << IWM_AUX_QUEUE);
4759 cmd.tid_disable_tx = htole16(0xffff);
4760
4761 status = IWM_ADD_STA_SUCCESS;
4762 err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(cmd), &cmd,
4763 &status);
4764 if (err == 0 && status != IWM_ADD_STA_SUCCESS)
4765 err = EIO;
4766
4767 return err;
4768 }
4769
4770 #define IWM_PLCP_QUIET_THRESH 1
4771 #define IWM_ACTIVE_QUIET_TIME 10
4772 #define LONG_OUT_TIME_PERIOD 600
4773 #define SHORT_OUT_TIME_PERIOD 200
4774 #define SUSPEND_TIME_PERIOD 100
4775
4776 static uint16_t
4777 iwm_scan_rx_chain(struct iwm_softc *sc)
4778 {
4779 uint16_t rx_chain;
4780 uint8_t rx_ant;
4781
4782 rx_ant = iwm_fw_valid_rx_ant(sc);
4783 rx_chain = rx_ant << IWM_PHY_RX_CHAIN_VALID_POS;
4784 rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
4785 rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_SEL_POS;
4786 rx_chain |= 0x1 << IWM_PHY_RX_CHAIN_DRIVER_FORCE_POS;
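/*
 * A reading of the bits set above (see the IWM_PHY_RX_CHAIN_*
 * definitions): all valid RX antennas are marked usable and forced
 * for both SISO and MIMO reception, and the driver-force bit makes
 * the firmware take this selection as-is.
 */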
4787 return htole16(rx_chain);
4788 }
4789
4790 static uint32_t
4791 iwm_scan_rate_n_flags(struct iwm_softc *sc, int flags, int no_cck)
4792 {
4793 uint32_t tx_ant;
4794 int i, ind;
4795
4796 for (i = 0, ind = sc->sc_scan_last_antenna;
4797 i < IWM_RATE_MCS_ANT_NUM; i++) {
4798 ind = (ind + 1) % IWM_RATE_MCS_ANT_NUM;
4799 if (iwm_fw_valid_tx_ant(sc) & (1 << ind)) {
4800 sc->sc_scan_last_antenna = ind;
4801 break;
4802 }
4803 }
4804 tx_ant = (1 << sc->sc_scan_last_antenna) << IWM_RATE_MCS_ANT_POS;
4805
4806 if ((flags & IEEE80211_CHAN_2GHZ) && !no_cck)
4807 return htole32(IWM_RATE_1M_PLCP | IWM_RATE_MCS_CCK_MSK |
4808 tx_ant);
4809 else
4810 return htole32(IWM_RATE_6M_PLCP | tx_ant);
4811 }
4812
4813 #ifdef notyet
4814 /*
4815 * If req->n_ssids > 0, it means we should do an active scan.
4816 * In case of active scan w/o directed scan, we receive a zero-length SSID
4817 * just to notify that this scan is active and not passive.
4818 * In order to notify the FW of the number of SSIDs we wish to scan (including
4819 * the zero-length one), we need to set the corresponding bits in chan->type,
4820 * one for each SSID, and set the active bit (first). Since the first SSID
4821 * is already included in the probe template, we only need to set
4822 * req->n_ssids - 1 bits in addition to the first bit.
4823 */
4824 static uint16_t
4825 iwm_get_active_dwell(struct iwm_softc *sc, int flags, int n_ssids)
4826 {
4827 if (flags & IEEE80211_CHAN_2GHZ)
4828 return 30 + 3 * (n_ssids + 1);
4829 return 20 + 2 * (n_ssids + 1);
4830 }
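/*
 * For instance, an active 2 GHz scan with one directed SSID dwells
 * 30 + 3 * (1 + 1) = 36 TU per channel (the zero-length wildcard
 * SSID counts too), while a passive 2 GHz scan below dwells
 * 100 + 20 = 120 TU.
 */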
4831
4832 static uint16_t
4833 iwm_get_passive_dwell(struct iwm_softc *sc, int flags)
4834 {
4835 return (flags & IEEE80211_CHAN_2GHZ) ? 100 + 20 : 100 + 10;
4836 }
4837 #endif
4838
4839 static uint8_t
4840 iwm_lmac_scan_fill_channels(struct iwm_softc *sc,
4841 struct iwm_scan_channel_cfg_lmac *chan, int n_ssids)
4842 {
4843 struct ieee80211com *ic = &sc->sc_ic;
4844 struct ieee80211_channel *c;
4845 uint8_t nchan;
4846
4847 for (nchan = 0, c = &ic->ic_channels[1];
4848 c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
4849 nchan < sc->sc_capa_n_scan_channels;
4850 c++) {
4851 if (c->ic_flags == 0)
4852 continue;
4853
4854 chan->channel_num = htole16(ieee80211_mhz2ieee(c->ic_freq, 0));
4855 chan->iter_count = htole16(1);
4856 chan->iter_interval = htole32(0);
4857 chan->flags = htole32(IWM_UNIFIED_SCAN_CHANNEL_PARTIAL);
4858 chan->flags |= htole32(IWM_SCAN_CHANNEL_NSSIDS(n_ssids));
4859 if (!IEEE80211_IS_CHAN_PASSIVE(c) && n_ssids != 0)
4860 chan->flags |= htole32(IWM_SCAN_CHANNEL_TYPE_ACTIVE);
4861 chan++;
4862 nchan++;
4863 }
4864
4865 return nchan;
4866 }
4867
4868 static uint8_t
4869 iwm_umac_scan_fill_channels(struct iwm_softc *sc,
4870 struct iwm_scan_channel_cfg_umac *chan, int n_ssids)
4871 {
4872 struct ieee80211com *ic = &sc->sc_ic;
4873 struct ieee80211_channel *c;
4874 uint8_t nchan;
4875
4876 for (nchan = 0, c = &ic->ic_channels[1];
4877 c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
4878 nchan < sc->sc_capa_n_scan_channels;
4879 c++) {
4880 if (c->ic_flags == 0)
4881 continue;
4882
4883 chan->channel_num = ieee80211_mhz2ieee(c->ic_freq, 0);
4884 chan->iter_count = 1;
4885 chan->iter_interval = htole16(0);
4886 chan->flags = htole32(IWM_SCAN_CHANNEL_UMAC_NSSIDS(n_ssids));
4887 chan++;
4888 nchan++;
4889 }
4890
4891 return nchan;
4892 }
4893
4894 static int
4895 iwm_fill_probe_req(struct iwm_softc *sc, struct iwm_scan_probe_req *preq)
4896 {
4897 struct ieee80211com *ic = &sc->sc_ic;
4898 struct ieee80211_frame *wh = (struct ieee80211_frame *)preq->buf;
4899 struct ieee80211_rateset *rs;
4900 size_t remain = sizeof(preq->buf);
4901 uint8_t *frm, *pos;
4902
4903 memset(preq, 0, sizeof(*preq));
4904
4905 if (remain < sizeof(*wh) + 2 + ic->ic_des_esslen)
4906 return ENOBUFS;
4907
4908 /*
4909 * Build a probe request frame. Most of the following code is a
4910 * copy & paste of what is done in net80211.
4911 */
4912 wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
4913 IEEE80211_FC0_SUBTYPE_PROBE_REQ;
4914 wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
4915 IEEE80211_ADDR_COPY(wh->i_addr1, etherbroadcastaddr);
4916 IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_myaddr);
4917 IEEE80211_ADDR_COPY(wh->i_addr3, etherbroadcastaddr);
4918 *(uint16_t *)&wh->i_dur[0] = 0; /* filled by HW */
4919 *(uint16_t *)&wh->i_seq[0] = 0; /* filled by HW */
4920
4921 frm = (uint8_t *)(wh + 1);
4922 frm = ieee80211_add_ssid(frm, ic->ic_des_essid, ic->ic_des_esslen);
4923
4924 /* Tell the firmware where the MAC header is. */
4925 preq->mac_header.offset = 0;
4926 preq->mac_header.len = htole16(frm - (uint8_t *)wh);
4927 remain -= frm - (uint8_t *)wh;
4928
4929 /* Fill in 2GHz IEs and tell firmware where they are. */
4930 rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
4931 if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
4932 if (remain < 4 + rs->rs_nrates)
4933 return ENOBUFS;
4934 } else if (remain < 2 + rs->rs_nrates)
4935 return ENOBUFS;
4936 preq->band_data[0].offset = htole16(frm - (uint8_t *)wh);
4937 pos = frm;
4938 frm = ieee80211_add_rates(frm, rs);
4939 if (rs->rs_nrates > IEEE80211_RATE_SIZE)
4940 frm = ieee80211_add_xrates(frm, rs);
4941 preq->band_data[0].len = htole16(frm - pos);
4942 remain -= frm - pos;
4943
4944 if (isset(sc->sc_enabled_capa,
4945 IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)) {
4946 if (remain < 3)
4947 return ENOBUFS;
4948 *frm++ = IEEE80211_ELEMID_DSPARMS;
4949 *frm++ = 1;
4950 *frm++ = 0;
4951 remain -= 3;
4952 }
4953
4954 if (sc->sc_nvm.sku_cap_band_52GHz_enable) {
4955 /* Fill in 5GHz IEs. */
4956 rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
4957 if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
4958 if (remain < 4 + rs->rs_nrates)
4959 return ENOBUFS;
4960 } else if (remain < 2 + rs->rs_nrates)
4961 return ENOBUFS;
4962 preq->band_data[1].offset = htole16(frm - (uint8_t *)wh);
4963 pos = frm;
4964 frm = ieee80211_add_rates(frm, rs);
4965 if (rs->rs_nrates > IEEE80211_RATE_SIZE)
4966 frm = ieee80211_add_xrates(frm, rs);
4967 preq->band_data[1].len = htole16(frm - pos);
4968 remain -= frm - pos;
4969 }
4970
4971 #ifndef IEEE80211_NO_HT
4972 /* Send 11n IEs on both 2GHz and 5GHz bands. */
4973 preq->common_data.offset = htole16(frm - (uint8_t *)wh);
4974 pos = frm;
4975 if (ic->ic_flags & IEEE80211_F_HTON) {
4976 if (remain < 28)
4977 return ENOBUFS;
4978 frm = ieee80211_add_htcaps(frm, ic);
4979 /* XXX add WME info? */
4980 }
4981 #endif
4982
4983 preq->common_data.len = htole16(frm - pos);
4984
4985 return 0;
4986 }
4987
4988 static int
4989 iwm_lmac_scan(struct iwm_softc *sc)
4990 {
4991 struct ieee80211com *ic = &sc->sc_ic;
4992 struct iwm_host_cmd hcmd = {
4993 .id = IWM_SCAN_OFFLOAD_REQUEST_CMD,
4994 .len = { 0, },
4995 .data = { NULL, },
4996 .flags = 0,
4997 };
4998 struct iwm_scan_req_lmac *req;
4999 size_t req_len;
5000 int err;
5001
5002 DPRINTF(("%s: %s\n", DEVNAME(sc), __func__));
5003
5004 req_len = sizeof(struct iwm_scan_req_lmac) +
5005 (sizeof(struct iwm_scan_channel_cfg_lmac) *
5006 sc->sc_capa_n_scan_channels) + sizeof(struct iwm_scan_probe_req);
5007 if (req_len > IWM_MAX_CMD_PAYLOAD_SIZE)
5008 return ENOMEM;
5009 req = kmem_zalloc(req_len, KM_SLEEP);
5010 if (req == NULL)
5011 return ENOMEM;
5012
5013 hcmd.len[0] = (uint16_t)req_len;
5014 hcmd.data[0] = (void *)req;
5015
5016 /* These timings correspond to iwlwifi's UNASSOC scan. */
5017 req->active_dwell = 10;
5018 req->passive_dwell = 110;
5019 req->fragmented_dwell = 44;
5020 req->extended_dwell = 90;
5021 req->max_out_time = 0;
5022 req->suspend_time = 0;
5023
5024 req->scan_prio = htole32(IWM_SCAN_PRIORITY_HIGH);
5025 req->rx_chain_select = iwm_scan_rx_chain(sc);
5026 req->iter_num = htole32(1);
5027 req->delay = 0;
5028
5029 req->scan_flags = htole32(IWM_LMAC_SCAN_FLAG_PASS_ALL |
5030 IWM_LMAC_SCAN_FLAG_ITER_COMPLETE |
5031 IWM_LMAC_SCAN_FLAG_EXTENDED_DWELL);
5032 if (ic->ic_des_esslen == 0)
5033 req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAG_PASSIVE);
5034 else
5035 req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAG_PRE_CONNECTION);
5036 if (isset(sc->sc_enabled_capa,
5037 IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
5038 req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAGS_RRM_ENABLED);
5039
5040 req->flags = htole32(IWM_PHY_BAND_24);
5041 if (sc->sc_nvm.sku_cap_band_52GHz_enable)
5042 req->flags |= htole32(IWM_PHY_BAND_5);
5043 req->filter_flags =
5044 htole32(IWM_MAC_FILTER_ACCEPT_GRP | IWM_MAC_FILTER_IN_BEACON);
5045
5046 /* Tx flags 2 GHz. */
5047 req->tx_cmd[0].tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
5048 IWM_TX_CMD_FLG_BT_DIS);
5049 req->tx_cmd[0].rate_n_flags =
5050 iwm_scan_rate_n_flags(sc, IEEE80211_CHAN_2GHZ, 1/*XXX*/);
5051 req->tx_cmd[0].sta_id = IWM_AUX_STA_ID;
5052
5053 /* Tx flags 5 GHz. */
5054 req->tx_cmd[1].tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
5055 IWM_TX_CMD_FLG_BT_DIS);
5056 req->tx_cmd[1].rate_n_flags =
5057 iwm_scan_rate_n_flags(sc, IEEE80211_CHAN_5GHZ, 1/*XXX*/);
5058 req->tx_cmd[1].sta_id = IWM_AUX_STA_ID;
5059
5060 /* Check if we're doing an active directed scan. */
5061 if (ic->ic_des_esslen != 0) {
5062 req->direct_scan[0].id = IEEE80211_ELEMID_SSID;
5063 req->direct_scan[0].len = ic->ic_des_esslen;
5064 memcpy(req->direct_scan[0].ssid, ic->ic_des_essid,
5065 ic->ic_des_esslen);
5066 }
5067
5068 req->n_channels = iwm_lmac_scan_fill_channels(sc,
5069 (struct iwm_scan_channel_cfg_lmac *)req->data,
5070 ic->ic_des_esslen != 0);
5071
5072 err = iwm_fill_probe_req(sc,
5073 (struct iwm_scan_probe_req *)(req->data +
5074 (sizeof(struct iwm_scan_channel_cfg_lmac) *
5075 sc->sc_capa_n_scan_channels)));
5076 if (err) {
5077 kmem_free(req, req_len);
5078 return err;
5079 }
5080
5081 /* Specify the scan plan: We'll do one iteration. */
5082 req->schedule[0].iterations = 1;
5083 req->schedule[0].full_scan_mul = 1;
5084
5085 /* Disable EBS. */
5086 req->channel_opt[0].non_ebs_ratio = 1;
5087 req->channel_opt[1].non_ebs_ratio = 1;
5088
5089 err = iwm_send_cmd(sc, &hcmd);
5090 kmem_free(req, req_len);
5091 return err;
5092 }
5093
5094 static int
5095 iwm_config_umac_scan(struct iwm_softc *sc)
5096 {
5097 struct ieee80211com *ic = &sc->sc_ic;
5098 struct iwm_scan_config *scan_config;
5099 int err, nchan;
5100 size_t cmd_size;
5101 struct ieee80211_channel *c;
5102 struct iwm_host_cmd hcmd = {
5103 .id = iwm_cmd_id(IWM_SCAN_CFG_CMD, IWM_ALWAYS_LONG_GROUP, 0),
5104 .flags = 0,
5105 };
5106 static const uint32_t rates = (IWM_SCAN_CONFIG_RATE_1M |
5107 IWM_SCAN_CONFIG_RATE_2M | IWM_SCAN_CONFIG_RATE_5M |
5108 IWM_SCAN_CONFIG_RATE_11M | IWM_SCAN_CONFIG_RATE_6M |
5109 IWM_SCAN_CONFIG_RATE_9M | IWM_SCAN_CONFIG_RATE_12M |
5110 IWM_SCAN_CONFIG_RATE_18M | IWM_SCAN_CONFIG_RATE_24M |
5111 IWM_SCAN_CONFIG_RATE_36M | IWM_SCAN_CONFIG_RATE_48M |
5112 IWM_SCAN_CONFIG_RATE_54M);
5113
5114 cmd_size = sizeof(*scan_config) + sc->sc_capa_n_scan_channels;
5115
5116 scan_config = kmem_zalloc(cmd_size, KM_SLEEP);
5117 if (scan_config == NULL)
5118 return ENOMEM;
5119
5120 scan_config->tx_chains = htole32(iwm_fw_valid_tx_ant(sc));
5121 scan_config->rx_chains = htole32(iwm_fw_valid_rx_ant(sc));
5122 scan_config->legacy_rates = htole32(rates |
5123 IWM_SCAN_CONFIG_SUPPORTED_RATE(rates));
5124
5125 /* These timings correspond to iwlwifi's UNASSOC scan. */
5126 scan_config->dwell_active = 10;
5127 scan_config->dwell_passive = 110;
5128 scan_config->dwell_fragmented = 44;
5129 scan_config->dwell_extended = 90;
5130 scan_config->out_of_channel_time = htole32(0);
5131 scan_config->suspend_time = htole32(0);
5132
5133 IEEE80211_ADDR_COPY(scan_config->mac_addr, sc->sc_ic.ic_myaddr);
5134
5135 scan_config->bcast_sta_id = IWM_AUX_STA_ID;
5136 scan_config->channel_flags = IWM_CHANNEL_FLAG_EBS |
5137 IWM_CHANNEL_FLAG_ACCURATE_EBS | IWM_CHANNEL_FLAG_EBS_ADD |
5138 IWM_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE;
5139
5140 for (c = &ic->ic_channels[1], nchan = 0;
5141 c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
5142 nchan < sc->sc_capa_n_scan_channels; c++) {
5143 if (c->ic_flags == 0)
5144 continue;
5145 scan_config->channel_array[nchan++] =
5146 ieee80211_mhz2ieee(c->ic_freq, 0);
5147 }
5148
5149 scan_config->flags = htole32(IWM_SCAN_CONFIG_FLAG_ACTIVATE |
5150 IWM_SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
5151 IWM_SCAN_CONFIG_FLAG_SET_TX_CHAINS |
5152 IWM_SCAN_CONFIG_FLAG_SET_RX_CHAINS |
5153 IWM_SCAN_CONFIG_FLAG_SET_AUX_STA_ID |
5154 IWM_SCAN_CONFIG_FLAG_SET_ALL_TIMES |
5155 IWM_SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
5156 IWM_SCAN_CONFIG_FLAG_SET_MAC_ADDR |
5157 IWM_SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS|
5158 IWM_SCAN_CONFIG_N_CHANNELS(nchan) |
5159 IWM_SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED);
5160
5161 hcmd.data[0] = scan_config;
5162 hcmd.len[0] = cmd_size;
5163
5164 err = iwm_send_cmd(sc, &hcmd);
5165 kmem_free(scan_config, cmd_size);
5166 return err;
5167 }
5168
5169 static int
5170 iwm_umac_scan(struct iwm_softc *sc)
5171 {
5172 struct ieee80211com *ic = &sc->sc_ic;
5173 struct iwm_host_cmd hcmd = {
5174 .id = iwm_cmd_id(IWM_SCAN_REQ_UMAC, IWM_ALWAYS_LONG_GROUP, 0),
5175 .len = { 0, },
5176 .data = { NULL, },
5177 .flags = 0,
5178 };
5179 struct iwm_scan_req_umac *req;
5180 struct iwm_scan_req_umac_tail *tail;
5181 size_t req_len;
5182 int err;
5183
5184 DPRINTF(("%s: %s\n", DEVNAME(sc), __func__));
5185
5186 req_len = sizeof(struct iwm_scan_req_umac) +
5187 (sizeof(struct iwm_scan_channel_cfg_umac) *
5188 sc->sc_capa_n_scan_channels) +
5189 sizeof(struct iwm_scan_req_umac_tail);
5190 if (req_len > IWM_MAX_CMD_PAYLOAD_SIZE)
5191 return ENOMEM;
5192 req = kmem_zalloc(req_len, KM_SLEEP);
5193 if (req == NULL)
5194 return ENOMEM;
5195
5196 hcmd.len[0] = (uint16_t)req_len;
5197 hcmd.data[0] = (void *)req;
5198
5199 /* These timings correspond to iwlwifi's UNASSOC scan. */
5200 req->active_dwell = 10;
5201 req->passive_dwell = 110;
5202 req->fragmented_dwell = 44;
5203 req->extended_dwell = 90;
5204 req->max_out_time = 0;
5205 req->suspend_time = 0;
5206
5207 req->scan_priority = htole32(IWM_SCAN_PRIORITY_HIGH);
5208 req->ooc_priority = htole32(IWM_SCAN_PRIORITY_HIGH);
5209
5210 req->n_channels = iwm_umac_scan_fill_channels(sc,
5211 (struct iwm_scan_channel_cfg_umac *)req->data,
5212 ic->ic_des_esslen != 0);
5213
5214 req->general_flags = htole32(IWM_UMAC_SCAN_GEN_FLAGS_PASS_ALL |
5215 IWM_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE |
5216 IWM_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL);
5217
5218 tail = (struct iwm_scan_req_umac_tail *)(req->data +
5219 sizeof(struct iwm_scan_channel_cfg_umac) *
5220 sc->sc_capa_n_scan_channels);
5221
5222 /* Check if we're doing an active directed scan. */
5223 if (ic->ic_des_esslen != 0) {
5224 tail->direct_scan[0].id = IEEE80211_ELEMID_SSID;
5225 tail->direct_scan[0].len = ic->ic_des_esslen;
5226 memcpy(tail->direct_scan[0].ssid, ic->ic_des_essid,
5227 ic->ic_des_esslen);
5228 req->general_flags |=
5229 htole32(IWM_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT);
5230 } else
5231 req->general_flags |= htole32(IWM_UMAC_SCAN_GEN_FLAGS_PASSIVE);
5232
5233 if (isset(sc->sc_enabled_capa,
5234 IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
5235 req->general_flags |=
5236 htole32(IWM_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED);
5237
5238 err = iwm_fill_probe_req(sc, &tail->preq);
5239 if (err) {
5240 kmem_free(req, req_len);
5241 return err;
5242 }
5243
5244 /* Specify the scan plan: We'll do one iteration. */
5245 tail->schedule[0].interval = 0;
5246 tail->schedule[0].iter_count = 1;
5247
5248 err = iwm_send_cmd(sc, &hcmd);
5249 kmem_free(req, req_len);
5250 return err;
5251 }
5252
5253 static uint8_t
5254 iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
5255 {
5256 int i;
5257 uint8_t rval;
5258
5259 for (i = 0; i < rs->rs_nrates; i++) {
5260 rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
5261 if (rval == iwm_rates[ridx].rate)
5262 return rs->rs_rates[i];
5263 }
5264 return 0;
5265 }
5266
5267 static void
5268 iwm_ack_rates(struct iwm_softc *sc, struct iwm_node *in, int *cck_rates,
5269 int *ofdm_rates)
5270 {
5271 struct ieee80211_node *ni = &in->in_ni;
5272 struct ieee80211_rateset *rs = &ni->ni_rates;
5273 int lowest_present_ofdm = -1;
5274 int lowest_present_cck = -1;
5275 uint8_t cck = 0;
5276 uint8_t ofdm = 0;
5277 int i;
5278
5279 if (ni->ni_chan == IEEE80211_CHAN_ANYC ||
5280 IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) {
5281 for (i = IWM_FIRST_CCK_RATE; i < IWM_FIRST_OFDM_RATE; i++) {
5282 if ((iwm_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
5283 continue;
5284 cck |= (1 << i);
5285 if (lowest_present_cck == -1 || lowest_present_cck > i)
5286 lowest_present_cck = i;
5287 }
5288 }
5289 for (i = IWM_FIRST_OFDM_RATE; i <= IWM_LAST_NON_HT_RATE; i++) {
5290 if ((iwm_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
5291 continue;
5292 ofdm |= (1 << (i - IWM_FIRST_OFDM_RATE));
5293 if (lowest_present_ofdm == -1 || lowest_present_ofdm > i)
5294 lowest_present_ofdm = i;
5295 }
5296
5297 /*
5298 * Now we've got the basic rates as bitmaps in the ofdm and cck
5299 * variables. This isn't sufficient though, as there might not
5300 * be all the right rates in the bitmap. E.g. if the only basic
5301 * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
5302 * and 6 Mbps because the 802.11-2007 standard says in 9.6:
5303 *
5304 * [...] a STA responding to a received frame shall transmit
5305 * its Control Response frame [...] at the highest rate in the
5306 * BSSBasicRateSet parameter that is less than or equal to the
5307 * rate of the immediately previous frame in the frame exchange
5308 * sequence ([...]) and that is of the same modulation class
5309 * ([...]) as the received frame. If no rate contained in the
5310 * BSSBasicRateSet parameter meets these conditions, then the
5311 * control frame sent in response to a received frame shall be
5312 * transmitted at the highest mandatory rate of the PHY that is
5313 * less than or equal to the rate of the received frame, and
5314 * that is of the same modulation class as the received frame.
5315 *
5316 * As a consequence, we need to add all mandatory rates that are
5317 * lower than all of the basic rates to these bitmaps.
5318 */
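/*
 * Example: if an AP's only basic OFDM rates were 24 and 36 Mb/s,
 * the fix-ups below would add the mandatory 12 Mb/s and 6 Mb/s
 * rates, so a control response can always be sent at a rate no
 * faster than the frame it answers.
 */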
5319
5320 if (IWM_RATE_24M_INDEX < lowest_present_ofdm)
5321 ofdm |= IWM_RATE_BIT_MSK(24) >> IWM_FIRST_OFDM_RATE;
5322 if (IWM_RATE_12M_INDEX < lowest_present_ofdm)
5323 ofdm |= IWM_RATE_BIT_MSK(12) >> IWM_FIRST_OFDM_RATE;
5324 /* 6M already there or needed so always add */
5325 ofdm |= IWM_RATE_BIT_MSK(6) >> IWM_FIRST_OFDM_RATE;
5326
5327 /*
5328 * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
5329 * Note, however:
5330 * - if no CCK rates are basic, it must be ERP since there must
5331 * be some basic rates at all, so they're OFDM => ERP PHY
5332 * (or we're in 5 GHz, and the cck bitmap will never be used)
5333 * - if 11M is a basic rate, it must be ERP as well, so add 5.5M
5334 * - if 5.5M is basic, 1M and 2M are mandatory
5335 * - if 2M is basic, 1M is mandatory
5336 * - if 1M is basic, that's the only valid ACK rate.
5337 * As a consequence, it's not as complicated as it sounds, just add
5338 * any lower rates to the ACK rate bitmap.
5339 */
5340 if (IWM_RATE_11M_INDEX < lowest_present_cck)
5341 cck |= IWM_RATE_BIT_MSK(11) >> IWM_FIRST_CCK_RATE;
5342 if (IWM_RATE_5M_INDEX < lowest_present_cck)
5343 cck |= IWM_RATE_BIT_MSK(5) >> IWM_FIRST_CCK_RATE;
5344 if (IWM_RATE_2M_INDEX < lowest_present_cck)
5345 cck |= IWM_RATE_BIT_MSK(2) >> IWM_FIRST_CCK_RATE;
5346 /* 1M already there or needed so always add */
5347 cck |= IWM_RATE_BIT_MSK(1) >> IWM_FIRST_CCK_RATE;
5348
5349 *cck_rates = cck;
5350 *ofdm_rates = ofdm;
5351 }
5352
5353 static void
5354 iwm_mac_ctxt_cmd_common(struct iwm_softc *sc, struct iwm_node *in,
5355 struct iwm_mac_ctx_cmd *cmd, uint32_t action, int assoc)
5356 {
5357 #define IWM_EXP2(x) ((1 << (x)) - 1) /* CWmin = 2^ECWmin - 1 */
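/* For example, a logcwmin of 4 yields a CWmin of 2^4 - 1 = 15 slots. */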
5358 struct ieee80211com *ic = &sc->sc_ic;
5359 struct ieee80211_node *ni = ic->ic_bss;
5360 int cck_ack_rates, ofdm_ack_rates;
5361 int i;
5362
5363 cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
5364 in->in_color));
5365 cmd->action = htole32(action);
5366
5367 cmd->mac_type = htole32(IWM_FW_MAC_TYPE_BSS_STA);
5368 cmd->tsf_id = htole32(IWM_TSF_ID_A);
5369
5370 IEEE80211_ADDR_COPY(cmd->node_addr, ic->ic_myaddr);
5371 IEEE80211_ADDR_COPY(cmd->bssid_addr, ni->ni_bssid);
5372
5373 iwm_ack_rates(sc, in, &cck_ack_rates, &ofdm_ack_rates);
5374 cmd->cck_rates = htole32(cck_ack_rates);
5375 cmd->ofdm_rates = htole32(ofdm_ack_rates);
5376
5377 cmd->cck_short_preamble
5378 = htole32((ic->ic_flags & IEEE80211_F_SHPREAMBLE)
5379 ? IWM_MAC_FLG_SHORT_PREAMBLE : 0);
5380 cmd->short_slot
5381 = htole32((ic->ic_flags & IEEE80211_F_SHSLOT)
5382 ? IWM_MAC_FLG_SHORT_SLOT : 0);
5383
5384 for (i = 0; i < WME_NUM_AC; i++) {
5385 struct wmeParams *wmep = &ic->ic_wme.wme_params[i];
5386 int txf = iwm_ac_to_tx_fifo[i];
5387
5388 cmd->ac[txf].cw_min = htole16(IWM_EXP2(wmep->wmep_logcwmin));
5389 cmd->ac[txf].cw_max = htole16(IWM_EXP2(wmep->wmep_logcwmax));
5390 cmd->ac[txf].aifsn = wmep->wmep_aifsn;
5391 cmd->ac[txf].fifos_mask = (1 << txf);
5392 cmd->ac[txf].edca_txop = htole16(wmep->wmep_txopLimit * 32);
5393 }
5394 if (ni->ni_flags & IEEE80211_NODE_QOS)
5395 cmd->qos_flags |= htole32(IWM_MAC_QOS_FLG_UPDATE_EDCA);
5396
5397 #ifndef IEEE80211_NO_HT
5398 if (ni->ni_flags & IEEE80211_NODE_HT) {
5399 enum ieee80211_htprot htprot =
5400 (ni->ni_htop1 & IEEE80211_HTOP1_PROT_MASK);
5401 switch (htprot) {
5402 case IEEE80211_HTPROT_NONE:
5403 break;
5404 case IEEE80211_HTPROT_NONMEMBER:
5405 case IEEE80211_HTPROT_NONHT_MIXED:
5406 cmd->protection_flags |=
5407 htole32(IWM_MAC_PROT_FLG_HT_PROT);
/* FALLTHROUGH: mixed-mode protection also needs FAT protection. */
5408 case IEEE80211_HTPROT_20MHZ:
5409 cmd->protection_flags |=
5410 htole32(IWM_MAC_PROT_FLG_HT_PROT |
5411 IWM_MAC_PROT_FLG_FAT_PROT);
5412 break;
5413 default:
5414 break;
5415 }
5416
5417 cmd->qos_flags |= htole32(IWM_MAC_QOS_FLG_TGN);
5418 }
5419 #endif
5420
5421 if (ic->ic_flags & IEEE80211_F_USEPROT)
5422 cmd->protection_flags |= htole32(IWM_MAC_PROT_FLG_TGG_PROTECT);
5423
5424 cmd->filter_flags = htole32(IWM_MAC_FILTER_ACCEPT_GRP);
5425 #undef IWM_EXP2
5426 }
5427
5428 static void
5429 iwm_mac_ctxt_cmd_fill_sta(struct iwm_softc *sc, struct iwm_node *in,
5430 struct iwm_mac_data_sta *sta, int assoc)
5431 {
5432 struct ieee80211_node *ni = &in->in_ni;
5433 uint32_t dtim_off;
5434 uint64_t tsf;
5435
5436 dtim_off = ni->ni_dtim_count * ni->ni_intval * IEEE80211_DUR_TU;
5437 tsf = le64toh(ni->ni_tstamp.tsf);
5438
5439 sta->is_assoc = htole32(assoc);
5440 sta->dtim_time = htole32(ni->ni_rstamp + dtim_off);
5441 sta->dtim_tsf = htole64(tsf + dtim_off);
5442 sta->bi = htole32(ni->ni_intval);
5443 sta->bi_reciprocal = htole32(iwm_reciprocal(ni->ni_intval));
5444 sta->dtim_interval = htole32(ni->ni_intval * ni->ni_dtim_period);
5445 sta->dtim_reciprocal = htole32(iwm_reciprocal(sta->dtim_interval));
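/*
 * The *_reciprocal fields let the firmware trade a division by the
 * (DTIM) beacon interval for a multiplication; iwm_reciprocal()
 * computes roughly 0xffffffff / v (and 0 for v == 0).
 */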
5446 sta->listen_interval = htole32(10);
5447 sta->assoc_id = htole32(ni->ni_associd);
5448 sta->assoc_beacon_arrive_time = htole32(ni->ni_rstamp);
5449 }
5450
5451 static int
5452 iwm_mac_ctxt_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action,
5453 int assoc)
5454 {
5455 struct ieee80211_node *ni = &in->in_ni;
5456 struct iwm_mac_ctx_cmd cmd;
5457
5458 memset(&cmd, 0, sizeof(cmd));
5459
5460 iwm_mac_ctxt_cmd_common(sc, in, &cmd, action, assoc);
5461
5462 /* Allow beacons to pass through as long as we are not associated or we
5463 * do not have DTIM period information. */
5464 if (!assoc || !ni->ni_associd || !ni->ni_dtim_period)
5465 cmd.filter_flags |= htole32(IWM_MAC_FILTER_IN_BEACON);
5466 else
5467 iwm_mac_ctxt_cmd_fill_sta(sc, in, &cmd.sta, assoc);
5468
5469 return iwm_send_cmd_pdu(sc, IWM_MAC_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
5470 }
5471
5472 #define IWM_MISSED_BEACONS_THRESHOLD 8
5473
5474 static void
5475 iwm_rx_missed_beacons_notif(struct iwm_softc *sc,
5476 struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
5477 {
5478 struct iwm_missed_beacons_notif *mb = (void *)pkt->data;
5479
5480 DPRINTF(("missed bcn mac_id=%u, consecutive=%u (%u, %u, %u)\n",
5481 le32toh(mb->mac_id),
5482 le32toh(mb->consec_missed_beacons),
5483 le32toh(mb->consec_missed_beacons_since_last_rx),
5484 le32toh(mb->num_recvd_beacons),
5485 le32toh(mb->num_expected_beacons)));
5486
5487 /*
5488 * TODO: the threshold should be adjusted based on latency conditions,
5489 * and/or in case of a CS flow on one of the other AP vifs.
5490 */
5491 if (le32toh(mb->consec_missed_beacons_since_last_rx) >
5492 IWM_MISSED_BEACONS_THRESHOLD)
5493 ieee80211_beacon_miss(&sc->sc_ic);
5494 }
5495
5496 static int
5497 iwm_update_quotas(struct iwm_softc *sc, struct iwm_node *in)
5498 {
5499 struct iwm_time_quota_cmd cmd;
5500 int i, idx, num_active_macs, quota, quota_rem;
5501 int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
5502 int n_ifs[IWM_MAX_BINDINGS] = {0, };
5503 uint16_t id;
5504
5505 memset(&cmd, 0, sizeof(cmd));
5506
5507 /* currently, PHY ID == binding ID */
5508 if (in) {
5509 id = in->in_phyctxt->id;
5510 KASSERT(id < IWM_MAX_BINDINGS);
5511 colors[id] = in->in_phyctxt->color;
5512
5513 if (1)
5514 n_ifs[id] = 1;
5515 }
5516
5517 /*
5518 * The FW's scheduling session consists of
5519 * IWM_MAX_QUOTA fragments. Divide these fragments
5520 * equally between all the bindings that require quota
5521 */
5522 num_active_macs = 0;
5523 for (i = 0; i < IWM_MAX_BINDINGS; i++) {
5524 cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
5525 num_active_macs += n_ifs[i];
5526 }
5527
5528 quota = 0;
5529 quota_rem = 0;
5530 if (num_active_macs) {
5531 quota = IWM_MAX_QUOTA / num_active_macs;
5532 quota_rem = IWM_MAX_QUOTA % num_active_macs;
5533 }
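	/*
	 * For example, with two active MACs and (hypothetically)
	 * IWM_MAX_QUOTA == 128 fragments, each binding would get a
	 * quota of 64 and quota_rem would be 0.
	 */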
5534
5535 for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
5536 if (colors[i] < 0)
5537 continue;
5538
5539 cmd.quotas[idx].id_and_color =
5540 htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
5541
5542 if (n_ifs[i] <= 0) {
5543 cmd.quotas[idx].quota = htole32(0);
5544 cmd.quotas[idx].max_duration = htole32(0);
5545 } else {
5546 cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
5547 cmd.quotas[idx].max_duration = htole32(0);
5548 }
5549 idx++;
5550 }
5551
5552 /* Give the remainder of the session to the first binding */
5553 cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
5554
5555 return iwm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, 0, sizeof(cmd), &cmd);
5556 }
5557
5558 static int
5559 iwm_auth(struct iwm_softc *sc)
5560 {
5561 struct ieee80211com *ic = &sc->sc_ic;
5562 struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
5563 uint32_t duration;
5564 int err;
5565
5566 err = iwm_sf_config(sc, IWM_SF_FULL_ON);
5567 if (err)
5568 return err;
5569
5570 err = iwm_allow_mcast(sc);
5571 if (err)
5572 return err;
5573
5574 sc->sc_phyctxt[0].channel = in->in_ni.ni_chan;
5575 err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[0], 1, 1,
5576 IWM_FW_CTXT_ACTION_MODIFY, 0);
5577 if (err)
5578 return err;
5579 in->in_phyctxt = &sc->sc_phyctxt[0];
5580
5581 err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD, 0);
5582 if (err) {
5583 aprint_error_dev(sc->sc_dev,
5584 "could not add MAC context (error %d)\n", err);
5585 return err;
5586 }
5587
5588 err = iwm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD);
5589 if (err)
5590 return err;
5591
5592 err = iwm_add_sta_cmd(sc, in, 0);
5593 if (err)
5594 return err;
5595
5596 err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 0);
5597 if (err) {
5598 aprint_error_dev(sc->sc_dev, "failed to update MAC\n");
5599 return err;
5600 }
5601
5602 /*
5603 * Prevent the FW from wandering off channel during association
5604 * by "protecting" the session with a time event.
5605 */
5606 if (in->in_ni.ni_intval)
5607 duration = in->in_ni.ni_intval * 2;
5608 else
5609 duration = IEEE80211_DUR_TU;
5610 iwm_protect_session(sc, in, duration, in->in_ni.ni_intval / 2);
5611 DELAY(100);
5612
5613 return 0;
5614 }
5615
5616 static int
5617 iwm_assoc(struct iwm_softc *sc)
5618 {
5619 struct ieee80211com *ic = &sc->sc_ic;
5620 struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
5621 int err;
5622
5623 err = iwm_add_sta_cmd(sc, in, 1);
5624 if (err)
5625 return err;
5626
5627 return 0;
5628 }
5629
5630 static struct ieee80211_node *
5631 iwm_node_alloc(struct ieee80211_node_table *nt)
5632 {
5633 return malloc(sizeof(struct iwm_node), M_80211_NODE, M_NOWAIT | M_ZERO);
5634 }
5635
5636 static void
5637 iwm_calib_timeout(void *arg)
5638 {
5639 struct iwm_softc *sc = arg;
5640 struct ieee80211com *ic = &sc->sc_ic;
5641 struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
5642 #ifndef IEEE80211_NO_HT
5643 struct ieee80211_node *ni = &in->in_ni;
5644 int otxrate;
5645 #endif
5646 int s;
5647
5648 s = splnet();
5649 if ((ic->ic_fixed_rate == -1
5650 #ifndef IEEE80211_NO_HT
5651 || ic->ic_fixed_mcs == -1
5652 #endif
5653 ) &&
5654 ic->ic_opmode == IEEE80211_M_STA && ic->ic_bss) {
5655 #ifndef IEEE80211_NO_HT
5656 if (ni->ni_flags & IEEE80211_NODE_HT)
5657 otxrate = ni->ni_txmcs;
5658 else
5659 otxrate = ni->ni_txrate;
5660 #endif
5661 ieee80211_amrr_choose(&sc->sc_amrr, &in->in_ni, &in->in_amn);
5662
5663 #ifndef IEEE80211_NO_HT
5664 /*
5665 * If AMRR has chosen a new TX rate we must update
5666 		 * the firmware's LQ rate table from process context.
5667 */
5668 if ((ni->ni_flags & IEEE80211_NODE_HT) &&
5669 otxrate != ni->ni_txmcs)
5670 softint_schedule(sc->setrates_task);
5671 else if (otxrate != ni->ni_txrate)
5672 softint_schedule(sc->setrates_task);
5673 #endif
5674 }
5675 splx(s);
5676
5677 callout_schedule(&sc->sc_calib_to, mstohz(500));
5678 }
5679
5680 #ifndef IEEE80211_NO_HT
5681 static void
5682 iwm_setrates_task(void *arg)
5683 {
5684 struct iwm_softc *sc = arg;
5685 struct ieee80211com *ic = &sc->sc_ic;
5686 struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
5687
5688 /* Update rates table based on new TX rate determined by AMRR. */
5689 iwm_setrates(in);
5690 }
5691
5692 static int
5693 iwm_setrates(struct iwm_node *in)
5694 {
5695 struct ieee80211_node *ni = &in->in_ni;
5696 struct ieee80211com *ic = ni->ni_ic;
5697 struct iwm_softc *sc = IC2IFP(ic)->if_softc;
5698 struct iwm_lq_cmd *lq = &in->in_lq;
5699 struct ieee80211_rateset *rs = &ni->ni_rates;
5700 int i, j, ridx, ridx_min, tab = 0;
5701 #ifndef IEEE80211_NO_HT
5702 int sgi_ok;
5703 #endif
5704 struct iwm_host_cmd cmd = {
5705 .id = IWM_LQ_CMD,
5706 .len = { sizeof(in->in_lq), },
5707 };
5708
5709 memset(lq, 0, sizeof(*lq));
5710 lq->sta_id = IWM_STATION_ID;
5711
5712 if (ic->ic_flags & IEEE80211_F_USEPROT)
5713 lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;
5714
5715 #ifndef IEEE80211_NO_HT
5716 sgi_ok = ((ni->ni_flags & IEEE80211_NODE_HT) &&
5717 (ni->ni_htcaps & IEEE80211_HTCAP_SGI20));
5718 #endif
5719 
5721 /*
5722 * Fill the LQ rate selection table with legacy and/or HT rates
5723 * in descending order, i.e. with the node's current TX rate first.
5724 * In cases where throughput of an HT rate corresponds to a legacy
5725 * rate it makes no sense to add both. We rely on the fact that
5726 * iwm_rates is laid out such that equivalent HT/legacy rates share
5727 * the same IWM_RATE_*_INDEX value. Also, rates not applicable to
5728 * legacy/HT are assumed to be marked with an 'invalid' PLCP value.
5729 */
5730 j = 0;
5731 ridx_min = (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) ?
5732 IWM_RIDX_OFDM : IWM_RIDX_CCK;
5733 for (ridx = IWM_RIDX_MAX; ridx >= ridx_min; ridx--) {
5734 if (j >= __arraycount(lq->rs_table))
5735 break;
5736 tab = 0;
5737 #ifndef IEEE80211_NO_HT
5738 if ((ni->ni_flags & IEEE80211_NODE_HT) &&
5739 iwm_rates[ridx].ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP) {
5740 for (i = ni->ni_txmcs; i >= 0; i--) {
5741 if (isclr(ni->ni_rxmcs, i))
5742 continue;
5743 if (ridx == iwm_mcs2ridx[i]) {
5744 tab = iwm_rates[ridx].ht_plcp;
5745 tab |= IWM_RATE_MCS_HT_MSK;
5746 if (sgi_ok)
5747 tab |= IWM_RATE_MCS_SGI_MSK;
5748 break;
5749 }
5750 }
5751 }
5752 #endif
5753 if (tab == 0 && iwm_rates[ridx].plcp != IWM_RATE_INVM_PLCP) {
5754 for (i = ni->ni_txrate; i >= 0; i--) {
5755 if (iwm_rates[ridx].rate == (rs->rs_rates[i] &
5756 IEEE80211_RATE_VAL)) {
5757 tab = iwm_rates[ridx].plcp;
5758 break;
5759 }
5760 }
5761 }
5762
5763 if (tab == 0)
5764 continue;
5765
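		/* Select the first antenna (antenna A) for this rate entry. */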
5766 tab |= 1 << IWM_RATE_MCS_ANT_POS;
5767 if (IWM_RIDX_IS_CCK(ridx))
5768 tab |= IWM_RATE_MCS_CCK_MSK;
5769 DPRINTFN(2, ("station rate %d %x\n", i, tab));
5770 lq->rs_table[j++] = htole32(tab);
5771 }
5772
5773 /* Fill the rest with the lowest possible rate */
5774 i = j > 0 ? j - 1 : 0;
5775 while (j < __arraycount(lq->rs_table))
5776 lq->rs_table[j++] = lq->rs_table[i];
5777
5778 lq->single_stream_ant_msk = IWM_ANT_A;
5779 lq->dual_stream_ant_msk = IWM_ANT_AB;
5780
5781 lq->agg_time_limit = htole16(4000); /* 4ms */
5782 lq->agg_disable_start_th = 3;
5783 #ifdef notyet
5784 lq->agg_frame_cnt_limit = 0x3f;
5785 #else
5786 lq->agg_frame_cnt_limit = 1; /* tx agg disabled */
5787 #endif
5788
5789 cmd.data[0] = &in->in_lq;
5790 return iwm_send_cmd(sc, &cmd);
5791 }
5792 #endif
5793
5794 static int
5795 iwm_media_change(struct ifnet *ifp)
5796 {
5797 struct iwm_softc *sc = ifp->if_softc;
5798 struct ieee80211com *ic = &sc->sc_ic;
5799 uint8_t rate, ridx;
5800 int err;
5801
5802 err = ieee80211_media_change(ifp);
5803 if (err != ENETRESET)
5804 return err;
5805
5806 #ifndef IEEE80211_NO_HT
5807 if (ic->ic_fixed_mcs != -1)
5808 sc->sc_fixed_ridx = iwm_mcs2ridx[ic->ic_fixed_mcs];
5809 else
5810 #endif
5811 if (ic->ic_fixed_rate != -1) {
5812 rate = ic->ic_sup_rates[ic->ic_curmode].
5813 rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL;
5814 /* Map 802.11 rate to HW rate index. */
5815 for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
5816 if (iwm_rates[ridx].rate == rate)
5817 break;
5818 sc->sc_fixed_ridx = ridx;
5819 }
5820
5821 if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
5822 (IFF_UP | IFF_RUNNING)) {
5823 iwm_stop(ifp, 0);
5824 err = iwm_init(ifp);
5825 }
5826 return err;
5827 }
5828
5829 static void
5830 iwm_newstate_cb(struct work *wk, void *v)
5831 {
5832 struct iwm_softc *sc = v;
5833 struct ieee80211com *ic = &sc->sc_ic;
5834 struct iwm_newstate_state *iwmns = (struct iwm_newstate_state *)wk;
5835 enum ieee80211_state nstate = iwmns->ns_nstate;
5836 enum ieee80211_state ostate = ic->ic_state;
5837 int generation = iwmns->ns_generation;
5838 struct iwm_node *in;
5839 int arg = iwmns->ns_arg;
5840 int err;
5841
5842 kmem_free(iwmns, sizeof(*iwmns));
5843
5844 DPRINTF(("Prepare to switch state %d->%d\n", ostate, nstate));
5845 if (sc->sc_generation != generation) {
5846 DPRINTF(("newstate_cb: someone pulled the plug meanwhile\n"));
5847 if (nstate == IEEE80211_S_INIT) {
5848 DPRINTF(("newstate_cb: nstate == IEEE80211_S_INIT: calling sc_newstate()\n"));
5849 sc->sc_newstate(ic, nstate, arg);
5850 }
5851 return;
5852 }
5853
5854 DPRINTF(("switching state %s->%s\n", ieee80211_state_name[ostate],
5855 ieee80211_state_name[nstate]));
5856
5857 if (ostate == IEEE80211_S_SCAN && nstate != ostate)
5858 iwm_led_blink_stop(sc);
5859
5860 if (ostate == IEEE80211_S_RUN && nstate != ostate)
5861 iwm_disable_beacon_filter(sc);
5862
5863 /* Reset the device if moving out of AUTH, ASSOC, or RUN. */
5864 /* XXX Is there a way to switch states without a full reset? */
5865 if (ostate > IEEE80211_S_SCAN && nstate < ostate) {
5866 iwm_stop_device(sc);
5867 iwm_init_hw(sc);
5868
5869 /*
5870 * Upon receiving a deauth frame from AP the net80211 stack
5871 * puts the driver into AUTH state. This will fail with this
5872 * driver so bring the FSM from RUN to SCAN in this case.
5873 */
5874 if (nstate == IEEE80211_S_SCAN ||
5875 nstate == IEEE80211_S_AUTH ||
5876 nstate == IEEE80211_S_ASSOC) {
5877 DPRINTF(("Force transition to INIT; MGT=%d\n", arg));
5878 /* Always pass arg as -1 since we can't Tx right now. */
5879 sc->sc_newstate(ic, IEEE80211_S_INIT, -1);
5880 DPRINTF(("Going INIT->SCAN\n"));
5881 nstate = IEEE80211_S_SCAN;
5882 }
5883 }
5884
5885 switch (nstate) {
5886 case IEEE80211_S_INIT:
5887 break;
5888
5889 case IEEE80211_S_SCAN:
5890 if (ostate == nstate &&
5891 ISSET(sc->sc_flags, IWM_FLAG_SCANNING))
5892 return;
5893 if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
5894 err = iwm_umac_scan(sc);
5895 else
5896 err = iwm_lmac_scan(sc);
5897 if (err) {
5898 DPRINTF(("%s: could not initiate scan\n", DEVNAME(sc)));
5899 return;
5900 }
5901 SET(sc->sc_flags, IWM_FLAG_SCANNING);
5902 ic->ic_state = nstate;
5903 iwm_led_blink_start(sc);
5904 return;
5905
5906 case IEEE80211_S_AUTH:
5907 err = iwm_auth(sc);
5908 if (err) {
5909 DPRINTF(("%s: could not move to auth state: %d\n",
5910 DEVNAME(sc), err));
5911 return;
5912 }
5913 break;
5914
5915 case IEEE80211_S_ASSOC:
5916 err = iwm_assoc(sc);
5917 if (err) {
5918 DPRINTF(("%s: failed to associate: %d\n", DEVNAME(sc),
5919 err));
5920 return;
5921 }
5922 break;
5923
5924 case IEEE80211_S_RUN:
5925 in = (struct iwm_node *)ic->ic_bss;
5926
5927 /* We have now been assigned an associd by the AP. */
5928 err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 1);
5929 if (err) {
5930 aprint_error_dev(sc->sc_dev, "failed to update MAC\n");
5931 return;
5932 }
5933
5934 err = iwm_power_update_device(sc);
5935 if (err) {
5936 aprint_error_dev(sc->sc_dev,
5937 "could send power command (error %d)\n", err);
5938 return;
5939 }
5940 #ifdef notyet
5941 /*
5942 * Disabled for now. Default beacon filter settings
5943 * prevent net80211 from getting ERP and HT protection
5944 * updates from beacons.
5945 */
5946 err = iwm_enable_beacon_filter(sc, in);
5947 if (err) {
5948 aprint_error_dev(sc->sc_dev,
5949 "could not enable beacon filter\n");
5950 return;
5951 }
5952 #endif
5953 err = iwm_power_mac_update_mode(sc, in);
5954 if (err) {
5955 aprint_error_dev(sc->sc_dev,
5956 "could not update MAC power (error %d)\n", err);
5957 return;
5958 }
5959
5960 err = iwm_update_quotas(sc, in);
5961 if (err) {
5962 aprint_error_dev(sc->sc_dev,
5963 "could not update quotas (error %d)\n", err);
5964 return;
5965 }
5966
5967 ieee80211_amrr_node_init(&sc->sc_amrr, &in->in_amn);
5968
5969 /* Start at lowest available bit-rate, AMRR will raise. */
5970 in->in_ni.ni_txrate = 0;
5971 #ifndef IEEE80211_NO_HT
5972 in->in_ni.ni_txmcs = 0;
5973 iwm_setrates(in);
5974 #endif
5975
5976 callout_schedule(&sc->sc_calib_to, mstohz(500));
5977 iwm_led_enable(sc);
5978 break;
5979
5980 default:
5981 break;
5982 }
5983
5984 sc->sc_newstate(ic, nstate, arg);
5985 }
5986
5987 static int
5988 iwm_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
5989 {
5990 struct iwm_newstate_state *iwmns;
5991 struct ifnet *ifp = IC2IFP(ic);
5992 struct iwm_softc *sc = ifp->if_softc;
5993
5994 callout_stop(&sc->sc_calib_to);
5995
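	/* The workqueue callback (iwm_newstate_cb) frees this request. */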
5996 iwmns = kmem_intr_alloc(sizeof(*iwmns), KM_NOSLEEP);
5997 if (!iwmns) {
5998 DPRINTF(("%s: allocating state cb mem failed\n", DEVNAME(sc)));
5999 return ENOMEM;
6000 }
6001
6002 iwmns->ns_nstate = nstate;
6003 iwmns->ns_arg = arg;
6004 iwmns->ns_generation = sc->sc_generation;
6005
6006 workqueue_enqueue(sc->sc_nswq, &iwmns->ns_wk, NULL);
6007
6008 return 0;
6009 }
6010
6011 static void
6012 iwm_endscan(struct iwm_softc *sc)
6013 {
6014 struct ieee80211com *ic = &sc->sc_ic;
6015 int s;
6016
6017 DPRINTF(("%s: scan ended\n", DEVNAME(sc)));
6018
6019 s = splnet();
6020 if (ic->ic_state == IEEE80211_S_SCAN)
6021 ieee80211_end_scan(ic);
6022 splx(s);
6023 }
6024
6025 /*
6026 * Aging and idle timeouts for the different possible scenarios
6027 * in default configuration
6028 */
6029 static const uint32_t
6030 iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
6031 {
6032 htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
6033 htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
6034 },
6035 {
6036 htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF),
6037 htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)
6038 },
6039 {
6040 htole32(IWM_SF_MCAST_AGING_TIMER_DEF),
6041 htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)
6042 },
6043 {
6044 htole32(IWM_SF_BA_AGING_TIMER_DEF),
6045 htole32(IWM_SF_BA_IDLE_TIMER_DEF)
6046 },
6047 {
6048 htole32(IWM_SF_TX_RE_AGING_TIMER_DEF),
6049 htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)
6050 },
6051 };
6052
6053 /*
6054 * Aging and idle timeouts for the different possible scenarios
6055 * in single BSS MAC configuration.
6056 */
6057 static const uint32_t
6058 iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
6059 {
6060 htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER),
6061 htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)
6062 },
6063 {
6064 htole32(IWM_SF_AGG_UNICAST_AGING_TIMER),
6065 htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)
6066 },
6067 {
6068 htole32(IWM_SF_MCAST_AGING_TIMER),
6069 htole32(IWM_SF_MCAST_IDLE_TIMER)
6070 },
6071 {
6072 htole32(IWM_SF_BA_AGING_TIMER),
6073 htole32(IWM_SF_BA_IDLE_TIMER)
6074 },
6075 {
6076 htole32(IWM_SF_TX_RE_AGING_TIMER),
6077 htole32(IWM_SF_TX_RE_IDLE_TIMER)
6078 },
6079 };
6080
6081 static void
6082 iwm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
6083 struct ieee80211_node *ni)
6084 {
6085 int i, j, watermark;
6086
6087 sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN);
6088
6089 /*
6090 * If we are in association flow - check antenna configuration
6091 * capabilities of the AP station, and choose the watermark accordingly.
6092 */
6093 if (ni) {
6094 #ifndef IEEE80211_NO_HT
6095 if (ni->ni_flags & IEEE80211_NODE_HT) {
6096 #ifdef notyet
6097 if (ni->ni_rxmcs[2] != 0)
6098 watermark = IWM_SF_W_MARK_MIMO3;
6099 else if (ni->ni_rxmcs[1] != 0)
6100 watermark = IWM_SF_W_MARK_MIMO2;
6101 else
6102 #endif
6103 watermark = IWM_SF_W_MARK_SISO;
6104 } else
6105 #endif
6106 			watermark = IWM_SF_W_MARK_LEGACY;
6107 	} else {
6108 		/* Default watermark value for unassociated mode. */
6109 		watermark = IWM_SF_W_MARK_MIMO2;
6110 	}
6111 sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark);
6112
6113 for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) {
6114 for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) {
6115 sf_cmd->long_delay_timeouts[i][j] =
6116 htole32(IWM_SF_LONG_DELAY_AGING_TIMER);
6117 }
6118 }
6119
6120 if (ni) {
6121 memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
6122 sizeof(iwm_sf_full_timeout));
6123 } else {
6124 memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
6125 sizeof(iwm_sf_full_timeout_def));
6126 }
6127 }
6128
6129 static int
6130 iwm_sf_config(struct iwm_softc *sc, int new_state)
6131 {
6132 struct ieee80211com *ic = &sc->sc_ic;
6133 struct iwm_sf_cfg_cmd sf_cmd = {
6134 .state = htole32(IWM_SF_FULL_ON),
6135 };
6136
6137 if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
6138 sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF);
6139
6140 switch (new_state) {
6141 case IWM_SF_UNINIT:
6142 case IWM_SF_INIT_OFF:
6143 iwm_fill_sf_command(sc, &sf_cmd, NULL);
6144 break;
6145 case IWM_SF_FULL_ON:
6146 iwm_fill_sf_command(sc, &sf_cmd, ic->ic_bss);
6147 break;
6148 default:
6149 return EINVAL;
6150 }
6151
6152 return iwm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
6153 sizeof(sf_cmd), &sf_cmd);
6154 }
6155
6156 static int
6157 iwm_send_bt_init_conf(struct iwm_softc *sc)
6158 {
6159 struct iwm_bt_coex_cmd bt_cmd;
6160
6161 bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
6162 bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
6163
6164 return iwm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd), &bt_cmd);
6165 }
6166
6167 static bool
6168 iwm_is_lar_supported(struct iwm_softc *sc)
6169 {
6170 bool nvm_lar = sc->sc_nvm.lar_enabled;
6171 bool tlv_lar = isset(sc->sc_enabled_capa,
6172 IWM_UCODE_TLV_CAPA_LAR_SUPPORT);
6173
6174 if (iwm_lar_disable)
6175 return false;
6176
6177 /*
6178 	 * Enable LAR only if it is supported by the FW (TLV) and
6179 	 * enabled in the NVM.
6180 */
6181 if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
6182 return nvm_lar && tlv_lar;
6183 else
6184 return tlv_lar;
6185 }
6186
6187 static int
6188 iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
6189 {
6190 struct iwm_mcc_update_cmd mcc_cmd;
6191 struct iwm_host_cmd hcmd = {
6192 .id = IWM_MCC_UPDATE_CMD,
6193 .flags = IWM_CMD_WANT_SKB,
6194 .data = { &mcc_cmd },
6195 };
6196 int err;
6197 int resp_v2 = isset(sc->sc_enabled_capa,
6198 IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
6199
6200 if (!iwm_is_lar_supported(sc)) {
6201 DPRINTF(("%s: no LAR support\n", __func__));
6202 return 0;
6203 }
6204
6205 memset(&mcc_cmd, 0, sizeof(mcc_cmd));
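	/* Pack the two-letter country code: e.g. "DE" becomes 0x4445. */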
6206 mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
6207 if ((sc->sc_ucode_api & IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
6208 isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC))
6209 mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
6210 else
6211 mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
6212
6213 if (resp_v2)
6214 hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
6215 else
6216 hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
6217
6218 err = iwm_send_cmd(sc, &hcmd);
6219 if (err)
6220 return err;
6221
6222 iwm_free_resp(sc, &hcmd);
6223
6224 return 0;
6225 }
6226
6227 static void
6228 iwm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
6229 {
6230 struct iwm_host_cmd cmd = {
6231 .id = IWM_REPLY_THERMAL_MNG_BACKOFF,
6232 .len = { sizeof(uint32_t), },
6233 .data = { &backoff, },
6234 };
6235
6236 iwm_send_cmd(sc, &cmd);
6237 }
6238
6239 static int
6240 iwm_init_hw(struct iwm_softc *sc)
6241 {
6242 struct ieee80211com *ic = &sc->sc_ic;
6243 int err, i, ac;
6244
6245 err = iwm_preinit(sc);
6246 if (err)
6247 return err;
6248
6249 err = iwm_start_hw(sc);
6250 if (err) {
6251 aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
6252 return err;
6253 }
6254
6255 err = iwm_run_init_mvm_ucode(sc, 0);
6256 if (err)
6257 return err;
6258
6259 /* Should stop and start HW since INIT image just loaded. */
6260 iwm_stop_device(sc);
6261 err = iwm_start_hw(sc);
6262 if (err) {
6263 aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
6264 return err;
6265 }
6266
6267 /* Restart, this time with the regular firmware */
6268 err = iwm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_REGULAR);
6269 if (err) {
6270 aprint_error_dev(sc->sc_dev,
6271 "could not load firmware (error %d)\n", err);
6272 goto err;
6273 }
6274
6275 err = iwm_send_bt_init_conf(sc);
6276 if (err) {
6277 aprint_error_dev(sc->sc_dev,
6278 "could not init bt coex (error %d)\n", err);
6279 goto err;
6280 }
6281
6282 err = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc));
6283 if (err) {
6284 aprint_error_dev(sc->sc_dev,
6285 "could not init tx ant config (error %d)\n", err);
6286 goto err;
6287 }
6288
6289 	/* Send PHY DB control command and then PHY DB calibration. */
6290 err = iwm_send_phy_db_data(sc);
6291 if (err) {
6292 aprint_error_dev(sc->sc_dev,
6293 "could not init phy db (error %d)\n", err);
6294 goto err;
6295 }
6296
6297 err = iwm_send_phy_cfg_cmd(sc);
6298 if (err) {
6299 aprint_error_dev(sc->sc_dev,
6300 "could not send phy config (error %d)\n", err);
6301 goto err;
6302 }
6303
6304 /* Add auxiliary station for scanning */
6305 err = iwm_add_aux_sta(sc);
6306 if (err) {
6307 aprint_error_dev(sc->sc_dev,
6308 "could not add aux station (error %d)\n", err);
6309 goto err;
6310 }
6311
6312 for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
6313 /*
6314 * The channel used here isn't relevant as it's
6315 * going to be overwritten in the other flows.
6316 * For now use the first channel we have.
6317 */
6318 sc->sc_phyctxt[i].channel = &ic->ic_channels[1];
6319 err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[i], 1, 1,
6320 IWM_FW_CTXT_ACTION_ADD, 0);
6321 if (err) {
6322 aprint_error_dev(sc->sc_dev,
6323 "could not add phy context %d (error %d)\n",
6324 i, err);
6325 goto err;
6326 }
6327 }
6328
6329 /* Initialize tx backoffs to the minimum. */
6330 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
6331 iwm_tt_tx_backoff(sc, 0);
6332
6333 err = iwm_power_update_device(sc);
6334 if (err) {
6335 aprint_error_dev(sc->sc_dev,
6336 "could send power command (error %d)\n", err);
6337 goto err;
6338 }
6339
6340 err = iwm_send_update_mcc_cmd(sc, iwm_default_mcc);
6341 if (err) {
6342 aprint_error_dev(sc->sc_dev,
6343 "could not init LAR (error %d)\n", err);
6344 goto err;
6345 }
6346
6347 if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
6348 err = iwm_config_umac_scan(sc);
6349 if (err) {
6350 aprint_error_dev(sc->sc_dev,
6351 "could not configure scan (error %d)\n", err);
6352 goto err;
6353 }
6354 }
6355
6356 for (ac = 0; ac < WME_NUM_AC; ac++) {
6357 err = iwm_enable_txq(sc, IWM_STATION_ID, ac,
6358 iwm_ac_to_tx_fifo[ac]);
6359 if (err) {
6360 aprint_error_dev(sc->sc_dev,
6361 "could not enable Tx queue %d (error %d)\n",
6362 			    ac, err);
6363 goto err;
6364 }
6365 }
6366
6367 err = iwm_disable_beacon_filter(sc);
6368 if (err) {
6369 aprint_error_dev(sc->sc_dev,
6370 "could not disable beacon filter (error %d)\n", err);
6371 goto err;
6372 }
6373
6374 return 0;
6375
6376 err:
6377 iwm_stop_device(sc);
6378 return err;
6379 }
6380
6381 /* Allow multicast from our BSSID. */
6382 static int
6383 iwm_allow_mcast(struct iwm_softc *sc)
6384 {
6385 struct ieee80211com *ic = &sc->sc_ic;
6386 struct ieee80211_node *ni = ic->ic_bss;
6387 struct iwm_mcast_filter_cmd *cmd;
6388 size_t size;
6389 int err;
6390
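	/* Command payloads are rounded up to a 4-byte boundary. */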
6391 size = roundup(sizeof(*cmd), 4);
6392 cmd = kmem_intr_zalloc(size, KM_NOSLEEP);
6393 if (cmd == NULL)
6394 return ENOMEM;
6395 cmd->filter_own = 1;
6396 cmd->port_id = 0;
6397 cmd->count = 0;
6398 cmd->pass_all = 1;
6399 IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
6400
6401 err = iwm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD, 0, size, cmd);
6402 kmem_intr_free(cmd, size);
6403 return err;
6404 }
6405
6406 static int
6407 iwm_init(struct ifnet *ifp)
6408 {
6409 struct iwm_softc *sc = ifp->if_softc;
6410 int err;
6411 int s;
6412
6413 if (ISSET(sc->sc_flags, IWM_FLAG_HW_INITED))
6414 return 0;
6415
6416 sc->sc_generation++;
6417 sc->sc_flags &= ~IWM_FLAG_STOPPED;
6418
6419 err = iwm_init_hw(sc);
6420 if (err) {
6421 iwm_stop(ifp, 1);
6422 return err;
6423 }
6424
6425 ifp->if_flags &= ~IFF_OACTIVE;
6426 ifp->if_flags |= IFF_RUNNING;
6427
6428 s = splnet();
6429 ieee80211_begin_scan(&sc->sc_ic, 0);
6430 splx(s);
6431
6432 SET(sc->sc_flags, IWM_FLAG_HW_INITED);
6433
6434 return 0;
6435 }
6436
6437 static void
6438 iwm_start(struct ifnet *ifp)
6439 {
6440 struct iwm_softc *sc = ifp->if_softc;
6441 struct ieee80211com *ic = &sc->sc_ic;
6442 struct ieee80211_node *ni;
6443 struct ether_header *eh;
6444 struct mbuf *m;
6445 int ac;
6446
6447 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
6448 return;
6449
6450 for (;;) {
6451 /* why isn't this done per-queue? */
6452 if (sc->qfullmsk != 0) {
6453 ifp->if_flags |= IFF_OACTIVE;
6454 break;
6455 }
6456
6457 /* need to send management frames even if we're not RUNning */
6458 IF_DEQUEUE(&ic->ic_mgtq, m);
6459 if (m) {
6460 ni = M_GETCTX(m, struct ieee80211_node *);
6461 M_CLEARCTX(m);
6462 ac = WME_AC_BE;
6463 goto sendit;
6464 }
6465 if (ic->ic_state != IEEE80211_S_RUN) {
6466 break;
6467 }
6468
6469 IFQ_DEQUEUE(&ifp->if_snd, m);
6470 if (m == NULL)
6471 break;
6472
6473 if (m->m_len < sizeof (*eh) &&
6474 (m = m_pullup(m, sizeof (*eh))) == NULL) {
6475 ifp->if_oerrors++;
6476 continue;
6477 }
6478
6479 eh = mtod(m, struct ether_header *);
6480 ni = ieee80211_find_txnode(ic, eh->ether_dhost);
6481 if (ni == NULL) {
6482 m_freem(m);
6483 ifp->if_oerrors++;
6484 continue;
6485 }
6486
6487 /* classify mbuf so we can find which tx ring to use */
6488 if (ieee80211_classify(ic, m, ni) != 0) {
6489 m_freem(m);
6490 ieee80211_free_node(ni);
6491 ifp->if_oerrors++;
6492 continue;
6493 }
6494
6495 /* No QoS encapsulation for EAPOL frames. */
6496 ac = (eh->ether_type != htons(ETHERTYPE_PAE)) ?
6497 M_WME_GETAC(m) : WME_AC_BE;
6498
6499 bpf_mtap(ifp, m);
6500
6501 if ((m = ieee80211_encap(ic, m, ni)) == NULL) {
6502 ieee80211_free_node(ni);
6503 ifp->if_oerrors++;
6504 continue;
6505 }
6506
6507 sendit:
6508 bpf_mtap3(ic->ic_rawbpf, m);
6509
6510 if (iwm_tx(sc, m, ni, ac) != 0) {
6511 ieee80211_free_node(ni);
6512 ifp->if_oerrors++;
6513 continue;
6514 }
6515
6516 if (ifp->if_flags & IFF_UP) {
6517 sc->sc_tx_timer = 15;
6518 ifp->if_timer = 1;
6519 }
6520 }
6521 }
6522
6523 static void
6524 iwm_stop(struct ifnet *ifp, int disable)
6525 {
6526 struct iwm_softc *sc = ifp->if_softc;
6527 struct ieee80211com *ic = &sc->sc_ic;
6528 struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
6529 int s;
6530
6531 sc->sc_flags &= ~IWM_FLAG_HW_INITED;
6532 sc->sc_flags |= IWM_FLAG_STOPPED;
6533 sc->sc_generation++;
6534 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
6535
6536 if (in)
6537 in->in_phyctxt = NULL;
6538
6539 s = splnet();
6540 if (ic->ic_state != IEEE80211_S_INIT)
6541 ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
6542 splx(s);
6543
6544 callout_stop(&sc->sc_calib_to);
6545 iwm_led_blink_stop(sc);
6546 ifp->if_timer = sc->sc_tx_timer = 0;
6547 iwm_stop_device(sc);
6548 }
6549
6550 static void
6551 iwm_watchdog(struct ifnet *ifp)
6552 {
6553 struct iwm_softc *sc = ifp->if_softc;
6554
6555 ifp->if_timer = 0;
6556 if (sc->sc_tx_timer > 0) {
6557 if (--sc->sc_tx_timer == 0) {
6558 aprint_error_dev(sc->sc_dev, "device timeout\n");
6559 #ifdef IWM_DEBUG
6560 iwm_nic_error(sc);
6561 #endif
6562 ifp->if_flags &= ~IFF_UP;
6563 iwm_stop(ifp, 1);
6564 ifp->if_oerrors++;
6565 return;
6566 }
6567 ifp->if_timer = 1;
6568 }
6569
6570 ieee80211_watchdog(&sc->sc_ic);
6571 }
6572
6573 static int
6574 iwm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
6575 {
6576 struct iwm_softc *sc = ifp->if_softc;
6577 struct ieee80211com *ic = &sc->sc_ic;
6578 const struct sockaddr *sa;
6579 int s, err = 0;
6580
6581 s = splnet();
6582
6583 switch (cmd) {
6584 case SIOCSIFADDR:
6585 ifp->if_flags |= IFF_UP;
6586 /* FALLTHROUGH */
6587 case SIOCSIFFLAGS:
6588 err = ifioctl_common(ifp, cmd, data);
6589 if (err)
6590 break;
6591 if (ifp->if_flags & IFF_UP) {
6592 if (!(ifp->if_flags & IFF_RUNNING)) {
6593 err = iwm_init(ifp);
6594 if (err)
6595 ifp->if_flags &= ~IFF_UP;
6596 }
6597 } else {
6598 if (ifp->if_flags & IFF_RUNNING)
6599 iwm_stop(ifp, 1);
6600 }
6601 break;
6602
6603 case SIOCADDMULTI:
6604 case SIOCDELMULTI:
6605 if (!ISSET(sc->sc_flags, IWM_FLAG_ATTACHED)) {
6606 err = ENXIO;
6607 break;
6608 }
6609 sa = ifreq_getaddr(SIOCADDMULTI, (struct ifreq *)data);
6610 err = (cmd == SIOCADDMULTI) ?
6611 ether_addmulti(sa, &sc->sc_ec) :
6612 ether_delmulti(sa, &sc->sc_ec);
6613 if (err == ENETRESET)
6614 err = 0;
6615 break;
6616
6617 default:
6618 if (!ISSET(sc->sc_flags, IWM_FLAG_ATTACHED)) {
6619 err = ether_ioctl(ifp, cmd, data);
6620 break;
6621 }
6622 err = ieee80211_ioctl(ic, cmd, data);
6623 break;
6624 }
6625
6626 if (err == ENETRESET) {
6627 err = 0;
6628 if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
6629 (IFF_UP | IFF_RUNNING)) {
6630 iwm_stop(ifp, 0);
6631 err = iwm_init(ifp);
6632 }
6633 }
6634
6635 splx(s);
6636 return err;
6637 }
6638
6639 /*
6640 * Note: This structure is read from the device with IO accesses,
6641 * and the reading already does the endian conversion. As it is
6642 * read with uint32_t-sized accesses, any members with a different size
6643 * need to be ordered correctly though!
6644 */
6645 struct iwm_error_event_table {
6646 uint32_t valid; /* (nonzero) valid, (0) log is empty */
6647 uint32_t error_id; /* type of error */
6648 uint32_t trm_hw_status0; /* TRM HW status */
6649 uint32_t trm_hw_status1; /* TRM HW status */
6650 uint32_t blink2; /* branch link */
6651 uint32_t ilink1; /* interrupt link */
6652 uint32_t ilink2; /* interrupt link */
6653 uint32_t data1; /* error-specific data */
6654 uint32_t data2; /* error-specific data */
6655 uint32_t data3; /* error-specific data */
6656 uint32_t bcon_time; /* beacon timer */
6657 uint32_t tsf_low; /* network timestamp function timer */
6658 uint32_t tsf_hi; /* network timestamp function timer */
6659 uint32_t gp1; /* GP1 timer register */
6660 uint32_t gp2; /* GP2 timer register */
6661 uint32_t fw_rev_type; /* firmware revision type */
6662 uint32_t major; /* uCode version major */
6663 uint32_t minor; /* uCode version minor */
6664 uint32_t hw_ver; /* HW Silicon version */
6665 uint32_t brd_ver; /* HW board version */
6666 uint32_t log_pc; /* log program counter */
6667 uint32_t frame_ptr; /* frame pointer */
6668 uint32_t stack_ptr; /* stack pointer */
6669 uint32_t hcmd; /* last host command header */
6670 uint32_t isr0; /* isr status register LMPM_NIC_ISR0:
6671 * rxtx_flag */
6672 uint32_t isr1; /* isr status register LMPM_NIC_ISR1:
6673 * host_flag */
6674 uint32_t isr2; /* isr status register LMPM_NIC_ISR2:
6675 * enc_flag */
6676 uint32_t isr3; /* isr status register LMPM_NIC_ISR3:
6677 * time_flag */
6678 uint32_t isr4; /* isr status register LMPM_NIC_ISR4:
6679 * wico interrupt */
6680 uint32_t last_cmd_id; /* last HCMD id handled by the firmware */
6681 uint32_t wait_event; /* wait event() caller address */
6682 uint32_t l2p_control; /* L2pControlField */
6683 uint32_t l2p_duration; /* L2pDurationField */
6684 uint32_t l2p_mhvalid; /* L2pMhValidBits */
6685 uint32_t l2p_addr_match; /* L2pAddrMatchStat */
6686 uint32_t lmpm_pmg_sel; /* indicate which clocks are turned on
6687 * (LMPM_PMG_SEL) */
6688 	uint32_t u_timestamp;	/* date and time of the uCode
6689 				 * compilation */
6690 uint32_t flow_handler; /* FH read/write pointers, RX credit */
6691 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
6692
6693 /*
6694 * UMAC error struct - relevant starting from family 8000 chip.
6695 * Note: This structure is read from the device with IO accesses,
6696 * and the reading already does the endian conversion. As it is
6697 * read with u32-sized accesses, any members with a different size
6698 * need to be ordered correctly though!
6699 */
6700 struct iwm_umac_error_event_table {
6701 uint32_t valid; /* (nonzero) valid, (0) log is empty */
6702 uint32_t error_id; /* type of error */
6703 uint32_t blink1; /* branch link */
6704 uint32_t blink2; /* branch link */
6705 uint32_t ilink1; /* interrupt link */
6706 uint32_t ilink2; /* interrupt link */
6707 uint32_t data1; /* error-specific data */
6708 uint32_t data2; /* error-specific data */
6709 uint32_t data3; /* error-specific data */
6710 uint32_t umac_major;
6711 uint32_t umac_minor;
6712 uint32_t frame_pointer; /* core register 27 */
6713 uint32_t stack_pointer; /* core register 28 */
6714 uint32_t cmd_header; /* latest host cmd sent to UMAC */
6715 uint32_t nic_isr_pref; /* ISR status register */
6716 } __packed;
6717
6718 #define ERROR_START_OFFSET (1 * sizeof(uint32_t))
6719 #define ERROR_ELEM_SIZE (7 * sizeof(uint32_t))
6720
6721 #ifdef IWM_DEBUG
6722 static const struct {
6723 const char *name;
6724 uint8_t num;
6725 } advanced_lookup[] = {
6726 { "NMI_INTERRUPT_WDG", 0x34 },
6727 { "SYSASSERT", 0x35 },
6728 { "UCODE_VERSION_MISMATCH", 0x37 },
6729 { "BAD_COMMAND", 0x38 },
6730 { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
6731 { "FATAL_ERROR", 0x3D },
6732 { "NMI_TRM_HW_ERR", 0x46 },
6733 { "NMI_INTERRUPT_TRM", 0x4C },
6734 { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
6735 { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
6736 { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
6737 { "NMI_INTERRUPT_HOST", 0x66 },
6738 { "NMI_INTERRUPT_ACTION_PT", 0x7C },
6739 { "NMI_INTERRUPT_UNKNOWN", 0x84 },
6740 { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
6741 { "ADVANCED_SYSASSERT", 0 },
6742 };
6743
6744 static const char *
6745 iwm_desc_lookup(uint32_t num)
6746 {
6747 int i;
6748
6749 for (i = 0; i < __arraycount(advanced_lookup) - 1; i++)
6750 if (advanced_lookup[i].num == num)
6751 return advanced_lookup[i].name;
6752
6753 /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
6754 return advanced_lookup[i].name;
6755 }
6756
6757 /*
6758 * Support for dumping the error log seemed like a good idea ...
6759 * but it's mostly hex junk and the only sensible thing is the
6760 * hw/ucode revision (which we know anyway). Since it's here,
6761 * I'll just leave it in, just in case e.g. the Intel guys want to
6762 * help us decipher some "ADVANCED_SYSASSERT" later.
6763 */
6764 static void
6765 iwm_nic_error(struct iwm_softc *sc)
6766 {
6767 struct iwm_error_event_table t;
6768 uint32_t base;
6769
6770 aprint_error_dev(sc->sc_dev, "dumping device error log\n");
6771 base = sc->sc_uc.uc_error_event_table;
6772 if (base < 0x800000) {
6773 aprint_error_dev(sc->sc_dev,
6774 "Invalid error log pointer 0x%08x\n", base);
6775 return;
6776 }
6777
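	/* iwm_read_mem() takes a length in 32-bit words. */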
6778 if (iwm_read_mem(sc, base, &t, sizeof(t)/sizeof(uint32_t))) {
6779 aprint_error_dev(sc->sc_dev, "reading errlog failed\n");
6780 return;
6781 }
6782
6783 if (!t.valid) {
6784 aprint_error_dev(sc->sc_dev, "errlog not found, skipping\n");
6785 return;
6786 }
6787
6788 if (ERROR_START_OFFSET <= t.valid * ERROR_ELEM_SIZE) {
6789 aprint_error_dev(sc->sc_dev, "Start Error Log Dump:\n");
6790 aprint_error_dev(sc->sc_dev, "Status: 0x%x, count: %d\n",
6791 sc->sc_flags, t.valid);
6792 }
6793
6794 aprint_error_dev(sc->sc_dev, "%08X | %-28s\n", t.error_id,
6795 iwm_desc_lookup(t.error_id));
6796 aprint_error_dev(sc->sc_dev, "%08X | trm_hw_status0\n",
6797 t.trm_hw_status0);
6798 aprint_error_dev(sc->sc_dev, "%08X | trm_hw_status1\n",
6799 t.trm_hw_status1);
6800 aprint_error_dev(sc->sc_dev, "%08X | branchlink2\n", t.blink2);
6801 aprint_error_dev(sc->sc_dev, "%08X | interruptlink1\n", t.ilink1);
6802 aprint_error_dev(sc->sc_dev, "%08X | interruptlink2\n", t.ilink2);
6803 aprint_error_dev(sc->sc_dev, "%08X | data1\n", t.data1);
6804 aprint_error_dev(sc->sc_dev, "%08X | data2\n", t.data2);
6805 aprint_error_dev(sc->sc_dev, "%08X | data3\n", t.data3);
6806 aprint_error_dev(sc->sc_dev, "%08X | beacon time\n", t.bcon_time);
6807 aprint_error_dev(sc->sc_dev, "%08X | tsf low\n", t.tsf_low);
6808 aprint_error_dev(sc->sc_dev, "%08X | tsf hi\n", t.tsf_hi);
6809 aprint_error_dev(sc->sc_dev, "%08X | time gp1\n", t.gp1);
6810 aprint_error_dev(sc->sc_dev, "%08X | time gp2\n", t.gp2);
6811 aprint_error_dev(sc->sc_dev, "%08X | uCode revision type\n",
6812 t.fw_rev_type);
6813 aprint_error_dev(sc->sc_dev, "%08X | uCode version major\n",
6814 t.major);
6815 aprint_error_dev(sc->sc_dev, "%08X | uCode version minor\n",
6816 t.minor);
6817 aprint_error_dev(sc->sc_dev, "%08X | hw version\n", t.hw_ver);
6818 aprint_error_dev(sc->sc_dev, "%08X | board version\n", t.brd_ver);
6819 aprint_error_dev(sc->sc_dev, "%08X | hcmd\n", t.hcmd);
6820 aprint_error_dev(sc->sc_dev, "%08X | isr0\n", t.isr0);
6821 aprint_error_dev(sc->sc_dev, "%08X | isr1\n", t.isr1);
6822 aprint_error_dev(sc->sc_dev, "%08X | isr2\n", t.isr2);
6823 aprint_error_dev(sc->sc_dev, "%08X | isr3\n", t.isr3);
6824 aprint_error_dev(sc->sc_dev, "%08X | isr4\n", t.isr4);
6825 aprint_error_dev(sc->sc_dev, "%08X | last cmd Id\n", t.last_cmd_id);
6826 aprint_error_dev(sc->sc_dev, "%08X | wait_event\n", t.wait_event);
6827 aprint_error_dev(sc->sc_dev, "%08X | l2p_control\n", t.l2p_control);
6828 aprint_error_dev(sc->sc_dev, "%08X | l2p_duration\n", t.l2p_duration);
6829 aprint_error_dev(sc->sc_dev, "%08X | l2p_mhvalid\n", t.l2p_mhvalid);
6830 aprint_error_dev(sc->sc_dev, "%08X | l2p_addr_match\n",
6831 t.l2p_addr_match);
6832 aprint_error_dev(sc->sc_dev, "%08X | lmpm_pmg_sel\n", t.lmpm_pmg_sel);
6833 aprint_error_dev(sc->sc_dev, "%08X | timestamp\n", t.u_timestamp);
6834 aprint_error_dev(sc->sc_dev, "%08X | flow_handler\n", t.flow_handler);
6835
6836 if (sc->sc_uc.uc_umac_error_event_table)
6837 iwm_nic_umac_error(sc);
6838 }
6839
6840 static void
6841 iwm_nic_umac_error(struct iwm_softc *sc)
6842 {
6843 struct iwm_umac_error_event_table t;
6844 uint32_t base;
6845
6846 base = sc->sc_uc.uc_umac_error_event_table;
6847
6848 if (base < 0x800000) {
6849 aprint_error_dev(sc->sc_dev,
6850 "Invalid error log pointer 0x%08x\n", base);
6851 return;
6852 }
6853
6854 if (iwm_read_mem(sc, base, &t, sizeof(t)/sizeof(uint32_t))) {
6855 aprint_error_dev(sc->sc_dev, "reading errlog failed\n");
6856 return;
6857 }
6858
6859 if (ERROR_START_OFFSET <= t.valid * ERROR_ELEM_SIZE) {
6860 aprint_error_dev(sc->sc_dev, "Start UMAC Error Log Dump:\n");
6861 aprint_error_dev(sc->sc_dev, "Status: 0x%x, count: %d\n",
6862 sc->sc_flags, t.valid);
6863 }
6864
6865 aprint_error_dev(sc->sc_dev, "0x%08X | %s\n", t.error_id,
6866 iwm_desc_lookup(t.error_id));
6867 aprint_error_dev(sc->sc_dev, "0x%08X | umac branchlink1\n", t.blink1);
6868 aprint_error_dev(sc->sc_dev, "0x%08X | umac branchlink2\n", t.blink2);
6869 aprint_error_dev(sc->sc_dev, "0x%08X | umac interruptlink1\n",
6870 t.ilink1);
6871 aprint_error_dev(sc->sc_dev, "0x%08X | umac interruptlink2\n",
6872 t.ilink2);
6873 aprint_error_dev(sc->sc_dev, "0x%08X | umac data1\n", t.data1);
6874 aprint_error_dev(sc->sc_dev, "0x%08X | umac data2\n", t.data2);
6875 aprint_error_dev(sc->sc_dev, "0x%08X | umac data3\n", t.data3);
6876 aprint_error_dev(sc->sc_dev, "0x%08X | umac major\n", t.umac_major);
6877 aprint_error_dev(sc->sc_dev, "0x%08X | umac minor\n", t.umac_minor);
6878 aprint_error_dev(sc->sc_dev, "0x%08X | frame pointer\n",
6879 t.frame_pointer);
6880 aprint_error_dev(sc->sc_dev, "0x%08X | stack pointer\n",
6881 t.stack_pointer);
6882 aprint_error_dev(sc->sc_dev, "0x%08X | last host cmd\n", t.cmd_header);
6883 aprint_error_dev(sc->sc_dev, "0x%08X | isr status reg\n",
6884 t.nic_isr_pref);
6885 }
6886 #endif
6887
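/*
 * Each notification payload follows its struct iwm_rx_packet header in
 * the RX buffer.  These helpers sync that payload portion of the DMA
 * map and point the given pointer at it.
 */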
6888 #define SYNC_RESP_STRUCT(_var_, _pkt_) \
6889 do { \
6890 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)), \
6891 sizeof(*(_var_)), BUS_DMASYNC_POSTREAD); \
6892 _var_ = (void *)((_pkt_)+1); \
6893 } while (/*CONSTCOND*/0)
6894
6895 #define SYNC_RESP_PTR(_ptr_, _len_, _pkt_) \
6896 do { \
6897 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)), \
6898 	    (_len_), BUS_DMASYNC_POSTREAD); \
6899 _ptr_ = (void *)((_pkt_)+1); \
6900 } while (/*CONSTCOND*/0)
6901
6902 #define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % IWM_RX_RING_COUNT)
6903
6904 static void
6905 iwm_notif_intr(struct iwm_softc *sc)
6906 {
6907 uint16_t hw;
6908
6909 bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
6910 0, sc->rxq.stat_dma.size, BUS_DMASYNC_POSTREAD);
6911
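	/* Only the low 12 bits of closed_rb_num index the RX ring. */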
6912 hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
6913 while (sc->rxq.cur != hw) {
6914 struct iwm_rx_data *data = &sc->rxq.data[sc->rxq.cur];
6915 struct iwm_rx_packet *pkt;
6916 struct iwm_cmd_response *cresp;
6917 int orig_qid, qid, idx, code;
6918
6919 bus_dmamap_sync(sc->sc_dmat, data->map, 0, sizeof(*pkt),
6920 BUS_DMASYNC_POSTREAD);
6921 pkt = mtod(data->m, struct iwm_rx_packet *);
6922
6923 orig_qid = pkt->hdr.qid;
6924 qid = orig_qid & ~0x80;
6925 idx = pkt->hdr.idx;
6926
6927 code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
6928
6929 /*
6930 		 * We randomly get these from the firmware, no idea why.
6931 		 * They at least seem harmless, so just ignore them for now.
6932 */
6933 if (__predict_false((pkt->hdr.code == 0 && qid == 0 && idx == 0)
6934 || pkt->len_n_flags == htole32(0x55550000))) {
6935 ADVANCE_RXQ(sc);
6936 continue;
6937 }
6938
6939 switch (code) {
6940 case IWM_REPLY_RX_PHY_CMD:
6941 iwm_rx_rx_phy_cmd(sc, pkt, data);
6942 break;
6943
6944 case IWM_REPLY_RX_MPDU_CMD:
6945 iwm_rx_rx_mpdu(sc, pkt, data);
6946 break;
6947
6948 case IWM_TX_CMD:
6949 iwm_rx_tx_cmd(sc, pkt, data);
6950 break;
6951
6952 case IWM_MISSED_BEACONS_NOTIFICATION:
6953 iwm_rx_missed_beacons_notif(sc, pkt, data);
6954 break;
6955
6956 case IWM_MFUART_LOAD_NOTIFICATION:
6957 break;
6958
6959 case IWM_ALIVE: {
6960 struct iwm_alive_resp_v1 *resp1;
6961 struct iwm_alive_resp_v2 *resp2;
6962 struct iwm_alive_resp_v3 *resp3;
6963
6964 if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp1)) {
6965 SYNC_RESP_STRUCT(resp1, pkt);
6966 sc->sc_uc.uc_error_event_table
6967 = le32toh(resp1->error_event_table_ptr);
6968 sc->sc_uc.uc_log_event_table
6969 = le32toh(resp1->log_event_table_ptr);
6970 sc->sched_base = le32toh(resp1->scd_base_ptr);
6971 if (resp1->status == IWM_ALIVE_STATUS_OK)
6972 sc->sc_uc.uc_ok = 1;
6973 else
6974 sc->sc_uc.uc_ok = 0;
6975 }
6976 if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp2)) {
6977 SYNC_RESP_STRUCT(resp2, pkt);
6978 sc->sc_uc.uc_error_event_table
6979 = le32toh(resp2->error_event_table_ptr);
6980 sc->sc_uc.uc_log_event_table
6981 = le32toh(resp2->log_event_table_ptr);
6982 sc->sched_base = le32toh(resp2->scd_base_ptr);
6983 sc->sc_uc.uc_umac_error_event_table
6984 = le32toh(resp2->error_info_addr);
6985 if (resp2->status == IWM_ALIVE_STATUS_OK)
6986 sc->sc_uc.uc_ok = 1;
6987 else
6988 sc->sc_uc.uc_ok = 0;
6989 }
6990 if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp3)) {
6991 SYNC_RESP_STRUCT(resp3, pkt);
6992 sc->sc_uc.uc_error_event_table
6993 = le32toh(resp3->error_event_table_ptr);
6994 sc->sc_uc.uc_log_event_table
6995 = le32toh(resp3->log_event_table_ptr);
6996 sc->sched_base = le32toh(resp3->scd_base_ptr);
6997 sc->sc_uc.uc_umac_error_event_table
6998 = le32toh(resp3->error_info_addr);
6999 if (resp3->status == IWM_ALIVE_STATUS_OK)
7000 sc->sc_uc.uc_ok = 1;
7001 else
7002 sc->sc_uc.uc_ok = 0;
7003 }
7004
7005 sc->sc_uc.uc_intr = 1;
7006 wakeup(&sc->sc_uc);
7007 break;
7008 }
7009
7010 case IWM_CALIB_RES_NOTIF_PHY_DB: {
7011 struct iwm_calib_res_notif_phy_db *phy_db_notif;
7012 SYNC_RESP_STRUCT(phy_db_notif, pkt);
7013 uint16_t size = le16toh(phy_db_notif->length);
7014 bus_dmamap_sync(sc->sc_dmat, data->map,
7015 sizeof(*pkt) + sizeof(*phy_db_notif),
7016 size, BUS_DMASYNC_POSTREAD);
7017 iwm_phy_db_set_section(sc, phy_db_notif, size);
7018 break;
7019 }
7020
7021 case IWM_STATISTICS_NOTIFICATION: {
7022 struct iwm_notif_statistics *stats;
7023 SYNC_RESP_STRUCT(stats, pkt);
7024 memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
7025 sc->sc_noise = iwm_get_noise(&stats->rx.general);
7026 break;
7027 }
7028
7029 case IWM_NVM_ACCESS_CMD:
7030 case IWM_MCC_UPDATE_CMD:
7031 if (sc->sc_wantresp == ((qid << 16) | idx)) {
7032 bus_dmamap_sync(sc->sc_dmat, data->map, 0,
7033 sizeof(sc->sc_cmd_resp),
7034 BUS_DMASYNC_POSTREAD);
7035 memcpy(sc->sc_cmd_resp,
7036 pkt, sizeof(sc->sc_cmd_resp));
7037 }
7038 break;
7039
7040 case IWM_MCC_CHUB_UPDATE_CMD: {
7041 struct iwm_mcc_chub_notif *notif;
7042 SYNC_RESP_STRUCT(notif, pkt);
7043
7044 			sc->sc_fw_mcc[0] = (le16toh(notif->mcc) & 0xff00) >> 8;
7045 			sc->sc_fw_mcc[1] = le16toh(notif->mcc) & 0xff;
7046 sc->sc_fw_mcc[2] = '\0';
7047 break;
7048 }
7049
7050 case IWM_DTS_MEASUREMENT_NOTIFICATION:
7051 case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
7052 IWM_DTS_MEASUREMENT_NOTIF_WIDE): {
7053 struct iwm_dts_measurement_notif_v1 *notif1;
7054 struct iwm_dts_measurement_notif_v2 *notif2;
7055
7056 if (iwm_rx_packet_payload_len(pkt) == sizeof(*notif1)) {
7057 SYNC_RESP_STRUCT(notif1, pkt);
7058 DPRINTF(("%s: DTS temp=%d \n",
7059 DEVNAME(sc), notif1->temp));
7060 break;
7061 }
7062 if (iwm_rx_packet_payload_len(pkt) == sizeof(*notif2)) {
7063 SYNC_RESP_STRUCT(notif2, pkt);
7064 DPRINTF(("%s: DTS temp=%d \n",
7065 DEVNAME(sc), notif2->temp));
7066 break;
7067 }
7068 break;
7069 }
7070
7071 case IWM_PHY_CONFIGURATION_CMD:
7072 case IWM_TX_ANT_CONFIGURATION_CMD:
7073 case IWM_ADD_STA:
7074 case IWM_MAC_CONTEXT_CMD:
7075 case IWM_REPLY_SF_CFG_CMD:
7076 case IWM_POWER_TABLE_CMD:
7077 case IWM_PHY_CONTEXT_CMD:
7078 case IWM_BINDING_CONTEXT_CMD:
7079 case IWM_TIME_EVENT_CMD:
7080 case IWM_SCAN_REQUEST_CMD:
7081 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
7082 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
7083 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_ABORT_UMAC):
7084 case IWM_SCAN_OFFLOAD_REQUEST_CMD:
7085 case IWM_SCAN_OFFLOAD_ABORT_CMD:
7086 case IWM_REPLY_BEACON_FILTERING_CMD:
7087 case IWM_MAC_PM_POWER_TABLE:
7088 case IWM_TIME_QUOTA_CMD:
7089 case IWM_REMOVE_STA:
7090 case IWM_TXPATH_FLUSH:
7091 case IWM_LQ_CMD:
7092 case IWM_BT_CONFIG:
7093 case IWM_REPLY_THERMAL_MNG_BACKOFF:
7094 SYNC_RESP_STRUCT(cresp, pkt);
7095 if (sc->sc_wantresp == ((qid << 16) | idx)) {
7096 memcpy(sc->sc_cmd_resp,
7097 pkt, sizeof(*pkt) + sizeof(*cresp));
7098 }
7099 break;
7100
7101 /* ignore */
7102 case IWM_PHY_DB_CMD:
7103 break;
7104
7105 case IWM_INIT_COMPLETE_NOTIF:
7106 sc->sc_init_complete = 1;
7107 wakeup(&sc->sc_init_complete);
7108 break;
7109
7110 case IWM_SCAN_OFFLOAD_COMPLETE: {
7111 struct iwm_periodic_scan_complete *notif;
7112 SYNC_RESP_STRUCT(notif, pkt);
7113 break;
7114 }
7115
7116 case IWM_SCAN_ITERATION_COMPLETE: {
7117 struct iwm_lmac_scan_complete_notif *notif;
7118 SYNC_RESP_STRUCT(notif, pkt);
7119 if (ISSET(sc->sc_flags, IWM_FLAG_SCANNING)) {
7120 CLR(sc->sc_flags, IWM_FLAG_SCANNING);
7121 iwm_endscan(sc);
7122 }
7123 break;
7124 }
7125
7126 case IWM_SCAN_COMPLETE_UMAC: {
7127 struct iwm_umac_scan_complete *notif;
7128 SYNC_RESP_STRUCT(notif, pkt);
7129 if (ISSET(sc->sc_flags, IWM_FLAG_SCANNING)) {
7130 CLR(sc->sc_flags, IWM_FLAG_SCANNING);
7131 iwm_endscan(sc);
7132 }
7133 break;
7134 }
7135
7136 case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
7137 struct iwm_umac_scan_iter_complete_notif *notif;
7138 SYNC_RESP_STRUCT(notif, pkt);
7139 if (ISSET(sc->sc_flags, IWM_FLAG_SCANNING)) {
7140 CLR(sc->sc_flags, IWM_FLAG_SCANNING);
7141 iwm_endscan(sc);
7142 }
7143 break;
7144 }
7145
7146 case IWM_REPLY_ERROR: {
7147 struct iwm_error_resp *resp;
7148 SYNC_RESP_STRUCT(resp, pkt);
7149 aprint_error_dev(sc->sc_dev,
7150 "firmware error 0x%x, cmd 0x%x\n",
7151 le32toh(resp->error_type), resp->cmd_id);
7152 break;
7153 }
7154
7155 case IWM_TIME_EVENT_NOTIFICATION: {
7156 struct iwm_time_event_notif *notif;
7157 SYNC_RESP_STRUCT(notif, pkt);
7158 break;
7159 }
7160
7161 case IWM_MCAST_FILTER_CMD:
7162 break;
7163
7164 case IWM_SCD_QUEUE_CFG: {
7165 struct iwm_scd_txq_cfg_rsp *rsp;
7166 SYNC_RESP_STRUCT(rsp, pkt);
7167 break;
7168 }
7169
7170 default:
7171 aprint_error_dev(sc->sc_dev,
7172 "unhandled firmware response 0x%x 0x%x/0x%x "
7173 "rx ring %d[%d]\n",
7174 code, pkt->hdr.code, pkt->len_n_flags, qid, idx);
7175 break;
7176 }
7177
7178 /*
7179 * uCode sets bit 0x80 when it originates the notification,
7180 * i.e. when the notification is not a direct response to a
7181 * command sent by the driver.
7182 * For example, uCode issues IWM_REPLY_RX when it sends a
7183 * received frame to the driver.
7184 */
7185 if (!(orig_qid & (1 << 7))) {
7186 iwm_cmd_done(sc, qid, idx);
7187 }
7188
7189 ADVANCE_RXQ(sc);
7190 }
7191
7192 /*
7193 * Seems like the hardware gets upset unless we align the write by 8??
7194 */
7195 hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
7196 IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
7197 }
7198
7199 static int
7200 iwm_intr(void *arg)
7201 {
7202 struct iwm_softc *sc = arg;
7203 int r1, r2;
7204
7205 IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
7206
7207 if (sc->sc_flags & IWM_FLAG_USE_ICT) {
7208 uint32_t *ict = sc->ict_dma.vaddr;
7209 int tmp;
7210
7211 bus_dmamap_sync(sc->sc_dmat, sc->ict_dma.map,
7212 0, sc->ict_dma.size, BUS_DMASYNC_POSTREAD);
7213 		tmp = le32toh(ict[sc->ict_cur]);
7214 if (!tmp)
7215 goto out_ena;
7216
7217 /*
7218 * ok, there was something. keep plowing until we have all.
7219 */
7220 r1 = r2 = 0;
7221 while (tmp) {
7222 r1 |= tmp;
7223 ict[sc->ict_cur] = 0; /* Acknowledge. */
7224 sc->ict_cur = (sc->ict_cur + 1) % IWM_ICT_COUNT;
7225 			tmp = le32toh(ict[sc->ict_cur]);
7226 }
7227
7228 bus_dmamap_sync(sc->sc_dmat, sc->ict_dma.map,
7229 0, sc->ict_dma.size, BUS_DMASYNC_PREWRITE);
7230
7231 /* this is where the fun begins. don't ask */
7232 if (r1 == 0xffffffff)
7233 r1 = 0;
7234
7235 /* i am not expected to understand this */
7236 if (r1 & 0xc0000)
7237 r1 |= 0x8000;
7238 r1 = (0xff & r1) | ((0xff00 & r1) << 16);
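		/*
		 * The expression above expands the packed ICT status back
		 * into CSR_INT bit positions: ICT byte 0 maps to bits 0-7
		 * and ICT byte 1 to bits 24-31.  The 0xc0000 check looks
		 * like a hardware workaround inherited from iwlwifi.
		 */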
7239 } else {
7240 r1 = IWM_READ(sc, IWM_CSR_INT);
7241 if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
7242 goto out;
7243 r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
7244 }
7245 if (r1 == 0 && r2 == 0) {
7246 goto out_ena;
7247 }
7248
7249 IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
7250
7251 atomic_or_32(&sc->sc_soft_flags, r1);
7252 softint_schedule(sc->sc_soft_ih);
7253 return 1;
7254
7255 out_ena:
7256 iwm_restore_interrupts(sc);
7257 out:
7258 return 0;
7259 }
7260
7261 static void
7262 iwm_softintr(void *arg)
7263 {
7264 struct iwm_softc *sc = arg;
7265 struct ifnet *ifp = IC2IFP(&sc->sc_ic);
7266 uint32_t r1;
7267 int isperiodic = 0;
7268
7269 r1 = atomic_swap_32(&sc->sc_soft_flags, 0);
7270
7271 if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
7272 #ifdef IWM_DEBUG
7273 int i;
7274
7275 iwm_nic_error(sc);
7276
7277 /* Dump driver status (TX and RX rings) while we're here. */
7278 DPRINTF(("driver status:\n"));
7279 for (i = 0; i < IWM_MAX_QUEUES; i++) {
7280 struct iwm_tx_ring *ring = &sc->txq[i];
7281 DPRINTF((" tx ring %2d: qid=%-2d cur=%-3d "
7282 "queued=%-3d\n",
7283 i, ring->qid, ring->cur, ring->queued));
7284 }
7285 DPRINTF((" rx ring: cur=%d\n", sc->rxq.cur));
7286 DPRINTF((" 802.11 state %s\n",
7287 ieee80211_state_name[sc->sc_ic.ic_state]));
7288 #endif
7289
7290 aprint_error_dev(sc->sc_dev, "fatal firmware error\n");
7291 fatal:
7292 ifp->if_flags &= ~IFF_UP;
7293 iwm_stop(ifp, 1);
7294 /* Don't restore interrupt mask */
7295 return;
7296
7297 }
7298
7299 if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
7300 aprint_error_dev(sc->sc_dev,
7301 "hardware error, stopping device\n");
7302 goto fatal;
7303 }
7304
7305 /* firmware chunk loaded */
7306 if (r1 & IWM_CSR_INT_BIT_FH_TX) {
7307 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
7308 sc->sc_fw_chunk_done = 1;
7309 wakeup(&sc->sc_fw);
7310 }
7311
7312 if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
7313 if (iwm_check_rfkill(sc) && (ifp->if_flags & IFF_UP)) {
7314 ifp->if_flags &= ~IFF_UP;
7315 iwm_stop(ifp, 1);
7316 }
7317 }
7318
7319 if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
7320 IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
7321 if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
7322 IWM_WRITE_1(sc,
7323 IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
7324 isperiodic = 1;
7325 }
7326
7327 if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) ||
7328 isperiodic) {
7329 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
7330
7331 iwm_notif_intr(sc);
7332
7333 /* enable periodic interrupt, see above */
7334 if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) &&
7335 !isperiodic)
7336 IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
7337 IWM_CSR_INT_PERIODIC_ENA);
7338 }
7339
7340 iwm_restore_interrupts(sc);
7341 }
7342
7343 /*
7344 * Autoconf glue-sniffing
7345 */
7346
7347 static const pci_product_id_t iwm_devices[] = {
7348 PCI_PRODUCT_INTEL_WIFI_LINK_7260_1,
7349 PCI_PRODUCT_INTEL_WIFI_LINK_7260_2,
7350 PCI_PRODUCT_INTEL_WIFI_LINK_3160_1,
7351 PCI_PRODUCT_INTEL_WIFI_LINK_3160_2,
7352 PCI_PRODUCT_INTEL_WIFI_LINK_7265_1,
7353 PCI_PRODUCT_INTEL_WIFI_LINK_7265_2,
7354 PCI_PRODUCT_INTEL_WIFI_LINK_3165_1,
7355 PCI_PRODUCT_INTEL_WIFI_LINK_3165_2,
7356 PCI_PRODUCT_INTEL_WIFI_LINK_8260_1,
7357 PCI_PRODUCT_INTEL_WIFI_LINK_8260_2,
7358 PCI_PRODUCT_INTEL_WIFI_LINK_4165_1,
7359 PCI_PRODUCT_INTEL_WIFI_LINK_4165_2,
7360 };
7361
7362 static int
7363 iwm_match(device_t parent, cfdata_t match __unused, void *aux)
7364 {
7365 struct pci_attach_args *pa = aux;
7366
7367 if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_INTEL)
7368 return 0;
7369
7370 for (size_t i = 0; i < __arraycount(iwm_devices); i++)
7371 if (PCI_PRODUCT(pa->pa_id) == iwm_devices[i])
7372 return 1;
7373
7374 return 0;
7375 }
7376
7377 static int
7378 iwm_preinit(struct iwm_softc *sc)
7379 {
7380 struct ieee80211com *ic = &sc->sc_ic;
7381 int err;
7382
7383 if (ISSET(sc->sc_flags, IWM_FLAG_ATTACHED))
7384 return 0;
7385
7386 err = iwm_start_hw(sc);
7387 if (err) {
7388 aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
7389 return err;
7390 }
7391
7392 err = iwm_run_init_mvm_ucode(sc, 1);
7393 iwm_stop_device(sc);
7394 if (err)
7395 return err;
7396
7397 sc->sc_flags |= IWM_FLAG_ATTACHED;
7398
7399 aprint_normal_dev(sc->sc_dev, "hw rev 0x%x, fw ver %s, address %s\n",
7400 sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK, sc->sc_fwver,
7401 ether_sprintf(sc->sc_nvm.hw_addr));
7402
7403 #ifndef IEEE80211_NO_HT
7404 if (sc->sc_nvm.sku_cap_11n_enable)
7405 iwm_setup_ht_rates(sc);
7406 #endif
7407
7408 /* not all hardware can do 5GHz band */
7409 if (sc->sc_nvm.sku_cap_band_52GHz_enable)
7410 ic->ic_sup_rates[IEEE80211_MODE_11A] = ieee80211_std_rateset_11a;
7411
7412 ieee80211_ifattach(ic);
7413
7414 ic->ic_node_alloc = iwm_node_alloc;
7415
7416 /* Override 802.11 state transition machine. */
7417 sc->sc_newstate = ic->ic_newstate;
7418 ic->ic_newstate = iwm_newstate;
7419 ieee80211_media_init(ic, iwm_media_change, ieee80211_media_status);
7420 ieee80211_announce(ic);
7421
7422 iwm_radiotap_attach(sc);
7423
7424 return 0;
7425 }
7426
7427 static void
7428 iwm_attach_hook(device_t dev)
7429 {
7430 struct iwm_softc *sc = device_private(dev);
7431
7432 iwm_preinit(sc);
7433 }
7434
7435 static void
7436 iwm_attach(device_t parent, device_t self, void *aux)
7437 {
7438 struct iwm_softc *sc = device_private(self);
7439 struct pci_attach_args *pa = aux;
7440 struct ieee80211com *ic = &sc->sc_ic;
7441 struct ifnet *ifp = &sc->sc_ec.ec_if;
7442 pcireg_t reg, memtype;
7443 char intrbuf[PCI_INTRSTR_LEN];
7444 const char *intrstr;
7445 int err;
7446 int txq_i;
7447 const struct sysctlnode *node;
7448
7449 sc->sc_dev = self;
7450 sc->sc_pct = pa->pa_pc;
7451 sc->sc_pcitag = pa->pa_tag;
7452 sc->sc_dmat = pa->pa_dmat;
7453 sc->sc_pciid = pa->pa_id;
7454
7455 pci_aprint_devinfo(pa, NULL);
7456
	if (workqueue_create(&sc->sc_nswq, "iwmns",
	    iwm_newstate_cb, sc, PRI_NONE, IPL_NET, 0))
		panic("%s: could not create workqueue: newstate",
		    device_xname(self));
	sc->sc_soft_ih = softint_establish(SOFTINT_NET, iwm_softintr, sc);
	if (sc->sc_soft_ih == NULL)
		panic("%s: could not establish softint", device_xname(self));

	/*
	 * Get the offset of the PCI Express Capability Structure in PCI
	 * Configuration Space.
	 */
	err = pci_get_capability(sc->sc_pct, sc->sc_pcitag,
	    PCI_CAP_PCIEXPRESS, &sc->sc_cap_off, NULL);
	if (err == 0) {
		aprint_error_dev(self,
		    "PCIe capability structure not found!\n");
		return;
	}

	/* Clear device-specific "PCI retry timeout" register (41h). */
	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);

	/* Enable bus-mastering. */
	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
	reg |= PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, reg);

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START);
	err = pci_mapreg_map(pa, PCI_MAPREG_START, memtype, 0,
	    &sc->sc_st, &sc->sc_sh, NULL, &sc->sc_sz);
	if (err) {
		aprint_error_dev(self, "can't map mem space\n");
		return;
	}

	/* Install interrupt handler. */
	err = pci_intr_alloc(pa, &sc->sc_pihp, NULL, 0);
	if (err) {
		aprint_error_dev(self, "can't allocate interrupt\n");
		return;
	}
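	/*
	 * For legacy INTx the command register's Interrupt Disable bit
	 * must be clear so the interrupt line can assert; with MSI the
	 * legacy pin is unused, so keep it masked.
	 */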
	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
	if (pci_intr_type(sc->sc_pct, sc->sc_pihp[0]) == PCI_INTR_TYPE_INTX)
		CLR(reg, PCI_COMMAND_INTERRUPT_DISABLE);
	else
		SET(reg, PCI_COMMAND_INTERRUPT_DISABLE);
	pci_conf_write(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, reg);
	intrstr = pci_intr_string(sc->sc_pct, sc->sc_pihp[0], intrbuf,
	    sizeof(intrbuf));
	sc->sc_ih = pci_intr_establish_xname(sc->sc_pct, sc->sc_pihp[0],
	    IPL_NET, iwm_intr, sc, device_xname(self));
	if (sc->sc_ih == NULL) {
		aprint_error_dev(self, "can't establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(self, "interrupting at %s\n", intrstr);

	sc->sc_wantresp = IWM_CMD_RESP_IDLE;

	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
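	/*
	 * Select the per-device configuration: firmware image name,
	 * device family, firmware DMA segment size, and hardware
	 * workaround flags.
	 */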
	switch (PCI_PRODUCT(sc->sc_pciid)) {
	case PCI_PRODUCT_INTEL_WIFI_LINK_3160_1:
	case PCI_PRODUCT_INTEL_WIFI_LINK_3160_2:
		sc->sc_fwname = "iwlwifi-3160-16.ucode";
		sc->host_interrupt_operation_mode = 1;
		sc->apmg_wake_up_wa = 1;
		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
		break;
	case PCI_PRODUCT_INTEL_WIFI_LINK_3165_1:
	case PCI_PRODUCT_INTEL_WIFI_LINK_3165_2:
		sc->sc_fwname = "iwlwifi-7265D-17.ucode";
		sc->host_interrupt_operation_mode = 0;
		sc->apmg_wake_up_wa = 1;
		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
		break;
	case PCI_PRODUCT_INTEL_WIFI_LINK_7260_1:
	case PCI_PRODUCT_INTEL_WIFI_LINK_7260_2:
		sc->sc_fwname = "iwlwifi-7260-16.ucode";
		sc->host_interrupt_operation_mode = 1;
		sc->apmg_wake_up_wa = 1;
		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
		break;
	case PCI_PRODUCT_INTEL_WIFI_LINK_7265_1:
	case PCI_PRODUCT_INTEL_WIFI_LINK_7265_2:
		sc->sc_fwname = (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) ==
		    IWM_CSR_HW_REV_TYPE_7265D ?
		    "iwlwifi-7265D-17.ucode" : "iwlwifi-7265-16.ucode";
		sc->host_interrupt_operation_mode = 0;
		sc->apmg_wake_up_wa = 1;
		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
		break;
	case PCI_PRODUCT_INTEL_WIFI_LINK_8260_1:
	case PCI_PRODUCT_INTEL_WIFI_LINK_8260_2:
	case PCI_PRODUCT_INTEL_WIFI_LINK_4165_1:
	case PCI_PRODUCT_INTEL_WIFI_LINK_4165_2:
		sc->sc_fwname = "iwlwifi-8000C-16.ucode";
		sc->host_interrupt_operation_mode = 0;
		sc->apmg_wake_up_wa = 0;
		sc->sc_device_family = IWM_DEVICE_FAMILY_8000;
		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
		break;
	default:
		aprint_error_dev(self, "unknown product %#x\n",
		    PCI_PRODUCT(sc->sc_pciid));
		return;
	}
	DPRINTF(("%s: firmware=%s\n", DEVNAME(sc), sc->sc_fwname));

	/*
	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV
	 * has changed, and the revision step now also includes bits 0-1
	 * (there is no more "dash" value).  To keep hw_rev backwards
	 * compatible, we store it in the old format.
	 */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
		    (IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);

	if (iwm_prepare_card_hw(sc) != 0) {
		aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
		return;
	}

	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
		uint32_t hw_step;

		/*
		 * In order to recognize C step the driver should read the
		 * chip version id located at the AUX bus MISC address.
		 */
		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
		    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
		DELAY(2);

		err = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
		    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
		    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
		    25000);
		if (!err) {
			aprint_error_dev(sc->sc_dev,
			    "failed to wake up the nic\n");
			return;
		}

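		/*
		 * Reading the hardware step over the AUX bus requires
		 * exclusive access, hence the NIC lock.  A step value
		 * of 0x3 identifies C-step silicon.
		 */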
		if (iwm_nic_lock(sc)) {
			hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
			hw_step |= IWM_ENABLE_WFPM;
			iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
			hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
			hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
			if (hw_step == 0x3)
				sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
				    (IWM_SILICON_C_STEP << 2);
			iwm_nic_unlock(sc);
		} else {
			aprint_error_dev(sc->sc_dev,
			    "failed to lock the nic\n");
			return;
		}
	}

	/*
	 * Allocate DMA memory for firmware transfers.
	 * Must be aligned on a 16-byte boundary.
	 */
	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma, sc->sc_fwdmasegsz,
	    16);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate memory for firmware\n");
		return;
	}

	/* Allocate "Keep Warm" page, used internally by the card. */
	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate keep warm page\n");
		goto fail1;
	}

	/* Allocate interrupt cause table (ICT). */
	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma, IWM_ICT_SIZE,
	    1 << IWM_ICT_PADDR_SHIFT);
	if (err) {
		aprint_error_dev(sc->sc_dev, "could not allocate ICT table\n");
		goto fail2;
	}

	/* TX scheduler rings must be aligned on a 1KB boundary. */
	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
	    __arraycount(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate TX scheduler rings\n");
		goto fail3;
	}

	for (txq_i = 0; txq_i < __arraycount(sc->txq); txq_i++) {
		err = iwm_alloc_tx_ring(sc, &sc->txq[txq_i], txq_i);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate TX ring %d\n", txq_i);
			goto fail4;
		}
	}

	err = iwm_alloc_rx_ring(sc, &sc->rxq);
	if (err) {
		aprint_error_dev(sc->sc_dev, "could not allocate RX ring\n");
		goto fail4;
	}

	/* Clear pending interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);

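	/*
	 * Create the per-controller sysctl subtree under the global
	 * hw.iwm node.  Its fw_loaded knob can be cleared to force a
	 * firmware reload on the next init; see
	 * iwm_sysctl_fw_loaded_handler() below.
	 */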
	if ((err = sysctl_createv(&sc->sc_clog, 0, NULL, &node,
	    0, CTLTYPE_NODE, device_xname(sc->sc_dev),
	    SYSCTL_DESCR("iwm per-controller controls"),
	    NULL, 0, NULL, 0,
	    CTL_HW, iwm_sysctl_root_num, CTL_CREATE,
	    CTL_EOL)) != 0) {
		aprint_normal_dev(sc->sc_dev,
		    "couldn't create iwm per-controller sysctl node\n");
	}
	if (err == 0) {
		int iwm_nodenum = node->sysctl_num;

		/* Reload firmware sysctl node */
		if ((err = sysctl_createv(&sc->sc_clog, 0, NULL, &node,
		    CTLFLAG_READWRITE, CTLTYPE_INT, "fw_loaded",
		    SYSCTL_DESCR("Reload firmware"),
		    iwm_sysctl_fw_loaded_handler, 0, (void *)sc, 0,
		    CTL_HW, iwm_sysctl_root_num, iwm_nodenum, CTL_CREATE,
		    CTL_EOL)) != 0) {
			aprint_normal_dev(sc->sc_dev,
			    "couldn't create fw_loaded sysctl node\n");
		}
	}

	/*
	 * Attach interface.
	 */
	ic->ic_ifp = ifp;
	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
	ic->ic_state = IEEE80211_S_INIT;

	/* Set device capabilities. */
	ic->ic_caps =
	    IEEE80211_C_WEP |		/* WEP */
	    IEEE80211_C_WPA |		/* 802.11i */
#ifdef notyet
	    IEEE80211_C_SCANALL |	/* device scans all channels at once */
	    IEEE80211_C_SCANALLBAND |	/* device scans all bands at once */
#endif
	    IEEE80211_C_SHSLOT |	/* short slot time supported */
	    IEEE80211_C_SHPREAMBLE;	/* short preamble supported */

#ifndef IEEE80211_NO_HT
	ic->ic_htcaps = IEEE80211_HTCAP_SGI20;
	ic->ic_htxcaps = 0;
	ic->ic_txbfcaps = 0;
	ic->ic_aselcaps = 0;
	ic->ic_ampdu_params = (IEEE80211_AMPDU_PARAM_SS_4 | 0x3 /* 64k */);
#endif

	/* all hardware can do 2.4GHz band */
	ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b;
	ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g;

	for (int i = 0; i < __arraycount(sc->sc_phyctxt); i++) {
		sc->sc_phyctxt[i].id = i;
	}

	sc->sc_amrr.amrr_min_success_threshold = 1;
	sc->sc_amrr.amrr_max_success_threshold = 15;

	/* IBSS channel undefined for now. */
	ic->ic_ibss_chan = &ic->ic_channels[1];

#if 0
	ic->ic_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
#endif

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = iwm_init;
	ifp->if_stop = iwm_stop;
	ifp->if_ioctl = iwm_ioctl;
	ifp->if_start = iwm_start;
	ifp->if_watchdog = iwm_watchdog;
	IFQ_SET_READY(&ifp->if_snd);
	memcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);

	if_initialize(ifp);
#if 0
	ieee80211_ifattach(ic);
#else
	ether_ifattach(ifp, ic->ic_myaddr);	/* XXX */
#endif
	/* Use common softint-based if_input */
	ifp->if_percpuq = if_percpuq_create(ifp);
	if_register(ifp);

	callout_init(&sc->sc_calib_to, 0);
	callout_setfunc(&sc->sc_calib_to, iwm_calib_timeout, sc);
	callout_init(&sc->sc_led_blink_to, 0);
	callout_setfunc(&sc->sc_led_blink_to, iwm_led_blink_timeout, sc);
#ifndef IEEE80211_NO_HT
	if (workqueue_create(&sc->sc_setratewq, "iwmsr",
	    iwm_setrates_task, sc, PRI_NONE, IPL_NET, 0))
		panic("%s: could not create workqueue: setrates",
		    device_xname(self));
	if (workqueue_create(&sc->sc_bawq, "iwmba",
	    iwm_ba_task, sc, PRI_NONE, IPL_NET, 0))
		panic("%s: could not create workqueue: blockack",
		    device_xname(self));
	if (workqueue_create(&sc->sc_htprowq, "iwmhtpro",
	    iwm_htprot_task, sc, PRI_NONE, IPL_NET, 0))
		panic("%s: could not create workqueue: htprot",
		    device_xname(self));
#endif

	if (pmf_device_register(self, NULL, NULL))
		pmf_class_network_register(self, ifp);
	else
		aprint_error_dev(self, "couldn't establish power handler\n");

	/*
	 * We can't do normal attach before the file system is mounted
	 * because we cannot read the MAC address without loading the
	 * firmware from disk.  So we postpone until mountroot is done.
	 * Notably, this will require a full driver unload/load cycle
	 * (or reboot) in case the firmware is not present when the
	 * hook runs.
	 */
	config_mountroot(self, iwm_attach_hook);

	return;

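	/* Unwind in reverse order of allocation. */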
fail4:	while (--txq_i >= 0)
		iwm_free_tx_ring(sc, &sc->txq[txq_i]);
	iwm_free_rx_ring(sc, &sc->rxq);
	iwm_dma_contig_free(&sc->sched_dma);
fail3:	if (sc->ict_dma.vaddr != NULL)
		iwm_dma_contig_free(&sc->ict_dma);
fail2:	iwm_dma_contig_free(&sc->kw_dma);
fail1:	iwm_dma_contig_free(&sc->fw_dma);
}

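/*
 * Attach a radiotap BPF tap so monitoring tools (e.g. tcpdump) can
 * see per-frame radio metadata on DLT_IEEE802_11_RADIO.
 */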
void
iwm_radiotap_attach(struct iwm_softc *sc)
{
	struct ifnet *ifp = IC2IFP(&sc->sc_ic);

	bpf_attach2(ifp, DLT_IEEE802_11_RADIO,
	    sizeof(struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN,
	    &sc->sc_drvbpf);

	sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
	sc->sc_rxtap.wr_ihdr.it_present = htole32(IWM_RX_RADIOTAP_PRESENT);

	sc->sc_txtap_len = sizeof sc->sc_txtapu;
	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
	sc->sc_txtap.wt_ihdr.it_present = htole32(IWM_TX_RADIOTAP_PRESENT);
}

#if 0
static void
iwm_init_task(void *arg)
{
	struct iwm_softc *sc = arg;
	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
	int s;

	rw_enter_write(&sc->ioctl_rwl);
	s = splnet();

	iwm_stop(ifp, 0);
	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP)
		iwm_init(ifp);

	splx(s);
	rw_exit(&sc->ioctl_rwl);
}

static void
iwm_wakeup(struct iwm_softc *sc)
{
	pcireg_t reg;

	/* Clear device-specific "PCI retry timeout" register (41h). */
	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);

	iwm_init_task(sc);
}

static int
iwm_activate(device_t self, enum devact act)
{
	struct iwm_softc *sc = device_private(self);
	struct ifnet *ifp = IC2IFP(&sc->sc_ic);

	switch (act) {
	case DVACT_DEACTIVATE:
		if (ifp->if_flags & IFF_RUNNING)
			iwm_stop(ifp, 0);
		return 0;
	default:
		return EOPNOTSUPP;
	}
}
#endif

CFATTACH_DECL_NEW(iwm, sizeof(struct iwm_softc), iwm_match, iwm_attach,
    NULL, NULL);

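/*
 * Handler for the per-controller fw_loaded sysctl: reading it reports
 * whether firmware is currently loaded; writing 0 clears
 * IWM_FLAG_FW_LOADED so the firmware is fetched from disk again on
 * the next init.  For example (unit number will vary):
 *
 *	sysctl -w hw.iwm.iwm0.fw_loaded=0
 */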
static int
iwm_sysctl_fw_loaded_handler(SYSCTLFN_ARGS)
{
	struct sysctlnode node;
	struct iwm_softc *sc;
	int err, t;

	node = *rnode;
	sc = node.sysctl_data;
	t = ISSET(sc->sc_flags, IWM_FLAG_FW_LOADED) ? 1 : 0;
	node.sysctl_data = &t;
	err = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (err || newp == NULL)
		return err;

	if (t == 0)
		CLR(sc->sc_flags, IWM_FLAG_FW_LOADED);
	return 0;
}

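/*
 * Create the global hw.iwm sysctl node at boot; the per-controller
 * nodes created in iwm_attach() hang off it.  When built with
 * IWM_DEBUG, hw.iwm.debug toggles debugging output.
 */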
SYSCTL_SETUP(sysctl_iwm, "sysctl iwm(4) subtree setup")
{
	const struct sysctlnode *rnode;
#ifdef IWM_DEBUG
	const struct sysctlnode *cnode;
#endif /* IWM_DEBUG */
	int rc;

	if ((rc = sysctl_createv(clog, 0, NULL, &rnode,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "iwm",
	    SYSCTL_DESCR("iwm global controls"),
	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
		goto err;

	iwm_sysctl_root_num = rnode->sysctl_num;

#ifdef IWM_DEBUG
	/* control debugging printfs */
	if ((rc = sysctl_createv(clog, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT,
	    "debug", SYSCTL_DESCR("Enable debugging output"),
	    NULL, 0, &iwm_debug, 0, CTL_CREATE, CTL_EOL)) != 0)
		goto err;
#endif /* IWM_DEBUG */

	return;

 err:
	aprint_error("%s: sysctl_createv failed (rc = %d)\n", __func__, rc);
}