/*	$NetBSD: if_iwm.c,v 1.75.2.2 2017/07/25 19:43:03 snj Exp $	*/
/*	OpenBSD: if_iwm.c,v 1.148 2016/11/19 21:07:08 stsp Exp	*/
#define IEEE80211_NO_HT
/*
 * Copyright (c) 2014, 2016 genua gmbh <info@genua.de>
 * Author: Stefan Sperling <stsp@openbsd.org>
 * Copyright (c) 2014 Fixup Software Ltd.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*-
 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
 * which were used as the reference documentation for this implementation.
 *
 ***********************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_iwm.c,v 1.75.2.2 2017/07/25 19:43:03 snj Exp $");

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <sys/cpu.h>
#include <sys/bus.h>
#include <sys/workqueue.h>
#include <machine/endian.h>
#include <sys/intr.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>
#include <dev/firmload.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <netinet/in.h>
#include <netinet/ip.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_amrr.h>
#include <net80211/ieee80211_radiotap.h>

#define DEVNAME(_s)	device_xname((_s)->sc_dev)
#define IC2IFP(_ic_)	((_ic_)->ic_ifp)

#define le16_to_cpup(_a_)	(le16toh(*(const uint16_t *)(_a_)))
#define le32_to_cpup(_a_)	(le32toh(*(const uint32_t *)(_a_)))

#ifdef IWM_DEBUG
#define DPRINTF(x)	do { if (iwm_debug > 0) printf x; } while (0)
#define DPRINTFN(n, x)	do { if (iwm_debug >= (n)) printf x; } while (0)
int iwm_debug = 0;
#else
#define DPRINTF(x)	do { ; } while (0)
#define DPRINTFN(n, x)	do { ; } while (0)
#endif

#include <dev/pci/if_iwmreg.h>
#include <dev/pci/if_iwmvar.h>

static const uint8_t iwm_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};

static const uint8_t iwm_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};

#define IWM_NUM_2GHZ_CHANNELS	14

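/*
 * The rate table below follows the net80211 convention of expressing
 * rates in units of 500 kb/s (2 = 1 Mb/s, ..., 108 = 54 Mb/s).  The
 * last entry (128) has no valid legacy PLCP code and exists only to
 * carry the HT MCS 7 PLCP value.
 */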
static const struct iwm_rate {
	uint8_t rate;
	uint8_t plcp;
	uint8_t ht_plcp;
} iwm_rates[] = {
		/* Legacy */		/* HT */
	{   2,	IWM_RATE_1M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{   4,	IWM_RATE_2M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{  11,	IWM_RATE_5M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{  22,	IWM_RATE_11M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{  12,	IWM_RATE_6M_PLCP,	IWM_RATE_HT_SISO_MCS_0_PLCP },
	{  18,	IWM_RATE_9M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{  24,	IWM_RATE_12M_PLCP,	IWM_RATE_HT_SISO_MCS_1_PLCP },
	{  36,	IWM_RATE_18M_PLCP,	IWM_RATE_HT_SISO_MCS_2_PLCP },
	{  48,	IWM_RATE_24M_PLCP,	IWM_RATE_HT_SISO_MCS_3_PLCP },
	{  72,	IWM_RATE_36M_PLCP,	IWM_RATE_HT_SISO_MCS_4_PLCP },
	{  96,	IWM_RATE_48M_PLCP,	IWM_RATE_HT_SISO_MCS_5_PLCP },
	{ 108,	IWM_RATE_54M_PLCP,	IWM_RATE_HT_SISO_MCS_6_PLCP },
	{ 128,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_SISO_MCS_7_PLCP },
};
#define IWM_RIDX_CCK	0
#define IWM_RIDX_OFDM	4
#define IWM_RIDX_MAX	(__arraycount(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_)	((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_)	((_i_) >= IWM_RIDX_OFDM)

#ifndef IEEE80211_NO_HT
/* Convert an MCS index into an iwm_rates[] index. */
static const int iwm_mcs2ridx[] = {
	IWM_RATE_MCS_0_INDEX,
	IWM_RATE_MCS_1_INDEX,
	IWM_RATE_MCS_2_INDEX,
	IWM_RATE_MCS_3_INDEX,
	IWM_RATE_MCS_4_INDEX,
	IWM_RATE_MCS_5_INDEX,
	IWM_RATE_MCS_6_INDEX,
	IWM_RATE_MCS_7_INDEX,
};
#endif

struct iwm_nvm_section {
	uint16_t length;
	uint8_t *data;
};

struct iwm_newstate_state {
	struct work ns_wk;
	enum ieee80211_state ns_nstate;
	int ns_arg;
	int ns_generation;
};

static int	iwm_store_cscheme(struct iwm_softc *, uint8_t *, size_t);
static int	iwm_firmware_store_section(struct iwm_softc *,
		    enum iwm_ucode_type, uint8_t *, size_t);
static int	iwm_set_default_calib(struct iwm_softc *, const void *);
static int	iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
static uint32_t	iwm_read_prph(struct iwm_softc *, uint32_t);
static void	iwm_write_prph(struct iwm_softc *, uint32_t, uint32_t);
#ifdef IWM_DEBUG
static int	iwm_read_mem(struct iwm_softc *, uint32_t, void *, int);
#endif
static int	iwm_write_mem(struct iwm_softc *, uint32_t, const void *, int);
static int	iwm_write_mem32(struct iwm_softc *, uint32_t, uint32_t);
static int	iwm_poll_bit(struct iwm_softc *, int, uint32_t, uint32_t, int);
static int	iwm_nic_lock(struct iwm_softc *);
static void	iwm_nic_unlock(struct iwm_softc *);
static void	iwm_set_bits_mask_prph(struct iwm_softc *, uint32_t, uint32_t,
		    uint32_t);
static void	iwm_set_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
static void	iwm_clear_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
static int	iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *,
		    bus_size_t, bus_size_t);
static void	iwm_dma_contig_free(struct iwm_dma_info *);
static int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static void	iwm_disable_rx_dma(struct iwm_softc *);
static void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
		    int);
static void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
static void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
static void	iwm_enable_rfkill_int(struct iwm_softc *);
static int	iwm_check_rfkill(struct iwm_softc *);
static void	iwm_enable_interrupts(struct iwm_softc *);
static void	iwm_restore_interrupts(struct iwm_softc *);
static void	iwm_disable_interrupts(struct iwm_softc *);
static void	iwm_ict_reset(struct iwm_softc *);
static int	iwm_set_hw_ready(struct iwm_softc *);
static int	iwm_prepare_card_hw(struct iwm_softc *);
static void	iwm_apm_config(struct iwm_softc *);
static int	iwm_apm_init(struct iwm_softc *);
static void	iwm_apm_stop(struct iwm_softc *);
static int	iwm_allow_mcast(struct iwm_softc *);
static int	iwm_start_hw(struct iwm_softc *);
static void	iwm_stop_device(struct iwm_softc *);
static void	iwm_nic_config(struct iwm_softc *);
static int	iwm_nic_rx_init(struct iwm_softc *);
static int	iwm_nic_tx_init(struct iwm_softc *);
static int	iwm_nic_init(struct iwm_softc *);
static int	iwm_enable_txq(struct iwm_softc *, int, int, int);
static int	iwm_post_alive(struct iwm_softc *);
static struct iwm_phy_db_entry *
		iwm_phy_db_get_section(struct iwm_softc *,
		    enum iwm_phy_db_section_type, uint16_t);
static int	iwm_phy_db_set_section(struct iwm_softc *,
		    struct iwm_calib_res_notif_phy_db *, uint16_t);
static int	iwm_is_valid_channel(uint16_t);
static uint8_t	iwm_ch_id_to_ch_index(uint16_t);
static uint16_t	iwm_channel_id_to_papd(uint16_t);
static uint16_t	iwm_channel_id_to_txp(struct iwm_softc *, uint16_t);
static int	iwm_phy_db_get_section_data(struct iwm_softc *, uint32_t,
		    uint8_t **, uint16_t *, uint16_t);
static int	iwm_send_phy_db_cmd(struct iwm_softc *, uint16_t, uint16_t,
		    void *);
static int	iwm_phy_db_send_all_channel_groups(struct iwm_softc *,
		    enum iwm_phy_db_section_type, uint8_t);
static int	iwm_send_phy_db_data(struct iwm_softc *);
static void	iwm_te_v2_to_v1(const struct iwm_time_event_cmd_v2 *,
		    struct iwm_time_event_cmd_v1 *);
static int	iwm_send_time_event_cmd(struct iwm_softc *,
		    const struct iwm_time_event_cmd_v2 *);
static void	iwm_protect_session(struct iwm_softc *, struct iwm_node *,
		    uint32_t, uint32_t);
static int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
		    uint16_t, uint8_t *, uint16_t *);
static int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
		    uint16_t *, size_t);
static void	iwm_init_channel_map(struct iwm_softc *, const uint16_t * const,
		    const uint8_t *, size_t);
#ifndef IEEE80211_NO_HT
static void	iwm_setup_ht_rates(struct iwm_softc *);
static void	iwm_htprot_task(void *);
static void	iwm_update_htprot(struct ieee80211com *,
		    struct ieee80211_node *);
static int	iwm_ampdu_rx_start(struct ieee80211com *,
		    struct ieee80211_node *, uint8_t);
static void	iwm_ampdu_rx_stop(struct ieee80211com *,
		    struct ieee80211_node *, uint8_t);
static void	iwm_sta_rx_agg(struct iwm_softc *, struct ieee80211_node *,
		    uint8_t, uint16_t, int);
#ifdef notyet
static int	iwm_ampdu_tx_start(struct ieee80211com *,
		    struct ieee80211_node *, uint8_t);
static void	iwm_ampdu_tx_stop(struct ieee80211com *,
		    struct ieee80211_node *, uint8_t);
#endif
static void	iwm_ba_task(void *);
#endif

static int	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
		    const uint16_t *, const uint16_t *, const uint16_t *,
		    const uint16_t *, const uint16_t *);
static void	iwm_set_hw_address_8000(struct iwm_softc *,
		    struct iwm_nvm_data *, const uint16_t *, const uint16_t *);
static int	iwm_parse_nvm_sections(struct iwm_softc *,
		    struct iwm_nvm_section *);
static int	iwm_nvm_init(struct iwm_softc *);
static int	iwm_firmware_load_sect(struct iwm_softc *, uint32_t,
		    const uint8_t *, uint32_t);
static int	iwm_firmware_load_chunk(struct iwm_softc *, uint32_t,
		    const uint8_t *, uint32_t);
static int	iwm_load_cpu_sections_7000(struct iwm_softc *,
		    struct iwm_fw_sects *, int, int *);
static int	iwm_load_firmware_7000(struct iwm_softc *, enum iwm_ucode_type);
static int	iwm_load_cpu_sections_8000(struct iwm_softc *,
		    struct iwm_fw_sects *, int, int *);
static int	iwm_load_firmware_8000(struct iwm_softc *, enum iwm_ucode_type);
static int	iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
static int	iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
static int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
static int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
static int	iwm_load_ucode_wait_alive(struct iwm_softc *,
		    enum iwm_ucode_type);
static int	iwm_run_init_mvm_ucode(struct iwm_softc *, int);
static int	iwm_rx_addbuf(struct iwm_softc *, int, int);
static int	iwm_calc_rssi(struct iwm_softc *, struct iwm_rx_phy_info *);
static int	iwm_get_signal_strength(struct iwm_softc *,
		    struct iwm_rx_phy_info *);
static void	iwm_rx_rx_phy_cmd(struct iwm_softc *,
		    struct iwm_rx_packet *, struct iwm_rx_data *);
static int	iwm_get_noise(const struct iwm_statistics_rx_non_phy *);
static void	iwm_rx_rx_mpdu(struct iwm_softc *, struct iwm_rx_packet *,
		    struct iwm_rx_data *);
static void	iwm_rx_tx_cmd_single(struct iwm_softc *,
		    struct iwm_rx_packet *, struct iwm_node *);
static void	iwm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
		    struct iwm_rx_data *);
static int	iwm_binding_cmd(struct iwm_softc *, struct iwm_node *,
		    uint32_t);
#if 0
static int	iwm_binding_update(struct iwm_softc *, struct iwm_node *, int);
static int	iwm_binding_add_vif(struct iwm_softc *, struct iwm_node *);
#endif
static void	iwm_phy_ctxt_cmd_hdr(struct iwm_softc *, struct iwm_phy_ctxt *,
		    struct iwm_phy_context_cmd *, uint32_t, uint32_t);
static void	iwm_phy_ctxt_cmd_data(struct iwm_softc *,
		    struct iwm_phy_context_cmd *, struct ieee80211_channel *,
		    uint8_t, uint8_t);
static int	iwm_phy_ctxt_cmd(struct iwm_softc *, struct iwm_phy_ctxt *,
		    uint8_t, uint8_t, uint32_t, uint32_t);
static int	iwm_send_cmd(struct iwm_softc *, struct iwm_host_cmd *);
static int	iwm_send_cmd_pdu(struct iwm_softc *, uint32_t, uint32_t,
		    uint16_t, const void *);
static int	iwm_send_cmd_status(struct iwm_softc *, struct iwm_host_cmd *,
		    uint32_t *);
static int	iwm_send_cmd_pdu_status(struct iwm_softc *, uint32_t, uint16_t,
		    const void *, uint32_t *);
static void	iwm_free_resp(struct iwm_softc *, struct iwm_host_cmd *);
static void	iwm_cmd_done(struct iwm_softc *, int, int);
#if 0
static void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
		    uint16_t);
#endif
static const struct iwm_rate *
		iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
		    struct ieee80211_frame *, struct iwm_tx_cmd *);
static int	iwm_tx(struct iwm_softc *, struct mbuf *,
		    struct ieee80211_node *, int);
static void	iwm_led_enable(struct iwm_softc *);
static void	iwm_led_disable(struct iwm_softc *);
static int	iwm_led_is_enabled(struct iwm_softc *);
static void	iwm_led_blink_timeout(void *);
static void	iwm_led_blink_start(struct iwm_softc *);
static void	iwm_led_blink_stop(struct iwm_softc *);
static int	iwm_beacon_filter_send_cmd(struct iwm_softc *,
		    struct iwm_beacon_filter_cmd *);
static void	iwm_beacon_filter_set_cqm_params(struct iwm_softc *,
		    struct iwm_node *, struct iwm_beacon_filter_cmd *);
static int	iwm_update_beacon_abort(struct iwm_softc *, struct iwm_node *,
		    int);
static void	iwm_power_build_cmd(struct iwm_softc *, struct iwm_node *,
		    struct iwm_mac_power_cmd *);
static int	iwm_power_mac_update_mode(struct iwm_softc *,
		    struct iwm_node *);
static int	iwm_power_update_device(struct iwm_softc *);
#ifdef notyet
static int	iwm_enable_beacon_filter(struct iwm_softc *, struct iwm_node *);
#endif
static int	iwm_disable_beacon_filter(struct iwm_softc *);
static int	iwm_add_sta_cmd(struct iwm_softc *, struct iwm_node *, int);
static int	iwm_add_aux_sta(struct iwm_softc *);
static uint16_t	iwm_scan_rx_chain(struct iwm_softc *);
static uint32_t	iwm_scan_rate_n_flags(struct iwm_softc *, int, int);
#ifdef notyet
static uint16_t	iwm_get_active_dwell(struct iwm_softc *, int, int);
static uint16_t	iwm_get_passive_dwell(struct iwm_softc *, int);
#endif
static uint8_t	iwm_lmac_scan_fill_channels(struct iwm_softc *,
		    struct iwm_scan_channel_cfg_lmac *, int);
static int	iwm_fill_probe_req(struct iwm_softc *,
		    struct iwm_scan_probe_req *);
static int	iwm_lmac_scan(struct iwm_softc *);
static int	iwm_config_umac_scan(struct iwm_softc *);
static int	iwm_umac_scan(struct iwm_softc *);
static uint8_t	iwm_ridx2rate(struct ieee80211_rateset *, int);
static void	iwm_ack_rates(struct iwm_softc *, struct iwm_node *, int *,
		    int *);
static void	iwm_mac_ctxt_cmd_common(struct iwm_softc *, struct iwm_node *,
		    struct iwm_mac_ctx_cmd *, uint32_t, int);
static void	iwm_mac_ctxt_cmd_fill_sta(struct iwm_softc *, struct iwm_node *,
		    struct iwm_mac_data_sta *, int);
static int	iwm_mac_ctxt_cmd(struct iwm_softc *, struct iwm_node *,
		    uint32_t, int);
static int	iwm_update_quotas(struct iwm_softc *, struct iwm_node *);
static int	iwm_auth(struct iwm_softc *);
static int	iwm_assoc(struct iwm_softc *);
static void	iwm_calib_timeout(void *);
#ifndef IEEE80211_NO_HT
static void	iwm_setrates_task(void *);
static int	iwm_setrates(struct iwm_node *);
#endif
static int	iwm_media_change(struct ifnet *);
static int	iwm_do_newstate(struct ieee80211com *, enum ieee80211_state,
		    int);
static void	iwm_newstate_cb(struct work *, void *);
static int	iwm_newstate(struct ieee80211com *, enum ieee80211_state, int);
static void	iwm_endscan(struct iwm_softc *);
static void	iwm_fill_sf_command(struct iwm_softc *, struct iwm_sf_cfg_cmd *,
		    struct ieee80211_node *);
static int	iwm_sf_config(struct iwm_softc *, int);
static int	iwm_send_bt_init_conf(struct iwm_softc *);
static int	iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
static void	iwm_tt_tx_backoff(struct iwm_softc *, uint32_t);
static int	iwm_init_hw(struct iwm_softc *);
static int	iwm_init(struct ifnet *);
static void	iwm_start(struct ifnet *);
static void	iwm_stop(struct ifnet *, int);
static void	iwm_watchdog(struct ifnet *);
static int	iwm_ioctl(struct ifnet *, u_long, void *);
#ifdef IWM_DEBUG
static const char *iwm_desc_lookup(uint32_t);
static void	iwm_nic_error(struct iwm_softc *);
static void	iwm_nic_umac_error(struct iwm_softc *);
#endif
static void	iwm_notif_intr(struct iwm_softc *);
static int	iwm_intr(void *);
static void	iwm_softintr(void *);
static int	iwm_preinit(struct iwm_softc *);
static void	iwm_attach_hook(device_t);
static void	iwm_attach(device_t, device_t, void *);
#if 0
static void	iwm_init_task(void *);
static int	iwm_activate(device_t, enum devact);
static void	iwm_wakeup(struct iwm_softc *);
#endif
static void	iwm_radiotap_attach(struct iwm_softc *);
static int	iwm_sysctl_fw_loaded_handler(SYSCTLFN_PROTO);

/* XXX needed by the scan code */
static u_int8_t *ieee80211_add_ssid(u_int8_t *, const u_int8_t *, u_int);
static u_int8_t *ieee80211_add_rates(u_int8_t *,
		    const struct ieee80211_rateset *);
static u_int8_t *ieee80211_add_xrates(u_int8_t *,
		    const struct ieee80211_rateset *);

static int iwm_sysctl_root_num;
static int iwm_lar_disable;

#ifndef IWM_DEFAULT_MCC
#define IWM_DEFAULT_MCC "ZZ"
#endif
static char iwm_default_mcc[3] = IWM_DEFAULT_MCC;

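/*
 * Load the firmware image via firmload(9) and keep the raw image in
 * kernel memory.  IWM_FLAG_FW_LOADED marks the cached copy as valid so
 * subsequent (re-)initializations can skip the filesystem access.
 */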
static int
iwm_firmload(struct iwm_softc *sc)
{
	struct iwm_fw_info *fw = &sc->sc_fw;
	firmware_handle_t fwh;
	int err;

	if (ISSET(sc->sc_flags, IWM_FLAG_FW_LOADED))
		return 0;

	/* Open firmware image. */
	err = firmware_open("if_iwm", sc->sc_fwname, &fwh);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not get firmware handle %s\n", sc->sc_fwname);
		return err;
	}

	if (fw->fw_rawdata != NULL && fw->fw_rawsize > 0) {
		kmem_free(fw->fw_rawdata, fw->fw_rawsize);
		fw->fw_rawdata = NULL;
	}

	fw->fw_rawsize = firmware_get_size(fwh);
	/*
	 * Well, this is how the Linux driver checks it ....
	 */
	if (fw->fw_rawsize < sizeof(uint32_t)) {
		aprint_error_dev(sc->sc_dev,
		    "firmware too short: %zu bytes\n", fw->fw_rawsize);
		err = EINVAL;
		goto out;
	}

	/* Read the firmware. */
	fw->fw_rawdata = kmem_alloc(fw->fw_rawsize, KM_SLEEP);
	if (fw->fw_rawdata == NULL) {
		aprint_error_dev(sc->sc_dev,
		    "not enough memory to store firmware %s\n", sc->sc_fwname);
		err = ENOMEM;
		goto out;
	}
	err = firmware_read(fwh, 0, fw->fw_rawdata, fw->fw_rawsize);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not read firmware %s\n", sc->sc_fwname);
		goto out;
	}

	SET(sc->sc_flags, IWM_FLAG_FW_LOADED);
out:
	/* caller will release memory, if necessary */

	firmware_close(fwh);
	return err;
}

/*
 * XXX code from OpenBSD src/sys/net80211/ieee80211_output.c
 * Copyright (c) 2001 Atsushi Onoe
 * Copyright (c) 2002, 2003 Sam Leffler, Errno Consulting
 * Copyright (c) 2007-2009 Damien Bergamini
 * All rights reserved.
 */

/*
 * Add an SSID element to a frame (see 7.3.2.1).
 */
static u_int8_t *
ieee80211_add_ssid(u_int8_t *frm, const u_int8_t *ssid, u_int len)
{
	*frm++ = IEEE80211_ELEMID_SSID;
	*frm++ = len;
	memcpy(frm, ssid, len);
	return frm + len;
}

/*
 * Add a supported rates element to a frame (see 7.3.2.2).
 */
static u_int8_t *
ieee80211_add_rates(u_int8_t *frm, const struct ieee80211_rateset *rs)
{
	int nrates;

	*frm++ = IEEE80211_ELEMID_RATES;
	nrates = min(rs->rs_nrates, IEEE80211_RATE_SIZE);
	*frm++ = nrates;
	memcpy(frm, rs->rs_rates, nrates);
	return frm + nrates;
}

/*
 * Add an extended supported rates element to a frame (see 7.3.2.14).
 */
static u_int8_t *
ieee80211_add_xrates(u_int8_t *frm, const struct ieee80211_rateset *rs)
{
	int nrates;

	KASSERT(rs->rs_nrates > IEEE80211_RATE_SIZE);

	*frm++ = IEEE80211_ELEMID_XRATES;
	nrates = rs->rs_nrates - IEEE80211_RATE_SIZE;
	*frm++ = nrates;
	memcpy(frm, rs->rs_rates + IEEE80211_RATE_SIZE, nrates);
	return frm + nrates;
}

/*
 * Just maintaining the status quo: patch up ic_curchan using the channel
 * recorded in the most recent RX PHY info, since beacons and probe
 * responses may be received on a channel other than the one we are
 * currently tuned to (e.g. while scanning).
 */
static void
iwm_fix_channel(struct iwm_softc *sc, struct mbuf *m)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_frame *wh;
	uint8_t subtype;

	wh = mtod(m, struct ieee80211_frame *);

	if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_MGT)
		return;

	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

	if (subtype != IEEE80211_FC0_SUBTYPE_BEACON &&
	    subtype != IEEE80211_FC0_SUBTYPE_PROBE_RESP)
		return;

	int chan = le32toh(sc->sc_last_phy_info.channel);
	if (chan < __arraycount(ic->ic_channels))
		ic->ic_curchan = &ic->ic_channels[chan];
}

static int
iwm_store_cscheme(struct iwm_softc *sc, uint8_t *data, size_t dlen)
{
	struct iwm_fw_cscheme_list *l = (struct iwm_fw_cscheme_list *)data;

	if (dlen < sizeof(*l) ||
	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
		return EINVAL;

	/* we don't actually store anything for now, always use s/w crypto */

	return 0;
}

static int
iwm_firmware_store_section(struct iwm_softc *sc, enum iwm_ucode_type type,
    uint8_t *data, size_t dlen)
{
	struct iwm_fw_sects *fws;
	struct iwm_fw_onesect *fwone;

	if (type >= IWM_UCODE_TYPE_MAX)
		return EINVAL;
	if (dlen < sizeof(uint32_t))
		return EINVAL;

	fws = &sc->sc_fw.fw_sects[type];
	if (fws->fw_count >= IWM_UCODE_SECT_MAX)
		return EINVAL;

	fwone = &fws->fw_sect[fws->fw_count];

	/* first 32bit are device load offset */
	memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));

	/* rest is data */
	fwone->fws_data = data + sizeof(uint32_t);
	fwone->fws_len = dlen - sizeof(uint32_t);

	/* for freeing the buffer during driver unload */
	fwone->fws_alloc = data;
	fwone->fws_allocsize = dlen;

	fws->fw_count++;
	fws->fw_totlen += fwone->fws_len;

	return 0;
}

struct iwm_tlv_calib_data {
	uint32_t ucode_type;
	struct iwm_tlv_calib_ctrl calib;
} __packed;

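/*
 * A IWM_UCODE_TLV_DEF_CALIB record names, per ucode image type, which
 * calibration flows and events to trigger; the two trigger masks are
 * saved here for later use when the PHY configuration command is sent.
 */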
static int
iwm_set_default_calib(struct iwm_softc *sc, const void *data)
{
	const struct iwm_tlv_calib_data *def_calib = data;
	uint32_t ucode_type = le32toh(def_calib->ucode_type);

	if (ucode_type >= IWM_UCODE_TYPE_MAX) {
		DPRINTF(("%s: Wrong ucode_type %u for default calibration.\n",
		    DEVNAME(sc), ucode_type));
		return EINVAL;
	}

	sc->sc_default_calib[ucode_type].flow_trigger =
	    def_calib->calib.flow_trigger;
	sc->sc_default_calib[ucode_type].event_trigger =
	    def_calib->calib.event_trigger;

	return 0;
}

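/*
 * Parse the firmware image: a TLV-style ucode header (a leading zero
 * word to distinguish it from the old format, a magic value, and
 * version information), followed by a sequence of TLV records padded
 * to 4-byte boundaries.  Firmware sections are collected per ucode
 * image type so they can be loaded onto the device later.
 */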
static int
iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
{
	struct iwm_fw_info *fw = &sc->sc_fw;
	struct iwm_tlv_ucode_header *uhdr;
	struct iwm_ucode_tlv tlv;
	enum iwm_ucode_tlv_type tlv_type;
	uint8_t *data;
	int err, status;
	size_t len;

	if (ucode_type != IWM_UCODE_TYPE_INIT &&
	    fw->fw_status == IWM_FW_STATUS_DONE)
		return 0;

	if (fw->fw_status == IWM_FW_STATUS_NONE) {
		fw->fw_status = IWM_FW_STATUS_INPROGRESS;
	} else {
		while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
			tsleep(&sc->sc_fw, 0, "iwmfwp", 0);
	}
	status = fw->fw_status;

	if (status == IWM_FW_STATUS_DONE)
		return 0;

	err = iwm_firmload(sc);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not read firmware %s (error %d)\n",
		    sc->sc_fwname, err);
		goto out;
	}

	sc->sc_capaflags = 0;
	sc->sc_capa_n_scan_channels = IWM_MAX_NUM_SCAN_CHANNELS;
	memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
	memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));

	uhdr = (void *)fw->fw_rawdata;
	if (*(uint32_t *)fw->fw_rawdata != 0
	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
		aprint_error_dev(sc->sc_dev, "invalid firmware %s\n",
		    sc->sc_fwname);
		err = EINVAL;
		goto out;
	}

	snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%d.%d (API ver %d)",
	    IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
	    IWM_UCODE_MINOR(le32toh(uhdr->ver)),
	    IWM_UCODE_API(le32toh(uhdr->ver)));
	data = uhdr->data;
	len = fw->fw_rawsize - sizeof(*uhdr);

	while (len >= sizeof(tlv)) {
		size_t tlv_len;
		void *tlv_data;

		memcpy(&tlv, data, sizeof(tlv));
		tlv_len = le32toh(tlv.length);
		tlv_type = le32toh(tlv.type);

		len -= sizeof(tlv);
		data += sizeof(tlv);
		tlv_data = data;

		if (len < tlv_len) {
			aprint_error_dev(sc->sc_dev,
			    "firmware too short: %zu bytes\n", len);
			err = EINVAL;
			goto parse_out;
		}

		switch (tlv_type) {
		case IWM_UCODE_TLV_PROBE_MAX_LEN:
			if (tlv_len < sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capa_max_probe_len
			    = le32toh(*(uint32_t *)tlv_data);
			/* limit it to something sensible */
			if (sc->sc_capa_max_probe_len >
			    IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
				err = EINVAL;
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_PAN:
			if (tlv_len) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
			break;
		case IWM_UCODE_TLV_FLAGS:
			if (tlv_len < sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			if (tlv_len % sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			/*
			 * Apparently there can be many flags, but the Linux
			 * driver parses only the first one, and so do we.
			 *
			 * XXX: why does this override IWM_UCODE_TLV_PAN?
			 * Intentional or a bug?  Observations from
			 * current firmware file:
			 *  1) TLV_PAN is parsed first
			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
			 * ==> this resets TLV_PAN to itself... hnnnk
			 */
			sc->sc_capaflags = le32toh(*(uint32_t *)tlv_data);
			break;
		case IWM_UCODE_TLV_CSCHEME:
			err = iwm_store_cscheme(sc, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_NUM_OF_CPU: {
			uint32_t num_cpu;
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			num_cpu = le32toh(*(uint32_t *)tlv_data);
			if (num_cpu == 2) {
				fw->fw_sects[IWM_UCODE_TYPE_REGULAR].is_dual_cpus =
				    true;
				fw->fw_sects[IWM_UCODE_TYPE_INIT].is_dual_cpus =
				    true;
				fw->fw_sects[IWM_UCODE_TYPE_WOW].is_dual_cpus =
				    true;
			} else if (num_cpu < 1 || num_cpu > 2) {
				err = EINVAL;
				goto parse_out;
			}
			break;
		}
		case IWM_UCODE_TLV_SEC_RT:
			err = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_SEC_INIT:
			err = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_INIT, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_SEC_WOWLAN:
			err = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_WOW, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_DEF_CALIB:
			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
				err = EINVAL;
				goto parse_out;
			}
			err = iwm_set_default_calib(sc, tlv_data);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_PHY_SKU:
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_fw_phy_config = le32toh(*(uint32_t *)tlv_data);
			break;

		case IWM_UCODE_TLV_API_CHANGES_SET: {
			struct iwm_ucode_api *api;
			uint32_t idx, bits;
			int i;
			if (tlv_len != sizeof(*api)) {
				err = EINVAL;
				goto parse_out;
			}
			api = (struct iwm_ucode_api *)tlv_data;
			idx = le32toh(api->api_index);
			bits = le32toh(api->api_flags);
			if (idx >= howmany(IWM_NUM_UCODE_TLV_API, 32)) {
				err = EINVAL;
				goto parse_out;
			}
			for (i = 0; i < 32; i++) {
				if (!ISSET(bits, __BIT(i)))
					continue;
				setbit(sc->sc_ucode_api, i + (32 * idx));
			}
			break;
		}

		case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
			struct iwm_ucode_capa *capa;
			uint32_t idx, bits;
			int i;
			if (tlv_len != sizeof(*capa)) {
				err = EINVAL;
				goto parse_out;
			}
			capa = (struct iwm_ucode_capa *)tlv_data;
			idx = le32toh(capa->api_index);
			bits = le32toh(capa->api_capa);
			if (idx >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
				err = EINVAL;
				goto parse_out;
			}
			for (i = 0; i < 32; i++) {
				if (!ISSET(bits, __BIT(i)))
					continue;
				setbit(sc->sc_enabled_capa, i + (32 * idx));
			}
			break;
		}

		case IWM_UCODE_TLV_FW_UNDOCUMENTED1:
		case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
		case IWM_UCODE_TLV_FW_GSCAN_CAPA:
		case IWM_UCODE_TLV_FW_MEM_SEG:
			/* ignore, not used by current driver */
			break;

		case IWM_UCODE_TLV_SEC_RT_USNIFFER:
			err = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
			    tlv_len);
			if (err)
				goto parse_out;
			break;

		case IWM_UCODE_TLV_PAGING: {
			uint32_t paging_mem_size;
			if (tlv_len != sizeof(paging_mem_size)) {
				err = EINVAL;
				goto parse_out;
			}
			paging_mem_size = le32toh(*(uint32_t *)tlv_data);
			if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
				err = EINVAL;
				goto parse_out;
			}
			if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
				err = EINVAL;
				goto parse_out;
			}
			fw->fw_sects[IWM_UCODE_TYPE_REGULAR].paging_mem_size =
			    paging_mem_size;
			fw->fw_sects[IWM_UCODE_TYPE_REGULAR_USNIFFER].paging_mem_size =
			    paging_mem_size;
			break;
		}

		case IWM_UCODE_TLV_N_SCAN_CHANNELS:
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capa_n_scan_channels =
			    le32toh(*(uint32_t *)tlv_data);
			break;

		case IWM_UCODE_TLV_FW_VERSION:
			if (tlv_len != sizeof(uint32_t) * 3) {
				err = EINVAL;
				goto parse_out;
			}
			snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
			    "%d.%d.%d",
			    le32toh(((uint32_t *)tlv_data)[0]),
			    le32toh(((uint32_t *)tlv_data)[1]),
			    le32toh(((uint32_t *)tlv_data)[2]));
			break;

		default:
			DPRINTF(("%s: unknown firmware section %d, abort\n",
			    DEVNAME(sc), tlv_type));
			err = EINVAL;
			goto parse_out;
		}

		len -= roundup(tlv_len, 4);
		data += roundup(tlv_len, 4);
	}

	KASSERT(err == 0);

parse_out:
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "firmware parse error, section type %d\n", tlv_type);
	}

	if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
		aprint_error_dev(sc->sc_dev,
		    "device uses unsupported power ops\n");
		err = ENOTSUP;
	}

out:
	if (err)
		fw->fw_status = IWM_FW_STATUS_NONE;
	else
		fw->fw_status = IWM_FW_STATUS_DONE;
	wakeup(&sc->sc_fw);

	if (err && fw->fw_rawdata != NULL) {
		kmem_free(fw->fw_rawdata, fw->fw_rawsize);
		fw->fw_rawdata = NULL;
		CLR(sc->sc_flags, IWM_FLAG_FW_LOADED);
		/* don't touch fw->fw_status */
		memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
	}
	return err;
}

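/*
 * Indirect access to "periphery" (PRPH) registers: the target address
 * is written to an IWM_HBUS_TARG_PRPH_* address register (with what
 * appears to be an access-size code in the top byte) and the data is
 * transferred through the matching data register.  Callers must hold
 * the NIC lock; see iwm_nic_lock() below.
 */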
static uint32_t
iwm_read_prph(struct iwm_softc *sc, uint32_t addr)
{
	IWM_WRITE(sc,
	    IWM_HBUS_TARG_PRPH_RADDR, ((addr & 0x000fffff) | (3 << 24)));
	IWM_BARRIER_READ_WRITE(sc);
	return IWM_READ(sc, IWM_HBUS_TARG_PRPH_RDAT);
}

static void
iwm_write_prph(struct iwm_softc *sc, uint32_t addr, uint32_t val)
{
	IWM_WRITE(sc,
	    IWM_HBUS_TARG_PRPH_WADDR, ((addr & 0x000fffff) | (3 << 24)));
	IWM_BARRIER_WRITE(sc);
	IWM_WRITE(sc, IWM_HBUS_TARG_PRPH_WDAT, val);
}

#ifdef IWM_DEBUG
static int
iwm_read_mem(struct iwm_softc *sc, uint32_t addr, void *buf, int dwords)
{
	int offs;
	uint32_t *vals = buf;

	if (iwm_nic_lock(sc)) {
		IWM_WRITE(sc, IWM_HBUS_TARG_MEM_RADDR, addr);
		for (offs = 0; offs < dwords; offs++)
			vals[offs] = IWM_READ(sc, IWM_HBUS_TARG_MEM_RDAT);
		iwm_nic_unlock(sc);
		return 0;
	}
	return EBUSY;
}
#endif

static int
iwm_write_mem(struct iwm_softc *sc, uint32_t addr, const void *buf, int dwords)
{
	int offs;
	const uint32_t *vals = buf;

	if (iwm_nic_lock(sc)) {
		IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WADDR, addr);
		/* WADDR auto-increments */
		for (offs = 0; offs < dwords; offs++) {
			uint32_t val = vals ? vals[offs] : 0;
			IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WDAT, val);
		}
		iwm_nic_unlock(sc);
		return 0;
	}
	return EBUSY;
}

static int
iwm_write_mem32(struct iwm_softc *sc, uint32_t addr, uint32_t val)
{
	return iwm_write_mem(sc, addr, &val, 1);
}

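/*
 * Poll a CSR register until the bits selected by 'mask' match 'bits',
 * checking every 10 microseconds for at most 'timo' microseconds.
 * Returns 1 on success, 0 on timeout.
 */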
static int
iwm_poll_bit(struct iwm_softc *sc, int reg, uint32_t bits, uint32_t mask,
    int timo)
{
	for (;;) {
		if ((IWM_READ(sc, reg) & mask) == (bits & mask)) {
			return 1;
		}
		if (timo < 10) {
			return 0;
		}
		timo -= 10;
		DELAY(10);
	}
}

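/*
 * Assert a "MAC access request" so the device keeps its clocks running
 * and wait until they are ready; registers behind the MAC clock gate
 * may only be accessed while this lock is held.  Returns 1 on success
 * and 0 on failure, in which case a forced NMI is triggered to try to
 * reset the device.  Each successful call must be paired with
 * iwm_nic_unlock().
 */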
static int
iwm_nic_lock(struct iwm_softc *sc)
{
	int rv = 0;

	if (sc->sc_cmd_hold_nic_awake)
		return 1;

	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
		DELAY(2);

	if (iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
	     | IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP, 15000)) {
		rv = 1;
	} else {
		DPRINTF(("%s: resetting device via NMI\n", DEVNAME(sc)));
		IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_FORCE_NMI);
	}

	return rv;
}

static void
iwm_nic_unlock(struct iwm_softc *sc)
{

	if (sc->sc_cmd_hold_nic_awake)
		return;

	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}

static void
iwm_set_bits_mask_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits,
    uint32_t mask)
{
	uint32_t val;

	/* XXX: no error path? */
	if (iwm_nic_lock(sc)) {
		val = iwm_read_prph(sc, reg) & mask;
		val |= bits;
		iwm_write_prph(sc, reg, val);
		iwm_nic_unlock(sc);
	}
}

static void
iwm_set_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
{
	iwm_set_bits_mask_prph(sc, reg, bits, ~0);
}

static void
iwm_clear_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
{
	iwm_set_bits_mask_prph(sc, reg, 0, ~bits);
}

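/*
 * Allocate a physically contiguous, 'alignment'-aligned DMA buffer and
 * map it into kernel virtual address space: create the map, allocate
 * and map the memory, then load the map, unwinding all partial state
 * on failure.
 */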
static int
iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
    bus_size_t size, bus_size_t alignment)
{
	int nsegs, err;
	void *va;

	dma->tag = tag;
	dma->size = size;

	err = bus_dmamap_create(tag, size, 1, size, 0, BUS_DMA_NOWAIT,
	    &dma->map);
	if (err)
		goto fail;

	err = bus_dmamem_alloc(tag, size, alignment, 0, &dma->seg, 1, &nsegs,
	    BUS_DMA_NOWAIT);
	if (err)
		goto fail;

	err = bus_dmamem_map(tag, &dma->seg, 1, size, &va, BUS_DMA_NOWAIT);
	if (err)
		goto fail;
	dma->vaddr = va;

	err = bus_dmamap_load(tag, dma->map, dma->vaddr, size, NULL,
	    BUS_DMA_NOWAIT);
	if (err)
		goto fail;

	memset(dma->vaddr, 0, size);
	bus_dmamap_sync(tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE);
	dma->paddr = dma->map->dm_segs[0].ds_addr;

	return 0;

fail:	iwm_dma_contig_free(dma);
	return err;
}

static void
iwm_dma_contig_free(struct iwm_dma_info *dma)
{
	if (dma->map != NULL) {
		if (dma->vaddr != NULL) {
			bus_dmamap_sync(dma->tag, dma->map, 0, dma->size,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(dma->tag, dma->map);
			bus_dmamem_unmap(dma->tag, dma->vaddr, dma->size);
			bus_dmamem_free(dma->tag, &dma->seg, 1);
			dma->vaddr = NULL;
		}
		bus_dmamap_destroy(dma->tag, dma->map);
		dma->map = NULL;
	}
}

static int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	bus_size_t size;
	int i, err;

	ring->cur = 0;

	/* Allocate RX descriptors (256-byte aligned). */
	size = IWM_RX_RING_COUNT * sizeof(uint32_t);
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate RX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/* Allocate RX status area (16-byte aligned). */
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
	    sizeof(*ring->stat), 16);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate RX status DMA memory\n");
		goto fail;
	}
	ring->stat = ring->stat_dma.vaddr;

	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];

		memset(data, 0, sizeof(*data));
		err = bus_dmamap_create(sc->sc_dmat, IWM_RBUF_SIZE, 1,
		    IWM_RBUF_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &data->map);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could not create RX buf DMA map\n");
			goto fail;
		}

		err = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i);
		if (err)
			goto fail;
	}
	return 0;

fail:	iwm_free_rx_ring(sc, ring);
	return err;
}

static void
iwm_disable_rx_dma(struct iwm_softc *sc)
{
	int ntries;

	if (iwm_nic_lock(sc)) {
		IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
		for (ntries = 0; ntries < 1000; ntries++) {
			if (IWM_READ(sc, IWM_FH_MEM_RSSR_RX_STATUS_REG) &
			    IWM_FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE)
				break;
			DELAY(10);
		}
		iwm_nic_unlock(sc);
	}
}

static void
iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	ring->cur = 0;
	memset(ring->stat, 0, sizeof(*ring->stat));
	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
	    ring->stat_dma.size, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

static void
iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	int i;

	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->stat_dma);

	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(sc->sc_dmat, data->map);
			data->map = NULL;
		}
	}
}

static int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	int i, err, nsegs;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate TX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/*
	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
	 * to allocate command space for other rings.
	 */
	if (qid > IWM_CMD_QUEUE)
		return 0;

	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate TX cmd DMA memory\n");
		goto fail;
	}
	ring->cmd = ring->cmd_dma.vaddr;

	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];
		size_t mapsize;

		data->cmd_paddr = paddr;
		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
		    + offsetof(struct iwm_tx_cmd, scratch);
		paddr += sizeof(struct iwm_device_cmd);

		/* FW commands may require more mapped space than packets. */
		if (qid == IWM_CMD_QUEUE) {
			mapsize = IWM_RBUF_SIZE;
			nsegs = 1;
		} else {
			mapsize = MCLBYTES;
			nsegs = IWM_NUM_OF_TBS - 2;
		}
		err = bus_dmamap_create(sc->sc_dmat, mapsize, nsegs, mapsize,
		    0, BUS_DMA_NOWAIT, &data->map);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could not create TX buf DMA map\n");
			goto fail;
		}
	}
	KASSERT(paddr == ring->cmd_dma.paddr + size);
	return 0;

fail:	iwm_free_tx_ring(sc, ring);
	return err;
}

static void
iwm_clear_cmd_in_flight(struct iwm_softc *sc)
{

	if (!sc->apmg_wake_up_wa)
		return;

	if (!sc->sc_cmd_hold_nic_awake) {
		aprint_error_dev(sc->sc_dev,
		    "cmd_hold_nic_awake not set\n");
		return;
	}

	sc->sc_cmd_hold_nic_awake = 0;
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}

static int
iwm_set_cmd_in_flight(struct iwm_softc *sc)
{
	int ret;

	/*
	 * wake up the NIC to make sure that the firmware will see the host
	 * command - we will let the NIC sleep once all the host commands
	 * returned. This needs to be done only on NICs that have
	 * apmg_wake_up_wa set.
	 */
	if (sc->apmg_wake_up_wa && !sc->sc_cmd_hold_nic_awake) {

		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
		    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

		ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
		    IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
		    (IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
		     IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP),
		    15000);
		if (ret == 0) {
			IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
			    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			aprint_error_dev(sc->sc_dev,
			    "failed to wake NIC for hcmd\n");
			return EIO;
		}
		sc->sc_cmd_hold_nic_awake = 1;
	}

	return 0;
}

static void
iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	/* Clear TX descriptors. */
	memset(ring->desc, 0, ring->desc_dma.size);
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0,
	    ring->desc_dma.size, BUS_DMASYNC_PREWRITE);
	sc->qfullmsk &= ~(1 << ring->qid);
	ring->queued = 0;
	ring->cur = 0;

	if (ring->qid == IWM_CMD_QUEUE && sc->sc_cmd_hold_nic_awake)
		iwm_clear_cmd_in_flight(sc);
}

static void
iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->cmd_dma);

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(sc->sc_dmat, data->map);
			data->map = NULL;
		}
	}
}

static void
iwm_enable_rfkill_int(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INT_BIT_RF_KILL;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}

static int
iwm_check_rfkill(struct iwm_softc *sc)
{
	uint32_t v;
	int s;
	int rv;

	s = splnet();

	/*
	 * "documentation" is not really helpful here:
	 *  27:	HW_RF_KILL_SW
	 *	Indicates state of (platform's) hardware RF-Kill switch
	 *
	 * But apparently when it's off, it's on ...
	 */
	v = IWM_READ(sc, IWM_CSR_GP_CNTRL);
	rv = (v & IWM_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) == 0;
	if (rv) {
		sc->sc_flags |= IWM_FLAG_RFKILL;
	} else {
		sc->sc_flags &= ~IWM_FLAG_RFKILL;
	}

	splx(s);
	return rv;
}

static void
iwm_enable_interrupts(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}

static void
iwm_restore_interrupts(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}

static void
iwm_disable_interrupts(struct iwm_softc *sc)
{
	int s = splnet();

	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	/* acknowledge all interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);

	splx(s);
}

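/*
 * Reset the interrupt cause table (ICT).  The device writes the causes
 * of its interrupts into this DRAM table so that the interrupt handler
 * can read them from memory instead of performing slow register reads.
 * The table is 4KB-aligned and its physical address is programmed into
 * IWM_CSR_DRAM_INT_TBL_REG below.
 */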
static void
iwm_ict_reset(struct iwm_softc *sc)
{
	iwm_disable_interrupts(sc);

	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
	bus_dmamap_sync(sc->sc_dmat, sc->ict_dma.map, 0, IWM_ICT_SIZE,
	    BUS_DMASYNC_PREWRITE);
	sc->ict_cur = 0;

	/* Set physical address of ICT (4KB aligned). */
	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
	    IWM_CSR_DRAM_INT_TBL_ENABLE
	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWM_FLAG_USE_ICT;

	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);
}

#define IWM_HW_READY_TIMEOUT 50
static int
iwm_set_hw_ready(struct iwm_softc *sc)
{
	int ready;

	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	ready = iwm_poll_bit(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
	    IWM_HW_READY_TIMEOUT);
	if (ready)
		IWM_SETBITS(sc, IWM_CSR_MBOX_SET_REG,
		    IWM_CSR_MBOX_SET_REG_OS_ALIVE);

	return ready;
}
#undef IWM_HW_READY_TIMEOUT

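/*
 * Ask the device to become ready, retrying for up to ~150 ms.  Setting
 * the PREPARE bit apparently requests ownership of the device from any
 * other agent (such as platform management firmware) that may still be
 * holding it after boot; this interpretation follows the Linux driver.
 */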
1623 static int
1624 iwm_prepare_card_hw(struct iwm_softc *sc)
1625 {
1626 int t = 0;
1627
1628 if (iwm_set_hw_ready(sc))
1629 return 0;
1630
1631 DELAY(100);
1632
1633 /* If HW is not ready, prepare the conditions to check again */
1634 IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1635 IWM_CSR_HW_IF_CONFIG_REG_PREPARE);
1636
1637 do {
1638 if (iwm_set_hw_ready(sc))
1639 return 0;
1640 DELAY(200);
1641 t += 200;
1642 } while (t < 150000);
1643
1644 return ETIMEDOUT;
1645 }
1646
1647 static void
1648 iwm_apm_config(struct iwm_softc *sc)
1649 {
1650 pcireg_t reg;
1651
1652 reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
1653 sc->sc_cap_off + PCIE_LCSR);
1654 if (reg & PCIE_LCSR_ASPM_L1) {
1655 /* Um the Linux driver prints "Disabling L0S for this one ... */
1656 IWM_SETBITS(sc, IWM_CSR_GIO_REG,
1657 IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
1658 } else {
1659 /* ... and "Enabling" here */
1660 IWM_CLRBITS(sc, IWM_CSR_GIO_REG,
1661 IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
1662 }
1663 }
1664
1665 /*
1666 * Start up NIC's basic functionality after it has been reset
1667 * e.g. after platform boot or shutdown.
1668 * NOTE: This does not load uCode nor start the embedded processor
1669 */
1670 static int
1671 iwm_apm_init(struct iwm_softc *sc)
1672 {
1673 int err = 0;
1674
1675 /* Disable L0S exit timer (platform NMI workaround) */
1676 if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000) {
1677 IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
1678 IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
1679 }
1680
1681 /*
1682 * Disable L0s without affecting L1;
1683 * don't wait for ICH L0s (ICH bug W/A)
1684 */
1685 IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
1686 IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
1687
1688 /* Set FH wait threshold to maximum (HW error during stress W/A) */
1689 IWM_SETBITS(sc, IWM_CSR_DBG_HPET_MEM_REG, IWM_CSR_DBG_HPET_MEM_REG_VAL);
1690
1691 /*
1692 * Enable HAP INTA (interrupt from management bus) to
1693 * wake device's PCI Express link L1a -> L0s
1694 */
1695 IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1696 IWM_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
1697
1698 iwm_apm_config(sc);
1699
1700 #if 0 /* not for 7k/8k */
1701 /* Configure analog phase-lock-loop before activating to D0A */
1702 if (trans->cfg->base_params->pll_cfg_val)
1703 IWM_SETBITS(trans, IWM_CSR_ANA_PLL_CFG,
1704 trans->cfg->base_params->pll_cfg_val);
1705 #endif
1706
1707 /*
1708 * Set "initialization complete" bit to move adapter from
1709 * D0U* --> D0A* (powered-up active) state.
1710 */
1711 IWM_SETBITS(sc, IWM_CSR_GP_CNTRL, IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1712
1713 /*
1714 * Wait for clock stabilization; once stabilized, access to
1715 * device-internal resources is supported, e.g. iwm_write_prph()
1716 * and accesses to uCode SRAM.
1717 */
1718 if (!iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
1719 IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
1720 IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
1721 aprint_error_dev(sc->sc_dev,
1722 "timeout waiting for clock stabilization\n");
1723 err = ETIMEDOUT;
1724 goto out;
1725 }
1726
1727 if (sc->host_interrupt_operation_mode) {
1728 /*
1729 * This is a bit of an abuse - This is needed for 7260 / 3160
1730 * only check host_interrupt_operation_mode even if this is
1731 * not related to host_interrupt_operation_mode.
1732 *
1733 * Enable the oscillator to count wake up time for L1 exit. This
1734 * consumes slightly more power (100uA) - but allows to be sure
1735 * that we wake up from L1 on time.
1736 *
1737 * This looks weird: read twice the same register, discard the
1738 * value, set a bit, and yet again, read that same register
1739 * just to discard the value. But that's the way the hardware
1740 * seems to like it.
1741 */
1742 iwm_read_prph(sc, IWM_OSC_CLK);
1743 iwm_read_prph(sc, IWM_OSC_CLK);
1744 iwm_set_bits_prph(sc, IWM_OSC_CLK, IWM_OSC_CLK_FORCE_CONTROL);
1745 iwm_read_prph(sc, IWM_OSC_CLK);
1746 iwm_read_prph(sc, IWM_OSC_CLK);
1747 }
1748
1749 /*
1750 * Enable DMA clock and wait for it to stabilize.
1751 *
1752 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
1753 * do not disable clocks. This preserves any hardware bits already
1754 * set by default in "CLK_CTRL_REG" after reset.
1755 */
1756 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
1757 iwm_write_prph(sc, IWM_APMG_CLK_EN_REG,
1758 IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1759 DELAY(20);
1760
1761 /* Disable L1-Active */
1762 iwm_set_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1763 IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1764
1765 /* Clear the interrupt in APMG if the NIC is in RFKILL */
1766 iwm_write_prph(sc, IWM_APMG_RTC_INT_STT_REG,
1767 IWM_APMG_RTC_INT_STT_RFKILL);
1768 }
1769 out:
1770 if (err)
1771 aprint_error_dev(sc->sc_dev, "apm init error %d\n", err);
1772 return err;
1773 }
1774
1775 static void
1776 iwm_apm_stop(struct iwm_softc *sc)
1777 {
1778 /* stop device's busmaster DMA activity */
1779 IWM_SETBITS(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_STOP_MASTER);
1780
1781 if (!iwm_poll_bit(sc, IWM_CSR_RESET,
1782 IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED,
1783 IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100))
1784 aprint_error_dev(sc->sc_dev, "timeout waiting for master\n");
1785 DPRINTF(("iwm apm stop\n"));
1786 }
1787
1788 static int
1789 iwm_start_hw(struct iwm_softc *sc)
1790 {
1791 int err;
1792
1793 err = iwm_prepare_card_hw(sc);
1794 if (err)
1795 return err;
1796
1797 /* Reset the entire device */
1798 IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
1799 DELAY(10);
1800
1801 err = iwm_apm_init(sc);
1802 if (err)
1803 return err;
1804
1805 iwm_enable_rfkill_int(sc);
1806 iwm_check_rfkill(sc);
1807
1808 return 0;
1809 }
1810
1811 static void
1812 iwm_stop_device(struct iwm_softc *sc)
1813 {
1814 int chnl, ntries;
1815 int qid;
1816
1817 iwm_disable_interrupts(sc);
1818 sc->sc_flags &= ~IWM_FLAG_USE_ICT;
1819
1820 /* Deactivate TX scheduler. */
1821 iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1822
1823 /* Stop all DMA channels. */
1824 if (iwm_nic_lock(sc)) {
1825 for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1826 IWM_WRITE(sc,
1827 IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
1828 for (ntries = 0; ntries < 200; ntries++) {
1829 uint32_t r;
1830
1831 r = IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG);
1832 if (r & IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(
1833 chnl))
1834 break;
1835 DELAY(20);
1836 }
1837 }
1838 iwm_nic_unlock(sc);
1839 }
1840 iwm_disable_rx_dma(sc);
1841
1842 iwm_reset_rx_ring(sc, &sc->rxq);
1843
1844 for (qid = 0; qid < __arraycount(sc->txq); qid++)
1845 iwm_reset_tx_ring(sc, &sc->txq[qid]);
1846
1847 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
1848 /* Power-down device's busmaster DMA clocks */
1849 if (iwm_nic_lock(sc)) {
1850 iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
1851 IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1852 DELAY(5);
1853 iwm_nic_unlock(sc);
1854 }
1855 }
1856
1857 /* Make sure (redundant) we've released our request to stay awake */
1858 IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1859 IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1860
1861 /* Stop the device, and put it in low power state */
1862 iwm_apm_stop(sc);
1863
	/*
	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * Clear that interrupt again here.
	 */
1868 iwm_disable_interrupts(sc);
1869
1870 /* Reset the on-board processor. */
1871 IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
1872
1873 /* Even though we stop the HW we still want the RF kill interrupt. */
1874 iwm_enable_rfkill_int(sc);
1875 iwm_check_rfkill(sc);
1876 }
1877
1878 static void
1879 iwm_nic_config(struct iwm_softc *sc)
1880 {
1881 uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
1882 uint32_t reg_val = 0;
1883
1884 radio_cfg_type = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
1885 IWM_FW_PHY_CFG_RADIO_TYPE_POS;
1886 radio_cfg_step = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
1887 IWM_FW_PHY_CFG_RADIO_STEP_POS;
1888 radio_cfg_dash = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
1889 IWM_FW_PHY_CFG_RADIO_DASH_POS;
1890
1891 reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
1892 IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
1893 reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
1894 IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
1895
1896 /* radio configuration */
1897 reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
1898 reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
1899 reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
1900
1901 IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);
1902
1903 DPRINTF(("Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
1904 radio_cfg_step, radio_cfg_dash));
1905
1906 /*
1907 * W/A : NIC is stuck in a reset state after Early PCIe power off
1908 * (PCIe power is lost before PERST# is asserted), causing ME FW
1909 * to lose ownership and not being able to obtain it back.
1910 */
1911 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
1912 iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
1913 IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
1914 ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
1915 }
1916 }
1917
1918 static int
1919 iwm_nic_rx_init(struct iwm_softc *sc)
1920 {
1921 if (!iwm_nic_lock(sc))
1922 return EBUSY;
1923
1924 memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1925 bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
1926 0, sc->rxq.stat_dma.size,
1927 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1928
1929 iwm_disable_rx_dma(sc);
1930 IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
1931 IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
1932 IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
1933 IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
1934
1935 /* Set physical address of RX ring (256-byte aligned). */
1936 IWM_WRITE(sc,
1937 IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);
1938
1939 /* Set physical address of RX status (16-byte aligned). */
1940 IWM_WRITE(sc,
1941 IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);
1942
1943 /* Enable RX. */
1944 IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
1945 IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
1946 IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY | /* HW bug */
1947 IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
1948 IWM_FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
1949 IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
1950 (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
1951 IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);
1952
1953 IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
1954
1955 /* W/A for interrupt coalescing bug in 7260 and 3160 */
1956 if (sc->host_interrupt_operation_mode)
1957 IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);
1958
1959 /*
1960 * This value should initially be 0 (before preparing any RBs),
1961 * and should be 8 after preparing the first 8 RBs (for example).
1962 */
1963 IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);
1964
1965 iwm_nic_unlock(sc);
1966
1967 return 0;
1968 }
1969
1970 static int
1971 iwm_nic_tx_init(struct iwm_softc *sc)
1972 {
1973 int qid;
1974
1975 if (!iwm_nic_lock(sc))
1976 return EBUSY;
1977
1978 /* Deactivate TX scheduler. */
1979 iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1980
1981 /* Set physical address of "keep warm" page (16-byte aligned). */
1982 IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);
1983
1984 for (qid = 0; qid < __arraycount(sc->txq); qid++) {
1985 struct iwm_tx_ring *txq = &sc->txq[qid];
1986
1987 /* Set physical address of TX ring (256-byte aligned). */
1988 IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
1989 txq->desc_dma.paddr >> 8);
1990 DPRINTF(("loading ring %d descriptors (%p) at %"PRIxMAX"\n",
1991 qid, txq->desc, (uintmax_t)(txq->desc_dma.paddr >> 8)));
1992 }
1993
1994 iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);
1995
1996 iwm_nic_unlock(sc);
1997
1998 return 0;
1999 }
2000
2001 static int
2002 iwm_nic_init(struct iwm_softc *sc)
2003 {
2004 int err;
2005
2006 iwm_apm_init(sc);
2007 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
2008 iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
2009 IWM_APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
2010 ~IWM_APMG_PS_CTRL_MSK_PWR_SRC);
2011 }
2012
2013 iwm_nic_config(sc);
2014
2015 err = iwm_nic_rx_init(sc);
2016 if (err)
2017 return err;
2018
2019 err = iwm_nic_tx_init(sc);
2020 if (err)
2021 return err;
2022
2023 DPRINTF(("shadow registers enabled\n"));
2024 IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
2025
2026 return 0;
2027 }
2028
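
/*
 * Map 802.11 EDCA access categories to hardware TX FIFO numbers
 * (voice, video, best effort, background).
 */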
2029 static const uint8_t iwm_ac_to_tx_fifo[] = {
2030 IWM_TX_FIFO_VO,
2031 IWM_TX_FIFO_VI,
2032 IWM_TX_FIFO_BE,
2033 IWM_TX_FIFO_BK,
2034 };
2035
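/*
 * Activate a TX queue and bind it to a TX FIFO. The command queue is
 * configured directly through scheduler registers and SRAM; all other
 * queues are configured by the firmware via an IWM_SCD_QUEUE_CFG
 * host command.
 */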
2036 static int
2037 iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
2038 {
2039 if (!iwm_nic_lock(sc)) {
2040 DPRINTF(("%s: cannot enable txq %d\n", DEVNAME(sc), qid));
2041 return EBUSY;
2042 }
2043
2044 IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
2045
2046 if (qid == IWM_CMD_QUEUE) {
2047 iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
2048 (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
2049 | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
2050
2051 iwm_nic_unlock(sc);
2052
2053 iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));
2054
2055 if (!iwm_nic_lock(sc))
2056 return EBUSY;
2057 iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
2058 iwm_nic_unlock(sc);
2059
2060 iwm_write_mem32(sc,
2061 sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
2062
2063 /* Set scheduler window size and frame limit. */
2064 iwm_write_mem32(sc,
2065 sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
2066 sizeof(uint32_t),
2067 ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
2068 IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
2069 ((IWM_FRAME_LIMIT
2070 << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
2071 IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
2072
2073 if (!iwm_nic_lock(sc))
2074 return EBUSY;
2075 iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
2076 (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
2077 (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
2078 (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
2079 IWM_SCD_QUEUE_STTS_REG_MSK);
2080 } else {
2081 struct iwm_scd_txq_cfg_cmd cmd;
2082 int err;
2083
2084 iwm_nic_unlock(sc);
2085
2086 memset(&cmd, 0, sizeof(cmd));
2087 cmd.scd_queue = qid;
2088 cmd.enable = 1;
2089 cmd.sta_id = sta_id;
2090 cmd.tx_fifo = fifo;
2091 cmd.aggregate = 0;
2092 cmd.window = IWM_FRAME_LIMIT;
2093
2094 err = iwm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, 0, sizeof(cmd),
2095 &cmd);
2096 if (err)
2097 return err;
2098
2099 if (!iwm_nic_lock(sc))
2100 return EBUSY;
2101 }
2102
2103 iwm_write_prph(sc, IWM_SCD_EN_CTRL,
2104 iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);
2105
2106 iwm_nic_unlock(sc);
2107
2108 DPRINTF(("enabled txq %d FIFO %d\n", qid, fifo));
2109
2110 return 0;
2111 }
2112
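/*
 * Finish the boot process after the firmware has sent its "alive"
 * notification: verify the TX scheduler's SRAM base address, clear its
 * context area, and enable the command queue and the FH DMA channels.
 */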
2113 static int
2114 iwm_post_alive(struct iwm_softc *sc)
2115 {
2116 int nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
2117 IWM_SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(uint32_t);
2118 int err, chnl;
2119 uint32_t base;
2120
2121 if (!iwm_nic_lock(sc))
2122 return EBUSY;
2123
2124 base = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
2125 if (sc->sched_base != base) {
2126 DPRINTF(("%s: sched addr mismatch: 0x%08x != 0x%08x\n",
2127 DEVNAME(sc), sc->sched_base, base));
2128 sc->sched_base = base;
2129 }
2130
2131 iwm_nic_unlock(sc);
2132
2133 iwm_ict_reset(sc);
2134
2135 /* Clear TX scheduler state in SRAM. */
2136 err = iwm_write_mem(sc,
2137 sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND, NULL, nwords);
2138 if (err)
2139 return err;
2140
2141 if (!iwm_nic_lock(sc))
2142 return EBUSY;
2143
2144 /* Set physical address of TX scheduler rings (1KB aligned). */
2145 iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);
2146
2147 iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);
2148
2149 iwm_nic_unlock(sc);
2150
2151 /* enable command channel */
2152 err = iwm_enable_txq(sc, 0 /* unused */, IWM_CMD_QUEUE, 7);
2153 if (err)
2154 return err;
2155
2156 if (!iwm_nic_lock(sc))
2157 return EBUSY;
2158
2159 /* Activate TX scheduler. */
2160 iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
2161
2162 /* Enable DMA channels. */
2163 for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
2164 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
2165 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
2166 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
2167 }
2168
2169 IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
2170 IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
2171
2172 /* Enable L1-Active */
2173 if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000) {
2174 iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
2175 IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
2176 }
2177
2178 iwm_nic_unlock(sc);
2179
2180 return 0;
2181 }
2182
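/*
 * PHY database: calibration results reported by the init firmware are
 * stored here and later uploaded to the operational firmware.
 */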
2183 static struct iwm_phy_db_entry *
2184 iwm_phy_db_get_section(struct iwm_softc *sc, enum iwm_phy_db_section_type type,
2185 uint16_t chg_id)
2186 {
2187 struct iwm_phy_db *phy_db = &sc->sc_phy_db;
2188
2189 if (type >= IWM_PHY_DB_MAX)
2190 return NULL;
2191
2192 switch (type) {
2193 case IWM_PHY_DB_CFG:
2194 return &phy_db->cfg;
2195 case IWM_PHY_DB_CALIB_NCH:
2196 return &phy_db->calib_nch;
2197 case IWM_PHY_DB_CALIB_CHG_PAPD:
2198 if (chg_id >= IWM_NUM_PAPD_CH_GROUPS)
2199 return NULL;
2200 return &phy_db->calib_ch_group_papd[chg_id];
2201 case IWM_PHY_DB_CALIB_CHG_TXP:
2202 if (chg_id >= IWM_NUM_TXP_CH_GROUPS)
2203 return NULL;
2204 return &phy_db->calib_ch_group_txp[chg_id];
2205 default:
2206 return NULL;
2207 }
2208 return NULL;
2209 }
2210
2211 static int
2212 iwm_phy_db_set_section(struct iwm_softc *sc,
2213 struct iwm_calib_res_notif_phy_db *phy_db_notif, uint16_t size)
2214 {
2215 struct iwm_phy_db_entry *entry;
2216 enum iwm_phy_db_section_type type = le16toh(phy_db_notif->type);
2217 uint16_t chg_id = 0;
2218
2219 if (type == IWM_PHY_DB_CALIB_CHG_PAPD ||
2220 type == IWM_PHY_DB_CALIB_CHG_TXP)
2221 chg_id = le16toh(*(uint16_t *)phy_db_notif->data);
2222
2223 entry = iwm_phy_db_get_section(sc, type, chg_id);
2224 if (!entry)
2225 return EINVAL;
2226
2227 if (entry->data)
2228 kmem_intr_free(entry->data, entry->size);
2229 entry->data = kmem_intr_alloc(size, KM_NOSLEEP);
2230 if (!entry->data) {
2231 entry->size = 0;
2232 return ENOMEM;
2233 }
2234 memcpy(entry->data, phy_db_notif->data, size);
2235 entry->size = size;
2236
2237 DPRINTFN(10, ("%s(%d): [PHYDB]SET: Type %d, Size: %d, data: %p\n",
2238 __func__, __LINE__, type, size, entry->data));
2239
2240 return 0;
2241 }
2242
2243 static int
2244 iwm_is_valid_channel(uint16_t ch_id)
2245 {
2246 if (ch_id <= 14 ||
2247 (36 <= ch_id && ch_id <= 64 && ch_id % 4 == 0) ||
2248 (100 <= ch_id && ch_id <= 140 && ch_id % 4 == 0) ||
2249 (145 <= ch_id && ch_id <= 165 && ch_id % 4 == 1))
2250 return 1;
2251 return 0;
2252 }
2253
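/*
 * Map a valid channel number to its index in the NVM channel list:
 * channels 1-14 map to indices 0-13, 36-64 to 14-21, 100-140 to
 * 22-32, and 145-165 to 33-38. For example, channel 36 yields
 * (36 + 20) / 4 = 14.
 */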
2254 static uint8_t
2255 iwm_ch_id_to_ch_index(uint16_t ch_id)
2256 {
2257 if (!iwm_is_valid_channel(ch_id))
2258 return 0xff;
2259
2260 if (ch_id <= 14)
2261 return ch_id - 1;
2262 if (ch_id <= 64)
2263 return (ch_id + 20) / 4;
2264 if (ch_id <= 140)
2265 return (ch_id - 12) / 4;
2266 return (ch_id - 13) / 4;
2267 }
2268
2269
2270 static uint16_t
2271 iwm_channel_id_to_papd(uint16_t ch_id)
2272 {
2273 if (!iwm_is_valid_channel(ch_id))
2274 return 0xff;
2275
2276 if (1 <= ch_id && ch_id <= 14)
2277 return 0;
2278 if (36 <= ch_id && ch_id <= 64)
2279 return 1;
2280 if (100 <= ch_id && ch_id <= 140)
2281 return 2;
2282 return 3;
2283 }
2284
2285 static uint16_t
2286 iwm_channel_id_to_txp(struct iwm_softc *sc, uint16_t ch_id)
2287 {
2288 struct iwm_phy_db *phy_db = &sc->sc_phy_db;
2289 struct iwm_phy_db_chg_txp *txp_chg;
2290 int i;
2291 uint8_t ch_index = iwm_ch_id_to_ch_index(ch_id);
2292
2293 if (ch_index == 0xff)
2294 return 0xff;
2295
2296 for (i = 0; i < IWM_NUM_TXP_CH_GROUPS; i++) {
2297 txp_chg = (void *)phy_db->calib_ch_group_txp[i].data;
2298 if (!txp_chg)
2299 return 0xff;
		/*
		 * Look for the first channel group whose max channel
		 * index is at least the index of the requested channel.
		 */
2304 if (le16toh(txp_chg->max_channel_idx) >= ch_index)
2305 return i;
2306 }
2307 return 0xff;
2308 }
2309
2310 static int
2311 iwm_phy_db_get_section_data(struct iwm_softc *sc, uint32_t type, uint8_t **data,
2312 uint16_t *size, uint16_t ch_id)
2313 {
2314 struct iwm_phy_db_entry *entry;
2315 uint16_t ch_group_id = 0;
2316
2317 if (type == IWM_PHY_DB_CALIB_CHG_PAPD)
2318 ch_group_id = iwm_channel_id_to_papd(ch_id);
2319 else if (type == IWM_PHY_DB_CALIB_CHG_TXP)
2320 ch_group_id = iwm_channel_id_to_txp(sc, ch_id);
2321
2322 entry = iwm_phy_db_get_section(sc, type, ch_group_id);
2323 if (!entry)
2324 return EINVAL;
2325
2326 *data = entry->data;
2327 *size = entry->size;
2328
2329 DPRINTFN(10, ("%s(%d): [PHYDB] GET: Type %d , Size: %d\n",
2330 __func__, __LINE__, type, *size));
2331
2332 return 0;
2333 }
2334
2335 static int
2336 iwm_send_phy_db_cmd(struct iwm_softc *sc, uint16_t type, uint16_t length,
2337 void *data)
2338 {
2339 struct iwm_phy_db_cmd phy_db_cmd;
2340 struct iwm_host_cmd cmd = {
2341 .id = IWM_PHY_DB_CMD,
2342 .flags = IWM_CMD_ASYNC,
2343 };
2344
2345 DPRINTFN(10, ("Sending PHY-DB hcmd of type %d, of length %d\n",
2346 type, length));
2347
	phy_db_cmd.type = htole16(type);
	phy_db_cmd.length = htole16(length);
2350
2351 cmd.data[0] = &phy_db_cmd;
2352 cmd.len[0] = sizeof(struct iwm_phy_db_cmd);
2353 cmd.data[1] = data;
2354 cmd.len[1] = length;
2355
2356 return iwm_send_cmd(sc, &cmd);
2357 }
2358
2359 static int
2360 iwm_phy_db_send_all_channel_groups(struct iwm_softc *sc,
2361 enum iwm_phy_db_section_type type, uint8_t max_ch_groups)
2362 {
2363 uint16_t i;
2364 int err;
2365 struct iwm_phy_db_entry *entry;
2366
2367 /* Send all the channel-specific groups to operational fw */
2368 for (i = 0; i < max_ch_groups; i++) {
2369 entry = iwm_phy_db_get_section(sc, type, i);
2370 if (!entry)
2371 return EINVAL;
2372
2373 if (!entry->size)
2374 continue;
2375
2376 err = iwm_send_phy_db_cmd(sc, type, entry->size, entry->data);
2377 if (err) {
2378 DPRINTF(("%s: Can't SEND phy_db section %d (%d), "
2379 "err %d\n", DEVNAME(sc), type, i, err));
2380 return err;
2381 }
2382
2383 DPRINTFN(10, ("%s: Sent PHY_DB HCMD, type = %d num = %d\n",
2384 DEVNAME(sc), type, i));
2385
2386 DELAY(1000);
2387 }
2388
2389 return 0;
2390 }
2391
2392 static int
2393 iwm_send_phy_db_data(struct iwm_softc *sc)
2394 {
2395 uint8_t *data = NULL;
2396 uint16_t size = 0;
2397 int err;
2398
2399 err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CFG, &data, &size, 0);
2400 if (err)
2401 return err;
2402
2403 err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CFG, size, data);
2404 if (err)
2405 return err;
2406
2407 err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CALIB_NCH,
2408 &data, &size, 0);
2409 if (err)
2410 return err;
2411
2412 err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CALIB_NCH, size, data);
2413 if (err)
2414 return err;
2415
2416 err = iwm_phy_db_send_all_channel_groups(sc,
2417 IWM_PHY_DB_CALIB_CHG_PAPD, IWM_NUM_PAPD_CH_GROUPS);
2418 if (err)
2419 return err;
2420
2421 err = iwm_phy_db_send_all_channel_groups(sc,
2422 IWM_PHY_DB_CALIB_CHG_TXP, IWM_NUM_TXP_CH_GROUPS);
2423 if (err)
2424 return err;
2425
2426 return 0;
2427 }
2428
2429 /*
2430 * For the high priority TE use a time event type that has similar priority to
2431 * the FW's action scan priority.
2432 */
2433 #define IWM_ROC_TE_TYPE_NORMAL IWM_TE_P2P_DEVICE_DISCOVERABLE
2434 #define IWM_ROC_TE_TYPE_MGMT_TX IWM_TE_P2P_CLIENT_ASSOC
2435
2436 /* used to convert from time event API v2 to v1 */
2437 #define IWM_TE_V2_DEP_POLICY_MSK (IWM_TE_V2_DEP_OTHER | IWM_TE_V2_DEP_TSF |\
2438 IWM_TE_V2_EVENT_SOCIOPATHIC)
2439 static inline uint16_t
2440 iwm_te_v2_get_notify(uint16_t policy)
2441 {
2442 return le16toh(policy) & IWM_TE_V2_NOTIF_MSK;
2443 }
2444
2445 static inline uint16_t
2446 iwm_te_v2_get_dep_policy(uint16_t policy)
2447 {
2448 return (le16toh(policy) & IWM_TE_V2_DEP_POLICY_MSK) >>
2449 IWM_TE_V2_PLACEMENT_POS;
2450 }
2451
2452 static inline uint16_t
2453 iwm_te_v2_get_absence(uint16_t policy)
2454 {
2455 return (le16toh(policy) & IWM_TE_V2_ABSENCE) >> IWM_TE_V2_ABSENCE_POS;
2456 }
2457
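/*
 * Convert a version 2 time event command to the version 1 layout, for
 * firmware that does not speak the v2 time event API.
 */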
2458 static void
2459 iwm_te_v2_to_v1(const struct iwm_time_event_cmd_v2 *cmd_v2,
2460 struct iwm_time_event_cmd_v1 *cmd_v1)
2461 {
2462 cmd_v1->id_and_color = cmd_v2->id_and_color;
2463 cmd_v1->action = cmd_v2->action;
2464 cmd_v1->id = cmd_v2->id;
2465 cmd_v1->apply_time = cmd_v2->apply_time;
2466 cmd_v1->max_delay = cmd_v2->max_delay;
2467 cmd_v1->depends_on = cmd_v2->depends_on;
2468 cmd_v1->interval = cmd_v2->interval;
2469 cmd_v1->duration = cmd_v2->duration;
2470 if (cmd_v2->repeat == IWM_TE_V2_REPEAT_ENDLESS)
2471 cmd_v1->repeat = htole32(IWM_TE_V1_REPEAT_ENDLESS);
2472 else
2473 cmd_v1->repeat = htole32(cmd_v2->repeat);
2474 cmd_v1->max_frags = htole32(cmd_v2->max_frags);
2475 cmd_v1->interval_reciprocal = 0; /* unused */
2476
2477 cmd_v1->dep_policy = htole32(iwm_te_v2_get_dep_policy(cmd_v2->policy));
2478 cmd_v1->is_present = htole32(!iwm_te_v2_get_absence(cmd_v2->policy));
2479 cmd_v1->notify = htole32(iwm_te_v2_get_notify(cmd_v2->policy));
2480 }
2481
2482 static int
2483 iwm_send_time_event_cmd(struct iwm_softc *sc,
2484 const struct iwm_time_event_cmd_v2 *cmd)
2485 {
2486 struct iwm_time_event_cmd_v1 cmd_v1;
2487
2488 if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_TIME_EVENT_API_V2)
2489 return iwm_send_cmd_pdu(sc, IWM_TIME_EVENT_CMD, 0, sizeof(*cmd),
2490 cmd);
2491
2492 iwm_te_v2_to_v1(cmd, &cmd_v1);
2493 return iwm_send_cmd_pdu(sc, IWM_TIME_EVENT_CMD, 0, sizeof(cmd_v1),
2494 &cmd_v1);
2495 }
2496
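/*
 * Schedule a firmware time event (IWM_TE_BSS_STA_AGGRESSIVE_ASSOC) that
 * reserves air time for an association attempt with the given node.
 */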
2497 static void
2498 iwm_protect_session(struct iwm_softc *sc, struct iwm_node *in,
2499 uint32_t duration, uint32_t max_delay)
2500 {
2501 struct iwm_time_event_cmd_v2 time_cmd;
2502
2503 memset(&time_cmd, 0, sizeof(time_cmd));
2504
2505 time_cmd.action = htole32(IWM_FW_CTXT_ACTION_ADD);
2506 time_cmd.id_and_color =
2507 htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
2508 time_cmd.id = htole32(IWM_TE_BSS_STA_AGGRESSIVE_ASSOC);
2509
2510 time_cmd.apply_time = htole32(0);
2511
2512 time_cmd.max_frags = IWM_TE_V2_FRAG_NONE;
2513 time_cmd.max_delay = htole32(max_delay);
	/* TODO: why do we need to set interval = bi if it is not periodic? */
2515 time_cmd.interval = htole32(1);
2516 time_cmd.duration = htole32(duration);
2517 time_cmd.repeat = 1;
2518 time_cmd.policy
2519 = htole16(IWM_TE_V2_NOTIF_HOST_EVENT_START |
2520 IWM_TE_V2_NOTIF_HOST_EVENT_END |
2521 IWM_T2_V2_START_IMMEDIATELY);
2522
2523 iwm_send_time_event_cmd(sc, &time_cmd);
2524 }
2525
2526 /*
2527 * NVM read access and content parsing. We do not support
2528 * external NVM or writing NVM.
2529 */
2530
2531 /* list of NVM sections we are allowed/need to read */
2532 static const int iwm_nvm_to_read[] = {
2533 IWM_NVM_SECTION_TYPE_HW,
2534 IWM_NVM_SECTION_TYPE_SW,
2535 IWM_NVM_SECTION_TYPE_REGULATORY,
2536 IWM_NVM_SECTION_TYPE_CALIBRATION,
2537 IWM_NVM_SECTION_TYPE_PRODUCTION,
2538 IWM_NVM_SECTION_TYPE_HW_8000,
2539 IWM_NVM_SECTION_TYPE_MAC_OVERRIDE,
2540 IWM_NVM_SECTION_TYPE_PHY_SKU,
2541 };
2542
2543 /* Default NVM size to read */
2544 #define IWM_NVM_DEFAULT_CHUNK_SIZE (2*1024)
2545 #define IWM_MAX_NVM_SECTION_SIZE_7000 (16 * 512 * sizeof(uint16_t)) /*16 KB*/
2546 #define IWM_MAX_NVM_SECTION_SIZE_8000 (32 * 512 * sizeof(uint16_t)) /*32 KB*/
2547
2548 #define IWM_NVM_WRITE_OPCODE 1
2549 #define IWM_NVM_READ_OPCODE 0
2550
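/*
 * Read a chunk of at most IWM_NVM_DEFAULT_CHUNK_SIZE bytes from an NVM
 * section via an IWM_NVM_ACCESS_CMD host command; the firmware replies
 * with the offset, length, and payload of the data actually read.
 */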
2551 static int
2552 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section, uint16_t offset,
2553 uint16_t length, uint8_t *data, uint16_t *len)
2554 {
2556 struct iwm_nvm_access_cmd nvm_access_cmd = {
2557 .offset = htole16(offset),
2558 .length = htole16(length),
2559 .type = htole16(section),
2560 .op_code = IWM_NVM_READ_OPCODE,
2561 };
2562 struct iwm_nvm_access_resp *nvm_resp;
2563 struct iwm_rx_packet *pkt;
2564 struct iwm_host_cmd cmd = {
2565 .id = IWM_NVM_ACCESS_CMD,
2566 .flags = (IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL),
2567 .data = { &nvm_access_cmd, },
2568 };
2569 int err, offset_read;
2570 size_t bytes_read;
2571 uint8_t *resp_data;
2572
2573 cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
2574
2575 err = iwm_send_cmd(sc, &cmd);
2576 if (err) {
2577 DPRINTF(("%s: Could not send NVM_ACCESS command (error=%d)\n",
2578 DEVNAME(sc), err));
2579 return err;
2580 }
2581
2582 pkt = cmd.resp_pkt;
2583 if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
2584 err = EIO;
2585 goto exit;
2586 }
2587
2588 /* Extract NVM response */
2589 nvm_resp = (void *)pkt->data;
2590
2591 err = le16toh(nvm_resp->status);
2592 bytes_read = le16toh(nvm_resp->length);
2593 offset_read = le16toh(nvm_resp->offset);
2594 resp_data = nvm_resp->data;
2595 if (err) {
2596 err = EINVAL;
2597 goto exit;
2598 }
2599
2600 if (offset_read != offset) {
2601 err = EINVAL;
2602 goto exit;
2603 }
2604 if (bytes_read > length) {
2605 err = EINVAL;
2606 goto exit;
2607 }
2608
2609 memcpy(data + offset, resp_data, bytes_read);
2610 *len = bytes_read;
2611
2612 exit:
2613 iwm_free_resp(sc, &cmd);
2614 return err;
2615 }
2616
/*
 * Read an NVM section completely.
 * NICs prior to the 7000 family don't have a real NVM; they just read
 * section 0, which is the EEPROM. Because EEPROM reads are not bounded
 * by the uCode, in that case we must check manually that we do not
 * overflow and read more than the EEPROM size.
 */
2624 static int
2625 iwm_nvm_read_section(struct iwm_softc *sc, uint16_t section, uint8_t *data,
2626 uint16_t *len, size_t max_len)
2627 {
2628 uint16_t chunklen, seglen;
2629 int err;
2630
2631 chunklen = seglen = IWM_NVM_DEFAULT_CHUNK_SIZE;
2632 *len = 0;
2633
2634 /* Read NVM chunks until exhausted (reading less than requested) */
2635 while (seglen == chunklen && *len < max_len) {
2636 err = iwm_nvm_read_chunk(sc, section, *len, chunklen, data,
2637 &seglen);
2638 if (err) {
2639 DPRINTF(("%s: Cannot read NVM from section %d "
2640 "offset %d, length %d\n",
2641 DEVNAME(sc), section, *len, chunklen));
2642 return err;
2643 }
2644 *len += seglen;
2645 }
2646
2647 DPRINTFN(4, ("NVM section %d read completed\n", section));
2648 return 0;
2649 }
2650
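/*
 * TX/RX antenna masks: start from what the firmware image supports and
 * restrict that by the valid-antenna masks from the NVM, if present.
 */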
2651 static uint8_t
2652 iwm_fw_valid_tx_ant(struct iwm_softc *sc)
2653 {
2654 uint8_t tx_ant;
2655
2656 tx_ant = ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_TX_CHAIN)
2657 >> IWM_FW_PHY_CFG_TX_CHAIN_POS);
2658
2659 if (sc->sc_nvm.valid_tx_ant)
2660 tx_ant &= sc->sc_nvm.valid_tx_ant;
2661
2662 return tx_ant;
2663 }
2664
2665 static uint8_t
2666 iwm_fw_valid_rx_ant(struct iwm_softc *sc)
2667 {
2668 uint8_t rx_ant;
2669
2670 rx_ant = ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RX_CHAIN)
2671 >> IWM_FW_PHY_CFG_RX_CHAIN_POS);
2672
2673 if (sc->sc_nvm.valid_rx_ant)
2674 rx_ant &= sc->sc_nvm.valid_rx_ant;
2675
2676 return rx_ant;
2677 }
2678
2679 static void
2680 iwm_init_channel_map(struct iwm_softc *sc, const uint16_t * const nvm_ch_flags,
2681 const uint8_t *nvm_channels, size_t nchan)
2682 {
2683 struct ieee80211com *ic = &sc->sc_ic;
2684 struct iwm_nvm_data *data = &sc->sc_nvm;
2685 int ch_idx;
2686 struct ieee80211_channel *channel;
2687 uint16_t ch_flags;
2688 int is_5ghz;
2689 int flags, hw_value;
2690
2691 for (ch_idx = 0; ch_idx < nchan; ch_idx++) {
2692 ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
2693 aprint_debug_dev(sc->sc_dev,
2694 "Ch. %d: %svalid %cibss %s %cradar %cdfs"
2695 " %cwide %c40MHz %c80MHz %c160MHz\n",
2696 nvm_channels[ch_idx],
2697 ch_flags & IWM_NVM_CHANNEL_VALID ? "" : "in",
2698 ch_flags & IWM_NVM_CHANNEL_IBSS ? '+' : '-',
2699 ch_flags & IWM_NVM_CHANNEL_ACTIVE ? "active" : "passive",
2700 ch_flags & IWM_NVM_CHANNEL_RADAR ? '+' : '-',
2701 ch_flags & IWM_NVM_CHANNEL_DFS ? '+' : '-',
2702 ch_flags & IWM_NVM_CHANNEL_WIDE ? '+' : '-',
2703 ch_flags & IWM_NVM_CHANNEL_40MHZ ? '+' : '-',
2704 ch_flags & IWM_NVM_CHANNEL_80MHZ ? '+' : '-',
2705 ch_flags & IWM_NVM_CHANNEL_160MHZ ? '+' : '-');
2706
2707 if (ch_idx >= IWM_NUM_2GHZ_CHANNELS &&
2708 !data->sku_cap_band_52GHz_enable)
2709 ch_flags &= ~IWM_NVM_CHANNEL_VALID;
2710
2711 if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
2712 DPRINTF(("Ch. %d Flags %x [%sGHz] - No traffic\n",
2713 nvm_channels[ch_idx], ch_flags,
2714 (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ? "5" : "2.4"));
2715 continue;
2716 }
2717
2718 hw_value = nvm_channels[ch_idx];
2719 channel = &ic->ic_channels[hw_value];
2720
2721 is_5ghz = ch_idx >= IWM_NUM_2GHZ_CHANNELS;
2722 if (!is_5ghz) {
2723 flags = IEEE80211_CHAN_2GHZ;
2724 channel->ic_flags
2725 = IEEE80211_CHAN_CCK
2726 | IEEE80211_CHAN_OFDM
2727 | IEEE80211_CHAN_DYN
2728 | IEEE80211_CHAN_2GHZ;
2729 } else {
2730 flags = IEEE80211_CHAN_5GHZ;
2731 channel->ic_flags =
2732 IEEE80211_CHAN_A;
2733 }
2734 channel->ic_freq = ieee80211_ieee2mhz(hw_value, flags);
2735
2736 if (!(ch_flags & IWM_NVM_CHANNEL_ACTIVE))
2737 channel->ic_flags |= IEEE80211_CHAN_PASSIVE;
2738
2739 #ifndef IEEE80211_NO_HT
2740 if (data->sku_cap_11n_enable)
2741 channel->ic_flags |= IEEE80211_CHAN_HT;
2742 #endif
2743 }
2744 }
2745
2746 #ifndef IEEE80211_NO_HT
2747 static void
2748 iwm_setup_ht_rates(struct iwm_softc *sc)
2749 {
2750 struct ieee80211com *ic = &sc->sc_ic;
2751
2752 /* TX is supported with the same MCS as RX. */
2753 ic->ic_tx_mcs_set = IEEE80211_TX_MCS_SET_DEFINED;
2754
2755 ic->ic_sup_mcs[0] = 0xff; /* MCS 0-7 */
2756
2757 #ifdef notyet
2758 if (sc->sc_nvm.sku_cap_mimo_disable)
2759 return;
2760
2761 if (iwm_fw_valid_rx_ant(sc) > 1)
2762 ic->ic_sup_mcs[1] = 0xff; /* MCS 8-15 */
2763 if (iwm_fw_valid_rx_ant(sc) > 2)
2764 ic->ic_sup_mcs[2] = 0xff; /* MCS 16-23 */
2765 #endif
2766 }
2767
2768 #define IWM_MAX_RX_BA_SESSIONS 16
2769
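/*
 * Start or stop an RX block ack session for a given TID by modifying
 * the firmware's station entry, and tell the 802.11 layer whether the
 * ADDBA request was accepted or refused.
 */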
2770 static void
2771 iwm_sta_rx_agg(struct iwm_softc *sc, struct ieee80211_node *ni, uint8_t tid,
2772 uint16_t ssn, int start)
2773 {
2774 struct ieee80211com *ic = &sc->sc_ic;
2775 struct iwm_add_sta_cmd_v7 cmd;
2776 struct iwm_node *in = (struct iwm_node *)ni;
2777 int err, s;
2778 uint32_t status;
2779
2780 if (start && sc->sc_rx_ba_sessions >= IWM_MAX_RX_BA_SESSIONS) {
2781 ieee80211_addba_req_refuse(ic, ni, tid);
2782 return;
2783 }
2784
2785 memset(&cmd, 0, sizeof(cmd));
2786
2787 cmd.sta_id = IWM_STATION_ID;
2788 cmd.mac_id_n_color
2789 = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
2790 cmd.add_modify = IWM_STA_MODE_MODIFY;
2791
2792 if (start) {
2793 cmd.add_immediate_ba_tid = (uint8_t)tid;
2794 cmd.add_immediate_ba_ssn = ssn;
2795 } else {
2796 cmd.remove_immediate_ba_tid = (uint8_t)tid;
2797 }
2798 cmd.modify_mask = start ? IWM_STA_MODIFY_ADD_BA_TID :
2799 IWM_STA_MODIFY_REMOVE_BA_TID;
2800
2801 status = IWM_ADD_STA_SUCCESS;
2802 err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(cmd), &cmd,
2803 &status);
2804
2805 s = splnet();
2806 if (err == 0 && status == IWM_ADD_STA_SUCCESS) {
2807 if (start) {
2808 sc->sc_rx_ba_sessions++;
2809 ieee80211_addba_req_accept(ic, ni, tid);
2810 } else if (sc->sc_rx_ba_sessions > 0)
2811 sc->sc_rx_ba_sessions--;
2812 } else if (start)
2813 ieee80211_addba_req_refuse(ic, ni, tid);
2814 splx(s);
2815 }
2816
2817 static void
2818 iwm_htprot_task(void *arg)
2819 {
2820 struct iwm_softc *sc = arg;
2821 struct ieee80211com *ic = &sc->sc_ic;
2822 struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
2823 int err;
2824
2825 /* This call updates HT protection based on in->in_ni.ni_htop1. */
2826 err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 1);
2827 if (err)
2828 aprint_error_dev(sc->sc_dev,
2829 "could not change HT protection: error %d\n", err);
2830 }
2831
/*
 * This function is called by the upper layer when HT protection settings
 * in beacons have changed.
 */
2836 static void
2837 iwm_update_htprot(struct ieee80211com *ic, struct ieee80211_node *ni)
2838 {
2839 struct iwm_softc *sc = ic->ic_softc;
2840
2841 /* assumes that ni == ic->ic_bss */
2842 task_add(systq, &sc->htprot_task);
2843 }
2844
2845 static void
2846 iwm_ba_task(void *arg)
2847 {
2848 struct iwm_softc *sc = arg;
2849 struct ieee80211com *ic = &sc->sc_ic;
2850 struct ieee80211_node *ni = ic->ic_bss;
2851
2852 if (sc->ba_start)
2853 iwm_sta_rx_agg(sc, ni, sc->ba_tid, sc->ba_ssn, 1);
2854 else
2855 iwm_sta_rx_agg(sc, ni, sc->ba_tid, 0, 0);
2856 }
2857
/*
 * This function is called by the upper layer when an ADDBA request is
 * received from another STA and before the ADDBA response is sent.
 */
2862 static int
2863 iwm_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
2864 uint8_t tid)
2865 {
2866 struct ieee80211_rx_ba *ba = &ni->ni_rx_ba[tid];
2867 struct iwm_softc *sc = IC2IFP(ic)->if_softc;
2868
2869 if (sc->sc_rx_ba_sessions >= IWM_MAX_RX_BA_SESSIONS)
2870 return ENOSPC;
2871
2872 sc->ba_start = 1;
2873 sc->ba_tid = tid;
2874 sc->ba_ssn = htole16(ba->ba_winstart);
2875 task_add(systq, &sc->ba_task);
2876
2877 return EBUSY;
2878 }
2879
/*
 * This function is called by the upper layer on teardown of an
 * HT-immediate Block Ack agreement (e.g. upon receipt of a DELBA frame).
 */
2884 static void
2885 iwm_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
2886 uint8_t tid)
2887 {
2888 struct iwm_softc *sc = IC2IFP(ic)->if_softc;
2889
2890 sc->ba_start = 0;
2891 sc->ba_tid = tid;
2892 task_add(systq, &sc->ba_task);
2893 }
2894 #endif
2895
2896 static void
2897 iwm_free_fw_paging(struct iwm_softc *sc)
2898 {
2899 int i;
2900
2901 if (sc->fw_paging_db[0].fw_paging_block.vaddr == NULL)
2902 return;
2903
2904 for (i = 0; i < IWM_NUM_OF_FW_PAGING_BLOCKS; i++) {
2905 iwm_dma_contig_free(&sc->fw_paging_db[i].fw_paging_block);
2906 }
2907
2908 memset(sc->fw_paging_db, 0, sizeof(sc->fw_paging_db));
2909 }
2910
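/*
 * Copy the CPU2 paging sections of the firmware image into the DMA-safe
 * paging blocks set up by iwm_alloc_fw_paging_mem().
 */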
2911 static int
2912 iwm_fill_paging_mem(struct iwm_softc *sc, const struct iwm_fw_sects *fws)
2913 {
2914 int sec_idx, idx;
2915 uint32_t offset = 0;
2916
	/*
	 * Find where the paging image starts. If CPU2 exists and is in
	 * paging format, the image looks like this:
	 * CPU1 sections (2 or more)
	 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates CPU1 from CPU2
	 * CPU2 sections (not paged)
	 * PAGING_SEPARATOR_SECTION delimiter - separates the non-paged
	 * CPU2 sections from the CPU2 paging sections
	 * CPU2 paging CSS
	 * CPU2 paging image (including instructions and data)
	 */
2928 for (sec_idx = 0; sec_idx < IWM_UCODE_SECT_MAX; sec_idx++) {
2929 if (fws->fw_sect[sec_idx].fws_devoff ==
2930 IWM_PAGING_SEPARATOR_SECTION) {
2931 sec_idx++;
2932 break;
2933 }
2934 }
2935
2936 /*
2937 * If paging is enabled there should be at least 2 more sections left
2938 * (one for CSS and one for Paging data)
2939 */
2940 if (sec_idx >= __arraycount(fws->fw_sect) - 1) {
2941 aprint_verbose_dev(sc->sc_dev,
2942 "Paging: Missing CSS and/or paging sections\n");
2943 iwm_free_fw_paging(sc);
2944 return EINVAL;
2945 }
2946
2947 /* copy the CSS block to the dram */
2948 DPRINTF(("%s: Paging: load paging CSS to FW, sec = %d\n", DEVNAME(sc),
2949 sec_idx));
2950
2951 memcpy(sc->fw_paging_db[0].fw_paging_block.vaddr,
2952 fws->fw_sect[sec_idx].fws_data, sc->fw_paging_db[0].fw_paging_size);
2953
2954 DPRINTF(("%s: Paging: copied %d CSS bytes to first block\n",
2955 DEVNAME(sc), sc->fw_paging_db[0].fw_paging_size));
2956
2957 sec_idx++;
2958
	/*
	 * Copy the paging blocks to DRAM. The loop index starts at 1
	 * because the CSS block (index 0) was already copied above.
	 * The loop stops before num_of_paging_blk because the last
	 * block may not be full; it is handled separately below.
	 */
2965 for (idx = 1; idx < sc->num_of_paging_blk; idx++) {
2966 memcpy(sc->fw_paging_db[idx].fw_paging_block.vaddr,
2967 (const char *)fws->fw_sect[sec_idx].fws_data + offset,
2968 sc->fw_paging_db[idx].fw_paging_size);
2969
2970 DPRINTF(("%s: Paging: copied %d paging bytes to block %d\n",
2971 DEVNAME(sc), sc->fw_paging_db[idx].fw_paging_size, idx));
2972
2973 offset += sc->fw_paging_db[idx].fw_paging_size;
2974 }
2975
2976 /* copy the last paging block */
2977 if (sc->num_of_pages_in_last_blk > 0) {
2978 memcpy(sc->fw_paging_db[idx].fw_paging_block.vaddr,
2979 (const char *)fws->fw_sect[sec_idx].fws_data + offset,
2980 IWM_FW_PAGING_SIZE * sc->num_of_pages_in_last_blk);
2981
2982 DPRINTF(("%s: Paging: copied %d pages in the last block %d\n",
2983 DEVNAME(sc), sc->num_of_pages_in_last_blk, idx));
2984 }
2985
2986 return 0;
2987 }
2988
2989 static int
2990 iwm_alloc_fw_paging_mem(struct iwm_softc *sc, const struct iwm_fw_sects *fws)
2991 {
2992 int blk_idx = 0;
2993 int error, num_of_pages;
2994 bus_dmamap_t dmap;
2995
2996 if (sc->fw_paging_db[0].fw_paging_block.vaddr != NULL) {
2997 int i;
2998 /* Device got reset, and we setup firmware paging again */
2999 for (i = 0; i < sc->num_of_paging_blk + 1; i++) {
3000 dmap = sc->fw_paging_db[i].fw_paging_block.map;
3001 bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
3002 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
3003 }
3004 return 0;
3005 }
3006
	/* Ensure that IWM_PAGING_BLOCK_SIZE == (1 << IWM_BLOCK_2_EXP_SIZE). */
3008 CTASSERT(__BIT(IWM_BLOCK_2_EXP_SIZE) == IWM_PAGING_BLOCK_SIZE);
3009
3010 num_of_pages = fws->paging_mem_size / IWM_FW_PAGING_SIZE;
3011 sc->num_of_paging_blk =
3012 howmany(num_of_pages, IWM_NUM_OF_PAGE_PER_GROUP);
3013 sc->num_of_pages_in_last_blk = num_of_pages -
3014 IWM_NUM_OF_PAGE_PER_GROUP * (sc->num_of_paging_blk - 1);
3015
3016 DPRINTF(("%s: Paging: allocating mem for %d paging blocks, "
3017 "each block holds 8 pages, last block holds %d pages\n",
3018 DEVNAME(sc), sc->num_of_paging_blk, sc->num_of_pages_in_last_blk));
3019
3020 /* allocate block of 4Kbytes for paging CSS */
3021 error = iwm_dma_contig_alloc(sc->sc_dmat,
3022 &sc->fw_paging_db[blk_idx].fw_paging_block, IWM_FW_PAGING_SIZE,
3023 4096);
3024 if (error) {
3025 /* free all the previous pages since we failed */
3026 iwm_free_fw_paging(sc);
3027 return ENOMEM;
3028 }
3029
3030 sc->fw_paging_db[blk_idx].fw_paging_size = IWM_FW_PAGING_SIZE;
3031
3032 DPRINTF(("%s: Paging: allocated 4K(CSS) bytes for firmware paging.\n",
3033 DEVNAME(sc)));
3034
	/*
	 * Allocate blocks in DRAM. The loop starts at index 1 because
	 * the CSS block was already allocated in fw_paging_db[0].
	 */
3039 for (blk_idx = 1; blk_idx < sc->num_of_paging_blk + 1; blk_idx++) {
		/* Allocate a block of IWM_PAGING_BLOCK_SIZE (32K). */
3042 error = iwm_dma_contig_alloc(sc->sc_dmat,
3043 &sc->fw_paging_db[blk_idx].fw_paging_block,
3044 IWM_PAGING_BLOCK_SIZE, 4096);
3045 if (error) {
3046 /* free all the previous pages since we failed */
3047 iwm_free_fw_paging(sc);
3048 return ENOMEM;
3049 }
3050
3051 sc->fw_paging_db[blk_idx].fw_paging_size =
3052 IWM_PAGING_BLOCK_SIZE;
3053
3054 DPRINTF(("%s: Paging: allocated 32K bytes for firmware "
3055 "paging.\n", DEVNAME(sc)));
3056 }
3057
3058 return 0;
3059 }
3060
3061 static int
3062 iwm_save_fw_paging(struct iwm_softc *sc, const struct iwm_fw_sects *fws)
3063 {
3064 int err;
3065
3066 err = iwm_alloc_fw_paging_mem(sc, fws);
3067 if (err)
3068 return err;
3069
3070 return iwm_fill_paging_mem(sc, fws);
3071 }
3072
3073 static bool
3074 iwm_has_new_tx_api(struct iwm_softc *sc)
3075 {
3076 /* XXX */
3077 return false;
3078 }
3079
3080 /* send paging cmd to FW in case CPU2 has paging image */
3081 static int
3082 iwm_send_paging_cmd(struct iwm_softc *sc, const struct iwm_fw_sects *fws)
3083 {
3084 struct iwm_fw_paging_cmd fw_paging_cmd = {
3085 .flags = htole32(IWM_PAGING_CMD_IS_SECURED |
3086 IWM_PAGING_CMD_IS_ENABLED |
3087 (sc->num_of_pages_in_last_blk <<
3088 IWM_PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS)),
3089 .block_size = htole32(IWM_BLOCK_2_EXP_SIZE),
3090 .block_num = htole32(sc->num_of_paging_blk),
3091 };
3092 size_t size = sizeof(fw_paging_cmd);
3093 int blk_idx;
3094 bus_dmamap_t dmap;
3095
3096 if (!iwm_has_new_tx_api(sc))
3097 size -= (sizeof(uint64_t) - sizeof(uint32_t)) *
3098 IWM_NUM_OF_FW_PAGING_BLOCKS;
3099
	/* Loop over all paging blocks + the CSS block. */
3101 for (blk_idx = 0; blk_idx < sc->num_of_paging_blk + 1; blk_idx++) {
3102 bus_addr_t dev_phy_addr =
3103 sc->fw_paging_db[blk_idx].fw_paging_block.paddr;
3104 if (iwm_has_new_tx_api(sc)) {
3105 fw_paging_cmd.device_phy_addr.addr64[blk_idx] =
3106 htole64(dev_phy_addr);
3107 } else {
3108 dev_phy_addr = dev_phy_addr >> IWM_PAGE_2_EXP_SIZE;
3109 fw_paging_cmd.device_phy_addr.addr32[blk_idx] =
3110 htole32(dev_phy_addr);
3111 }
		dmap = sc->fw_paging_db[blk_idx].fw_paging_block.map;
3113 bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
3114 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
3115 }
3116
3117 return iwm_send_cmd_pdu(sc,
3118 iwm_cmd_id(IWM_FW_PAGING_BLOCK_CMD, IWM_ALWAYS_LONG_GROUP, 0),
3119 0, size, &fw_paging_cmd);
3120 }
3121
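/*
 * Determine the MAC address on 8000-family devices: prefer the address
 * from the MAC-override NVM section if it is valid, and fall back to
 * the OTP address stored in the WFMP PRPH registers.
 */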
3122 static void
3123 iwm_set_hw_address_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
3124 const uint16_t *mac_override, const uint16_t *nvm_hw)
3125 {
3126 static const uint8_t reserved_mac[ETHER_ADDR_LEN] = {
3127 0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
3128 };
3129 static const u_int8_t etheranyaddr[ETHER_ADDR_LEN] = {
3130 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
3131 };
3132 const uint8_t *hw_addr;
3133
3134 if (mac_override) {
3135 hw_addr = (const uint8_t *)(mac_override +
3136 IWM_MAC_ADDRESS_OVERRIDE_8000);
3137
3138 /*
3139 * Store the MAC address from MAO section.
3140 * No byte swapping is required in MAO section
3141 */
3142 memcpy(data->hw_addr, hw_addr, ETHER_ADDR_LEN);
3143
3144 /*
3145 * Force the use of the OTP MAC address in case of reserved MAC
3146 * address in the NVM, or if address is given but invalid.
3147 */
3148 if (memcmp(reserved_mac, hw_addr, ETHER_ADDR_LEN) != 0 &&
3149 (memcmp(etherbroadcastaddr, data->hw_addr,
3150 sizeof(etherbroadcastaddr)) != 0) &&
3151 (memcmp(etheranyaddr, data->hw_addr,
3152 sizeof(etheranyaddr)) != 0) &&
3153 !ETHER_IS_MULTICAST(data->hw_addr))
3154 return;
3155 }
3156
3157 if (nvm_hw) {
3158 /* Read the mac address from WFMP registers. */
3159 uint32_t mac_addr0 =
3160 htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
3161 uint32_t mac_addr1 =
3162 htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));
3163
3164 hw_addr = (const uint8_t *)&mac_addr0;
3165 data->hw_addr[0] = hw_addr[3];
3166 data->hw_addr[1] = hw_addr[2];
3167 data->hw_addr[2] = hw_addr[1];
3168 data->hw_addr[3] = hw_addr[0];
3169
3170 hw_addr = (const uint8_t *)&mac_addr1;
3171 data->hw_addr[4] = hw_addr[1];
3172 data->hw_addr[5] = hw_addr[0];
3173
3174 return;
3175 }
3176
3177 aprint_error_dev(sc->sc_dev, "mac address not found\n");
3178 memset(data->hw_addr, 0, sizeof(data->hw_addr));
3179 }
3180
3181 static int
3182 iwm_parse_nvm_data(struct iwm_softc *sc, const uint16_t *nvm_hw,
3183 const uint16_t *nvm_sw, const uint16_t *nvm_calib,
3184 const uint16_t *mac_override, const uint16_t *phy_sku,
3185 const uint16_t *regulatory)
3186 {
3187 struct iwm_nvm_data *data = &sc->sc_nvm;
3188 uint8_t hw_addr[ETHER_ADDR_LEN];
3189 uint32_t sku;
3190
3191 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
3192 uint16_t radio_cfg = le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
3193 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
3194 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
3195 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
3196 data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
3197
3198 data->nvm_version = le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
3199 sku = le16_to_cpup(nvm_sw + IWM_SKU);
3200 } else {
3201 uint32_t radio_cfg = le32_to_cpup(phy_sku + IWM_RADIO_CFG_8000);
3202 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
3203 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
3204 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
3205 data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK_8000(radio_cfg);
3206 data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
3207 data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
3208
3209 data->nvm_version = le32_to_cpup(nvm_sw + IWM_NVM_VERSION_8000);
3210 sku = le32_to_cpup(phy_sku + IWM_SKU_8000);
3211 }
3212
3213 data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
3214 data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
3215 data->sku_cap_11n_enable = sku & IWM_NVM_SKU_CAP_11N_ENABLE;
3216 data->sku_cap_mimo_disable = sku & IWM_NVM_SKU_CAP_MIMO_DISABLE;
3217
3218 data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
3219
3220 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
3221 memcpy(hw_addr, nvm_hw + IWM_HW_ADDR, ETHER_ADDR_LEN);
3222 data->hw_addr[0] = hw_addr[1];
3223 data->hw_addr[1] = hw_addr[0];
3224 data->hw_addr[2] = hw_addr[3];
3225 data->hw_addr[3] = hw_addr[2];
3226 data->hw_addr[4] = hw_addr[5];
3227 data->hw_addr[5] = hw_addr[4];
3228 } else
3229 iwm_set_hw_address_8000(sc, data, mac_override, nvm_hw);
3230
3231 if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
3232 uint16_t lar_offset, lar_config;
3233 lar_offset = data->nvm_version < 0xE39 ?
3234 IWM_NVM_LAR_OFFSET_8000_OLD : IWM_NVM_LAR_OFFSET_8000;
3235 lar_config = le16_to_cpup(regulatory + lar_offset);
3236 data->lar_enabled = !!(lar_config & IWM_NVM_LAR_ENABLED_8000);
3237 }
3238
3239 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
3240 iwm_init_channel_map(sc, &nvm_sw[IWM_NVM_CHANNELS],
3241 iwm_nvm_channels, __arraycount(iwm_nvm_channels));
3242 else
		iwm_init_channel_map(sc, &regulatory[IWM_NVM_CHANNELS_8000],
3244 iwm_nvm_channels_8000, __arraycount(iwm_nvm_channels_8000));
3245
	data->calib_version = 255; /* TODO: this value keeps some checks
					from failing; we need to find out
					whether this field is still needed
					and, if so, where it lives in the
					NVM */
3251
3252 return 0;
3253 }
3254
3255 static int
3256 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
3257 {
3258 const uint16_t *hw, *sw, *calib, *mac_override = NULL, *phy_sku = NULL;
3259 const uint16_t *regulatory = NULL;
3260
3261 /* Checking for required sections */
3262 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
3263 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
3264 !sections[IWM_NVM_SECTION_TYPE_HW].data) {
3265 return ENOENT;
3266 }
3267
3268 hw = (const uint16_t *) sections[IWM_NVM_SECTION_TYPE_HW].data;
3269 } else if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
3270 /* SW and REGULATORY sections are mandatory */
3271 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
3272 !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
3273 return ENOENT;
3274 }
3275 /* MAC_OVERRIDE or at least HW section must exist */
3276 if (!sections[IWM_NVM_SECTION_TYPE_HW_8000].data &&
3277 !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
3278 return ENOENT;
3279 }
3280
3281 /* PHY_SKU section is mandatory in B0 */
3282 if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
3283 return ENOENT;
3284 }
3285
3286 regulatory = (const uint16_t *)
3287 sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
3288 hw = (const uint16_t *)
3289 sections[IWM_NVM_SECTION_TYPE_HW_8000].data;
3290 mac_override =
3291 (const uint16_t *)
3292 sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
3293 phy_sku = (const uint16_t *)
3294 sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
3295 } else {
3296 panic("unknown device family %d\n", sc->sc_device_family);
3297 }
3298
3299 sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
3300 calib = (const uint16_t *)
3301 sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
3302
3303 return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
3304 phy_sku, regulatory);
3305 }
3306
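/*
 * Read all NVM sections listed in iwm_nvm_to_read into memory and parse
 * them into sc->sc_nvm. Sections that fail to read are skipped.
 */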
3307 static int
3308 iwm_nvm_init(struct iwm_softc *sc)
3309 {
3310 struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
3311 int i, section, err;
3312 uint16_t len;
3313 uint8_t *buf;
3314 const size_t bufsz = (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) ?
3315 IWM_MAX_NVM_SECTION_SIZE_8000 : IWM_MAX_NVM_SECTION_SIZE_7000;
3316
3317 /* Read From FW NVM */
3318 DPRINTF(("Read NVM\n"));
3319
3320 memset(nvm_sections, 0, sizeof(nvm_sections));
3321
3322 buf = kmem_alloc(bufsz, KM_SLEEP);
3323 if (buf == NULL)
3324 return ENOMEM;
3325
3326 for (i = 0; i < __arraycount(iwm_nvm_to_read); i++) {
3327 section = iwm_nvm_to_read[i];
		KASSERT(section < IWM_NVM_NUM_OF_SECTIONS);
3329
3330 err = iwm_nvm_read_section(sc, section, buf, &len, bufsz);
3331 if (err) {
3332 err = 0;
3333 continue;
3334 }
3335 nvm_sections[section].data = kmem_alloc(len, KM_SLEEP);
3336 if (nvm_sections[section].data == NULL) {
3337 err = ENOMEM;
3338 break;
3339 }
3340 memcpy(nvm_sections[section].data, buf, len);
3341 nvm_sections[section].length = len;
3342 }
3343 kmem_free(buf, bufsz);
3344 if (err == 0)
3345 err = iwm_parse_nvm_sections(sc, nvm_sections);
3346
3347 for (i = 0; i < IWM_NVM_NUM_OF_SECTIONS; i++) {
3348 if (nvm_sections[i].data != NULL)
3349 kmem_free(nvm_sections[i].data, nvm_sections[i].length);
3350 }
3351
3352 return err;
3353 }
3354
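/*
 * Load one firmware section into device memory at dst_addr, splitting
 * it into DMA chunks of at most IWM_FH_MEM_TB_MAX_LENGTH bytes. Chunks
 * destined for the extended SRAM region must be written with the LMPM
 * extended address space bit set.
 */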
3355 static int
3356 iwm_firmware_load_sect(struct iwm_softc *sc, uint32_t dst_addr,
3357 const uint8_t *section, uint32_t byte_cnt)
3358 {
3359 int err = EINVAL;
3360 uint32_t chunk_sz, offset;
3361
3362 chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, byte_cnt);
3363
3364 for (offset = 0; offset < byte_cnt; offset += chunk_sz) {
3365 uint32_t addr, len;
3366 const uint8_t *data;
3367 bool is_extended = false;
3368
3369 addr = dst_addr + offset;
3370 len = MIN(chunk_sz, byte_cnt - offset);
3371 data = section + offset;
3372
3373 if (addr >= IWM_FW_MEM_EXTENDED_START &&
3374 addr <= IWM_FW_MEM_EXTENDED_END)
3375 is_extended = true;
3376
3377 if (is_extended)
3378 iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
3379 IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
3380
3381 err = iwm_firmware_load_chunk(sc, addr, data, len);
3382
3383 if (is_extended)
3384 iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
3385 IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
3386
3387 if (err)
3388 break;
3389 }
3390
3391 return err;
3392 }
3393
3394 static int
3395 iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
3396 const uint8_t *section, uint32_t byte_cnt)
3397 {
3398 struct iwm_dma_info *dma = &sc->fw_dma;
3399 int err;
3400
3401 /* Copy firmware chunk into pre-allocated DMA-safe memory. */
3402 memcpy(dma->vaddr, section, byte_cnt);
3403 bus_dmamap_sync(sc->sc_dmat, dma->map, 0, byte_cnt,
3404 BUS_DMASYNC_PREWRITE);
3405
3406 sc->sc_fw_chunk_done = 0;
3407
3408 if (!iwm_nic_lock(sc))
3409 return EBUSY;
3410
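	/*
	 * Point the service channel's transfer descriptor at the chunk
	 * and kick off the DMA; an FH TX interrupt signals completion.
	 */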
3411 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
3412 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
3413 IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
3414 dst_addr);
3415 IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
3416 dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
3417 IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
3418 (iwm_get_dma_hi_addr(dma->paddr)
3419 << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
3420 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
3421 1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
3422 1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
3423 IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
3424 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
3425 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
3426 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
3427 IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
3428
3429 iwm_nic_unlock(sc);
3430
3431 /* Wait for this segment to load. */
3432 err = 0;
3433 while (!sc->sc_fw_chunk_done) {
3434 err = tsleep(&sc->sc_fw, 0, "iwmfw", mstohz(5000));
3435 if (err)
3436 break;
3437 }
3438 if (!sc->sc_fw_chunk_done) {
3439 DPRINTF(("%s: fw chunk addr 0x%x len %d failed to load\n",
3440 DEVNAME(sc), dst_addr, byte_cnt));
3441 }
3442
3443 return err;
3444 }
3445
3446 static int
3447 iwm_load_cpu_sections_7000(struct iwm_softc *sc, struct iwm_fw_sects *fws,
3448 int cpu, int *first_ucode_section)
3449 {
3450 int i, err = 0;
3451 uint32_t last_read_idx = 0;
3452 void *data;
3453 uint32_t dlen;
3454 uint32_t offset;
3455
3456 if (cpu == 1) {
3457 *first_ucode_section = 0;
3458 } else {
3459 (*first_ucode_section)++;
3460 }
3461
3462 for (i = *first_ucode_section; i < IWM_UCODE_SECT_MAX; i++) {
3463 last_read_idx = i;
3464 data = fws->fw_sect[i].fws_data;
3465 dlen = fws->fw_sect[i].fws_len;
3466 offset = fws->fw_sect[i].fws_devoff;
3467
		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates the
		 * CPU1 sections from the CPU2 sections.
		 * PAGING_SEPARATOR_SECTION delimiter - separates the
		 * non-paged CPU2 sections from the CPU2 paging sections.
		 */
3474 if (!data || offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
3475 offset == IWM_PAGING_SEPARATOR_SECTION)
3476 break;
3477
3478 if (dlen > sc->sc_fwdmasegsz) {
3479 err = EFBIG;
3480 } else
3481 err = iwm_firmware_load_sect(sc, offset, data, dlen);
3482 if (err) {
3483 DPRINTF(("%s: could not load firmware chunk %d "
3484 "(error %d)\n", DEVNAME(sc), i, err));
3485 return err;
3486 }
3487 }
3488
3489 *first_ucode_section = last_read_idx;
3490
3491 return 0;
3492 }
3493
3494 static int
3495 iwm_load_firmware_7000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3496 {
3497 struct iwm_fw_sects *fws;
3498 int err = 0;
3499 int first_ucode_section;
3500
3501 fws = &sc->sc_fw.fw_sects[ucode_type];
3502
3503 DPRINTF(("%s: working with %s CPU\n", DEVNAME(sc),
3504 fws->is_dual_cpus ? "dual" : "single"));
3505
3506 /* load to FW the binary Secured sections of CPU1 */
3507 err = iwm_load_cpu_sections_7000(sc, fws, 1, &first_ucode_section);
3508 if (err)
3509 return err;
3510
3511 if (fws->is_dual_cpus) {
3512 /* set CPU2 header address */
3513 if (iwm_nic_lock(sc)) {
3514 iwm_write_prph(sc,
3515 IWM_LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
3516 IWM_LMPM_SECURE_CPU2_HDR_MEM_SPACE);
3517 iwm_nic_unlock(sc);
3518 }
3519
3520 /* load to FW the binary sections of CPU2 */
3521 err = iwm_load_cpu_sections_7000(sc, fws, 2,
3522 &first_ucode_section);
3523 if (err)
3524 return err;
3525 }
3526
3527 /* release CPU reset */
3528 IWM_WRITE(sc, IWM_CSR_RESET, 0);
3529
3530 return 0;
3531 }
3532
3533 static int
3534 iwm_load_cpu_sections_8000(struct iwm_softc *sc, struct iwm_fw_sects *fws,
3535 int cpu, int *first_ucode_section)
3536 {
3537 int shift_param;
3538 int i, err = 0, sec_num = 0x1;
3539 uint32_t val, last_read_idx = 0;
3540 void *data;
3541 uint32_t dlen;
3542 uint32_t offset;
3543
3544 if (cpu == 1) {
3545 shift_param = 0;
3546 *first_ucode_section = 0;
3547 } else {
3548 shift_param = 16;
3549 (*first_ucode_section)++;
3550 }
3551
3552 for (i = *first_ucode_section; i < IWM_UCODE_SECT_MAX; i++) {
3553 last_read_idx = i;
3554 data = fws->fw_sect[i].fws_data;
3555 dlen = fws->fw_sect[i].fws_len;
3556 offset = fws->fw_sect[i].fws_devoff;
3557
		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates the
		 * CPU1 sections from the CPU2 sections.
		 * PAGING_SEPARATOR_SECTION delimiter - separates the
		 * non-paged CPU2 sections from the CPU2 paging sections.
		 */
3564 if (!data || offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
3565 offset == IWM_PAGING_SEPARATOR_SECTION)
3566 break;
3567
3568 if (dlen > sc->sc_fwdmasegsz) {
3569 err = EFBIG;
3570 } else
3571 err = iwm_firmware_load_sect(sc, offset, data, dlen);
3572 if (err) {
3573 DPRINTF(("%s: could not load firmware chunk %d "
3574 "(error %d)\n", DEVNAME(sc), i, err));
3575 return err;
3576 }
3577
3578 /* Notify the ucode of the loaded section number and status */
3579 if (iwm_nic_lock(sc)) {
3580 val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
3581 val = val | (sec_num << shift_param);
3582 IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
3583 sec_num = (sec_num << 1) | 0x1;
3584 iwm_nic_unlock(sc);
3585
3586 /*
3587 * The firmware won't load correctly without this delay.
3588 */
3589 DELAY(8000);
3590 }
3591 }
3592
3593 *first_ucode_section = last_read_idx;
3594
3595 if (iwm_nic_lock(sc)) {
3596 if (cpu == 1)
3597 IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
3598 else
3599 IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
3600 iwm_nic_unlock(sc);
3601 }
3602
3603 return 0;
3604 }
3605
3606 static int
3607 iwm_load_firmware_8000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3608 {
3609 struct iwm_fw_sects *fws;
3610 int err = 0;
3611 int first_ucode_section;
3612
3613 fws = &sc->sc_fw.fw_sects[ucode_type];
3614
3615 /* configure the ucode to be ready to get the secured image */
3616 /* release CPU reset */
3617 if (iwm_nic_lock(sc)) {
3618 iwm_write_prph(sc, IWM_RELEASE_CPU_RESET,
3619 IWM_RELEASE_CPU_RESET_BIT);
3620 iwm_nic_unlock(sc);
3621 }
3622
3623 /* load to FW the binary Secured sections of CPU1 */
3624 err = iwm_load_cpu_sections_8000(sc, fws, 1, &first_ucode_section);
3625 if (err)
3626 return err;
3627
3628 /* load to FW the binary sections of CPU2 */
3629 return iwm_load_cpu_sections_8000(sc, fws, 2, &first_ucode_section);
3630 }
3631
3632 static int
3633 iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3634 {
3635 int err, w;
3636
3637 sc->sc_uc.uc_intr = 0;
3638
3639 if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
3640 err = iwm_load_firmware_8000(sc, ucode_type);
3641 else
3642 err = iwm_load_firmware_7000(sc, ucode_type);
3643 if (err)
3644 return err;
3645
3646 /* wait for the firmware to load */
3647 for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++)
3648 err = tsleep(&sc->sc_uc, 0, "iwmuc", mstohz(100));
3649 if (err || !sc->sc_uc.uc_ok) {
3650 aprint_error_dev(sc->sc_dev,
3651 "could not load firmware (error %d, ok %d)\n",
3652 err, sc->sc_uc.uc_ok);
3653 if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
3654 aprint_error_dev(sc->sc_dev, "cpu1 status: 0x%x\n",
3655 iwm_read_prph(sc, IWM_SB_CPU_1_STATUS));
3656 aprint_error_dev(sc->sc_dev, "cpu2 status: 0x%x\n",
3657 iwm_read_prph(sc, IWM_SB_CPU_2_STATUS));
3658 }
3659 }
3660
3661 return err;
3662 }
3663
3664 static int
3665 iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3666 {
3667 int err;
3668
3669 IWM_WRITE(sc, IWM_CSR_INT, ~0);
3670
3671 err = iwm_nic_init(sc);
3672 if (err) {
3673 aprint_error_dev(sc->sc_dev, "Unable to init nic\n");
3674 return err;
3675 }
3676
3677 /* make sure rfkill handshake bits are cleared */
3678 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
3679 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
3680 IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
3681
3682 /* clear (again), then enable host interrupts */
3683 IWM_WRITE(sc, IWM_CSR_INT, ~0);
3684 iwm_enable_interrupts(sc);
3685
3686 /* really make sure rfkill handshake bits are cleared */
3687 /* maybe we should write a few times more? just to make sure */
3688 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
3689 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
3690
3691 return iwm_load_firmware(sc, ucode_type);
3692 }
3693
3694 static int
3695 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
3696 {
3697 struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
3698 .valid = htole32(valid_tx_ant),
3699 };
3700
3701 return iwm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD, 0,
3702 sizeof(tx_ant_cmd), &tx_ant_cmd);
3703 }
3704
3705 static int
3706 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
3707 {
3708 struct iwm_phy_cfg_cmd phy_cfg_cmd;
3709 enum iwm_ucode_type ucode_type = sc->sc_uc_current;
3710
3711 phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
3712 phy_cfg_cmd.calib_control.event_trigger =
3713 sc->sc_default_calib[ucode_type].event_trigger;
3714 phy_cfg_cmd.calib_control.flow_trigger =
3715 sc->sc_default_calib[ucode_type].flow_trigger;
3716
3717 DPRINTFN(10, ("Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg));
3718 return iwm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, 0,
3719 sizeof(phy_cfg_cmd), &phy_cfg_cmd);
3720 }
3721
3722 static int
3723 iwm_load_ucode_wait_alive(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3724 {
3725 struct iwm_fw_sects *fws;
3726 enum iwm_ucode_type old_type = sc->sc_uc_current;
3727 int err;
3728
3729 err = iwm_read_firmware(sc, ucode_type);
3730 if (err)
3731 return err;
3732
3733 sc->sc_uc_current = ucode_type;
3734 err = iwm_start_fw(sc, ucode_type);
3735 if (err) {
3736 sc->sc_uc_current = old_type;
3737 return err;
3738 }
3739
3740 err = iwm_post_alive(sc);
3741 if (err)
3742 return err;
3743
3744 fws = &sc->sc_fw.fw_sects[ucode_type];
3745 if (fws->paging_mem_size) {
3746 err = iwm_save_fw_paging(sc, fws);
3747 if (err)
3748 return err;
3749
3750 err = iwm_send_paging_cmd(sc, fws);
3751 if (err) {
3752 iwm_free_fw_paging(sc);
3753 return err;
3754 }
3755 }
3756
3757 return 0;
3758 }
3759
3760 static int
3761 iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
3762 {
3763 int err;
3764
3765 if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
3766 aprint_error_dev(sc->sc_dev,
3767 "radio is disabled by hardware switch\n");
3768 return EPERM;
3769 }
3770
3771 sc->sc_init_complete = 0;
3772 err = iwm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_INIT);
3773 if (err) {
3774 DPRINTF(("%s: failed to load init firmware\n", DEVNAME(sc)));
3775 return err;
3776 }
3777
3778 if (justnvm) {
3779 err = iwm_nvm_init(sc);
3780 if (err) {
3781 aprint_error_dev(sc->sc_dev, "failed to read nvm\n");
3782 return err;
3783 }
3784
3785 memcpy(&sc->sc_ic.ic_myaddr, &sc->sc_nvm.hw_addr,
3786 ETHER_ADDR_LEN);
3787 return 0;
3788 }
3789
3790 err = iwm_send_bt_init_conf(sc);
3791 if (err)
3792 return err;
3793
3794 err = iwm_sf_config(sc, IWM_SF_INIT_OFF);
3795 if (err)
3796 return err;
3797
3798 err = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc));
3799 if (err)
3800 return err;
3801
3802 	/*
3803 	 * Send the PHY configuration command to the init uCode
3804 	 * to start the 16.0 uCode init image internal calibrations.
3805 	 */
3806 err = iwm_send_phy_cfg_cmd(sc);
3807 if (err)
3808 return err;
3809
3810 /*
3811 * Nothing to do but wait for the init complete notification
3812 * from the firmware
3813 */
3814 while (!sc->sc_init_complete) {
3815 err = tsleep(&sc->sc_init_complete, 0, "iwminit", mstohz(2000));
3816 if (err)
3817 break;
3818 }
3819
3820 return err;
3821 }
3822
3823 static int
3824 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
3825 {
3826 struct iwm_rx_ring *ring = &sc->rxq;
3827 struct iwm_rx_data *data = &ring->data[idx];
3828 struct mbuf *m;
3829 int err;
3830 int fatal = 0;
3831
3832 m = m_gethdr(M_DONTWAIT, MT_DATA);
3833 if (m == NULL)
3834 return ENOBUFS;
3835
3836 if (size <= MCLBYTES) {
3837 MCLGET(m, M_DONTWAIT);
3838 } else {
3839 MEXTMALLOC(m, IWM_RBUF_SIZE, M_DONTWAIT);
3840 }
3841 if ((m->m_flags & M_EXT) == 0) {
3842 m_freem(m);
3843 return ENOBUFS;
3844 }
3845
3846 if (data->m != NULL) {
3847 bus_dmamap_unload(sc->sc_dmat, data->map);
3848 fatal = 1;
3849 }
3850
3851 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3852 err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
3853 BUS_DMA_READ|BUS_DMA_NOWAIT);
3854 if (err) {
3855 /* XXX */
3856 if (fatal)
3857 panic("iwm: could not load RX mbuf");
3858 m_freem(m);
3859 return err;
3860 }
3861 data->m = m;
3862 bus_dmamap_sync(sc->sc_dmat, data->map, 0, size, BUS_DMASYNC_PREREAD);
3863
3864 	/* Update RX descriptor.  The hardware takes the buffer's DMA address in units of 256 bytes (hence the shift), so RX buffers must be 256-byte aligned. */
3865 ring->desc[idx] = htole32(data->map->dm_segs[0].ds_addr >> 8);
3866 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
3867 idx * sizeof(uint32_t), sizeof(uint32_t), BUS_DMASYNC_PREWRITE);
3868
3869 return 0;
3870 }
3871
3872 #define IWM_RSSI_OFFSET 50
3873 static int
3874 iwm_calc_rssi(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3875 {
3876 int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
3877 uint32_t agc_a, agc_b;
3878 uint32_t val;
3879
3880 val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_AGC_IDX]);
3881 agc_a = (val & IWM_OFDM_AGC_A_MSK) >> IWM_OFDM_AGC_A_POS;
3882 agc_b = (val & IWM_OFDM_AGC_B_MSK) >> IWM_OFDM_AGC_B_POS;
3883
3884 val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_RSSI_AB_IDX]);
3885 rssi_a = (val & IWM_OFDM_RSSI_INBAND_A_MSK) >> IWM_OFDM_RSSI_A_POS;
3886 rssi_b = (val & IWM_OFDM_RSSI_INBAND_B_MSK) >> IWM_OFDM_RSSI_B_POS;
3887
3888 /*
3889 * dBm = rssi dB - agc dB - constant.
3890 * Higher AGC (higher radio gain) means lower signal.
3891 */
3892 rssi_a_dbm = rssi_a - IWM_RSSI_OFFSET - agc_a;
3893 rssi_b_dbm = rssi_b - IWM_RSSI_OFFSET - agc_b;
3894 max_rssi_dbm = MAX(rssi_a_dbm, rssi_b_dbm);
3895
3896 DPRINTF(("Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
3897 rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b));
3898
3899 return max_rssi_dbm;
3900 }
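
/*
 * Worked example for the conversion above, with hypothetical register
 * values rssi_a == 30, agc_a == 40, rssi_b == 25, agc_b == 50:
 * rssi_a_dbm = 30 - 50 - 40 = -60, rssi_b_dbm = 25 - 50 - 50 = -75,
 * so max_rssi_dbm == -60 (the A chain hears the signal best).
 */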
3901
3902 /*
3903  * RSSI values are reported by the FW as positive values - we need to
3904  * negate them to obtain their dBm value. Account for missing antennas
3905  * by replacing 0 values with -256 dBm: practically zero power and an
3906  * infeasible 8-bit value. */
3907 static int
3908 iwm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3909 {
3910 int energy_a, energy_b, energy_c, max_energy;
3911 uint32_t val;
3912
3913 val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
3914 energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3915 IWM_RX_INFO_ENERGY_ANT_A_POS;
3916 energy_a = energy_a ? -energy_a : -256;
3917 energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3918 IWM_RX_INFO_ENERGY_ANT_B_POS;
3919 energy_b = energy_b ? -energy_b : -256;
3920 energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3921 IWM_RX_INFO_ENERGY_ANT_C_POS;
3922 energy_c = energy_c ? -energy_c : -256;
3923 max_energy = MAX(energy_a, energy_b);
3924 max_energy = MAX(max_energy, energy_c);
3925
3926 DPRINTFN(12, ("energy In A %d B %d C %d, and max %d\n",
3927 energy_a, energy_b, energy_c, max_energy));
3928
3929 return max_energy;
3930 }
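
/*
 * Bitfield example for the extraction above, assuming the usual
 * iwlwifi layout (antenna A in bits 7:0, B in 15:8, C in 23:16):
 * with val == 0x00002d33, energy_a = 0x33 (51) -> -51 dBm,
 * energy_b = 0x2d (45) -> -45 dBm, energy_c = 0 -> -256 (antenna
 * missing), so max_energy == -45.
 */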
3931
3932 static void
3933 iwm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3934 struct iwm_rx_data *data)
3935 {
3936 struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
3937
3938 DPRINTFN(20, ("received PHY stats\n"));
3939 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
3940 sizeof(*phy_info), BUS_DMASYNC_POSTREAD);
3941
3942 memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3943 }
3944
3945 /*
3946 * Retrieve the average noise (in dBm) among receivers.
3947 */
3948 static int
3949 iwm_get_noise(const struct iwm_statistics_rx_non_phy *stats)
3950 {
3951 int i, total, nbant, noise;
3952
3953 total = nbant = noise = 0;
3954 for (i = 0; i < 3; i++) {
3955 noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
3956 if (noise) {
3957 total += noise;
3958 nbant++;
3959 }
3960 }
3961
3962 /* There should be at least one antenna but check anyway. */
3963 return (nbant == 0) ? -127 : (total / nbant) - 107;
3964 }
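
/*
 * A minimal, not-compiled sketch of iwm_get_noise() in action; the
 * helper function and its input values are hypothetical.
 */
#if 0
static void
iwm_get_noise_example(void)
{
	struct iwm_statistics_rx_non_phy stats;

	memset(&stats, 0, sizeof(stats));
	stats.beacon_silence_rssi[0] = htole32(30);
	stats.beacon_silence_rssi[1] = htole32(28);
	/* Antenna C reports 0 and is skipped as missing. */

	/* total = 30 + 28 = 58, nbant = 2: 58 / 2 - 107 = -78 dBm. */
	KASSERT(iwm_get_noise(&stats) == -78);
}
#endif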
3965
3966 static void
3967 iwm_rx_rx_mpdu(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3968 struct iwm_rx_data *data)
3969 {
3970 struct ieee80211com *ic = &sc->sc_ic;
3971 struct ieee80211_frame *wh;
3972 struct ieee80211_node *ni;
3973 struct ieee80211_channel *c = NULL;
3974 struct mbuf *m;
3975 struct iwm_rx_phy_info *phy_info;
3976 struct iwm_rx_mpdu_res_start *rx_res;
3977 int device_timestamp;
3978 uint32_t len;
3979 uint32_t rx_pkt_status;
3980 int rssi;
3981 int s;
3982
3983 bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
3984 BUS_DMASYNC_POSTREAD);
3985
3986 phy_info = &sc->sc_last_phy_info;
3987 rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
3988 wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
3989 len = le16toh(rx_res->byte_count);
3990 rx_pkt_status = le32toh(*(uint32_t *)(pkt->data +
3991 sizeof(*rx_res) + len));
3992
3993 m = data->m;
3994 m->m_data = pkt->data + sizeof(*rx_res);
3995 m->m_pkthdr.len = m->m_len = len;
3996
3997 if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
3998 DPRINTF(("dsp size out of range [0,20]: %d\n",
3999 phy_info->cfg_phy_cnt));
4000 return;
4001 }
4002
4003 if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
4004 !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
4005 DPRINTF(("Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status));
4006 return; /* drop */
4007 }
4008
4009 device_timestamp = le32toh(phy_info->system_timestamp);
4010
4011 if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_RX_ENERGY_API) {
4012 rssi = iwm_get_signal_strength(sc, phy_info);
4013 } else {
4014 rssi = iwm_calc_rssi(sc, phy_info);
4015 }
4016 rssi = -rssi;
4017
4018 if (ic->ic_state == IEEE80211_S_SCAN)
4019 iwm_fix_channel(sc, m);
4020
4021 if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0)
4022 return;
4023
4024 m->m_pkthdr.rcvif = IC2IFP(ic);
4025
4026 if (le32toh(phy_info->channel) < __arraycount(ic->ic_channels))
4027 c = &ic->ic_channels[le32toh(phy_info->channel)];
4028
4029 s = splnet();
4030
4031 ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
4032 if (c)
4033 ni->ni_chan = c;
4034
4035 if (__predict_false(sc->sc_drvbpf != NULL)) {
4036 struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
4037
4038 tap->wr_flags = 0;
4039 if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
4040 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
4041 		tap->wr_chan_freq =
4042 		    htole16(ic->ic_channels[le32toh(phy_info->channel)].ic_freq);
4043 		tap->wr_chan_flags =
4044 		    htole16(ic->ic_channels[le32toh(phy_info->channel)].ic_flags);
4045 tap->wr_dbm_antsignal = (int8_t)rssi;
4046 tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
4047 tap->wr_tsft = phy_info->system_timestamp;
4048 if (phy_info->phy_flags &
4049 htole16(IWM_RX_RES_PHY_FLAGS_OFDM_HT)) {
4050 uint8_t mcs = (phy_info->rate_n_flags &
4051 htole32(IWM_RATE_HT_MCS_RATE_CODE_MSK |
4052 IWM_RATE_HT_MCS_NSS_MSK));
4053 tap->wr_rate = (0x80 | mcs);
4054 } else {
4055 uint8_t rate = (phy_info->rate_n_flags &
4056 htole32(IWM_RATE_LEGACY_RATE_MSK));
4057 switch (rate) {
4058 /* CCK rates. */
4059 case 10: tap->wr_rate = 2; break;
4060 case 20: tap->wr_rate = 4; break;
4061 case 55: tap->wr_rate = 11; break;
4062 case 110: tap->wr_rate = 22; break;
4063 /* OFDM rates. */
4064 case 0xd: tap->wr_rate = 12; break;
4065 case 0xf: tap->wr_rate = 18; break;
4066 case 0x5: tap->wr_rate = 24; break;
4067 case 0x7: tap->wr_rate = 36; break;
4068 case 0x9: tap->wr_rate = 48; break;
4069 case 0xb: tap->wr_rate = 72; break;
4070 case 0x1: tap->wr_rate = 96; break;
4071 case 0x3: tap->wr_rate = 108; break;
4072 /* Unknown rate: should not happen. */
4073 default: tap->wr_rate = 0;
4074 }
4075 }
4076
4077 bpf_mtap2(sc->sc_drvbpf, tap, sc->sc_rxtap_len, m);
4078 }
4079 ieee80211_input(ic, m, ni, rssi, device_timestamp);
4080 ieee80211_free_node(ni);
4081
4082 splx(s);
4083 }
4084
4085 static void
4086 iwm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
4087 struct iwm_node *in)
4088 {
4089 struct ieee80211com *ic = &sc->sc_ic;
4090 struct ifnet *ifp = IC2IFP(ic);
4091 struct iwm_tx_resp *tx_resp = (void *)pkt->data;
4092 int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
4093 int failack = tx_resp->failure_frame;
4094
4095 KASSERT(tx_resp->frame_count == 1);
4096
4097 /* Update rate control statistics. */
4098 in->in_amn.amn_txcnt++;
4099 if (failack > 0) {
4100 in->in_amn.amn_retrycnt++;
4101 }
4102
4103 if (status != IWM_TX_STATUS_SUCCESS &&
4104 status != IWM_TX_STATUS_DIRECT_DONE)
4105 ifp->if_oerrors++;
4106 else
4107 ifp->if_opackets++;
4108 }
4109
4110 static void
4111 iwm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
4112 struct iwm_rx_data *data)
4113 {
4114 struct ieee80211com *ic = &sc->sc_ic;
4115 struct ifnet *ifp = IC2IFP(ic);
4116 struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
4117 int idx = cmd_hdr->idx;
4118 int qid = cmd_hdr->qid;
4119 struct iwm_tx_ring *ring = &sc->txq[qid];
4120 struct iwm_tx_data *txd = &ring->data[idx];
4121 struct iwm_node *in = txd->in;
4122 int s;
4123
4124 s = splnet();
4125
4126 if (txd->done) {
4127 DPRINTF(("%s: got tx interrupt that's already been handled!\n",
4128 DEVNAME(sc)));
4129 splx(s);
4130 return;
4131 }
4132
4133 bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
4134 BUS_DMASYNC_POSTREAD);
4135
4136 sc->sc_tx_timer = 0;
4137
4138 iwm_rx_tx_cmd_single(sc, pkt, in);
4139
4140 bus_dmamap_sync(sc->sc_dmat, txd->map, 0, txd->map->dm_mapsize,
4141 BUS_DMASYNC_POSTWRITE);
4142 bus_dmamap_unload(sc->sc_dmat, txd->map);
4143 m_freem(txd->m);
4144
4145 DPRINTFN(8, ("free txd %p, in %p\n", txd, txd->in));
4146 KASSERT(txd->done == 0);
4147 txd->done = 1;
4148 KASSERT(txd->in);
4149
4150 txd->m = NULL;
4151 txd->in = NULL;
4152 ieee80211_free_node(&in->in_ni);
4153
4154 if (--ring->queued < IWM_TX_RING_LOMARK) {
4155 sc->qfullmsk &= ~(1 << qid);
4156 if (sc->qfullmsk == 0 && (ifp->if_flags & IFF_OACTIVE)) {
4157 ifp->if_flags &= ~IFF_OACTIVE;
4158 KASSERT(KERNEL_LOCKED_P());
4159 iwm_start(ifp);
4160 }
4161 }
4162
4163 splx(s);
4164 }
4165
4166 static int
4167 iwm_binding_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action)
4168 {
4169 struct iwm_binding_cmd cmd;
4170 struct iwm_phy_ctxt *phyctxt = in->in_phyctxt;
4171 int i, err;
4172 uint32_t status;
4173
4174 memset(&cmd, 0, sizeof(cmd));
4175
4176 cmd.id_and_color
4177 = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
4178 cmd.action = htole32(action);
4179 cmd.phy = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
4180
4181 cmd.macs[0] = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
4182 for (i = 1; i < IWM_MAX_MACS_IN_BINDING; i++)
4183 cmd.macs[i] = htole32(IWM_FW_CTXT_INVALID);
4184
4185 status = 0;
4186 err = iwm_send_cmd_pdu_status(sc, IWM_BINDING_CONTEXT_CMD,
4187 sizeof(cmd), &cmd, &status);
4188 if (err == 0 && status != 0)
4189 err = EIO;
4190
4191 return err;
4192 }
4193
4194 static void
4195 iwm_phy_ctxt_cmd_hdr(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
4196 struct iwm_phy_context_cmd *cmd, uint32_t action, uint32_t apply_time)
4197 {
4198 memset(cmd, 0, sizeof(struct iwm_phy_context_cmd));
4199
4200 cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(ctxt->id,
4201 ctxt->color));
4202 cmd->action = htole32(action);
4203 cmd->apply_time = htole32(apply_time);
4204 }
4205
4206 static void
4207 iwm_phy_ctxt_cmd_data(struct iwm_softc *sc, struct iwm_phy_context_cmd *cmd,
4208 struct ieee80211_channel *chan, uint8_t chains_static,
4209 uint8_t chains_dynamic)
4210 {
4211 struct ieee80211com *ic = &sc->sc_ic;
4212 uint8_t active_cnt, idle_cnt;
4213
4214 cmd->ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
4215 IWM_PHY_BAND_24 : IWM_PHY_BAND_5;
4216
4217 cmd->ci.channel = ieee80211_chan2ieee(ic, chan);
4218 cmd->ci.width = IWM_PHY_VHT_CHANNEL_MODE20;
4219 cmd->ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
4220
4221 	/* Set the RX chains. */
4222 idle_cnt = chains_static;
4223 active_cnt = chains_dynamic;
4224
4225 cmd->rxchain_info = htole32(iwm_fw_valid_rx_ant(sc) <<
4226 IWM_PHY_RX_CHAIN_VALID_POS);
4227 cmd->rxchain_info |= htole32(idle_cnt << IWM_PHY_RX_CHAIN_CNT_POS);
4228 cmd->rxchain_info |= htole32(active_cnt <<
4229 IWM_PHY_RX_CHAIN_MIMO_CNT_POS);
4230
4231 cmd->txchain_info = htole32(iwm_fw_valid_tx_ant(sc));
4232 }
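
/*
 * Illustration of the rxchain_info encoding built above, assuming the
 * firmware reports antennas A and B valid (iwm_fw_valid_rx_ant() ==
 * 0x3) and one chain is requested (chains_static == chains_dynamic
 * == 1):
 *
 *	rxchain_info = htole32((0x3 << IWM_PHY_RX_CHAIN_VALID_POS)
 *	    | (1 << IWM_PHY_RX_CHAIN_CNT_POS)
 *	    | (1 << IWM_PHY_RX_CHAIN_MIMO_CNT_POS));
 *
 * Note that the "idle" count is fed from chains_static and the MIMO
 * ("active") count from chains_dynamic, per the assignments above.
 */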
4233
4234 static int
4235 iwm_phy_ctxt_cmd(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
4236 uint8_t chains_static, uint8_t chains_dynamic, uint32_t action,
4237 uint32_t apply_time)
4238 {
4239 struct iwm_phy_context_cmd cmd;
4240
4241 iwm_phy_ctxt_cmd_hdr(sc, ctxt, &cmd, action, apply_time);
4242
4243 iwm_phy_ctxt_cmd_data(sc, &cmd, ctxt->channel,
4244 chains_static, chains_dynamic);
4245
4246 return iwm_send_cmd_pdu(sc, IWM_PHY_CONTEXT_CMD, 0,
4247 sizeof(struct iwm_phy_context_cmd), &cmd);
4248 }
4249
4250 static int
4251 iwm_send_cmd(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
4252 {
4253 struct iwm_tx_ring *ring = &sc->txq[IWM_CMD_QUEUE];
4254 struct iwm_tfd *desc;
4255 struct iwm_tx_data *txdata;
4256 struct iwm_device_cmd *cmd;
4257 struct mbuf *m;
4258 bus_addr_t paddr;
4259 uint32_t addr_lo;
4260 int err = 0, i, paylen, off, s;
4261 int code;
4262 int async, wantresp;
4263 int group_id;
4264 size_t hdrlen, datasz;
4265 uint8_t *data;
4266
4267 code = hcmd->id;
4268 async = hcmd->flags & IWM_CMD_ASYNC;
4269 wantresp = hcmd->flags & IWM_CMD_WANT_SKB;
4270
4271 for (i = 0, paylen = 0; i < __arraycount(hcmd->len); i++) {
4272 paylen += hcmd->len[i];
4273 }
4274
4275 /* if the command wants an answer, busy sc_cmd_resp */
4276 if (wantresp) {
4277 KASSERT(!async);
4278 while (sc->sc_wantresp != IWM_CMD_RESP_IDLE)
4279 tsleep(&sc->sc_wantresp, 0, "iwmcmdsl", 0);
4280 sc->sc_wantresp = ring->qid << 16 | ring->cur;
4281 }
4282
4283 /*
4284 * Is the hardware still available? (after e.g. above wait).
4285 */
4286 s = splnet();
4287 if (sc->sc_flags & IWM_FLAG_STOPPED) {
4288 err = ENXIO;
4289 goto out;
4290 }
4291
4292 desc = &ring->desc[ring->cur];
4293 txdata = &ring->data[ring->cur];
4294
4295 group_id = iwm_cmd_groupid(code);
4296 if (group_id != 0) {
4297 hdrlen = sizeof(cmd->hdr_wide);
4298 datasz = sizeof(cmd->data_wide);
4299 } else {
4300 hdrlen = sizeof(cmd->hdr);
4301 datasz = sizeof(cmd->data);
4302 }
4303
4304 if (paylen > datasz) {
4305 /* Command is too large to fit in pre-allocated space. */
4306 size_t totlen = hdrlen + paylen;
4307 if (paylen > IWM_MAX_CMD_PAYLOAD_SIZE) {
4308 aprint_error_dev(sc->sc_dev,
4309 "firmware command too long (%zd bytes)\n", totlen);
4310 err = EINVAL;
4311 goto out;
4312 }
4313 m = m_gethdr(M_DONTWAIT, MT_DATA);
4314 if (m == NULL) {
4315 err = ENOMEM;
4316 goto out;
4317 }
4318 MEXTMALLOC(m, IWM_RBUF_SIZE, M_DONTWAIT);
4319 if (!(m->m_flags & M_EXT)) {
4320 aprint_error_dev(sc->sc_dev,
4321 "could not get fw cmd mbuf (%zd bytes)\n", totlen);
4322 m_freem(m);
4323 err = ENOMEM;
4324 goto out;
4325 }
4326 cmd = mtod(m, struct iwm_device_cmd *);
4327 err = bus_dmamap_load(sc->sc_dmat, txdata->map, cmd,
4328 totlen, NULL, BUS_DMA_NOWAIT | BUS_DMA_WRITE);
4329 if (err) {
4330 aprint_error_dev(sc->sc_dev,
4331 "could not load fw cmd mbuf (%zd bytes)\n", totlen);
4332 m_freem(m);
4333 goto out;
4334 }
4335 txdata->m = m;
4336 paddr = txdata->map->dm_segs[0].ds_addr;
4337 } else {
4338 cmd = &ring->cmd[ring->cur];
4339 paddr = txdata->cmd_paddr;
4340 }
4341
4342 if (group_id != 0) {
4343 cmd->hdr_wide.opcode = iwm_cmd_opcode(code);
4344 cmd->hdr_wide.group_id = group_id;
4345 cmd->hdr_wide.qid = ring->qid;
4346 cmd->hdr_wide.idx = ring->cur;
4347 cmd->hdr_wide.length = htole16(paylen);
4348 cmd->hdr_wide.version = iwm_cmd_version(code);
4349 data = cmd->data_wide;
4350 } else {
4351 cmd->hdr.code = code;
4352 cmd->hdr.flags = 0;
4353 cmd->hdr.qid = ring->qid;
4354 cmd->hdr.idx = ring->cur;
4355 data = cmd->data;
4356 }
4357
4358 for (i = 0, off = 0; i < __arraycount(hcmd->data); i++) {
4359 if (hcmd->len[i] == 0)
4360 continue;
4361 memcpy(data + off, hcmd->data[i], hcmd->len[i]);
4362 off += hcmd->len[i];
4363 }
4364 KASSERT(off == paylen);
4365
4366 	/* The TB's 'lo' address field is not 4-byte aligned, so copy it bytewise. */
4367 addr_lo = htole32((uint32_t)paddr);
4368 memcpy(&desc->tbs[0].lo, &addr_lo, sizeof(uint32_t));
4369 desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(paddr)
4370 | ((hdrlen + paylen) << 4));
4371 desc->num_tbs = 1;
4372
4373 DPRINTFN(8, ("iwm_send_cmd 0x%x size=%zu %s\n",
4374 code, hdrlen + paylen, async ? " (async)" : ""));
4375
4376 if (paylen > datasz) {
4377 bus_dmamap_sync(sc->sc_dmat, txdata->map, 0, hdrlen + paylen,
4378 BUS_DMASYNC_PREWRITE);
4379 } else {
4380 bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
4381 (uint8_t *)cmd - (uint8_t *)ring->cmd, hdrlen + paylen,
4382 BUS_DMASYNC_PREWRITE);
4383 }
4384 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
4385 (uint8_t *)desc - (uint8_t *)ring->desc, sizeof(*desc),
4386 BUS_DMASYNC_PREWRITE);
4387
4388 err = iwm_set_cmd_in_flight(sc);
4389 if (err)
4390 goto out;
4391 ring->queued++;
4392
4393 #if 0
4394 iwm_update_sched(sc, ring->qid, ring->cur, 0, 0);
4395 #endif
4396 DPRINTF(("sending command 0x%x qid %d, idx %d\n",
4397 code, ring->qid, ring->cur));
4398
4399 /* Kick command ring. */
4400 ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
4401 IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
4402
4403 if (!async) {
4404 int generation = sc->sc_generation;
4405 err = tsleep(desc, PCATCH, "iwmcmd", mstohz(2000));
4406 if (err == 0) {
4407 /* if hardware is no longer up, return error */
4408 if (generation != sc->sc_generation) {
4409 err = ENXIO;
4410 } else {
4411 hcmd->resp_pkt = (void *)sc->sc_cmd_resp;
4412 }
4413 }
4414 }
4415 out:
4416 if (wantresp && err) {
4417 iwm_free_resp(sc, hcmd);
4418 }
4419 splx(s);
4420
4421 return err;
4422 }
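
/*
 * A minimal, not-compiled sketch of the TB address/length encoding
 * used above, for a hypothetical 36-bit bus address; it assumes
 * iwm_get_dma_hi_addr() returns address bits 35:32, matching its use
 * throughout this file.
 */
#if 0
static void
iwm_tb_encoding_example(void)
{
	bus_addr_t paddr = 0x123456700ULL;	/* hypothetical */
	uint16_t len = 0x30;			/* hdrlen + paylen */
	uint32_t lo;
	uint16_t hi_n_len;

	lo = htole32((uint32_t)paddr);		/* 0x23456700 */
	hi_n_len = htole16(iwm_get_dma_hi_addr(paddr) | (len << 4));
	/* hi_n_len == htole16(0x1 | 0x300) == htole16(0x0301) */
}
#endif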
4423
4424 static int
4425 iwm_send_cmd_pdu(struct iwm_softc *sc, uint32_t id, uint32_t flags,
4426 uint16_t len, const void *data)
4427 {
4428 struct iwm_host_cmd cmd = {
4429 .id = id,
4430 .len = { len, },
4431 .data = { data, },
4432 .flags = flags,
4433 };
4434
4435 return iwm_send_cmd(sc, &cmd);
4436 }
4437
4438 static int
4439 iwm_send_cmd_status(struct iwm_softc *sc, struct iwm_host_cmd *cmd,
4440 uint32_t *status)
4441 {
4442 struct iwm_rx_packet *pkt;
4443 struct iwm_cmd_response *resp;
4444 int err, resp_len;
4445
4446 KASSERT((cmd->flags & IWM_CMD_WANT_SKB) == 0);
4447 cmd->flags |= IWM_CMD_WANT_SKB;
4448
4449 err = iwm_send_cmd(sc, cmd);
4450 if (err)
4451 return err;
4452 pkt = cmd->resp_pkt;
4453
4454 /* Can happen if RFKILL is asserted */
4455 if (!pkt) {
4456 err = 0;
4457 goto out_free_resp;
4458 }
4459
4460 if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
4461 err = EIO;
4462 goto out_free_resp;
4463 }
4464
4465 resp_len = iwm_rx_packet_payload_len(pkt);
4466 if (resp_len != sizeof(*resp)) {
4467 err = EIO;
4468 goto out_free_resp;
4469 }
4470
4471 resp = (void *)pkt->data;
4472 *status = le32toh(resp->status);
4473 out_free_resp:
4474 iwm_free_resp(sc, cmd);
4475 return err;
4476 }
4477
4478 static int
4479 iwm_send_cmd_pdu_status(struct iwm_softc *sc, uint32_t id, uint16_t len,
4480 const void *data, uint32_t *status)
4481 {
4482 struct iwm_host_cmd cmd = {
4483 .id = id,
4484 .len = { len, },
4485 .data = { data, },
4486 };
4487
4488 return iwm_send_cmd_status(sc, &cmd, status);
4489 }
4490
4491 static void
4492 iwm_free_resp(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
4493 {
4494 KASSERT(sc->sc_wantresp != IWM_CMD_RESP_IDLE);
4495 KASSERT((hcmd->flags & IWM_CMD_WANT_SKB) == IWM_CMD_WANT_SKB);
4496 sc->sc_wantresp = IWM_CMD_RESP_IDLE;
4497 wakeup(&sc->sc_wantresp);
4498 }
4499
4500 static void
4501 iwm_cmd_done(struct iwm_softc *sc, int qid, int idx)
4502 {
4503 struct iwm_tx_ring *ring = &sc->txq[IWM_CMD_QUEUE];
4504 struct iwm_tx_data *data;
4505 int s;
4506
4507 if (qid != IWM_CMD_QUEUE) {
4508 return; /* Not a command ack. */
4509 }
4510
4511 s = splnet();
4512
4513 data = &ring->data[idx];
4514
4515 if (data->m != NULL) {
4516 bus_dmamap_sync(sc->sc_dmat, data->map, 0,
4517 data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
4518 bus_dmamap_unload(sc->sc_dmat, data->map);
4519 m_freem(data->m);
4520 data->m = NULL;
4521 }
4522 wakeup(&ring->desc[idx]);
4523
4524 if (((idx + ring->queued) % IWM_TX_RING_COUNT) != ring->cur) {
4525 aprint_error_dev(sc->sc_dev,
4526 "Some HCMDs skipped?: idx=%d queued=%d cur=%d\n",
4527 idx, ring->queued, ring->cur);
4528 }
4529
4530 KASSERT(ring->queued > 0);
4531 if (--ring->queued == 0)
4532 iwm_clear_cmd_in_flight(sc);
4533
4534 splx(s);
4535 }
4536
4537 #if 0
4538 /*
4539 * necessary only for block ack mode
4540 */
4541 void
4542 iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
4543 uint16_t len)
4544 {
4545 struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
4546 uint16_t w_val;
4547
4548 scd_bc_tbl = sc->sched_dma.vaddr;
4549
4550 len += 8; /* magic numbers came naturally from paris */
4551 if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
4552 len = roundup(len, 4) / 4;
4553
4554 w_val = htole16(sta_id << 12 | len);
4555
4556 	/* Update TX scheduler. */
4557 	scd_bc_tbl[qid].tfd_offset[idx] = w_val;
4558 	bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
4559 	    (char *)(void *)&scd_bc_tbl[qid].tfd_offset[idx] -
	    (char *)(void *)sc->sched_dma.vaddr,
4560 	    sizeof(uint16_t), BUS_DMASYNC_PREWRITE);
4561
4562 	/* Mirror the first IWM_TFD_QUEUE_SIZE_BC_DUP entries at the end of
	 * the table, apparently so the firmware can keep reading the byte
	 * count table past the ring's wrap-around point. */
4563 	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
4564 		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
4565 		bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
4566 		    (char *)(void *)&scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] -
4567 		    (char *)(void *)sc->sched_dma.vaddr,
4568 		    sizeof(uint16_t), BUS_DMASYNC_PREWRITE);
4569 }
4570 }
4571 #endif
4572
4573 /*
4574  * Fill in various bits for management frames, and leave them
4575 * unfilled for data frames (firmware takes care of that).
4576 * Return the selected TX rate.
4577 */
4578 static const struct iwm_rate *
4579 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
4580 struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
4581 {
4582 struct ieee80211com *ic = &sc->sc_ic;
4583 struct ieee80211_node *ni = &in->in_ni;
4584 const struct iwm_rate *rinfo;
4585 int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4586 int ridx, rate_flags, i, ind;
4587 int nrates = ni->ni_rates.rs_nrates;
4588
4589 tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
4590 tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
4591
4592 if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
4593 type != IEEE80211_FC0_TYPE_DATA) {
4594 /* for non-data, use the lowest supported rate */
4595 ridx = (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) ?
4596 IWM_RIDX_OFDM : IWM_RIDX_CCK;
4597 tx->data_retry_limit = IWM_MGMT_DFAULT_RETRY_LIMIT;
4598 #ifndef IEEE80211_NO_HT
4599 } else if (ic->ic_fixed_mcs != -1) {
4600 ridx = sc->sc_fixed_ridx;
4601 #endif
4602 } else if (ic->ic_fixed_rate != -1) {
4603 ridx = sc->sc_fixed_ridx;
4604 } else {
4605 /* for data frames, use RS table */
4606 tx->initial_rate_index = 0;
4607 tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
4608 DPRINTFN(12, ("start with txrate %d\n",
4609 tx->initial_rate_index));
4610 #ifndef IEEE80211_NO_HT
4611 if (ni->ni_flags & IEEE80211_NODE_HT) {
4612 ridx = iwm_mcs2ridx[ni->ni_txmcs];
4613 return &iwm_rates[ridx];
4614 }
4615 #endif
4616 ridx = (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) ?
4617 IWM_RIDX_OFDM : IWM_RIDX_CCK;
4618 for (i = 0; i < nrates; i++) {
4619 if (iwm_rates[i].rate == (ni->ni_txrate &
4620 IEEE80211_RATE_VAL)) {
4621 ridx = i;
4622 break;
4623 }
4624 }
4625 return &iwm_rates[ridx];
4626 }
4627
4628 rinfo = &iwm_rates[ridx];
4629 for (i = 0, ind = sc->sc_mgmt_last_antenna;
4630 i < IWM_RATE_MCS_ANT_NUM; i++) {
4631 ind = (ind + 1) % IWM_RATE_MCS_ANT_NUM;
4632 if (iwm_fw_valid_tx_ant(sc) & (1 << ind)) {
4633 sc->sc_mgmt_last_antenna = ind;
4634 break;
4635 }
4636 }
4637 rate_flags = (1 << sc->sc_mgmt_last_antenna) << IWM_RATE_MCS_ANT_POS;
4638 if (IWM_RIDX_IS_CCK(ridx))
4639 rate_flags |= IWM_RATE_MCS_CCK_MSK;
4640 #ifndef IEEE80211_NO_HT
4641 if ((ni->ni_flags & IEEE80211_NODE_HT) &&
4642 rinfo->ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP) {
4643 rate_flags |= IWM_RATE_MCS_HT_MSK;
4644 tx->rate_n_flags = htole32(rate_flags | rinfo->ht_plcp);
4645 } else
4646 #endif
4647 tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
4648
4649 return rinfo;
4650 }
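
/*
 * Example of the antenna round-robin above, assuming the firmware
 * reports antennas A and B valid (iwm_fw_valid_tx_ant() == 0x3):
 * sc_mgmt_last_antenna alternates between 0 and 1 across management
 * frames, and for a CCK rate index the resulting rate word is
 *
 *	tx->rate_n_flags = htole32(((1 << ant) << IWM_RATE_MCS_ANT_POS)
 *	    | IWM_RATE_MCS_CCK_MSK | rinfo->plcp);
 */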
4651
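/*
 * Shape of the TX descriptor built by iwm_tx() below: TB 0 covers the
 * first TB0_SIZE (16) bytes of the command (the iwm_cmd_header plus the
 * start of the TX command), TB 1 the remainder of the TX command, the
 * 802.11 header and any pad, and TBs 2..n map the payload mbuf's DMA
 * segments one-to-one, giving num_tbs = 2 + dm_nsegs.
 */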
4652 #define TB0_SIZE 16
4653 static int
4654 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
4655 {
4656 struct ieee80211com *ic = &sc->sc_ic;
4657 struct iwm_node *in = (struct iwm_node *)ni;
4658 struct iwm_tx_ring *ring;
4659 struct iwm_tx_data *data;
4660 struct iwm_tfd *desc;
4661 struct iwm_device_cmd *cmd;
4662 struct iwm_tx_cmd *tx;
4663 struct ieee80211_frame *wh;
4664 struct ieee80211_key *k = NULL;
4665 struct mbuf *m1;
4666 const struct iwm_rate *rinfo;
4667 uint32_t flags;
4668 u_int hdrlen;
4669 bus_dma_segment_t *seg;
4670 uint8_t tid, type;
4671 int i, totlen, err, pad;
4672
4673 wh = mtod(m, struct ieee80211_frame *);
4674 hdrlen = ieee80211_anyhdrsize(wh);
4675 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4676
4677 tid = 0;
4678
4679 ring = &sc->txq[ac];
4680 desc = &ring->desc[ring->cur];
4681 memset(desc, 0, sizeof(*desc));
4682 data = &ring->data[ring->cur];
4683
4684 cmd = &ring->cmd[ring->cur];
4685 cmd->hdr.code = IWM_TX_CMD;
4686 cmd->hdr.flags = 0;
4687 cmd->hdr.qid = ring->qid;
4688 cmd->hdr.idx = ring->cur;
4689
4690 tx = (void *)cmd->data;
4691 memset(tx, 0, sizeof(*tx));
4692
4693 rinfo = iwm_tx_fill_cmd(sc, in, wh, tx);
4694
4695 if (__predict_false(sc->sc_drvbpf != NULL)) {
4696 struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
4697
4698 tap->wt_flags = 0;
4699 tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
4700 tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
4701 #ifndef IEEE80211_NO_HT
4702 if ((ni->ni_flags & IEEE80211_NODE_HT) &&
4703 !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
4704 type == IEEE80211_FC0_TYPE_DATA &&
4705 rinfo->plcp == IWM_RATE_INVM_PLCP) {
4706 tap->wt_rate = (0x80 | rinfo->ht_plcp);
4707 } else
4708 #endif
4709 tap->wt_rate = rinfo->rate;
4710 tap->wt_hwqueue = ac;
4711 if (wh->i_fc[1] & IEEE80211_FC1_WEP)
4712 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
4713
4714 bpf_mtap2(sc->sc_drvbpf, tap, sc->sc_txtap_len, m);
4715 }
4716
4717 /* Encrypt the frame if need be. */
4718 if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
4719 k = ieee80211_crypto_encap(ic, ni, m);
4720 if (k == NULL) {
4721 m_freem(m);
4722 return ENOBUFS;
4723 }
4724 /* Packet header may have moved, reset our local pointer. */
4725 wh = mtod(m, struct ieee80211_frame *);
4726 }
4727 totlen = m->m_pkthdr.len;
4728
4729 flags = 0;
4730 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
4731 flags |= IWM_TX_CMD_FLG_ACK;
4732 }
4733
4734 if (type == IEEE80211_FC0_TYPE_DATA &&
4735 !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
4736 (totlen + IEEE80211_CRC_LEN > ic->ic_rtsthreshold ||
4737 (ic->ic_flags & IEEE80211_F_USEPROT)))
4738 flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
4739
4740 if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
4741 type != IEEE80211_FC0_TYPE_DATA)
4742 tx->sta_id = IWM_AUX_STA_ID;
4743 else
4744 tx->sta_id = IWM_STATION_ID;
4745
4746 if (type == IEEE80211_FC0_TYPE_MGT) {
4747 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4748
4749 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
4750 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
4751 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
4752 else
4753 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
4754 } else {
4755 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
4756 }
4757
4758 if (hdrlen & 3) {
4759 /* First segment length must be a multiple of 4. */
4760 flags |= IWM_TX_CMD_FLG_MH_PAD;
4761 pad = 4 - (hdrlen & 3);
4762 } else
4763 pad = 0;
4764
4765 tx->driver_txop = 0;
4766 tx->next_frame_len = 0;
4767
4768 tx->len = htole16(totlen);
4769 tx->tid_tspec = tid;
4770 tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
4771
4772 /* Set physical address of "scratch area". */
4773 tx->dram_lsb_ptr = htole32(data->scratch_paddr);
4774 tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
4775
4776 /* Copy 802.11 header in TX command. */
4777 memcpy(tx + 1, wh, hdrlen);
4778
4779 flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;
4780
4781 tx->sec_ctl = 0;
4782 tx->tx_flags |= htole32(flags);
4783
4784 /* Trim 802.11 header. */
4785 m_adj(m, hdrlen);
4786
4787 err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
4788 BUS_DMA_NOWAIT | BUS_DMA_WRITE);
4789 if (err) {
4790 if (err != EFBIG) {
4791 aprint_error_dev(sc->sc_dev,
4792 "can't map mbuf (error %d)\n", err);
4793 m_freem(m);
4794 return err;
4795 }
4796 /* Too many DMA segments, linearize mbuf. */
4797 MGETHDR(m1, M_DONTWAIT, MT_DATA);
4798 if (m1 == NULL) {
4799 m_freem(m);
4800 return ENOBUFS;
4801 }
4802 if (m->m_pkthdr.len > MHLEN) {
4803 MCLGET(m1, M_DONTWAIT);
4804 if (!(m1->m_flags & M_EXT)) {
4805 m_freem(m);
4806 m_freem(m1);
4807 return ENOBUFS;
4808 }
4809 }
4810 m_copydata(m, 0, m->m_pkthdr.len, mtod(m1, void *));
4811 m1->m_pkthdr.len = m1->m_len = m->m_pkthdr.len;
4812 m_freem(m);
4813 m = m1;
4814
4815 err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
4816 BUS_DMA_NOWAIT | BUS_DMA_WRITE);
4817 if (err) {
4818 aprint_error_dev(sc->sc_dev,
4819 "can't map mbuf (error %d)\n", err);
4820 m_freem(m);
4821 return err;
4822 }
4823 }
4824 data->m = m;
4825 data->in = in;
4826 data->done = 0;
4827
4828 DPRINTFN(8, ("sending txd %p, in %p\n", data, data->in));
4829 KASSERT(data->in != NULL);
4830
4831 DPRINTFN(8, ("sending data: qid=%d idx=%d len=%d nsegs=%d type=%d "
4832 "subtype=%x tx_flags=%08x init_rateidx=%08x rate_n_flags=%08x\n",
4833 ring->qid, ring->cur, totlen, data->map->dm_nsegs, type,
4834 (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) >> 4,
4835 le32toh(tx->tx_flags), le32toh(tx->initial_rate_index),
4836 le32toh(tx->rate_n_flags)));
4837
4838 /* Fill TX descriptor. */
4839 desc->num_tbs = 2 + data->map->dm_nsegs;
4840
4841 desc->tbs[0].lo = htole32(data->cmd_paddr);
4842 desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
4843 (TB0_SIZE << 4);
4844 desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
4845 desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
4846 ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
4847 + hdrlen + pad - TB0_SIZE) << 4);
4848
4849 /* Other DMA segments are for data payload. */
4850 seg = data->map->dm_segs;
4851 for (i = 0; i < data->map->dm_nsegs; i++, seg++) {
4852 desc->tbs[i+2].lo = htole32(seg->ds_addr);
4853 desc->tbs[i+2].hi_n_len =
4854 htole16(iwm_get_dma_hi_addr(seg->ds_addr))
4855 | ((seg->ds_len) << 4);
4856 }
4857
4858 bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize,
4859 BUS_DMASYNC_PREWRITE);
4860 bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
4861 (uint8_t *)cmd - (uint8_t *)ring->cmd, sizeof(*cmd),
4862 BUS_DMASYNC_PREWRITE);
4863 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
4864 (uint8_t *)desc - (uint8_t *)ring->desc, sizeof(*desc),
4865 BUS_DMASYNC_PREWRITE);
4866
4867 #if 0
4868 iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id,
4869 le16toh(tx->len));
4870 #endif
4871
4872 /* Kick TX ring. */
4873 ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
4874 IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
4875
4876 /* Mark TX ring as full if we reach a certain threshold. */
4877 if (++ring->queued > IWM_TX_RING_HIMARK) {
4878 sc->qfullmsk |= 1 << ring->qid;
4879 }
4880
4881 return 0;
4882 }
4883
4884 #if 0
4885 /* not necessary? */
4886 static int
4887 iwm_flush_tx_path(struct iwm_softc *sc, int tfd_msk, int sync)
4888 {
4889 struct iwm_tx_path_flush_cmd flush_cmd = {
4890 .queues_ctl = htole32(tfd_msk),
4891 .flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
4892 };
4893 int err;
4894
4895 err = iwm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, sync ? 0 : IWM_CMD_ASYNC,
4896 sizeof(flush_cmd), &flush_cmd);
4897 if (err)
4898 aprint_error_dev(sc->sc_dev, "Flushing tx queue failed: %d\n",
4899 err);
4900 return err;
4901 }
4902 #endif
4903
4904 static void
4905 iwm_led_enable(struct iwm_softc *sc)
4906 {
4907 IWM_WRITE(sc, IWM_CSR_LED_REG, IWM_CSR_LED_REG_TURN_ON);
4908 }
4909
4910 static void
4911 iwm_led_disable(struct iwm_softc *sc)
4912 {
4913 IWM_WRITE(sc, IWM_CSR_LED_REG, IWM_CSR_LED_REG_TURN_OFF);
4914 }
4915
4916 static int
4917 iwm_led_is_enabled(struct iwm_softc *sc)
4918 {
4919 return (IWM_READ(sc, IWM_CSR_LED_REG) == IWM_CSR_LED_REG_TURN_ON);
4920 }
4921
4922 static void
4923 iwm_led_blink_timeout(void *arg)
4924 {
4925 struct iwm_softc *sc = arg;
4926
4927 if (iwm_led_is_enabled(sc))
4928 iwm_led_disable(sc);
4929 else
4930 iwm_led_enable(sc);
4931
4932 callout_schedule(&sc->sc_led_blink_to, mstohz(200));
4933 }
4934
4935 static void
4936 iwm_led_blink_start(struct iwm_softc *sc)
4937 {
4938 callout_schedule(&sc->sc_led_blink_to, mstohz(200));
4939 }
4940
4941 static void
4942 iwm_led_blink_stop(struct iwm_softc *sc)
4943 {
4944 callout_stop(&sc->sc_led_blink_to);
4945 iwm_led_disable(sc);
4946 }
4947
4948 #define IWM_POWER_KEEP_ALIVE_PERIOD_SEC 25
4949
4950 static int
4951 iwm_beacon_filter_send_cmd(struct iwm_softc *sc,
4952 struct iwm_beacon_filter_cmd *cmd)
4953 {
4954 return iwm_send_cmd_pdu(sc, IWM_REPLY_BEACON_FILTERING_CMD,
4955 0, sizeof(struct iwm_beacon_filter_cmd), cmd);
4956 }
4957
4958 static void
4959 iwm_beacon_filter_set_cqm_params(struct iwm_softc *sc, struct iwm_node *in,
4960 struct iwm_beacon_filter_cmd *cmd)
4961 {
4962 cmd->ba_enable_beacon_abort = htole32(sc->sc_bf.ba_enabled);
4963 }
4964
4965 static int
4966 iwm_update_beacon_abort(struct iwm_softc *sc, struct iwm_node *in, int enable)
4967 {
4968 struct iwm_beacon_filter_cmd cmd = {
4969 IWM_BF_CMD_CONFIG_DEFAULTS,
4970 .bf_enable_beacon_filter = htole32(1),
4971 .ba_enable_beacon_abort = htole32(enable),
4972 };
4973
4974 if (!sc->sc_bf.bf_enabled)
4975 return 0;
4976
4977 sc->sc_bf.ba_enabled = enable;
4978 iwm_beacon_filter_set_cqm_params(sc, in, &cmd);
4979 return iwm_beacon_filter_send_cmd(sc, &cmd);
4980 }
4981
4982 static void
4983 iwm_power_build_cmd(struct iwm_softc *sc, struct iwm_node *in,
4984 struct iwm_mac_power_cmd *cmd)
4985 {
4986 struct ieee80211_node *ni = &in->in_ni;
4987 int dtim_period, dtim_msec, keep_alive;
4988
4989 cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
4990 in->in_color));
4991 if (ni->ni_dtim_period)
4992 dtim_period = ni->ni_dtim_period;
4993 else
4994 dtim_period = 1;
4995
4996 	/*
4997 	 * Regardless of power management state the driver must set the
4998 	 * keep alive period. The FW will use it for sending keep alive NDPs
4999 	 * immediately after association. Ensure that the keep alive period
5000 	 * is at least 3 * DTIM.
5001 	 */
5002 dtim_msec = dtim_period * ni->ni_intval;
5003 keep_alive = MAX(3 * dtim_msec, 1000 * IWM_POWER_KEEP_ALIVE_PERIOD_SEC);
5004 keep_alive = roundup(keep_alive, 1000) / 1000;
5005 cmd->keep_alive_seconds = htole16(keep_alive);
5006
5007 #ifdef notyet
5008 cmd->flags = htole16(IWM_POWER_FLAGS_POWER_SAVE_ENA_MSK);
5009 cmd->rx_data_timeout = IWM_DEFAULT_PS_RX_DATA_TIMEOUT;
5010 cmd->tx_data_timeout = IWM_DEFAULT_PS_TX_DATA_TIMEOUT;
5011 #endif
5012 }
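
/*
 * Worked example for the keep alive computation above: with a typical
 * dtim_period of 1 and ni_intval of 100, dtim_msec == 100 and
 * 3 * dtim_msec == 300, so the 25-second floor from
 * IWM_POWER_KEEP_ALIVE_PERIOD_SEC wins and keep_alive_seconds becomes
 * 25.  Only a DTIM interval longer than ~8.3 seconds would make the
 * 3 * DTIM term dominate.
 */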
5013
5014 static int
5015 iwm_power_mac_update_mode(struct iwm_softc *sc, struct iwm_node *in)
5016 {
5017 int err;
5018 int ba_enable;
5019 struct iwm_mac_power_cmd cmd;
5020
5021 memset(&cmd, 0, sizeof(cmd));
5022
5023 iwm_power_build_cmd(sc, in, &cmd);
5024
5025 err = iwm_send_cmd_pdu(sc, IWM_MAC_PM_POWER_TABLE, 0,
5026 sizeof(cmd), &cmd);
5027 if (err)
5028 return err;
5029
5030 ba_enable = !!(cmd.flags &
5031 htole16(IWM_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
5032 return iwm_update_beacon_abort(sc, in, ba_enable);
5033 }
5034
5035 static int
5036 iwm_power_update_device(struct iwm_softc *sc)
5037 {
5038 struct iwm_device_power_cmd cmd = {
5039 #ifdef notyet
5040 .flags = htole16(IWM_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK),
5041 #else
5042 .flags = 0,
5043 #endif
5044 };
5045
5046 if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
5047 return 0;
5048
5049 cmd.flags |= htole16(IWM_DEVICE_POWER_FLAGS_CAM_MSK);
5050 DPRINTF(("Sending device power command with flags = 0x%X\n",
5051 cmd.flags));
5052
5053 return iwm_send_cmd_pdu(sc, IWM_POWER_TABLE_CMD, 0, sizeof(cmd), &cmd);
5054 }
5055
5056 #ifdef notyet
5057 static int
5058 iwm_enable_beacon_filter(struct iwm_softc *sc, struct iwm_node *in)
5059 {
5060 struct iwm_beacon_filter_cmd cmd = {
5061 IWM_BF_CMD_CONFIG_DEFAULTS,
5062 .bf_enable_beacon_filter = htole32(1),
5063 };
5064 int err;
5065
5066 iwm_beacon_filter_set_cqm_params(sc, in, &cmd);
5067 err = iwm_beacon_filter_send_cmd(sc, &cmd);
5068
5069 if (err == 0)
5070 sc->sc_bf.bf_enabled = 1;
5071
5072 return err;
5073 }
5074 #endif
5075
5076 static int
5077 iwm_disable_beacon_filter(struct iwm_softc *sc)
5078 {
5079 struct iwm_beacon_filter_cmd cmd;
5080 int err;
5081
5082 memset(&cmd, 0, sizeof(cmd));
5083 if ((sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_BF_UPDATED) == 0)
5084 return 0;
5085
5086 err = iwm_beacon_filter_send_cmd(sc, &cmd);
5087 if (err == 0)
5088 sc->sc_bf.bf_enabled = 0;
5089
5090 return err;
5091 }
5092
5093 static int
5094 iwm_add_sta_cmd(struct iwm_softc *sc, struct iwm_node *in, int update)
5095 {
5096 struct iwm_add_sta_cmd_v7 add_sta_cmd;
5097 int err;
5098 uint32_t status;
5099
5100 memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
5101
5102 add_sta_cmd.sta_id = IWM_STATION_ID;
5103 add_sta_cmd.mac_id_n_color
5104 = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
5105 if (!update) {
5106 int ac;
5107 for (ac = 0; ac < WME_NUM_AC; ac++) {
5108 add_sta_cmd.tfd_queue_msk |=
5109 htole32(__BIT(iwm_ac_to_tx_fifo[ac]));
5110 }
5111 IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
5112 }
5113 add_sta_cmd.add_modify = update ? 1 : 0;
5114 add_sta_cmd.station_flags_msk
5115 |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
5116 add_sta_cmd.tid_disable_tx = htole16(0xffff);
5117 if (update)
5118 add_sta_cmd.modify_mask |= (IWM_STA_MODIFY_TID_DISABLE_TX);
5119
5120 #ifndef IEEE80211_NO_HT
5121 if (in->in_ni.ni_flags & IEEE80211_NODE_HT) {
5122 add_sta_cmd.station_flags_msk
5123 |= htole32(IWM_STA_FLG_MAX_AGG_SIZE_MSK |
5124 IWM_STA_FLG_AGG_MPDU_DENS_MSK);
5125
5126 add_sta_cmd.station_flags
5127 |= htole32(IWM_STA_FLG_MAX_AGG_SIZE_64K);
5128 		switch (sc->sc_ic.ic_ampdu_params & IEEE80211_AMPDU_PARAM_SS) {
5129 case IEEE80211_AMPDU_PARAM_SS_2:
5130 add_sta_cmd.station_flags
5131 |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_2US);
5132 break;
5133 case IEEE80211_AMPDU_PARAM_SS_4:
5134 add_sta_cmd.station_flags
5135 |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_4US);
5136 break;
5137 case IEEE80211_AMPDU_PARAM_SS_8:
5138 add_sta_cmd.station_flags
5139 |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_8US);
5140 break;
5141 case IEEE80211_AMPDU_PARAM_SS_16:
5142 add_sta_cmd.station_flags
5143 |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_16US);
5144 break;
5145 default:
5146 break;
5147 }
5148 }
5149 #endif
5150
5151 status = IWM_ADD_STA_SUCCESS;
5152 err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(add_sta_cmd),
5153 &add_sta_cmd, &status);
5154 if (err == 0 && status != IWM_ADD_STA_SUCCESS)
5155 err = EIO;
5156
5157 return err;
5158 }
5159
5160 static int
5161 iwm_add_aux_sta(struct iwm_softc *sc)
5162 {
5163 struct iwm_add_sta_cmd_v7 cmd;
5164 int err;
5165 uint32_t status;
5166
5167 err = iwm_enable_txq(sc, 0, IWM_AUX_QUEUE, IWM_TX_FIFO_MCAST);
5168 if (err)
5169 return err;
5170
5171 memset(&cmd, 0, sizeof(cmd));
5172 cmd.sta_id = IWM_AUX_STA_ID;
5173 cmd.mac_id_n_color =
5174 htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_MAC_INDEX_AUX, 0));
5175 cmd.tfd_queue_msk = htole32(1 << IWM_AUX_QUEUE);
5176 cmd.tid_disable_tx = htole16(0xffff);
5177
5178 status = IWM_ADD_STA_SUCCESS;
5179 err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(cmd), &cmd,
5180 &status);
5181 if (err == 0 && status != IWM_ADD_STA_SUCCESS)
5182 err = EIO;
5183
5184 return err;
5185 }
5186
5187 #define IWM_PLCP_QUIET_THRESH 1
5188 #define IWM_ACTIVE_QUIET_TIME 10
5189 #define LONG_OUT_TIME_PERIOD 600
5190 #define SHORT_OUT_TIME_PERIOD 200
5191 #define SUSPEND_TIME_PERIOD 100
5192
5193 static uint16_t
5194 iwm_scan_rx_chain(struct iwm_softc *sc)
5195 {
5196 uint16_t rx_chain;
5197 uint8_t rx_ant;
5198
5199 rx_ant = iwm_fw_valid_rx_ant(sc);
5200 rx_chain = rx_ant << IWM_PHY_RX_CHAIN_VALID_POS;
5201 rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
5202 rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_SEL_POS;
5203 rx_chain |= 0x1 << IWM_PHY_RX_CHAIN_DRIVER_FORCE_POS;
5204 return htole16(rx_chain);
5205 }
5206
5207 static uint32_t
5208 iwm_scan_rate_n_flags(struct iwm_softc *sc, int flags, int no_cck)
5209 {
5210 uint32_t tx_ant;
5211 int i, ind;
5212
5213 for (i = 0, ind = sc->sc_scan_last_antenna;
5214 i < IWM_RATE_MCS_ANT_NUM; i++) {
5215 ind = (ind + 1) % IWM_RATE_MCS_ANT_NUM;
5216 if (iwm_fw_valid_tx_ant(sc) & (1 << ind)) {
5217 sc->sc_scan_last_antenna = ind;
5218 break;
5219 }
5220 }
5221 tx_ant = (1 << sc->sc_scan_last_antenna) << IWM_RATE_MCS_ANT_POS;
5222
5223 if ((flags & IEEE80211_CHAN_2GHZ) && !no_cck)
5224 return htole32(IWM_RATE_1M_PLCP | IWM_RATE_MCS_CCK_MSK |
5225 tx_ant);
5226 else
5227 return htole32(IWM_RATE_6M_PLCP | tx_ant);
5228 }
5229
5230 #ifdef notyet
5231 /*
5232 * If req->n_ssids > 0, it means we should do an active scan.
5233 * In case of active scan w/o directed scan, we receive a zero-length SSID
5234 * just to notify that this scan is active and not passive.
5235 * In order to notify the FW of the number of SSIDs we wish to scan (including
5236 * the zero-length one), we need to set the corresponding bits in chan->type,
5237  * one for each SSID, and set the active bit (first). Since the first SSID
5238  * is already included in the probe template, we only need to set
5239  * req->n_ssids - 1 bits in addition to the first bit.
5240 */
5241 static uint16_t
5242 iwm_get_active_dwell(struct iwm_softc *sc, int flags, int n_ssids)
5243 {
5244 if (flags & IEEE80211_CHAN_2GHZ)
5245 return 30 + 3 * (n_ssids + 1);
5246 return 20 + 2 * (n_ssids + 1);
5247 }
5248
5249 static uint16_t
5250 iwm_get_passive_dwell(struct iwm_softc *sc, int flags)
5251 {
5252 return (flags & IEEE80211_CHAN_2GHZ) ? 100 + 20 : 100 + 10;
5253 }
5254 #endif
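
/*
 * Worked example for the dwell helpers above: an active scan on 2GHz
 * with n_ssids == 1 dwells 30 + 3 * (1 + 1) == 36 per channel, and a
 * passive 2GHz scan dwells 100 + 20 == 120; the 5GHz variants come to
 * 24 and 110 respectively.  (The units are presumably TU, as in
 * iwlwifi.)
 */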
5255
5256 static uint8_t
5257 iwm_lmac_scan_fill_channels(struct iwm_softc *sc,
5258 struct iwm_scan_channel_cfg_lmac *chan, int n_ssids)
5259 {
5260 struct ieee80211com *ic = &sc->sc_ic;
5261 struct ieee80211_channel *c;
5262 uint8_t nchan;
5263
5264 for (nchan = 0, c = &ic->ic_channels[1];
5265 c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
5266 nchan < sc->sc_capa_n_scan_channels;
5267 c++) {
5268 if (c->ic_flags == 0)
5269 continue;
5270
5271 chan->channel_num = htole16(ieee80211_mhz2ieee(c->ic_freq, 0));
5272 chan->iter_count = htole16(1);
5273 chan->iter_interval = htole32(0);
5274 chan->flags = htole32(IWM_UNIFIED_SCAN_CHANNEL_PARTIAL);
5275 chan->flags |= htole32(IWM_SCAN_CHANNEL_NSSIDS(n_ssids));
5276 if (!IEEE80211_IS_CHAN_PASSIVE(c) && n_ssids != 0)
5277 chan->flags |= htole32(IWM_SCAN_CHANNEL_TYPE_ACTIVE);
5278 chan++;
5279 nchan++;
5280 }
5281
5282 return nchan;
5283 }
5284
5285 static uint8_t
5286 iwm_umac_scan_fill_channels(struct iwm_softc *sc,
5287 struct iwm_scan_channel_cfg_umac *chan, int n_ssids)
5288 {
5289 struct ieee80211com *ic = &sc->sc_ic;
5290 struct ieee80211_channel *c;
5291 uint8_t nchan;
5292
5293 for (nchan = 0, c = &ic->ic_channels[1];
5294 c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
5295 nchan < sc->sc_capa_n_scan_channels;
5296 c++) {
5297 if (c->ic_flags == 0)
5298 continue;
5299
5300 chan->channel_num = ieee80211_mhz2ieee(c->ic_freq, 0);
5301 chan->iter_count = 1;
5302 chan->iter_interval = htole16(0);
5303 chan->flags = htole32(IWM_SCAN_CHANNEL_UMAC_NSSIDS(n_ssids));
5304 chan++;
5305 nchan++;
5306 }
5307
5308 return nchan;
5309 }
5310
5311 static int
5312 iwm_fill_probe_req(struct iwm_softc *sc, struct iwm_scan_probe_req *preq)
5313 {
5314 struct ieee80211com *ic = &sc->sc_ic;
5315 struct ieee80211_frame *wh = (struct ieee80211_frame *)preq->buf;
5316 struct ieee80211_rateset *rs;
5317 size_t remain = sizeof(preq->buf);
5318 uint8_t *frm, *pos;
5319
5320 memset(preq, 0, sizeof(*preq));
5321
5322 if (remain < sizeof(*wh) + 2 + ic->ic_des_esslen)
5323 return ENOBUFS;
5324
5325 /*
5326 * Build a probe request frame. Most of the following code is a
5327 * copy & paste of what is done in net80211.
5328 */
5329 wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
5330 IEEE80211_FC0_SUBTYPE_PROBE_REQ;
5331 wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
5332 IEEE80211_ADDR_COPY(wh->i_addr1, etherbroadcastaddr);
5333 IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_myaddr);
5334 IEEE80211_ADDR_COPY(wh->i_addr3, etherbroadcastaddr);
5335 *(uint16_t *)&wh->i_dur[0] = 0; /* filled by HW */
5336 *(uint16_t *)&wh->i_seq[0] = 0; /* filled by HW */
5337
5338 frm = (uint8_t *)(wh + 1);
5339 frm = ieee80211_add_ssid(frm, ic->ic_des_essid, ic->ic_des_esslen);
5340
5341 /* Tell the firmware where the MAC header is. */
5342 preq->mac_header.offset = 0;
5343 preq->mac_header.len = htole16(frm - (uint8_t *)wh);
5344 remain -= frm - (uint8_t *)wh;
5345
5346 /* Fill in 2GHz IEs and tell firmware where they are. */
5347 rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
5348 if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
5349 if (remain < 4 + rs->rs_nrates)
5350 return ENOBUFS;
5351 } else if (remain < 2 + rs->rs_nrates)
5352 return ENOBUFS;
5353 preq->band_data[0].offset = htole16(frm - (uint8_t *)wh);
5354 pos = frm;
5355 frm = ieee80211_add_rates(frm, rs);
5356 if (rs->rs_nrates > IEEE80211_RATE_SIZE)
5357 frm = ieee80211_add_xrates(frm, rs);
5358 preq->band_data[0].len = htole16(frm - pos);
5359 remain -= frm - pos;
5360
5361 if (isset(sc->sc_enabled_capa,
5362 IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)) {
5363 if (remain < 3)
5364 return ENOBUFS;
5365 *frm++ = IEEE80211_ELEMID_DSPARMS;
5366 *frm++ = 1;
5367 *frm++ = 0;
5368 remain -= 3;
5369 }
5370
5371 if (sc->sc_nvm.sku_cap_band_52GHz_enable) {
5372 /* Fill in 5GHz IEs. */
5373 rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
5374 if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
5375 if (remain < 4 + rs->rs_nrates)
5376 return ENOBUFS;
5377 } else if (remain < 2 + rs->rs_nrates)
5378 return ENOBUFS;
5379 preq->band_data[1].offset = htole16(frm - (uint8_t *)wh);
5380 pos = frm;
5381 frm = ieee80211_add_rates(frm, rs);
5382 if (rs->rs_nrates > IEEE80211_RATE_SIZE)
5383 frm = ieee80211_add_xrates(frm, rs);
5384 preq->band_data[1].len = htole16(frm - pos);
5385 remain -= frm - pos;
5386 }
5387
5388 #ifndef IEEE80211_NO_HT
5389 /* Send 11n IEs on both 2GHz and 5GHz bands. */
5390 preq->common_data.offset = htole16(frm - (uint8_t *)wh);
5391 pos = frm;
5392 if (ic->ic_flags & IEEE80211_F_HTON) {
5393 if (remain < 28)
5394 return ENOBUFS;
5395 frm = ieee80211_add_htcaps(frm, ic);
5396 /* XXX add WME info? */
5397 }
5398 #endif
5399
5400 preq->common_data.len = htole16(frm - pos);
5401
5402 return 0;
5403 }
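
/*
 * Resulting layout of the probe request template built above:
 * mac_header covers the 802.11 header plus the SSID element,
 * band_data[0] the 2GHz supported (and extended) rates, band_data[1]
 * the 5GHz rates, and common_data any IEs valid on both bands (HT
 * caps, when not compiled out).  The zeroed DS params element is a
 * placeholder which the firmware presumably rewrites per channel,
 * hence the IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT gate.
 */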
5404
5405 static int
5406 iwm_lmac_scan(struct iwm_softc *sc)
5407 {
5408 struct ieee80211com *ic = &sc->sc_ic;
5409 struct iwm_host_cmd hcmd = {
5410 .id = IWM_SCAN_OFFLOAD_REQUEST_CMD,
5411 .len = { 0, },
5412 .data = { NULL, },
5413 .flags = 0,
5414 };
5415 struct iwm_scan_req_lmac *req;
5416 size_t req_len;
5417 int err;
5418
5419 DPRINTF(("%s: %s\n", DEVNAME(sc), __func__));
5420
5421 req_len = sizeof(struct iwm_scan_req_lmac) +
5422 (sizeof(struct iwm_scan_channel_cfg_lmac) *
5423 sc->sc_capa_n_scan_channels) + sizeof(struct iwm_scan_probe_req);
5424 if (req_len > IWM_MAX_CMD_PAYLOAD_SIZE)
5425 return ENOMEM;
5426 req = kmem_zalloc(req_len, KM_SLEEP);
5427 if (req == NULL)
5428 return ENOMEM;
5429
5430 hcmd.len[0] = (uint16_t)req_len;
5431 hcmd.data[0] = (void *)req;
5432
5433 /* These timings correspond to iwlwifi's UNASSOC scan. */
5434 req->active_dwell = 10;
5435 req->passive_dwell = 110;
5436 req->fragmented_dwell = 44;
5437 req->extended_dwell = 90;
5438 req->max_out_time = 0;
5439 req->suspend_time = 0;
5440
5441 req->scan_prio = htole32(IWM_SCAN_PRIORITY_HIGH);
5442 req->rx_chain_select = iwm_scan_rx_chain(sc);
5443 req->iter_num = htole32(1);
5444 req->delay = 0;
5445
5446 req->scan_flags = htole32(IWM_LMAC_SCAN_FLAG_PASS_ALL |
5447 IWM_LMAC_SCAN_FLAG_ITER_COMPLETE |
5448 IWM_LMAC_SCAN_FLAG_EXTENDED_DWELL);
5449 if (ic->ic_des_esslen == 0)
5450 req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAG_PASSIVE);
5451 else
5452 req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAG_PRE_CONNECTION);
5453 if (isset(sc->sc_enabled_capa,
5454 IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
5455 req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAGS_RRM_ENABLED);
5456
5457 req->flags = htole32(IWM_PHY_BAND_24);
5458 if (sc->sc_nvm.sku_cap_band_52GHz_enable)
5459 req->flags |= htole32(IWM_PHY_BAND_5);
5460 req->filter_flags =
5461 htole32(IWM_MAC_FILTER_ACCEPT_GRP | IWM_MAC_FILTER_IN_BEACON);
5462
5463 /* Tx flags 2 GHz. */
5464 req->tx_cmd[0].tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
5465 IWM_TX_CMD_FLG_BT_DIS);
5466 req->tx_cmd[0].rate_n_flags =
5467 iwm_scan_rate_n_flags(sc, IEEE80211_CHAN_2GHZ, 1/*XXX*/);
5468 req->tx_cmd[0].sta_id = IWM_AUX_STA_ID;
5469
5470 /* Tx flags 5 GHz. */
5471 req->tx_cmd[1].tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
5472 IWM_TX_CMD_FLG_BT_DIS);
5473 req->tx_cmd[1].rate_n_flags =
5474 iwm_scan_rate_n_flags(sc, IEEE80211_CHAN_5GHZ, 1/*XXX*/);
5475 req->tx_cmd[1].sta_id = IWM_AUX_STA_ID;
5476
5477 /* Check if we're doing an active directed scan. */
5478 if (ic->ic_des_esslen != 0) {
5479 req->direct_scan[0].id = IEEE80211_ELEMID_SSID;
5480 req->direct_scan[0].len = ic->ic_des_esslen;
5481 memcpy(req->direct_scan[0].ssid, ic->ic_des_essid,
5482 ic->ic_des_esslen);
5483 }
5484
5485 req->n_channels = iwm_lmac_scan_fill_channels(sc,
5486 (struct iwm_scan_channel_cfg_lmac *)req->data,
5487 ic->ic_des_esslen != 0);
5488
5489 err = iwm_fill_probe_req(sc,
5490 (struct iwm_scan_probe_req *)(req->data +
5491 (sizeof(struct iwm_scan_channel_cfg_lmac) *
5492 sc->sc_capa_n_scan_channels)));
5493 if (err) {
5494 kmem_free(req, req_len);
5495 return err;
5496 }
5497
5498 /* Specify the scan plan: We'll do one iteration. */
5499 req->schedule[0].iterations = 1;
5500 req->schedule[0].full_scan_mul = 1;
5501
5502 /* Disable EBS. */
5503 req->channel_opt[0].non_ebs_ratio = 1;
5504 req->channel_opt[1].non_ebs_ratio = 1;
5505
5506 err = iwm_send_cmd(sc, &hcmd);
5507 kmem_free(req, req_len);
5508 return err;
5509 }
5510
5511 static int
5512 iwm_config_umac_scan(struct iwm_softc *sc)
5513 {
5514 struct ieee80211com *ic = &sc->sc_ic;
5515 struct iwm_scan_config *scan_config;
5516 int err, nchan;
5517 size_t cmd_size;
5518 struct ieee80211_channel *c;
5519 struct iwm_host_cmd hcmd = {
5520 .id = iwm_cmd_id(IWM_SCAN_CFG_CMD, IWM_ALWAYS_LONG_GROUP, 0),
5521 .flags = 0,
5522 };
5523 static const uint32_t rates = (IWM_SCAN_CONFIG_RATE_1M |
5524 IWM_SCAN_CONFIG_RATE_2M | IWM_SCAN_CONFIG_RATE_5M |
5525 IWM_SCAN_CONFIG_RATE_11M | IWM_SCAN_CONFIG_RATE_6M |
5526 IWM_SCAN_CONFIG_RATE_9M | IWM_SCAN_CONFIG_RATE_12M |
5527 IWM_SCAN_CONFIG_RATE_18M | IWM_SCAN_CONFIG_RATE_24M |
5528 IWM_SCAN_CONFIG_RATE_36M | IWM_SCAN_CONFIG_RATE_48M |
5529 IWM_SCAN_CONFIG_RATE_54M);
5530
5531 cmd_size = sizeof(*scan_config) + sc->sc_capa_n_scan_channels;
5532
5533 scan_config = kmem_zalloc(cmd_size, KM_SLEEP);
5534 if (scan_config == NULL)
5535 return ENOMEM;
5536
5537 scan_config->tx_chains = htole32(iwm_fw_valid_tx_ant(sc));
5538 scan_config->rx_chains = htole32(iwm_fw_valid_rx_ant(sc));
5539 scan_config->legacy_rates = htole32(rates |
5540 IWM_SCAN_CONFIG_SUPPORTED_RATE(rates));
5541
5542 /* These timings correspond to iwlwifi's UNASSOC scan. */
5543 scan_config->dwell_active = 10;
5544 scan_config->dwell_passive = 110;
5545 scan_config->dwell_fragmented = 44;
5546 scan_config->dwell_extended = 90;
5547 scan_config->out_of_channel_time = htole32(0);
5548 scan_config->suspend_time = htole32(0);
5549
5550 IEEE80211_ADDR_COPY(scan_config->mac_addr, sc->sc_ic.ic_myaddr);
5551
5552 scan_config->bcast_sta_id = IWM_AUX_STA_ID;
5553 scan_config->channel_flags = IWM_CHANNEL_FLAG_EBS |
5554 IWM_CHANNEL_FLAG_ACCURATE_EBS | IWM_CHANNEL_FLAG_EBS_ADD |
5555 IWM_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE;
5556
5557 for (c = &ic->ic_channels[1], nchan = 0;
5558 c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
5559 nchan < sc->sc_capa_n_scan_channels; c++) {
5560 if (c->ic_flags == 0)
5561 continue;
5562 scan_config->channel_array[nchan++] =
5563 ieee80211_mhz2ieee(c->ic_freq, 0);
5564 }
5565
5566 scan_config->flags = htole32(IWM_SCAN_CONFIG_FLAG_ACTIVATE |
5567 IWM_SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
5568 IWM_SCAN_CONFIG_FLAG_SET_TX_CHAINS |
5569 IWM_SCAN_CONFIG_FLAG_SET_RX_CHAINS |
5570 IWM_SCAN_CONFIG_FLAG_SET_AUX_STA_ID |
5571 IWM_SCAN_CONFIG_FLAG_SET_ALL_TIMES |
5572 IWM_SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
5573 IWM_SCAN_CONFIG_FLAG_SET_MAC_ADDR |
5574 IWM_SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS|
5575 IWM_SCAN_CONFIG_N_CHANNELS(nchan) |
5576 IWM_SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED);
5577
5578 hcmd.data[0] = scan_config;
5579 hcmd.len[0] = cmd_size;
5580
5581 err = iwm_send_cmd(sc, &hcmd);
5582 kmem_free(scan_config, cmd_size);
5583 return err;
5584 }
5585
5586 static int
5587 iwm_umac_scan(struct iwm_softc *sc)
5588 {
5589 struct ieee80211com *ic = &sc->sc_ic;
5590 struct iwm_host_cmd hcmd = {
5591 .id = iwm_cmd_id(IWM_SCAN_REQ_UMAC, IWM_ALWAYS_LONG_GROUP, 0),
5592 .len = { 0, },
5593 .data = { NULL, },
5594 .flags = 0,
5595 };
5596 struct iwm_scan_req_umac *req;
5597 struct iwm_scan_req_umac_tail *tail;
5598 size_t req_len;
5599 int err;
5600
5601 DPRINTF(("%s: %s\n", DEVNAME(sc), __func__));
5602
5603 req_len = sizeof(struct iwm_scan_req_umac) +
5604 (sizeof(struct iwm_scan_channel_cfg_umac) *
5605 sc->sc_capa_n_scan_channels) +
5606 sizeof(struct iwm_scan_req_umac_tail);
5607 if (req_len > IWM_MAX_CMD_PAYLOAD_SIZE)
5608 return ENOMEM;
5609 req = kmem_zalloc(req_len, KM_SLEEP);
5610 if (req == NULL)
5611 return ENOMEM;
5612
5613 hcmd.len[0] = (uint16_t)req_len;
5614 hcmd.data[0] = (void *)req;
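	/*
	 * Buffer layout, as implied by the length computation above
	 * (a sketch, not firmware documentation): a fixed
	 * iwm_scan_req_umac header, then one channel config entry per
	 * supported scan channel, then the tail holding the SSIDs,
	 * the probe request template, and the scan schedule.
	 */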
5615
5616 /* These timings correspond to iwlwifi's UNASSOC scan. */
5617 req->active_dwell = 10;
5618 req->passive_dwell = 110;
5619 req->fragmented_dwell = 44;
5620 req->extended_dwell = 90;
5621 req->max_out_time = 0;
5622 req->suspend_time = 0;
5623
5624 req->scan_priority = htole32(IWM_SCAN_PRIORITY_HIGH);
5625 req->ooc_priority = htole32(IWM_SCAN_PRIORITY_HIGH);
5626
5627 req->n_channels = iwm_umac_scan_fill_channels(sc,
5628 (struct iwm_scan_channel_cfg_umac *)req->data,
5629 ic->ic_des_esslen != 0);
5630
5631 req->general_flags = htole32(IWM_UMAC_SCAN_GEN_FLAGS_PASS_ALL |
5632 IWM_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE |
5633 IWM_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL);
5634
5635 tail = (struct iwm_scan_req_umac_tail *)(req->data +
5636 sizeof(struct iwm_scan_channel_cfg_umac) *
5637 sc->sc_capa_n_scan_channels);
5638
5639 /* Check if we're doing an active directed scan. */
5640 if (ic->ic_des_esslen != 0) {
5641 tail->direct_scan[0].id = IEEE80211_ELEMID_SSID;
5642 tail->direct_scan[0].len = ic->ic_des_esslen;
5643 memcpy(tail->direct_scan[0].ssid, ic->ic_des_essid,
5644 ic->ic_des_esslen);
5645 req->general_flags |=
5646 htole32(IWM_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT);
5647 } else
5648 req->general_flags |= htole32(IWM_UMAC_SCAN_GEN_FLAGS_PASSIVE);
5649
5650 if (isset(sc->sc_enabled_capa,
5651 IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
5652 req->general_flags |=
5653 htole32(IWM_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED);
5654
5655 err = iwm_fill_probe_req(sc, &tail->preq);
5656 if (err) {
5657 kmem_free(req, req_len);
5658 return err;
5659 }
5660
5661 /* Specify the scan plan: We'll do one iteration. */
5662 tail->schedule[0].interval = 0;
5663 tail->schedule[0].iter_count = 1;
5664
5665 err = iwm_send_cmd(sc, &hcmd);
5666 kmem_free(req, req_len);
5667 return err;
5668 }
5669
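/*
 * Map a hardware rate index to the matching entry in the node's rate set.
 * The returned byte keeps the IEEE80211_RATE_BASIC flag, which
 * iwm_ack_rates() below relies on.
 */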
5670 static uint8_t
5671 iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
5672 {
5673 int i;
5674 uint8_t rval;
5675
5676 for (i = 0; i < rs->rs_nrates; i++) {
5677 rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
5678 if (rval == iwm_rates[ridx].rate)
5679 return rs->rs_rates[i];
5680 }
5681 return 0;
5682 }
5683
5684 static void
5685 iwm_ack_rates(struct iwm_softc *sc, struct iwm_node *in, int *cck_rates,
5686 int *ofdm_rates)
5687 {
5688 struct ieee80211_node *ni = &in->in_ni;
5689 struct ieee80211_rateset *rs = &ni->ni_rates;
5690 int lowest_present_ofdm = -1;
5691 int lowest_present_cck = -1;
5692 uint8_t cck = 0;
5693 uint8_t ofdm = 0;
5694 int i;
5695
5696 if (ni->ni_chan == IEEE80211_CHAN_ANYC ||
5697 IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) {
5698 for (i = IWM_FIRST_CCK_RATE; i < IWM_FIRST_OFDM_RATE; i++) {
5699 if ((iwm_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
5700 continue;
5701 cck |= (1 << i);
5702 if (lowest_present_cck == -1 || lowest_present_cck > i)
5703 lowest_present_cck = i;
5704 }
5705 }
5706 for (i = IWM_FIRST_OFDM_RATE; i <= IWM_LAST_NON_HT_RATE; i++) {
5707 if ((iwm_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
5708 continue;
5709 ofdm |= (1 << (i - IWM_FIRST_OFDM_RATE));
5710 if (lowest_present_ofdm == -1 || lowest_present_ofdm > i)
5711 lowest_present_ofdm = i;
5712 }
5713
5714 /*
5715 * Now we've got the basic rates as bitmaps in the ofdm and cck
5716 * variables. This isn't sufficient though, as there might not
5717 * be all the right rates in the bitmap. E.g. if the only basic
5718 * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
5719 * and 6 Mbps because the 802.11-2007 standard says in 9.6:
5720 *
5721 * [...] a STA responding to a received frame shall transmit
5722 * its Control Response frame [...] at the highest rate in the
5723 * BSSBasicRateSet parameter that is less than or equal to the
5724 * rate of the immediately previous frame in the frame exchange
5725 * sequence ([...]) and that is of the same modulation class
5726 * ([...]) as the received frame. If no rate contained in the
5727 * BSSBasicRateSet parameter meets these conditions, then the
5728 * control frame sent in response to a received frame shall be
5729 * transmitted at the highest mandatory rate of the PHY that is
5730 * less than or equal to the rate of the received frame, and
5731 * that is of the same modulation class as the received frame.
5732 *
5733 * As a consequence, we need to add all mandatory rates that are
5734 * lower than all of the basic rates to these bitmaps.
5735 */
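	/*
	 * Worked example: if the only basic CCK rates are 5.5 and 11
	 * Mbit/s, lowest_present_cck is the 5.5M index, so the checks
	 * below add the mandatory 1 and 2 Mbit/s bits and the ACK rate
	 * bitmap ends up covering 1, 2, 5.5 and 11 Mbit/s.
	 */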
5736
5737 if (IWM_RATE_24M_INDEX < lowest_present_ofdm)
5738 ofdm |= IWM_RATE_BIT_MSK(24) >> IWM_FIRST_OFDM_RATE;
5739 if (IWM_RATE_12M_INDEX < lowest_present_ofdm)
5740 ofdm |= IWM_RATE_BIT_MSK(12) >> IWM_FIRST_OFDM_RATE;
5741 /* 6M already there or needed so always add */
5742 ofdm |= IWM_RATE_BIT_MSK(6) >> IWM_FIRST_OFDM_RATE;
5743
5744 /*
5745 * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
5746 * Note, however:
5747 * - if no CCK rates are basic, it must be ERP since there must
5748 * be some basic rates at all, so they're OFDM => ERP PHY
5749 * (or we're in 5 GHz, and the cck bitmap will never be used)
5750 * - if 11M is a basic rate, it must be ERP as well, so add 5.5M
5751 * - if 5.5M is basic, 1M and 2M are mandatory
5752 * - if 2M is basic, 1M is mandatory
5753 * - if 1M is basic, that's the only valid ACK rate.
5754 * As a consequence, it's not as complicated as it sounds, just add
5755 * any lower rates to the ACK rate bitmap.
5756 */
5757 if (IWM_RATE_11M_INDEX < lowest_present_cck)
5758 cck |= IWM_RATE_BIT_MSK(11) >> IWM_FIRST_CCK_RATE;
5759 if (IWM_RATE_5M_INDEX < lowest_present_cck)
5760 cck |= IWM_RATE_BIT_MSK(5) >> IWM_FIRST_CCK_RATE;
5761 if (IWM_RATE_2M_INDEX < lowest_present_cck)
5762 cck |= IWM_RATE_BIT_MSK(2) >> IWM_FIRST_CCK_RATE;
5763 /* 1M already there or needed so always add */
5764 cck |= IWM_RATE_BIT_MSK(1) >> IWM_FIRST_CCK_RATE;
5765
5766 *cck_rates = cck;
5767 *ofdm_rates = ofdm;
5768 }
5769
5770 static void
5771 iwm_mac_ctxt_cmd_common(struct iwm_softc *sc, struct iwm_node *in,
5772 struct iwm_mac_ctx_cmd *cmd, uint32_t action, int assoc)
5773 {
5774 #define IWM_EXP2(x) ((1 << (x)) - 1) /* CWmin = 2^ECWmin - 1 */
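	/* E.g. an ECWmin of 4 yields a CWmin of IWM_EXP2(4) == 15 slots. */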
5775 struct ieee80211com *ic = &sc->sc_ic;
5776 struct ieee80211_node *ni = ic->ic_bss;
5777 int cck_ack_rates, ofdm_ack_rates;
5778 int i;
5779
5780 cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
5781 in->in_color));
5782 cmd->action = htole32(action);
5783
5784 cmd->mac_type = htole32(IWM_FW_MAC_TYPE_BSS_STA);
5785 cmd->tsf_id = htole32(IWM_TSF_ID_A);
5786
5787 IEEE80211_ADDR_COPY(cmd->node_addr, ic->ic_myaddr);
5788 IEEE80211_ADDR_COPY(cmd->bssid_addr, ni->ni_bssid);
5789
5790 iwm_ack_rates(sc, in, &cck_ack_rates, &ofdm_ack_rates);
5791 cmd->cck_rates = htole32(cck_ack_rates);
5792 cmd->ofdm_rates = htole32(ofdm_ack_rates);
5793
5794 cmd->cck_short_preamble
5795 = htole32((ic->ic_flags & IEEE80211_F_SHPREAMBLE)
5796 ? IWM_MAC_FLG_SHORT_PREAMBLE : 0);
5797 cmd->short_slot
5798 = htole32((ic->ic_flags & IEEE80211_F_SHSLOT)
5799 ? IWM_MAC_FLG_SHORT_SLOT : 0);
5800
5801 for (i = 0; i < WME_NUM_AC; i++) {
5802 struct wmeParams *wmep = &ic->ic_wme.wme_params[i];
5803 int txf = iwm_ac_to_tx_fifo[i];
5804
5805 cmd->ac[txf].cw_min = htole16(IWM_EXP2(wmep->wmep_logcwmin));
5806 cmd->ac[txf].cw_max = htole16(IWM_EXP2(wmep->wmep_logcwmax));
5807 cmd->ac[txf].aifsn = wmep->wmep_aifsn;
5808 cmd->ac[txf].fifos_mask = (1 << txf);
5809 cmd->ac[txf].edca_txop = htole16(wmep->wmep_txopLimit * 32);
5810 }
5811 if (ni->ni_flags & IEEE80211_NODE_QOS)
5812 cmd->qos_flags |= htole32(IWM_MAC_QOS_FLG_UPDATE_EDCA);
5813
5814 #ifndef IEEE80211_NO_HT
5815 if (ni->ni_flags & IEEE80211_NODE_HT) {
5816 enum ieee80211_htprot htprot =
5817 (ni->ni_htop1 & IEEE80211_HTOP1_PROT_MASK);
5818 switch (htprot) {
5819 case IEEE80211_HTPROT_NONE:
5820 break;
5821 case IEEE80211_HTPROT_NONMEMBER:
5822 case IEEE80211_HTPROT_NONHT_MIXED:
5823 cmd->protection_flags |=
5824 htole32(IWM_MAC_PROT_FLG_HT_PROT);
			/* FALLTHROUGH -- mixed-mode protection also sets the FAT flag */
5825 		case IEEE80211_HTPROT_20MHZ:
5826 cmd->protection_flags |=
5827 htole32(IWM_MAC_PROT_FLG_HT_PROT |
5828 IWM_MAC_PROT_FLG_FAT_PROT);
5829 break;
5830 default:
5831 break;
5832 }
5833
5834 cmd->qos_flags |= htole32(IWM_MAC_QOS_FLG_TGN);
5835 }
5836 #endif
5837
5838 if (ic->ic_flags & IEEE80211_F_USEPROT)
5839 cmd->protection_flags |= htole32(IWM_MAC_PROT_FLG_TGG_PROTECT);
5840
5841 cmd->filter_flags = htole32(IWM_MAC_FILTER_ACCEPT_GRP);
5842 #undef IWM_EXP2
5843 }
5844
5845 static void
5846 iwm_mac_ctxt_cmd_fill_sta(struct iwm_softc *sc, struct iwm_node *in,
5847 struct iwm_mac_data_sta *sta, int assoc)
5848 {
5849 struct ieee80211_node *ni = &in->in_ni;
5850 uint32_t dtim_off;
5851 uint64_t tsf;
5852
5853 dtim_off = ni->ni_dtim_count * ni->ni_intval * IEEE80211_DUR_TU;
5854 tsf = le64toh(ni->ni_tstamp.tsf);
5855
5856 sta->is_assoc = htole32(assoc);
5857 sta->dtim_time = htole32(ni->ni_rstamp + dtim_off);
5858 sta->dtim_tsf = htole64(tsf + dtim_off);
5859 sta->bi = htole32(ni->ni_intval);
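	/*
	 * The *_reciprocal fields hold a fixed-point inverse (see
	 * iwm_reciprocal() earlier in this file), apparently so the
	 * firmware can avoid run-time divisions.
	 */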
5860 sta->bi_reciprocal = htole32(iwm_reciprocal(ni->ni_intval));
5861 sta->dtim_interval = htole32(ni->ni_intval * ni->ni_dtim_period);
5862 sta->dtim_reciprocal = htole32(iwm_reciprocal(sta->dtim_interval));
5863 sta->listen_interval = htole32(10);
5864 sta->assoc_id = htole32(ni->ni_associd);
5865 sta->assoc_beacon_arrive_time = htole32(ni->ni_rstamp);
5866 }
5867
5868 static int
5869 iwm_mac_ctxt_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action,
5870 int assoc)
5871 {
5872 struct ieee80211_node *ni = &in->in_ni;
5873 struct iwm_mac_ctx_cmd cmd;
5874
5875 memset(&cmd, 0, sizeof(cmd));
5876
5877 iwm_mac_ctxt_cmd_common(sc, in, &cmd, action, assoc);
5878
5879 	/* Allow beacons to pass through as long as we are not associated
5880 	 * or we do not have DTIM period information. */
5881 if (!assoc || !ni->ni_associd || !ni->ni_dtim_period)
5882 cmd.filter_flags |= htole32(IWM_MAC_FILTER_IN_BEACON);
5883 else
5884 iwm_mac_ctxt_cmd_fill_sta(sc, in, &cmd.sta, assoc);
5885
5886 return iwm_send_cmd_pdu(sc, IWM_MAC_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
5887 }
5888
5889 #define IWM_MISSED_BEACONS_THRESHOLD 8
5890
5891 static void
5892 iwm_rx_missed_beacons_notif(struct iwm_softc *sc,
5893 struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
5894 {
5895 struct iwm_missed_beacons_notif *mb = (void *)pkt->data;
5896 int s;
5897
5898 DPRINTF(("missed bcn mac_id=%u, consecutive=%u (%u, %u, %u)\n",
5899 le32toh(mb->mac_id),
5900 le32toh(mb->consec_missed_beacons),
5901 le32toh(mb->consec_missed_beacons_since_last_rx),
5902 le32toh(mb->num_recvd_beacons),
5903 le32toh(mb->num_expected_beacons)));
5904
5905 /*
5906 * TODO: the threshold should be adjusted based on latency conditions,
5907 * and/or in case of a CS flow on one of the other AP vifs.
5908 */
5909 if (le32toh(mb->consec_missed_beacons_since_last_rx) >
5910 IWM_MISSED_BEACONS_THRESHOLD) {
5911 s = splnet();
5912 ieee80211_beacon_miss(&sc->sc_ic);
5913 splx(s);
5914 }
5915 }
5916
5917 static int
5918 iwm_update_quotas(struct iwm_softc *sc, struct iwm_node *in)
5919 {
5920 struct iwm_time_quota_cmd cmd;
5921 int i, idx, num_active_macs, quota, quota_rem;
5922 int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
5923 int n_ifs[IWM_MAX_BINDINGS] = {0, };
5924 uint16_t id;
5925
5926 memset(&cmd, 0, sizeof(cmd));
5927
5928 /* currently, PHY ID == binding ID */
5929 if (in) {
5930 id = in->in_phyctxt->id;
5931 KASSERT(id < IWM_MAX_BINDINGS);
5932 colors[id] = in->in_phyctxt->color;
5933
5934 		n_ifs[id] = 1;
5936 }
5937
5938 /*
5939 * The FW's scheduling session consists of
5940 * IWM_MAX_QUOTA fragments. Divide these fragments
5941 * equally between all the bindings that require quota
5942 */
5943 num_active_macs = 0;
5944 for (i = 0; i < IWM_MAX_BINDINGS; i++) {
5945 cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
5946 num_active_macs += n_ifs[i];
5947 }
5948
5949 quota = 0;
5950 quota_rem = 0;
5951 if (num_active_macs) {
5952 quota = IWM_MAX_QUOTA / num_active_macs;
5953 quota_rem = IWM_MAX_QUOTA % num_active_macs;
5954 }
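	/*
	 * Illustrative numbers, assuming IWM_MAX_QUOTA is 128 fragments:
	 * one active MAC gets quota 128 with remainder 0; three active
	 * MACs would get 42 each with a remainder of 2, which is handed
	 * to the first binding below.
	 */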
5955
5956 for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
5957 if (colors[i] < 0)
5958 continue;
5959
5960 cmd.quotas[idx].id_and_color =
5961 htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
5962
5963 if (n_ifs[i] <= 0) {
5964 cmd.quotas[idx].quota = htole32(0);
5965 cmd.quotas[idx].max_duration = htole32(0);
5966 } else {
5967 cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
5968 cmd.quotas[idx].max_duration = htole32(0);
5969 }
5970 idx++;
5971 }
5972
5973 /* Give the remainder of the session to the first binding */
5974 cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
5975
5976 return iwm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, 0, sizeof(cmd), &cmd);
5977 }
5978
5979 static int
5980 iwm_auth(struct iwm_softc *sc)
5981 {
5982 struct ieee80211com *ic = &sc->sc_ic;
5983 struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
5984 uint32_t duration;
5985 int err;
5986
5987 err = iwm_sf_config(sc, IWM_SF_FULL_ON);
5988 if (err)
5989 return err;
5990
5991 err = iwm_allow_mcast(sc);
5992 if (err)
5993 return err;
5994
5995 sc->sc_phyctxt[0].channel = in->in_ni.ni_chan;
5996 err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[0], 1, 1,
5997 IWM_FW_CTXT_ACTION_MODIFY, 0);
5998 if (err)
5999 return err;
6000 in->in_phyctxt = &sc->sc_phyctxt[0];
6001
6002 err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD, 0);
6003 if (err) {
6004 aprint_error_dev(sc->sc_dev,
6005 "could not add MAC context (error %d)\n", err);
6006 return err;
6007 }
6008
6009 err = iwm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD);
6010 if (err)
6011 return err;
6012
6013 err = iwm_add_sta_cmd(sc, in, 0);
6014 if (err)
6015 return err;
6016
6017 err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 0);
6018 if (err) {
6019 aprint_error_dev(sc->sc_dev, "failed to update MAC\n");
6020 return err;
6021 }
6022
6023 /*
6024 * Prevent the FW from wandering off channel during association
6025 * by "protecting" the session with a time event.
6026 */
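	/*
	 * With a typical beacon interval of 100 TU this protects the
	 * channel for roughly 200 TU (on the order of 200 ms).
	 */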
6027 if (in->in_ni.ni_intval)
6028 duration = in->in_ni.ni_intval * 2;
6029 else
6030 duration = IEEE80211_DUR_TU;
6031 iwm_protect_session(sc, in, duration, in->in_ni.ni_intval / 2);
6032 DELAY(100);
6033
6034 return 0;
6035 }
6036
6037 static int
6038 iwm_assoc(struct iwm_softc *sc)
6039 {
6040 struct ieee80211com *ic = &sc->sc_ic;
6041 struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
6042 int err;
6043
6044 err = iwm_add_sta_cmd(sc, in, 1);
6045 if (err)
6046 return err;
6047
6048 return 0;
6049 }
6050
6051 static struct ieee80211_node *
6052 iwm_node_alloc(struct ieee80211_node_table *nt)
6053 {
6054 return malloc(sizeof(struct iwm_node), M_80211_NODE, M_NOWAIT | M_ZERO);
6055 }
6056
6057 static void
6058 iwm_calib_timeout(void *arg)
6059 {
6060 struct iwm_softc *sc = arg;
6061 struct ieee80211com *ic = &sc->sc_ic;
6062 struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
6063 #ifndef IEEE80211_NO_HT
6064 struct ieee80211_node *ni = &in->in_ni;
6065 int otxrate;
6066 #endif
6067 int s;
6068
6069 s = splnet();
6070 if ((ic->ic_fixed_rate == -1
6071 #ifndef IEEE80211_NO_HT
6072 || ic->ic_fixed_mcs == -1
6073 #endif
6074 ) &&
6075 ic->ic_opmode == IEEE80211_M_STA && ic->ic_bss) {
6076 #ifndef IEEE80211_NO_HT
6077 if (ni->ni_flags & IEEE80211_NODE_HT)
6078 otxrate = ni->ni_txmcs;
6079 else
6080 otxrate = ni->ni_txrate;
6081 #endif
6082 ieee80211_amrr_choose(&sc->sc_amrr, &in->in_ni, &in->in_amn);
6083
6084 #ifndef IEEE80211_NO_HT
6085 /*
6086 * If AMRR has chosen a new TX rate we must update
6087 		 * the firmware's LQ rate table from process context.
6088 */
6089 if ((ni->ni_flags & IEEE80211_NODE_HT) &&
6090 otxrate != ni->ni_txmcs)
6091 softint_schedule(sc->setrates_task);
6092 else if (otxrate != ni->ni_txrate)
6093 softint_schedule(sc->setrates_task);
6094 #endif
6095 }
6096 splx(s);
6097
6098 callout_schedule(&sc->sc_calib_to, mstohz(500));
6099 }
6100
6101 #ifndef IEEE80211_NO_HT
6102 static void
6103 iwm_setrates_task(void *arg)
6104 {
6105 struct iwm_softc *sc = arg;
6106 struct ieee80211com *ic = &sc->sc_ic;
6107 struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
6108
6109 /* Update rates table based on new TX rate determined by AMRR. */
6110 iwm_setrates(in);
6111 }
6112
6113 static int
6114 iwm_setrates(struct iwm_node *in)
6115 {
6116 struct ieee80211_node *ni = &in->in_ni;
6117 struct ieee80211com *ic = ni->ni_ic;
6118 struct iwm_softc *sc = IC2IFP(ic)->if_softc;
6119 struct iwm_lq_cmd *lq = &in->in_lq;
6120 struct ieee80211_rateset *rs = &ni->ni_rates;
6121 int i, j, ridx, ridx_min, tab = 0;
6122 #ifndef IEEE80211_NO_HT
6123 int sgi_ok;
6124 #endif
6125 struct iwm_host_cmd cmd = {
6126 .id = IWM_LQ_CMD,
6127 .len = { sizeof(in->in_lq), },
6128 };
6129
6130 memset(lq, 0, sizeof(*lq));
6131 lq->sta_id = IWM_STATION_ID;
6132
6133 if (ic->ic_flags & IEEE80211_F_USEPROT)
6134 lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;
6135
6136 #ifndef IEEE80211_NO_HT
6137 sgi_ok = ((ni->ni_flags & IEEE80211_NODE_HT) &&
6138 (ni->ni_htcaps & IEEE80211_HTCAP_SGI20));
6139 #endif
6140
6142 /*
6143 * Fill the LQ rate selection table with legacy and/or HT rates
6144 * in descending order, i.e. with the node's current TX rate first.
6145 * In cases where throughput of an HT rate corresponds to a legacy
6146 * rate it makes no sense to add both. We rely on the fact that
6147 * iwm_rates is laid out such that equivalent HT/legacy rates share
6148 * the same IWM_RATE_*_INDEX value. Also, rates not applicable to
6149 * legacy/HT are assumed to be marked with an 'invalid' PLCP value.
6150 */
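	/*
	 * Example (illustrative): a legacy 11g node currently at
	 * 36 Mbit/s gets its supported rates at or below 36 Mbit/s in
	 * descending order; the table is then padded with the lowest
	 * entry below.
	 */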
6151 j = 0;
6152 ridx_min = (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) ?
6153 IWM_RIDX_OFDM : IWM_RIDX_CCK;
6154 for (ridx = IWM_RIDX_MAX; ridx >= ridx_min; ridx--) {
6155 if (j >= __arraycount(lq->rs_table))
6156 break;
6157 tab = 0;
6158 #ifndef IEEE80211_NO_HT
6159 if ((ni->ni_flags & IEEE80211_NODE_HT) &&
6160 iwm_rates[ridx].ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP) {
6161 for (i = ni->ni_txmcs; i >= 0; i--) {
6162 if (isclr(ni->ni_rxmcs, i))
6163 continue;
6164 if (ridx == iwm_mcs2ridx[i]) {
6165 tab = iwm_rates[ridx].ht_plcp;
6166 tab |= IWM_RATE_MCS_HT_MSK;
6167 if (sgi_ok)
6168 tab |= IWM_RATE_MCS_SGI_MSK;
6169 break;
6170 }
6171 }
6172 }
6173 #endif
6174 if (tab == 0 && iwm_rates[ridx].plcp != IWM_RATE_INVM_PLCP) {
6175 for (i = ni->ni_txrate; i >= 0; i--) {
6176 if (iwm_rates[ridx].rate == (rs->rs_rates[i] &
6177 IEEE80211_RATE_VAL)) {
6178 tab = iwm_rates[ridx].plcp;
6179 break;
6180 }
6181 }
6182 }
6183
6184 if (tab == 0)
6185 continue;
6186
6187 tab |= 1 << IWM_RATE_MCS_ANT_POS;
6188 if (IWM_RIDX_IS_CCK(ridx))
6189 tab |= IWM_RATE_MCS_CCK_MSK;
6190 DPRINTFN(2, ("station rate %d %x\n", i, tab));
6191 lq->rs_table[j++] = htole32(tab);
6192 }
6193
6194 /* Fill the rest with the lowest possible rate */
6195 i = j > 0 ? j - 1 : 0;
6196 while (j < __arraycount(lq->rs_table))
6197 lq->rs_table[j++] = lq->rs_table[i];
6198
6199 lq->single_stream_ant_msk = IWM_ANT_A;
6200 lq->dual_stream_ant_msk = IWM_ANT_AB;
6201
6202 lq->agg_time_limit = htole16(4000); /* 4ms */
6203 lq->agg_disable_start_th = 3;
6204 #ifdef notyet
6205 lq->agg_frame_cnt_limit = 0x3f;
6206 #else
6207 lq->agg_frame_cnt_limit = 1; /* tx agg disabled */
6208 #endif
6209
6210 cmd.data[0] = &in->in_lq;
6211 return iwm_send_cmd(sc, &cmd);
6212 }
6213 #endif
6214
6215 static int
6216 iwm_media_change(struct ifnet *ifp)
6217 {
6218 struct iwm_softc *sc = ifp->if_softc;
6219 struct ieee80211com *ic = &sc->sc_ic;
6220 uint8_t rate, ridx;
6221 int err;
6222
6223 err = ieee80211_media_change(ifp);
6224 if (err != ENETRESET)
6225 return err;
6226
6227 #ifndef IEEE80211_NO_HT
6228 if (ic->ic_fixed_mcs != -1)
6229 sc->sc_fixed_ridx = iwm_mcs2ridx[ic->ic_fixed_mcs];
6230 else
6231 #endif
6232 if (ic->ic_fixed_rate != -1) {
6233 rate = ic->ic_sup_rates[ic->ic_curmode].
6234 rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL;
6235 /* Map 802.11 rate to HW rate index. */
6236 for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
6237 if (iwm_rates[ridx].rate == rate)
6238 break;
6239 sc->sc_fixed_ridx = ridx;
6240 }
6241
6242 if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
6243 (IFF_UP | IFF_RUNNING)) {
6244 iwm_stop(ifp, 0);
6245 err = iwm_init(ifp);
6246 }
6247 return err;
6248 }
6249
6250 static int
6251 iwm_do_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
6252 {
6253 struct ifnet *ifp = IC2IFP(ic);
6254 struct iwm_softc *sc = ifp->if_softc;
6255 enum ieee80211_state ostate = ic->ic_state;
6256 struct iwm_node *in;
6257 int err;
6258
6259 DPRINTF(("switching state %s->%s\n", ieee80211_state_name[ostate],
6260 ieee80211_state_name[nstate]));
6261
6262 if (ostate == IEEE80211_S_SCAN && nstate != ostate)
6263 iwm_led_blink_stop(sc);
6264
6265 if (ostate == IEEE80211_S_RUN && nstate != ostate)
6266 iwm_disable_beacon_filter(sc);
6267
6268 /* Reset the device if moving out of AUTH, ASSOC, or RUN. */
6269 /* XXX Is there a way to switch states without a full reset? */
6270 if (ostate > IEEE80211_S_SCAN && nstate < ostate) {
6271 /*
6272 * Upon receiving a deauth frame from AP the net80211 stack
6273 * puts the driver into AUTH state. This will fail with this
6274 		 * driver, so force the FSM through INIT and reinitialize instead.
6275 */
6276 if (nstate != IEEE80211_S_INIT) {
6277 DPRINTF(("Force transition to INIT; MGT=%d\n", arg));
6278 /* Always pass arg as -1 since we can't Tx right now. */
6279 sc->sc_newstate(ic, IEEE80211_S_INIT, -1);
6280 iwm_stop(ifp, 0);
6281 iwm_init(ifp);
6282 return 0;
6283 }
6284
6285 iwm_stop_device(sc);
6286 iwm_init_hw(sc);
6287 }
6288
6289 switch (nstate) {
6290 case IEEE80211_S_INIT:
6291 break;
6292
6293 case IEEE80211_S_SCAN:
6294 if (ostate == nstate &&
6295 ISSET(sc->sc_flags, IWM_FLAG_SCANNING))
6296 return 0;
6297 if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
6298 err = iwm_umac_scan(sc);
6299 else
6300 err = iwm_lmac_scan(sc);
6301 if (err) {
6302 DPRINTF(("%s: could not initiate scan: %d\n",
6303 DEVNAME(sc), err));
6304 return err;
6305 }
6306 SET(sc->sc_flags, IWM_FLAG_SCANNING);
6307 ic->ic_state = nstate;
6308 iwm_led_blink_start(sc);
6309 return 0;
6310
6311 case IEEE80211_S_AUTH:
6312 err = iwm_auth(sc);
6313 if (err) {
6314 DPRINTF(("%s: could not move to auth state: %d\n",
6315 DEVNAME(sc), err));
6316 return err;
6317 }
6318 break;
6319
6320 case IEEE80211_S_ASSOC:
6321 err = iwm_assoc(sc);
6322 if (err) {
6323 DPRINTF(("%s: failed to associate: %d\n", DEVNAME(sc),
6324 err));
6325 return err;
6326 }
6327 break;
6328
6329 case IEEE80211_S_RUN:
6330 in = (struct iwm_node *)ic->ic_bss;
6331
6332 /* We have now been assigned an associd by the AP. */
6333 err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 1);
6334 if (err) {
6335 aprint_error_dev(sc->sc_dev, "failed to update MAC\n");
6336 return err;
6337 }
6338
6339 err = iwm_power_update_device(sc);
6340 if (err) {
6341 aprint_error_dev(sc->sc_dev,
6342 			    "could not send power command (error %d)\n", err);
6343 return err;
6344 }
6345 #ifdef notyet
6346 /*
6347 * Disabled for now. Default beacon filter settings
6348 * prevent net80211 from getting ERP and HT protection
6349 * updates from beacons.
6350 */
6351 err = iwm_enable_beacon_filter(sc, in);
6352 if (err) {
6353 aprint_error_dev(sc->sc_dev,
6354 "could not enable beacon filter\n");
6355 return err;
6356 }
6357 #endif
6358 err = iwm_power_mac_update_mode(sc, in);
6359 if (err) {
6360 aprint_error_dev(sc->sc_dev,
6361 "could not update MAC power (error %d)\n", err);
6362 return err;
6363 }
6364
6365 err = iwm_update_quotas(sc, in);
6366 if (err) {
6367 aprint_error_dev(sc->sc_dev,
6368 "could not update quotas (error %d)\n", err);
6369 return err;
6370 }
6371
6372 ieee80211_amrr_node_init(&sc->sc_amrr, &in->in_amn);
6373
6374 /* Start at lowest available bit-rate, AMRR will raise. */
6375 in->in_ni.ni_txrate = 0;
6376 #ifndef IEEE80211_NO_HT
6377 in->in_ni.ni_txmcs = 0;
6378 iwm_setrates(in);
6379 #endif
6380
6381 callout_schedule(&sc->sc_calib_to, mstohz(500));
6382 iwm_led_enable(sc);
6383 break;
6384
6385 default:
6386 break;
6387 }
6388
6389 return sc->sc_newstate(ic, nstate, arg);
6390 }
6391
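/*
 * State transitions are deferred to a workqueue: iwm_newstate() only
 * queues a request, and this worker replays it via iwm_do_newstate(),
 * which may sleep waiting on firmware commands. The generation check
 * drops requests that were queued before an interface reset.
 */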
6392 static void
6393 iwm_newstate_cb(struct work *wk, void *v)
6394 {
6395 struct iwm_softc *sc = v;
6396 struct ieee80211com *ic = &sc->sc_ic;
6397 struct iwm_newstate_state *iwmns = (struct iwm_newstate_state *)wk;
6398 enum ieee80211_state nstate = iwmns->ns_nstate;
6399 int generation = iwmns->ns_generation;
6400 int arg = iwmns->ns_arg;
6401 int s;
6402
6403 kmem_free(iwmns, sizeof(*iwmns));
6404
6405 s = splnet();
6406
6407 DPRINTF(("Prepare to switch state %d->%d\n", ic->ic_state, nstate));
6408 if (sc->sc_generation != generation) {
6409 DPRINTF(("newstate_cb: someone pulled the plug meanwhile\n"));
6410 if (nstate == IEEE80211_S_INIT) {
6411 DPRINTF(("newstate_cb: nstate == IEEE80211_S_INIT: "
6412 "calling sc_newstate()\n"));
6413 (void) sc->sc_newstate(ic, nstate, arg);
6414 }
6415 } else
6416 (void) iwm_do_newstate(ic, nstate, arg);
6417
6418 splx(s);
6419 }
6420
6421 static int
6422 iwm_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
6423 {
6424 struct iwm_newstate_state *iwmns;
6425 struct ifnet *ifp = IC2IFP(ic);
6426 struct iwm_softc *sc = ifp->if_softc;
6427
6428 callout_stop(&sc->sc_calib_to);
6429
6430 iwmns = kmem_intr_alloc(sizeof(*iwmns), KM_NOSLEEP);
6431 if (!iwmns) {
6432 DPRINTF(("%s: allocating state cb mem failed\n", DEVNAME(sc)));
6433 return ENOMEM;
6434 }
6435
6436 iwmns->ns_nstate = nstate;
6437 iwmns->ns_arg = arg;
6438 iwmns->ns_generation = sc->sc_generation;
6439
6440 workqueue_enqueue(sc->sc_nswq, &iwmns->ns_wk, NULL);
6441
6442 return 0;
6443 }
6444
6445 static void
6446 iwm_endscan(struct iwm_softc *sc)
6447 {
6448 struct ieee80211com *ic = &sc->sc_ic;
6449 int s;
6450
6451 DPRINTF(("%s: scan ended\n", DEVNAME(sc)));
6452
6453 s = splnet();
6454 if (ic->ic_state == IEEE80211_S_SCAN)
6455 ieee80211_end_scan(ic);
6456 splx(s);
6457 }
6458
6459 /*
6460 * Aging and idle timeouts for the different possible scenarios
6461 * in default configuration
6462 */
6463 static const uint32_t
6464 iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
6465 {
6466 htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
6467 htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
6468 },
6469 {
6470 htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF),
6471 htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)
6472 },
6473 {
6474 htole32(IWM_SF_MCAST_AGING_TIMER_DEF),
6475 htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)
6476 },
6477 {
6478 htole32(IWM_SF_BA_AGING_TIMER_DEF),
6479 htole32(IWM_SF_BA_IDLE_TIMER_DEF)
6480 },
6481 {
6482 htole32(IWM_SF_TX_RE_AGING_TIMER_DEF),
6483 htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)
6484 },
6485 };
6486
6487 /*
6488 * Aging and idle timeouts for the different possible scenarios
6489 * in single BSS MAC configuration.
6490 */
6491 static const uint32_t
6492 iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
6493 {
6494 htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER),
6495 htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)
6496 },
6497 {
6498 htole32(IWM_SF_AGG_UNICAST_AGING_TIMER),
6499 htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)
6500 },
6501 {
6502 htole32(IWM_SF_MCAST_AGING_TIMER),
6503 htole32(IWM_SF_MCAST_IDLE_TIMER)
6504 },
6505 {
6506 htole32(IWM_SF_BA_AGING_TIMER),
6507 htole32(IWM_SF_BA_IDLE_TIMER)
6508 },
6509 {
6510 htole32(IWM_SF_TX_RE_AGING_TIMER),
6511 htole32(IWM_SF_TX_RE_IDLE_TIMER)
6512 },
6513 };
6514
6515 static void
6516 iwm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
6517 struct ieee80211_node *ni)
6518 {
6519 int i, j, watermark;
6520
6521 sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN);
6522
6523 /*
6524 * If we are in association flow - check antenna configuration
6525 * capabilities of the AP station, and choose the watermark accordingly.
6526 */
6527 if (ni) {
6528 #ifndef IEEE80211_NO_HT
6529 if (ni->ni_flags & IEEE80211_NODE_HT) {
6530 #ifdef notyet
6531 if (ni->ni_rxmcs[2] != 0)
6532 watermark = IWM_SF_W_MARK_MIMO3;
6533 else if (ni->ni_rxmcs[1] != 0)
6534 watermark = IWM_SF_W_MARK_MIMO2;
6535 else
6536 #endif
6537 watermark = IWM_SF_W_MARK_SISO;
6538 } else
6539 #endif
6540 watermark = IWM_SF_W_MARK_LEGACY;
6541 	} else {
6542 		/* Default watermark value for unassociated mode. */
6543 		watermark = IWM_SF_W_MARK_MIMO2;
6544 	}
6545 sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark);
6546
6547 for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) {
6548 for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) {
6549 sf_cmd->long_delay_timeouts[i][j] =
6550 htole32(IWM_SF_LONG_DELAY_AGING_TIMER);
6551 }
6552 }
6553
6554 if (ni) {
6555 memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
6556 sizeof(iwm_sf_full_timeout));
6557 } else {
6558 memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
6559 sizeof(iwm_sf_full_timeout_def));
6560 }
6561 }
6562
6563 static int
6564 iwm_sf_config(struct iwm_softc *sc, int new_state)
6565 {
6566 struct ieee80211com *ic = &sc->sc_ic;
6567 struct iwm_sf_cfg_cmd sf_cmd = {
6568 .state = htole32(IWM_SF_FULL_ON),
6569 };
6570
6571 if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
6572 sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF);
6573
6574 switch (new_state) {
6575 case IWM_SF_UNINIT:
6576 case IWM_SF_INIT_OFF:
6577 iwm_fill_sf_command(sc, &sf_cmd, NULL);
6578 break;
6579 case IWM_SF_FULL_ON:
6580 iwm_fill_sf_command(sc, &sf_cmd, ic->ic_bss);
6581 break;
6582 default:
6583 return EINVAL;
6584 }
6585
6586 return iwm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
6587 sizeof(sf_cmd), &sf_cmd);
6588 }
6589
6590 static int
6591 iwm_send_bt_init_conf(struct iwm_softc *sc)
6592 {
6593 struct iwm_bt_coex_cmd bt_cmd;
6594
6595 bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
6596 bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
6597
6598 return iwm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd), &bt_cmd);
6599 }
6600
6601 static bool
6602 iwm_is_lar_supported(struct iwm_softc *sc)
6603 {
6604 bool nvm_lar = sc->sc_nvm.lar_enabled;
6605 bool tlv_lar = isset(sc->sc_enabled_capa,
6606 IWM_UCODE_TLV_CAPA_LAR_SUPPORT);
6607
6608 if (iwm_lar_disable)
6609 return false;
6610
6611 /*
6612 * Enable LAR only if it is supported by the FW (TLV) &&
6613 * enabled in the NVM
6614 */
6615 if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
6616 return nvm_lar && tlv_lar;
6617 else
6618 return tlv_lar;
6619 }
6620
6621 static int
6622 iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
6623 {
6624 struct iwm_mcc_update_cmd mcc_cmd;
6625 struct iwm_host_cmd hcmd = {
6626 .id = IWM_MCC_UPDATE_CMD,
6627 .flags = IWM_CMD_WANT_SKB,
6628 .data = { &mcc_cmd },
6629 };
6630 int err;
6631 int resp_v2 = isset(sc->sc_enabled_capa,
6632 IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
6633
6634 if (!iwm_is_lar_supported(sc)) {
6635 DPRINTF(("%s: no LAR support\n", __func__));
6636 return 0;
6637 }
6638
6639 memset(&mcc_cmd, 0, sizeof(mcc_cmd));
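	/*
	 * The MCC is the two ASCII country-code letters packed into a
	 * 16-bit word, e.g. "US" encodes as ('U' << 8) | 'S' == 0x5553.
	 */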
6640 mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
6641 if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
6642 isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC))
6643 mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
6644 else
6645 mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
6646
6647 if (resp_v2)
6648 hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
6649 else
6650 hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
6651
6652 err = iwm_send_cmd(sc, &hcmd);
6653 if (err)
6654 return err;
6655
6656 iwm_free_resp(sc, &hcmd);
6657
6658 return 0;
6659 }
6660
6661 static void
6662 iwm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
6663 {
6664 struct iwm_host_cmd cmd = {
6665 .id = IWM_REPLY_THERMAL_MNG_BACKOFF,
6666 .len = { sizeof(uint32_t), },
6667 .data = { &backoff, },
6668 };
6669
6670 iwm_send_cmd(sc, &cmd);
6671 }
6672
6673 static int
6674 iwm_init_hw(struct iwm_softc *sc)
6675 {
6676 struct ieee80211com *ic = &sc->sc_ic;
6677 int err, i, ac;
6678
6679 err = iwm_preinit(sc);
6680 if (err)
6681 return err;
6682
6683 err = iwm_start_hw(sc);
6684 if (err) {
6685 aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
6686 return err;
6687 }
6688
6689 err = iwm_run_init_mvm_ucode(sc, 0);
6690 if (err)
6691 return err;
6692
6693 /* Should stop and start HW since INIT image just loaded. */
6694 iwm_stop_device(sc);
6695 err = iwm_start_hw(sc);
6696 if (err) {
6697 aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
6698 return err;
6699 }
6700
6701 /* Restart, this time with the regular firmware */
6702 err = iwm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_REGULAR);
6703 if (err) {
6704 aprint_error_dev(sc->sc_dev,
6705 "could not load firmware (error %d)\n", err);
6706 goto err;
6707 }
6708
6709 err = iwm_send_bt_init_conf(sc);
6710 if (err) {
6711 aprint_error_dev(sc->sc_dev,
6712 "could not init bt coex (error %d)\n", err);
6713 goto err;
6714 }
6715
6716 err = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc));
6717 if (err) {
6718 aprint_error_dev(sc->sc_dev,
6719 "could not init tx ant config (error %d)\n", err);
6720 goto err;
6721 }
6722
6723 	/* Send phy db control command and then phy db calibration. */
6724 err = iwm_send_phy_db_data(sc);
6725 if (err) {
6726 aprint_error_dev(sc->sc_dev,
6727 "could not init phy db (error %d)\n", err);
6728 goto err;
6729 }
6730
6731 err = iwm_send_phy_cfg_cmd(sc);
6732 if (err) {
6733 aprint_error_dev(sc->sc_dev,
6734 "could not send phy config (error %d)\n", err);
6735 goto err;
6736 }
6737
6738 /* Add auxiliary station for scanning */
6739 err = iwm_add_aux_sta(sc);
6740 if (err) {
6741 aprint_error_dev(sc->sc_dev,
6742 "could not add aux station (error %d)\n", err);
6743 goto err;
6744 }
6745
6746 for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
6747 /*
6748 * The channel used here isn't relevant as it's
6749 * going to be overwritten in the other flows.
6750 * For now use the first channel we have.
6751 */
6752 sc->sc_phyctxt[i].channel = &ic->ic_channels[1];
6753 err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[i], 1, 1,
6754 IWM_FW_CTXT_ACTION_ADD, 0);
6755 if (err) {
6756 aprint_error_dev(sc->sc_dev,
6757 "could not add phy context %d (error %d)\n",
6758 i, err);
6759 goto err;
6760 }
6761 }
6762
6763 /* Initialize tx backoffs to the minimum. */
6764 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
6765 iwm_tt_tx_backoff(sc, 0);
6766
6767 err = iwm_power_update_device(sc);
6768 if (err) {
6769 aprint_error_dev(sc->sc_dev,
6770 		    "could not send power command (error %d)\n", err);
6771 goto err;
6772 }
6773
6774 err = iwm_send_update_mcc_cmd(sc, iwm_default_mcc);
6775 if (err) {
6776 aprint_error_dev(sc->sc_dev,
6777 "could not init LAR (error %d)\n", err);
6778 goto err;
6779 }
6780
6781 if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
6782 err = iwm_config_umac_scan(sc);
6783 if (err) {
6784 aprint_error_dev(sc->sc_dev,
6785 "could not configure scan (error %d)\n", err);
6786 goto err;
6787 }
6788 }
6789
6790 for (ac = 0; ac < WME_NUM_AC; ac++) {
6791 err = iwm_enable_txq(sc, IWM_STATION_ID, ac,
6792 iwm_ac_to_tx_fifo[ac]);
6793 if (err) {
6794 aprint_error_dev(sc->sc_dev,
6795 "could not enable Tx queue %d (error %d)\n",
6796 			    ac, err);
6797 goto err;
6798 }
6799 }
6800
6801 err = iwm_disable_beacon_filter(sc);
6802 if (err) {
6803 aprint_error_dev(sc->sc_dev,
6804 "could not disable beacon filter (error %d)\n", err);
6805 goto err;
6806 }
6807
6808 return 0;
6809
6810 err:
6811 iwm_stop_device(sc);
6812 return err;
6813 }
6814
6815 /* Allow multicast from our BSSID. */
6816 static int
6817 iwm_allow_mcast(struct iwm_softc *sc)
6818 {
6819 struct ieee80211com *ic = &sc->sc_ic;
6820 struct ieee80211_node *ni = ic->ic_bss;
6821 struct iwm_mcast_filter_cmd *cmd;
6822 size_t size;
6823 int err;
6824
6825 size = roundup(sizeof(*cmd), 4);
6826 cmd = kmem_intr_zalloc(size, KM_NOSLEEP);
6827 if (cmd == NULL)
6828 return ENOMEM;
6829 cmd->filter_own = 1;
6830 cmd->port_id = 0;
6831 cmd->count = 0;
6832 cmd->pass_all = 1;
6833 IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
6834
6835 err = iwm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD, 0, size, cmd);
6836 kmem_intr_free(cmd, size);
6837 return err;
6838 }
6839
6840 static int
6841 iwm_init(struct ifnet *ifp)
6842 {
6843 struct iwm_softc *sc = ifp->if_softc;
6844 int err;
6845
6846 if (ISSET(sc->sc_flags, IWM_FLAG_HW_INITED))
6847 return 0;
6848
6849 sc->sc_generation++;
6850 sc->sc_flags &= ~IWM_FLAG_STOPPED;
6851
6852 err = iwm_init_hw(sc);
6853 if (err) {
6854 iwm_stop(ifp, 1);
6855 return err;
6856 }
6857
6858 ifp->if_flags &= ~IFF_OACTIVE;
6859 ifp->if_flags |= IFF_RUNNING;
6860
6861 ieee80211_begin_scan(&sc->sc_ic, 0);
6862 SET(sc->sc_flags, IWM_FLAG_HW_INITED);
6863
6864 return 0;
6865 }
6866
6867 static void
6868 iwm_start(struct ifnet *ifp)
6869 {
6870 struct iwm_softc *sc = ifp->if_softc;
6871 struct ieee80211com *ic = &sc->sc_ic;
6872 struct ieee80211_node *ni;
6873 struct ether_header *eh;
6874 struct mbuf *m;
6875 int ac;
6876
6877 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
6878 return;
6879
6880 for (;;) {
6881 /* why isn't this done per-queue? */
6882 if (sc->qfullmsk != 0) {
6883 ifp->if_flags |= IFF_OACTIVE;
6884 break;
6885 }
6886
6887 /* need to send management frames even if we're not RUNning */
6888 IF_DEQUEUE(&ic->ic_mgtq, m);
6889 if (m) {
6890 ni = M_GETCTX(m, struct ieee80211_node *);
6891 m->m_pkthdr.rcvif = NULL;
6892 ac = WME_AC_BE;
6893 goto sendit;
6894 }
6895 if (ic->ic_state != IEEE80211_S_RUN) {
6896 break;
6897 }
6898
6899 IFQ_DEQUEUE(&ifp->if_snd, m);
6900 if (m == NULL)
6901 break;
6902
6903 if (m->m_len < sizeof (*eh) &&
6904 (m = m_pullup(m, sizeof (*eh))) == NULL) {
6905 ifp->if_oerrors++;
6906 continue;
6907 }
6908
6909 eh = mtod(m, struct ether_header *);
6910 ni = ieee80211_find_txnode(ic, eh->ether_dhost);
6911 if (ni == NULL) {
6912 m_freem(m);
6913 ifp->if_oerrors++;
6914 continue;
6915 }
6916
6917 /* classify mbuf so we can find which tx ring to use */
6918 if (ieee80211_classify(ic, m, ni) != 0) {
6919 m_freem(m);
6920 ieee80211_free_node(ni);
6921 ifp->if_oerrors++;
6922 continue;
6923 }
6924
6925 /* No QoS encapsulation for EAPOL frames. */
6926 ac = (eh->ether_type != htons(ETHERTYPE_PAE)) ?
6927 M_WME_GETAC(m) : WME_AC_BE;
6928
6929 bpf_mtap(ifp, m);
6930
6931 if ((m = ieee80211_encap(ic, m, ni)) == NULL) {
6932 ieee80211_free_node(ni);
6933 ifp->if_oerrors++;
6934 continue;
6935 }
6936
6937 sendit:
6938 bpf_mtap3(ic->ic_rawbpf, m);
6939
6940 if (iwm_tx(sc, m, ni, ac) != 0) {
6941 ieee80211_free_node(ni);
6942 ifp->if_oerrors++;
6943 continue;
6944 }
6945
6946 if (ifp->if_flags & IFF_UP) {
6947 sc->sc_tx_timer = 15;
6948 ifp->if_timer = 1;
6949 }
6950 }
6951 }
6952
6953 static void
6954 iwm_stop(struct ifnet *ifp, int disable)
6955 {
6956 struct iwm_softc *sc = ifp->if_softc;
6957 struct ieee80211com *ic = &sc->sc_ic;
6958 struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
6959
6960 sc->sc_flags &= ~IWM_FLAG_HW_INITED;
6961 sc->sc_flags |= IWM_FLAG_STOPPED;
6962 sc->sc_generation++;
6963 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
6964
6965 if (in)
6966 in->in_phyctxt = NULL;
6967
6968 if (ic->ic_state != IEEE80211_S_INIT)
6969 ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
6970
6971 callout_stop(&sc->sc_calib_to);
6972 iwm_led_blink_stop(sc);
6973 ifp->if_timer = sc->sc_tx_timer = 0;
6974 iwm_stop_device(sc);
6975 }
6976
6977 static void
6978 iwm_watchdog(struct ifnet *ifp)
6979 {
6980 struct iwm_softc *sc = ifp->if_softc;
6981
6982 ifp->if_timer = 0;
6983 if (sc->sc_tx_timer > 0) {
6984 if (--sc->sc_tx_timer == 0) {
6985 aprint_error_dev(sc->sc_dev, "device timeout\n");
6986 #ifdef IWM_DEBUG
6987 iwm_nic_error(sc);
6988 #endif
6989 ifp->if_flags &= ~IFF_UP;
6990 iwm_stop(ifp, 1);
6991 ifp->if_oerrors++;
6992 return;
6993 }
6994 ifp->if_timer = 1;
6995 }
6996
6997 ieee80211_watchdog(&sc->sc_ic);
6998 }
6999
7000 static int
7001 iwm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
7002 {
7003 struct iwm_softc *sc = ifp->if_softc;
7004 struct ieee80211com *ic = &sc->sc_ic;
7005 const struct sockaddr *sa;
7006 int s, err = 0;
7007
7008 s = splnet();
7009
7010 switch (cmd) {
7011 case SIOCSIFADDR:
7012 ifp->if_flags |= IFF_UP;
7013 /* FALLTHROUGH */
7014 case SIOCSIFFLAGS:
7015 err = ifioctl_common(ifp, cmd, data);
7016 if (err)
7017 break;
7018 if (ifp->if_flags & IFF_UP) {
7019 if (!(ifp->if_flags & IFF_RUNNING)) {
7020 err = iwm_init(ifp);
7021 if (err)
7022 ifp->if_flags &= ~IFF_UP;
7023 }
7024 } else {
7025 if (ifp->if_flags & IFF_RUNNING)
7026 iwm_stop(ifp, 1);
7027 }
7028 break;
7029
7030 case SIOCADDMULTI:
7031 case SIOCDELMULTI:
7032 if (!ISSET(sc->sc_flags, IWM_FLAG_ATTACHED)) {
7033 err = ENXIO;
7034 break;
7035 }
7036 sa = ifreq_getaddr(SIOCADDMULTI, (struct ifreq *)data);
7037 err = (cmd == SIOCADDMULTI) ?
7038 ether_addmulti(sa, &sc->sc_ec) :
7039 ether_delmulti(sa, &sc->sc_ec);
7040 if (err == ENETRESET)
7041 err = 0;
7042 break;
7043
7044 default:
7045 if (!ISSET(sc->sc_flags, IWM_FLAG_ATTACHED)) {
7046 err = ether_ioctl(ifp, cmd, data);
7047 break;
7048 }
7049 err = ieee80211_ioctl(ic, cmd, data);
7050 break;
7051 }
7052
7053 if (err == ENETRESET) {
7054 err = 0;
7055 if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
7056 (IFF_UP | IFF_RUNNING)) {
7057 iwm_stop(ifp, 0);
7058 err = iwm_init(ifp);
7059 }
7060 }
7061
7062 splx(s);
7063 return err;
7064 }
7065
7066 /*
7067 * Note: This structure is read from the device with IO accesses,
7068 * and the reading already does the endian conversion. As it is
7069 * read with uint32_t-sized accesses, any members with a different size
7070 * need to be ordered correctly though!
7071 */
7072 struct iwm_error_event_table {
7073 uint32_t valid; /* (nonzero) valid, (0) log is empty */
7074 uint32_t error_id; /* type of error */
7075 uint32_t trm_hw_status0; /* TRM HW status */
7076 uint32_t trm_hw_status1; /* TRM HW status */
7077 uint32_t blink2; /* branch link */
7078 uint32_t ilink1; /* interrupt link */
7079 uint32_t ilink2; /* interrupt link */
7080 uint32_t data1; /* error-specific data */
7081 uint32_t data2; /* error-specific data */
7082 uint32_t data3; /* error-specific data */
7083 uint32_t bcon_time; /* beacon timer */
7084 uint32_t tsf_low; /* network timestamp function timer */
7085 uint32_t tsf_hi; /* network timestamp function timer */
7086 uint32_t gp1; /* GP1 timer register */
7087 uint32_t gp2; /* GP2 timer register */
7088 uint32_t fw_rev_type; /* firmware revision type */
7089 uint32_t major; /* uCode version major */
7090 uint32_t minor; /* uCode version minor */
7091 uint32_t hw_ver; /* HW Silicon version */
7092 uint32_t brd_ver; /* HW board version */
7093 uint32_t log_pc; /* log program counter */
7094 uint32_t frame_ptr; /* frame pointer */
7095 uint32_t stack_ptr; /* stack pointer */
7096 uint32_t hcmd; /* last host command header */
7097 uint32_t isr0; /* isr status register LMPM_NIC_ISR0:
7098 * rxtx_flag */
7099 uint32_t isr1; /* isr status register LMPM_NIC_ISR1:
7100 * host_flag */
7101 uint32_t isr2; /* isr status register LMPM_NIC_ISR2:
7102 * enc_flag */
7103 uint32_t isr3; /* isr status register LMPM_NIC_ISR3:
7104 * time_flag */
7105 uint32_t isr4; /* isr status register LMPM_NIC_ISR4:
7106 * wico interrupt */
7107 uint32_t last_cmd_id; /* last HCMD id handled by the firmware */
7108 uint32_t wait_event; /* wait event() caller address */
7109 uint32_t l2p_control; /* L2pControlField */
7110 uint32_t l2p_duration; /* L2pDurationField */
7111 uint32_t l2p_mhvalid; /* L2pMhValidBits */
7112 uint32_t l2p_addr_match; /* L2pAddrMatchStat */
7113 uint32_t lmpm_pmg_sel; /* indicate which clocks are turned on
7114 * (LMPM_PMG_SEL) */
7115 	uint32_t u_timestamp;	/* date and time of the firmware
7116 					 * compilation */
7117 uint32_t flow_handler; /* FH read/write pointers, RX credit */
7118 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
7119
7120 /*
7121 * UMAC error struct - relevant starting from family 8000 chip.
7122 * Note: This structure is read from the device with IO accesses,
7123 * and the reading already does the endian conversion. As it is
7124 * read with u32-sized accesses, any members with a different size
7125 * need to be ordered correctly though!
7126 */
7127 struct iwm_umac_error_event_table {
7128 uint32_t valid; /* (nonzero) valid, (0) log is empty */
7129 uint32_t error_id; /* type of error */
7130 uint32_t blink1; /* branch link */
7131 uint32_t blink2; /* branch link */
7132 uint32_t ilink1; /* interrupt link */
7133 uint32_t ilink2; /* interrupt link */
7134 uint32_t data1; /* error-specific data */
7135 uint32_t data2; /* error-specific data */
7136 uint32_t data3; /* error-specific data */
7137 uint32_t umac_major;
7138 uint32_t umac_minor;
7139 uint32_t frame_pointer; /* core register 27 */
7140 uint32_t stack_pointer; /* core register 28 */
7141 uint32_t cmd_header; /* latest host cmd sent to UMAC */
7142 uint32_t nic_isr_pref; /* ISR status register */
7143 } __packed;
7144
7145 #define ERROR_START_OFFSET (1 * sizeof(uint32_t))
7146 #define ERROR_ELEM_SIZE (7 * sizeof(uint32_t))
7147
7148 #ifdef IWM_DEBUG
7149 static const struct {
7150 const char *name;
7151 uint8_t num;
7152 } advanced_lookup[] = {
7153 { "NMI_INTERRUPT_WDG", 0x34 },
7154 { "SYSASSERT", 0x35 },
7155 { "UCODE_VERSION_MISMATCH", 0x37 },
7156 { "BAD_COMMAND", 0x38 },
7157 { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
7158 { "FATAL_ERROR", 0x3D },
7159 { "NMI_TRM_HW_ERR", 0x46 },
7160 { "NMI_INTERRUPT_TRM", 0x4C },
7161 { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
7162 { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
7163 { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
7164 { "NMI_INTERRUPT_HOST", 0x66 },
7165 { "NMI_INTERRUPT_ACTION_PT", 0x7C },
7166 { "NMI_INTERRUPT_UNKNOWN", 0x84 },
7167 { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
7168 { "ADVANCED_SYSASSERT", 0 },
7169 };
7170
7171 static const char *
7172 iwm_desc_lookup(uint32_t num)
7173 {
7174 int i;
7175
7176 for (i = 0; i < __arraycount(advanced_lookup) - 1; i++)
7177 if (advanced_lookup[i].num == num)
7178 return advanced_lookup[i].name;
7179
7180 /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
7181 return advanced_lookup[i].name;
7182 }
7183
7184 /*
7185 * Support for dumping the error log seemed like a good idea ...
7186 * but it's mostly hex junk and the only sensible thing is the
7187 * hw/ucode revision (which we know anyway). Since it's here,
7188 * I'll just leave it in, just in case e.g. the Intel guys want to
7189 * help us decipher some "ADVANCED_SYSASSERT" later.
7190 */
7191 static void
7192 iwm_nic_error(struct iwm_softc *sc)
7193 {
7194 struct iwm_error_event_table t;
7195 uint32_t base;
7196
7197 aprint_error_dev(sc->sc_dev, "dumping device error log\n");
7198 base = sc->sc_uc.uc_error_event_table;
7199 if (base < 0x800000) {
7200 aprint_error_dev(sc->sc_dev,
7201 "Invalid error log pointer 0x%08x\n", base);
7202 return;
7203 }
7204
7205 if (iwm_read_mem(sc, base, &t, sizeof(t)/sizeof(uint32_t))) {
7206 aprint_error_dev(sc->sc_dev, "reading errlog failed\n");
7207 return;
7208 }
7209
7210 if (!t.valid) {
7211 aprint_error_dev(sc->sc_dev, "errlog not found, skipping\n");
7212 return;
7213 }
7214
7215 if (ERROR_START_OFFSET <= t.valid * ERROR_ELEM_SIZE) {
7216 aprint_error_dev(sc->sc_dev, "Start Error Log Dump:\n");
7217 aprint_error_dev(sc->sc_dev, "Status: 0x%x, count: %d\n",
7218 sc->sc_flags, t.valid);
7219 }
7220
7221 aprint_error_dev(sc->sc_dev, "%08X | %-28s\n", t.error_id,
7222 iwm_desc_lookup(t.error_id));
7223 aprint_error_dev(sc->sc_dev, "%08X | trm_hw_status0\n",
7224 t.trm_hw_status0);
7225 aprint_error_dev(sc->sc_dev, "%08X | trm_hw_status1\n",
7226 t.trm_hw_status1);
7227 aprint_error_dev(sc->sc_dev, "%08X | branchlink2\n", t.blink2);
7228 aprint_error_dev(sc->sc_dev, "%08X | interruptlink1\n", t.ilink1);
7229 aprint_error_dev(sc->sc_dev, "%08X | interruptlink2\n", t.ilink2);
7230 aprint_error_dev(sc->sc_dev, "%08X | data1\n", t.data1);
7231 aprint_error_dev(sc->sc_dev, "%08X | data2\n", t.data2);
7232 aprint_error_dev(sc->sc_dev, "%08X | data3\n", t.data3);
7233 aprint_error_dev(sc->sc_dev, "%08X | beacon time\n", t.bcon_time);
7234 aprint_error_dev(sc->sc_dev, "%08X | tsf low\n", t.tsf_low);
7235 aprint_error_dev(sc->sc_dev, "%08X | tsf hi\n", t.tsf_hi);
7236 aprint_error_dev(sc->sc_dev, "%08X | time gp1\n", t.gp1);
7237 aprint_error_dev(sc->sc_dev, "%08X | time gp2\n", t.gp2);
7238 aprint_error_dev(sc->sc_dev, "%08X | uCode revision type\n",
7239 t.fw_rev_type);
7240 aprint_error_dev(sc->sc_dev, "%08X | uCode version major\n",
7241 t.major);
7242 aprint_error_dev(sc->sc_dev, "%08X | uCode version minor\n",
7243 t.minor);
7244 aprint_error_dev(sc->sc_dev, "%08X | hw version\n", t.hw_ver);
7245 aprint_error_dev(sc->sc_dev, "%08X | board version\n", t.brd_ver);
7246 aprint_error_dev(sc->sc_dev, "%08X | hcmd\n", t.hcmd);
7247 aprint_error_dev(sc->sc_dev, "%08X | isr0\n", t.isr0);
7248 aprint_error_dev(sc->sc_dev, "%08X | isr1\n", t.isr1);
7249 aprint_error_dev(sc->sc_dev, "%08X | isr2\n", t.isr2);
7250 aprint_error_dev(sc->sc_dev, "%08X | isr3\n", t.isr3);
7251 aprint_error_dev(sc->sc_dev, "%08X | isr4\n", t.isr4);
7252 aprint_error_dev(sc->sc_dev, "%08X | last cmd Id\n", t.last_cmd_id);
7253 aprint_error_dev(sc->sc_dev, "%08X | wait_event\n", t.wait_event);
7254 aprint_error_dev(sc->sc_dev, "%08X | l2p_control\n", t.l2p_control);
7255 aprint_error_dev(sc->sc_dev, "%08X | l2p_duration\n", t.l2p_duration);
7256 aprint_error_dev(sc->sc_dev, "%08X | l2p_mhvalid\n", t.l2p_mhvalid);
7257 aprint_error_dev(sc->sc_dev, "%08X | l2p_addr_match\n",
7258 t.l2p_addr_match);
7259 aprint_error_dev(sc->sc_dev, "%08X | lmpm_pmg_sel\n", t.lmpm_pmg_sel);
7260 aprint_error_dev(sc->sc_dev, "%08X | timestamp\n", t.u_timestamp);
7261 aprint_error_dev(sc->sc_dev, "%08X | flow_handler\n", t.flow_handler);
7262
7263 if (sc->sc_uc.uc_umac_error_event_table)
7264 iwm_nic_umac_error(sc);
7265 }
7266
7267 static void
7268 iwm_nic_umac_error(struct iwm_softc *sc)
7269 {
7270 struct iwm_umac_error_event_table t;
7271 uint32_t base;
7272
7273 base = sc->sc_uc.uc_umac_error_event_table;
7274
7275 if (base < 0x800000) {
7276 aprint_error_dev(sc->sc_dev,
7277 "Invalid error log pointer 0x%08x\n", base);
7278 return;
7279 }
7280
7281 if (iwm_read_mem(sc, base, &t, sizeof(t)/sizeof(uint32_t))) {
7282 aprint_error_dev(sc->sc_dev, "reading errlog failed\n");
7283 return;
7284 }
7285
7286 if (ERROR_START_OFFSET <= t.valid * ERROR_ELEM_SIZE) {
7287 aprint_error_dev(sc->sc_dev, "Start UMAC Error Log Dump:\n");
7288 aprint_error_dev(sc->sc_dev, "Status: 0x%x, count: %d\n",
7289 sc->sc_flags, t.valid);
7290 }
7291
7292 aprint_error_dev(sc->sc_dev, "0x%08X | %s\n", t.error_id,
7293 iwm_desc_lookup(t.error_id));
7294 aprint_error_dev(sc->sc_dev, "0x%08X | umac branchlink1\n", t.blink1);
7295 aprint_error_dev(sc->sc_dev, "0x%08X | umac branchlink2\n", t.blink2);
7296 aprint_error_dev(sc->sc_dev, "0x%08X | umac interruptlink1\n",
7297 t.ilink1);
7298 aprint_error_dev(sc->sc_dev, "0x%08X | umac interruptlink2\n",
7299 t.ilink2);
7300 aprint_error_dev(sc->sc_dev, "0x%08X | umac data1\n", t.data1);
7301 aprint_error_dev(sc->sc_dev, "0x%08X | umac data2\n", t.data2);
7302 aprint_error_dev(sc->sc_dev, "0x%08X | umac data3\n", t.data3);
7303 aprint_error_dev(sc->sc_dev, "0x%08X | umac major\n", t.umac_major);
7304 aprint_error_dev(sc->sc_dev, "0x%08X | umac minor\n", t.umac_minor);
7305 aprint_error_dev(sc->sc_dev, "0x%08X | frame pointer\n",
7306 t.frame_pointer);
7307 aprint_error_dev(sc->sc_dev, "0x%08X | stack pointer\n",
7308 t.stack_pointer);
7309 aprint_error_dev(sc->sc_dev, "0x%08X | last host cmd\n", t.cmd_header);
7310 aprint_error_dev(sc->sc_dev, "0x%08X | isr status reg\n",
7311 t.nic_isr_pref);
7312 }
7313 #endif
7314
7315 #define SYNC_RESP_STRUCT(_var_, _pkt_) \
7316 do { \
7317 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)), \
7318 sizeof(*(_var_)), BUS_DMASYNC_POSTREAD); \
7319 _var_ = (void *)((_pkt_)+1); \
7320 } while (/*CONSTCOND*/0)
7321
7322 #define SYNC_RESP_PTR(_ptr_, _len_, _pkt_) \
7323 do { \
7324 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)), \
7325 	    (_len_), BUS_DMASYNC_POSTREAD);				\
7326 _ptr_ = (void *)((_pkt_)+1); \
7327 } while (/*CONSTCOND*/0)
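/*
 * Usage sketch: after the packet header has been read, e.g.
 *
 *	struct iwm_mcc_chub_notif *notif;
 *	SYNC_RESP_STRUCT(notif, pkt);
 *
 * DMA-syncs the payload area behind the header and points notif at it.
 */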
7328
7329 #define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % IWM_RX_RING_COUNT)
7330
7331 static void
7332 iwm_notif_intr(struct iwm_softc *sc)
7333 {
7334 uint16_t hw;
7335
7336 bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
7337 0, sc->rxq.stat_dma.size, BUS_DMASYNC_POSTREAD);
7338
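	/*
	 * closed_rb_num is the firmware's write pointer into the RX
	 * ring; it is a 12-bit quantity, hence the 0xfff mask.
	 */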
7339 hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
7340 while (sc->rxq.cur != hw) {
7341 struct iwm_rx_data *data = &sc->rxq.data[sc->rxq.cur];
7342 struct iwm_rx_packet *pkt;
7343 struct iwm_cmd_response *cresp;
7344 int orig_qid, qid, idx, code;
7345
7346 bus_dmamap_sync(sc->sc_dmat, data->map, 0, sizeof(*pkt),
7347 BUS_DMASYNC_POSTREAD);
7348 pkt = mtod(data->m, struct iwm_rx_packet *);
7349
7350 orig_qid = pkt->hdr.qid;
7351 qid = orig_qid & ~0x80;
7352 idx = pkt->hdr.idx;
7353
7354 code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
7355
7356 /*
7357 * randomly get these from the firmware, no idea why.
7358 * they at least seem harmless, so just ignore them for now
7359 */
7360 if (__predict_false((pkt->hdr.code == 0 && qid == 0 && idx == 0)
7361 || pkt->len_n_flags == htole32(0x55550000))) {
7362 ADVANCE_RXQ(sc);
7363 continue;
7364 }
7365
7366 switch (code) {
7367 case IWM_REPLY_RX_PHY_CMD:
7368 iwm_rx_rx_phy_cmd(sc, pkt, data);
7369 break;
7370
7371 case IWM_REPLY_RX_MPDU_CMD:
7372 iwm_rx_rx_mpdu(sc, pkt, data);
7373 break;
7374
7375 case IWM_TX_CMD:
7376 iwm_rx_tx_cmd(sc, pkt, data);
7377 break;
7378
7379 case IWM_MISSED_BEACONS_NOTIFICATION:
7380 iwm_rx_missed_beacons_notif(sc, pkt, data);
7381 break;
7382
7383 case IWM_MFUART_LOAD_NOTIFICATION:
7384 break;
7385
7386 case IWM_ALIVE: {
7387 struct iwm_alive_resp_v1 *resp1;
7388 struct iwm_alive_resp_v2 *resp2;
7389 struct iwm_alive_resp_v3 *resp3;
7390
7391 if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp1)) {
7392 SYNC_RESP_STRUCT(resp1, pkt);
7393 sc->sc_uc.uc_error_event_table
7394 = le32toh(resp1->error_event_table_ptr);
7395 sc->sc_uc.uc_log_event_table
7396 = le32toh(resp1->log_event_table_ptr);
7397 sc->sched_base = le32toh(resp1->scd_base_ptr);
7398 if (resp1->status == IWM_ALIVE_STATUS_OK)
7399 sc->sc_uc.uc_ok = 1;
7400 else
7401 sc->sc_uc.uc_ok = 0;
7402 }
7403 if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp2)) {
7404 SYNC_RESP_STRUCT(resp2, pkt);
7405 sc->sc_uc.uc_error_event_table
7406 = le32toh(resp2->error_event_table_ptr);
7407 sc->sc_uc.uc_log_event_table
7408 = le32toh(resp2->log_event_table_ptr);
7409 sc->sched_base = le32toh(resp2->scd_base_ptr);
7410 sc->sc_uc.uc_umac_error_event_table
7411 = le32toh(resp2->error_info_addr);
7412 if (resp2->status == IWM_ALIVE_STATUS_OK)
7413 sc->sc_uc.uc_ok = 1;
7414 else
7415 sc->sc_uc.uc_ok = 0;
7416 }
7417 if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp3)) {
7418 SYNC_RESP_STRUCT(resp3, pkt);
7419 sc->sc_uc.uc_error_event_table
7420 = le32toh(resp3->error_event_table_ptr);
7421 sc->sc_uc.uc_log_event_table
7422 = le32toh(resp3->log_event_table_ptr);
7423 sc->sched_base = le32toh(resp3->scd_base_ptr);
7424 sc->sc_uc.uc_umac_error_event_table
7425 = le32toh(resp3->error_info_addr);
7426 if (resp3->status == IWM_ALIVE_STATUS_OK)
7427 sc->sc_uc.uc_ok = 1;
7428 else
7429 sc->sc_uc.uc_ok = 0;
7430 }
7431
7432 sc->sc_uc.uc_intr = 1;
7433 wakeup(&sc->sc_uc);
7434 break;
7435 }
7436
7437 case IWM_CALIB_RES_NOTIF_PHY_DB: {
7438 struct iwm_calib_res_notif_phy_db *phy_db_notif;
7439 SYNC_RESP_STRUCT(phy_db_notif, pkt);
7440 uint16_t size = le16toh(phy_db_notif->length);
7441 bus_dmamap_sync(sc->sc_dmat, data->map,
7442 sizeof(*pkt) + sizeof(*phy_db_notif),
7443 size, BUS_DMASYNC_POSTREAD);
7444 iwm_phy_db_set_section(sc, phy_db_notif, size);
7445 break;
7446 }
7447
7448 case IWM_STATISTICS_NOTIFICATION: {
7449 struct iwm_notif_statistics *stats;
7450 SYNC_RESP_STRUCT(stats, pkt);
7451 memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
7452 sc->sc_noise = iwm_get_noise(&stats->rx.general);
7453 break;
7454 }
7455
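		/*
		 * Synchronous commands: the waiter is recorded in
		 * sc_wantresp as (qid << 16) | idx; if this packet is
		 * the awaited response, copy it into sc_cmd_resp.
		 */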
7456 case IWM_NVM_ACCESS_CMD:
7457 case IWM_MCC_UPDATE_CMD:
7458 if (sc->sc_wantresp == ((qid << 16) | idx)) {
7459 bus_dmamap_sync(sc->sc_dmat, data->map, 0,
7460 sizeof(sc->sc_cmd_resp),
7461 BUS_DMASYNC_POSTREAD);
7462 memcpy(sc->sc_cmd_resp,
7463 pkt, sizeof(sc->sc_cmd_resp));
7464 }
7465 break;
7466
7467 case IWM_MCC_CHUB_UPDATE_CMD: {
7468 struct iwm_mcc_chub_notif *notif;
7469 SYNC_RESP_STRUCT(notif, pkt);
7470
7471 sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
7472 sc->sc_fw_mcc[1] = notif->mcc & 0xff;
7473 sc->sc_fw_mcc[2] = '\0';
7474 break;
7475 }
7476
7477 case IWM_DTS_MEASUREMENT_NOTIFICATION:
7478 case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
7479 IWM_DTS_MEASUREMENT_NOTIF_WIDE): {
7480 struct iwm_dts_measurement_notif_v1 *notif1;
7481 struct iwm_dts_measurement_notif_v2 *notif2;
7482
7483 if (iwm_rx_packet_payload_len(pkt) == sizeof(*notif1)) {
7484 SYNC_RESP_STRUCT(notif1, pkt);
7485 				DPRINTF(("%s: DTS temp=%d\n",
7486 DEVNAME(sc), notif1->temp));
7487 break;
7488 }
7489 if (iwm_rx_packet_payload_len(pkt) == sizeof(*notif2)) {
7490 SYNC_RESP_STRUCT(notif2, pkt);
7491 				DPRINTF(("%s: DTS temp=%d\n",
7492 DEVNAME(sc), notif2->temp));
7493 break;
7494 }
7495 break;
7496 }
7497
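		/*
		 * Generic command completions: the payload is just a
		 * status word, kept in case a caller is waiting on this
		 * (qid, idx) pair.
		 */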
7498 case IWM_PHY_CONFIGURATION_CMD:
7499 case IWM_TX_ANT_CONFIGURATION_CMD:
7500 case IWM_ADD_STA:
7501 case IWM_MAC_CONTEXT_CMD:
7502 case IWM_REPLY_SF_CFG_CMD:
7503 case IWM_POWER_TABLE_CMD:
7504 case IWM_PHY_CONTEXT_CMD:
7505 case IWM_BINDING_CONTEXT_CMD:
7506 case IWM_TIME_EVENT_CMD:
7507 case IWM_SCAN_REQUEST_CMD:
7508 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
7509 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
7510 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_ABORT_UMAC):
7511 case IWM_SCAN_OFFLOAD_REQUEST_CMD:
7512 case IWM_SCAN_OFFLOAD_ABORT_CMD:
7513 case IWM_REPLY_BEACON_FILTERING_CMD:
7514 case IWM_MAC_PM_POWER_TABLE:
7515 case IWM_TIME_QUOTA_CMD:
7516 case IWM_REMOVE_STA:
7517 case IWM_TXPATH_FLUSH:
7518 case IWM_LQ_CMD:
7519 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_FW_PAGING_BLOCK_CMD):
7520 case IWM_BT_CONFIG:
7521 case IWM_REPLY_THERMAL_MNG_BACKOFF:
7522 SYNC_RESP_STRUCT(cresp, pkt);
7523 if (sc->sc_wantresp == ((qid << 16) | idx)) {
7524 memcpy(sc->sc_cmd_resp,
7525 pkt, sizeof(*pkt) + sizeof(*cresp));
7526 }
7527 break;
7528
7529 /* ignore */
7530 case IWM_PHY_DB_CMD:
7531 break;
7532
7533 case IWM_INIT_COMPLETE_NOTIF:
7534 sc->sc_init_complete = 1;
7535 wakeup(&sc->sc_init_complete);
7536 break;
7537
7538 case IWM_SCAN_OFFLOAD_COMPLETE: {
7539 struct iwm_periodic_scan_complete *notif;
7540 SYNC_RESP_STRUCT(notif, pkt);
7541 break;
7542 }
7543
7544 case IWM_SCAN_ITERATION_COMPLETE: {
7545 struct iwm_lmac_scan_complete_notif *notif;
7546 SYNC_RESP_STRUCT(notif, pkt);
7547 if (ISSET(sc->sc_flags, IWM_FLAG_SCANNING)) {
7548 CLR(sc->sc_flags, IWM_FLAG_SCANNING);
7549 iwm_endscan(sc);
7550 }
7551 break;
7552 }
7553
7554 case IWM_SCAN_COMPLETE_UMAC: {
7555 struct iwm_umac_scan_complete *notif;
7556 SYNC_RESP_STRUCT(notif, pkt);
7557 if (ISSET(sc->sc_flags, IWM_FLAG_SCANNING)) {
7558 CLR(sc->sc_flags, IWM_FLAG_SCANNING);
7559 iwm_endscan(sc);
7560 }
7561 break;
7562 }
7563
7564 case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
7565 struct iwm_umac_scan_iter_complete_notif *notif;
7566 SYNC_RESP_STRUCT(notif, pkt);
7567 if (ISSET(sc->sc_flags, IWM_FLAG_SCANNING)) {
7568 CLR(sc->sc_flags, IWM_FLAG_SCANNING);
7569 iwm_endscan(sc);
7570 }
7571 break;
7572 }
7573
7574 case IWM_REPLY_ERROR: {
7575 struct iwm_error_resp *resp;
7576 SYNC_RESP_STRUCT(resp, pkt);
7577 aprint_error_dev(sc->sc_dev,
7578 "firmware error 0x%x, cmd 0x%x\n",
7579 le32toh(resp->error_type), resp->cmd_id);
7580 break;
7581 }
7582
7583 case IWM_TIME_EVENT_NOTIFICATION: {
7584 struct iwm_time_event_notif *notif;
7585 SYNC_RESP_STRUCT(notif, pkt);
7586 break;
7587 }
7588
7589 case IWM_DEBUG_LOG_MSG:
7590 break;
7591
7592 case IWM_MCAST_FILTER_CMD:
7593 break;
7594
7595 case IWM_SCD_QUEUE_CFG: {
7596 struct iwm_scd_txq_cfg_rsp *rsp;
7597 SYNC_RESP_STRUCT(rsp, pkt);
7598 break;
7599 }
7600
7601 default:
7602 aprint_error_dev(sc->sc_dev,
7603 "unhandled firmware response 0x%x 0x%x/0x%x "
7604 "rx ring %d[%d]\n",
7605 		    code, pkt->hdr.code, le32toh(pkt->len_n_flags), qid, idx);
7606 break;
7607 }
7608
7609 /*
7610 * uCode sets bit 0x80 when it originates the notification,
7611 * i.e. when the notification is not a direct response to a
7612 * command sent by the driver.
7613 * For example, uCode issues IWM_REPLY_RX when it sends a
7614 * received frame to the driver.
7615 */
7616 if (!(orig_qid & (1 << 7))) {
7617 iwm_cmd_done(sc, qid, idx);
7618 }
7619
7620 ADVANCE_RXQ(sc);
7621 }
7622
7623 	/*
7624 	 * The hardware seems to get upset unless the write pointer is
	 * aligned to a multiple of 8.
7625 	 */
7626 hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
7627 IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
7628 }
7629
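/*
 * Hardware interrupt handler: mask further interrupts, then process
 * the pending causes.  The mask is restored at the end of
 * iwm_softintr() unless a fatal error was detected.
 */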
7630 static int
7631 iwm_intr(void *arg)
7632 {
7633 struct iwm_softc *sc = arg;
7634
7635 /* Disable interrupts */
7636 IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
7637
7638 iwm_softintr(arg);
7639 return 1;
7640 }
7641
7642 static void
7643 iwm_softintr(void *arg)
7644 {
7645 struct iwm_softc *sc = arg;
7646 struct ifnet *ifp = IC2IFP(&sc->sc_ic);
7647 uint32_t r1, r2;
7648 int isperiodic = 0, s;
7649
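	/*
	 * In ICT mode the device writes interrupt causes into a DMA
	 * table instead of making us read them from a (slow) register;
	 * collect and merge every entry written since the last pass.
	 */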
7650 if (__predict_true(sc->sc_flags & IWM_FLAG_USE_ICT)) {
7651 uint32_t *ict = sc->ict_dma.vaddr;
7652 int tmp;
7653
7654 bus_dmamap_sync(sc->sc_dmat, sc->ict_dma.map,
7655 0, sc->ict_dma.size, BUS_DMASYNC_POSTREAD);
7656 		tmp = le32toh(ict[sc->ict_cur]);
7657 if (tmp == 0)
7658 goto out_ena; /* Interrupt not for us. */
7659
7660 /*
7661 * ok, there was something. keep plowing until we have all.
7662 */
7663 r1 = r2 = 0;
7664 while (tmp) {
7665 r1 |= tmp;
7666 ict[sc->ict_cur] = 0; /* Acknowledge. */
7667 sc->ict_cur = (sc->ict_cur + 1) % IWM_ICT_COUNT;
7668 			tmp = le32toh(ict[sc->ict_cur]);
7669 }
7670
7671 bus_dmamap_sync(sc->sc_dmat, sc->ict_dma.map,
7672 0, sc->ict_dma.size, BUS_DMASYNC_PREWRITE);
7673
7674 		/* An all-ones value should not happen; treat it as no interrupt. */
7675 if (r1 == 0xffffffff)
7676 r1 = 0;
7677
7678 		/*
		 * H/W bug workaround: with interrupt coalescing the RX bit
		 * (bit 15) may clear while bits 18/19 stay set; recover it,
		 * then expand the ICT value into CSR_INT bit positions.
		 */
7679 if (r1 & 0xc0000)
7680 r1 |= 0x8000;
7681 r1 = (0xff & r1) | ((0xff00 & r1) << 16);
7682 } else {
7683 r1 = IWM_READ(sc, IWM_CSR_INT);
7684 if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
7685 return; /* Hardware gone! */
7686 r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
7687 }
7688 if (r1 == 0 && r2 == 0) {
7689 goto out_ena; /* Interrupt not for us. */
7690 }
7691
7692 /* Acknowledge interrupts. */
7693 IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
7694 if (__predict_false(!(sc->sc_flags & IWM_FLAG_USE_ICT)))
7695 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, r2);
7696
7697 if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
7698 #ifdef IWM_DEBUG
7699 int i;
7700
7701 iwm_nic_error(sc);
7702
7703 /* Dump driver status (TX and RX rings) while we're here. */
7704 DPRINTF(("driver status:\n"));
7705 for (i = 0; i < IWM_MAX_QUEUES; i++) {
7706 struct iwm_tx_ring *ring = &sc->txq[i];
7707 DPRINTF((" tx ring %2d: qid=%-2d cur=%-3d "
7708 "queued=%-3d\n",
7709 i, ring->qid, ring->cur, ring->queued));
7710 }
7711 DPRINTF((" rx ring: cur=%d\n", sc->rxq.cur));
7712 DPRINTF((" 802.11 state %s\n",
7713 ieee80211_state_name[sc->sc_ic.ic_state]));
7714 #endif
7715
7716 aprint_error_dev(sc->sc_dev, "fatal firmware error\n");
7717 fatal:
7718 s = splnet();
7719 ifp->if_flags &= ~IFF_UP;
7720 iwm_stop(ifp, 1);
7721 splx(s);
7722 /* Don't restore interrupt mask */
7723 return;
7724
7725 }
7726
7727 if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
7728 aprint_error_dev(sc->sc_dev,
7729 "hardware error, stopping device\n");
7730 goto fatal;
7731 }
7732
7733 /* firmware chunk loaded */
7734 if (r1 & IWM_CSR_INT_BIT_FH_TX) {
7735 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
7736 sc->sc_fw_chunk_done = 1;
7737 wakeup(&sc->sc_fw);
7738 }
7739
7740 if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
7741 if (iwm_check_rfkill(sc) && (ifp->if_flags & IFF_UP))
7742 goto fatal;
7743 }
7744
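	/*
	 * Periodic RX interrupt: acknowledge it, and if no ordinary RX
	 * interrupt is pending, disable it; it is re-armed below once
	 * the RX ring has been serviced.
	 */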
7745 if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
7746 IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
7747 if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
7748 IWM_WRITE_1(sc,
7749 IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
7750 isperiodic = 1;
7751 }
7752
7753 if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) ||
7754 isperiodic) {
7755 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
7756
7757 iwm_notif_intr(sc);
7758
7759 /* enable periodic interrupt, see above */
7760 if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) &&
7761 !isperiodic)
7762 IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
7763 IWM_CSR_INT_PERIODIC_ENA);
7764 }
7765
7766 out_ena:
7767 iwm_restore_interrupts(sc);
7768 }
7769
7770 /*
7771  * Autoconf glue
7772 */
7773
7774 static const pci_product_id_t iwm_devices[] = {
7775 PCI_PRODUCT_INTEL_WIFI_LINK_7260_1,
7776 PCI_PRODUCT_INTEL_WIFI_LINK_7260_2,
7777 PCI_PRODUCT_INTEL_WIFI_LINK_3160_1,
7778 PCI_PRODUCT_INTEL_WIFI_LINK_3160_2,
7779 PCI_PRODUCT_INTEL_WIFI_LINK_7265_1,
7780 PCI_PRODUCT_INTEL_WIFI_LINK_7265_2,
7781 PCI_PRODUCT_INTEL_WIFI_LINK_3165_1,
7782 PCI_PRODUCT_INTEL_WIFI_LINK_3165_2,
7783 PCI_PRODUCT_INTEL_WIFI_LINK_8260_1,
7784 PCI_PRODUCT_INTEL_WIFI_LINK_8260_2,
7785 PCI_PRODUCT_INTEL_WIFI_LINK_4165_1,
7786 	PCI_PRODUCT_INTEL_WIFI_LINK_4165_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_3168,
	PCI_PRODUCT_INTEL_WIFI_LINK_8265,
7787 };
7788
7789 static int
7790 iwm_match(device_t parent, cfdata_t match __unused, void *aux)
7791 {
7792 struct pci_attach_args *pa = aux;
7793
7794 if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_INTEL)
7795 return 0;
7796
7797 for (size_t i = 0; i < __arraycount(iwm_devices); i++)
7798 if (PCI_PRODUCT(pa->pa_id) == iwm_devices[i])
7799 return 1;
7800
7801 return 0;
7802 }
7803
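/*
 * Second-stage attach, run from the mountroot hook below once the root
 * file system (and thus the firmware image) is available.
 */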
7804 static int
7805 iwm_preinit(struct iwm_softc *sc)
7806 {
7807 struct ieee80211com *ic = &sc->sc_ic;
7808 int err;
7809
7810 if (ISSET(sc->sc_flags, IWM_FLAG_ATTACHED))
7811 return 0;
7812
7813 err = iwm_start_hw(sc);
7814 if (err) {
7815 aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
7816 return err;
7817 }
7818
7819 err = iwm_run_init_mvm_ucode(sc, 1);
7820 iwm_stop_device(sc);
7821 if (err)
7822 return err;
7823
7824 sc->sc_flags |= IWM_FLAG_ATTACHED;
7825
7826 aprint_normal_dev(sc->sc_dev, "hw rev 0x%x, fw ver %s, address %s\n",
7827 sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK, sc->sc_fwver,
7828 ether_sprintf(sc->sc_nvm.hw_addr));
7829
7830 #ifndef IEEE80211_NO_HT
7831 if (sc->sc_nvm.sku_cap_11n_enable)
7832 iwm_setup_ht_rates(sc);
7833 #endif
7834
7835 /* not all hardware can do 5GHz band */
7836 if (sc->sc_nvm.sku_cap_band_52GHz_enable)
7837 ic->ic_sup_rates[IEEE80211_MODE_11A] = ieee80211_std_rateset_11a;
7838
7839 ieee80211_ifattach(ic);
7840
7841 ic->ic_node_alloc = iwm_node_alloc;
7842
7843 /* Override 802.11 state transition machine. */
7844 sc->sc_newstate = ic->ic_newstate;
7845 ic->ic_newstate = iwm_newstate;
7846 ieee80211_media_init(ic, iwm_media_change, ieee80211_media_status);
7847 ieee80211_announce(ic);
7848
7849 iwm_radiotap_attach(sc);
7850
7851 return 0;
7852 }
7853
7854 static void
7855 iwm_attach_hook(device_t dev)
7856 {
7857 struct iwm_softc *sc = device_private(dev);
7858
7859 iwm_preinit(sc);
7860 }
7861
7862 static void
7863 iwm_attach(device_t parent, device_t self, void *aux)
7864 {
7865 struct iwm_softc *sc = device_private(self);
7866 struct pci_attach_args *pa = aux;
7867 struct ieee80211com *ic = &sc->sc_ic;
7868 struct ifnet *ifp = &sc->sc_ec.ec_if;
7869 pcireg_t reg, memtype;
7870 pci_intr_handle_t ih;
7871 char intrbuf[PCI_INTRSTR_LEN];
7872 const char *intrstr;
7873 int err;
7874 int txq_i;
7875 const struct sysctlnode *node;
7876
7877 sc->sc_dev = self;
7878 sc->sc_pct = pa->pa_pc;
7879 sc->sc_pcitag = pa->pa_tag;
7880 sc->sc_dmat = pa->pa_dmat;
7881 sc->sc_pciid = pa->pa_id;
7882
7883 pci_aprint_devinfo(pa, NULL);
7884
7885 if (workqueue_create(&sc->sc_nswq, "iwmns",
7886 iwm_newstate_cb, sc, PRI_NONE, IPL_NET, 0))
7887 panic("%s: could not create workqueue: newstate",
7888 device_xname(self));
7889
7890 /*
7891 * Get the offset of the PCI Express Capability Structure in PCI
7892 * Configuration Space.
7893 */
7894 err = pci_get_capability(sc->sc_pct, sc->sc_pcitag,
7895 PCI_CAP_PCIEXPRESS, &sc->sc_cap_off, NULL);
7896 if (err == 0) {
7897 aprint_error_dev(self,
7898 "PCIe capability structure not found!\n");
7899 return;
7900 }
7901
7902 /* Clear device-specific "PCI retry timeout" register (41h). */
7903 reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
7904 pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
7905
7906 /* Enable bus-mastering */
7907 reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
7908 reg |= PCI_COMMAND_MASTER_ENABLE;
7909 pci_conf_write(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, reg);
7910
7911 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START);
7912 err = pci_mapreg_map(pa, PCI_MAPREG_START, memtype, 0,
7913 &sc->sc_st, &sc->sc_sh, NULL, &sc->sc_sz);
7914 if (err) {
7915 aprint_error_dev(self, "can't map mem space\n");
7916 return;
7917 }
7918
7919 /* Install interrupt handler. */
7920 err = pci_intr_map(pa, &ih);
7921 if (err) {
7922 aprint_error_dev(self, "can't allocate interrupt\n");
7923 return;
7924 }
7925 reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
7926 CLR(reg, PCI_COMMAND_INTERRUPT_DISABLE);
7927 pci_conf_write(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, reg);
7928 intrstr = pci_intr_string(sc->sc_pct, ih, intrbuf, sizeof(intrbuf));
7929 sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET, iwm_intr, sc);
7930 if (sc->sc_ih == NULL) {
7931 aprint_error_dev(self, "can't establish interrupt");
7932 if (intrstr != NULL)
7933 aprint_error(" at %s", intrstr);
7934 aprint_error("\n");
7935 return;
7936 }
7937 aprint_normal_dev(self, "interrupting at %s\n", intrstr);
7938
7939 sc->sc_wantresp = IWM_CMD_RESP_IDLE;
7940
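	/* Select the firmware image and device-family quirks by product ID. */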
7941 sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
7942 switch (PCI_PRODUCT(sc->sc_pciid)) {
7943 case PCI_PRODUCT_INTEL_WIFI_LINK_3160_1:
7944 case PCI_PRODUCT_INTEL_WIFI_LINK_3160_2:
7945 sc->sc_fwname = "iwlwifi-3160-17.ucode";
7946 sc->host_interrupt_operation_mode = 1;
7947 sc->apmg_wake_up_wa = 1;
7948 sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
7949 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
7950 break;
7951 case PCI_PRODUCT_INTEL_WIFI_LINK_3165_1:
7952 case PCI_PRODUCT_INTEL_WIFI_LINK_3165_2:
7953 sc->sc_fwname = "iwlwifi-7265D-22.ucode";
7954 sc->host_interrupt_operation_mode = 0;
7955 sc->apmg_wake_up_wa = 1;
7956 sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
7957 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
7958 break;
7959 case PCI_PRODUCT_INTEL_WIFI_LINK_3168:
7960 sc->sc_fwname = "iwlwifi-3168-22.ucode";
7961 sc->host_interrupt_operation_mode = 0;
7962 sc->apmg_wake_up_wa = 1;
7963 sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
7964 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
7965 break;
7966 case PCI_PRODUCT_INTEL_WIFI_LINK_7260_1:
7967 case PCI_PRODUCT_INTEL_WIFI_LINK_7260_2:
7968 sc->sc_fwname = "iwlwifi-7260-17.ucode";
7969 sc->host_interrupt_operation_mode = 1;
7970 sc->apmg_wake_up_wa = 1;
7971 sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
7972 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
7973 break;
7974 case PCI_PRODUCT_INTEL_WIFI_LINK_7265_1:
7975 case PCI_PRODUCT_INTEL_WIFI_LINK_7265_2:
7976 sc->sc_fwname = (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) ==
7977 IWM_CSR_HW_REV_TYPE_7265D ?
7978 "iwlwifi-7265D-22.ucode": "iwlwifi-7265-17.ucode";
7979 sc->host_interrupt_operation_mode = 0;
7980 sc->apmg_wake_up_wa = 1;
7981 sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
7982 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
7983 break;
7984 case PCI_PRODUCT_INTEL_WIFI_LINK_8260_1:
7985 case PCI_PRODUCT_INTEL_WIFI_LINK_8260_2:
7986 case PCI_PRODUCT_INTEL_WIFI_LINK_4165_1:
7987 case PCI_PRODUCT_INTEL_WIFI_LINK_4165_2:
7988 sc->sc_fwname = "iwlwifi-8000C-22.ucode";
7989 sc->host_interrupt_operation_mode = 0;
7990 sc->apmg_wake_up_wa = 0;
7991 sc->sc_device_family = IWM_DEVICE_FAMILY_8000;
7992 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
7993 break;
7994 case PCI_PRODUCT_INTEL_WIFI_LINK_8265:
7995 sc->sc_fwname = "iwlwifi-8265-22.ucode";
7996 sc->host_interrupt_operation_mode = 0;
7997 sc->apmg_wake_up_wa = 0;
7998 sc->sc_device_family = IWM_DEVICE_FAMILY_8000;
7999 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
8000 break;
8001 default:
8002 		aprint_error_dev(self, "unknown product %#x\n",
8003 		    PCI_PRODUCT(sc->sc_pciid));
8004 return;
8005 }
8006 DPRINTF(("%s: firmware=%s\n", DEVNAME(sc), sc->sc_fwname));
8007
8008 /*
8009  * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV has
8010  * changed: the revision step now also includes bits 0-1 (there is no
8011  * more "dash" value).  To keep hw_rev backwards compatible, we store
8012  * it in the old format.
8013 */
8014
8015 if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
8016 sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
8017 (IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
8018
8019 if (iwm_prepare_card_hw(sc) != 0) {
8020 aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
8021 return;
8022 }
8023
8024 if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
8025 uint32_t hw_step;
8026
8027 /*
8028 * In order to recognize C step the driver should read the
8029 * chip version id located at the AUX bus MISC address.
8030 */
8031 IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
8032 IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
8033 DELAY(2);
8034
8035 err = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
8036 IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
8037 IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
8038 25000);
8039 if (!err) {
8040 aprint_error_dev(sc->sc_dev,
8041 "failed to wake up the nic\n");
8042 return;
8043 }
8044
8045 if (iwm_nic_lock(sc)) {
8046 hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
8047 hw_step |= IWM_ENABLE_WFPM;
8048 iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
8049 hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
8050 hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
8051 if (hw_step == 0x3)
8052 sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
8053 (IWM_SILICON_C_STEP << 2);
8054 iwm_nic_unlock(sc);
8055 } else {
8056 aprint_error_dev(sc->sc_dev,
8057 "failed to lock the nic\n");
8058 return;
8059 }
8060 }
8061
8062 /*
8063 * Allocate DMA memory for firmware transfers.
8064 * Must be aligned on a 16-byte boundary.
8065 */
8066 err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma, sc->sc_fwdmasegsz,
8067 16);
8068 if (err) {
8069 aprint_error_dev(sc->sc_dev,
8070 "could not allocate memory for firmware\n");
8071 return;
8072 }
8073
8074 /* Allocate "Keep Warm" page, used internally by the card. */
8075 err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
8076 if (err) {
8077 aprint_error_dev(sc->sc_dev,
8078 "could not allocate keep warm page\n");
8079 goto fail1;
8080 }
8081
8082 /* Allocate interrupt cause table (ICT).*/
8083 err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma, IWM_ICT_SIZE,
8084 1 << IWM_ICT_PADDR_SHIFT);
8085 if (err) {
8086 aprint_error_dev(sc->sc_dev, "could not allocate ICT table\n");
8087 goto fail2;
8088 }
8089
8090 /* TX scheduler rings must be aligned on a 1KB boundary. */
8091 err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
8092 __arraycount(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
8093 if (err) {
8094 aprint_error_dev(sc->sc_dev,
8095 "could not allocate TX scheduler rings\n");
8096 goto fail3;
8097 }
8098
8099 for (txq_i = 0; txq_i < __arraycount(sc->txq); txq_i++) {
8100 err = iwm_alloc_tx_ring(sc, &sc->txq[txq_i], txq_i);
8101 if (err) {
8102 aprint_error_dev(sc->sc_dev,
8103 "could not allocate TX ring %d\n", txq_i);
8104 goto fail4;
8105 }
8106 }
8107
8108 err = iwm_alloc_rx_ring(sc, &sc->rxq);
8109 if (err) {
8110 aprint_error_dev(sc->sc_dev, "could not allocate RX ring\n");
8111 goto fail4;
8112 }
8113
8114 /* Clear pending interrupts. */
8115 IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
8116
8117 if ((err = sysctl_createv(&sc->sc_clog, 0, NULL, &node,
8118 0, CTLTYPE_NODE, device_xname(sc->sc_dev),
8119 SYSCTL_DESCR("iwm per-controller controls"),
8120 NULL, 0, NULL, 0,
8121 CTL_HW, iwm_sysctl_root_num, CTL_CREATE,
8122 CTL_EOL)) != 0) {
8123 aprint_normal_dev(sc->sc_dev,
8124 "couldn't create iwm per-controller sysctl node\n");
8125 }
8126 if (err == 0) {
8127 int iwm_nodenum = node->sysctl_num;
8128
8129 /* Reload firmware sysctl node */
8130 if ((err = sysctl_createv(&sc->sc_clog, 0, NULL, &node,
8131 CTLFLAG_READWRITE, CTLTYPE_INT, "fw_loaded",
8132 SYSCTL_DESCR("Reload firmware"),
8133 iwm_sysctl_fw_loaded_handler, 0, (void *)sc, 0,
8134 CTL_HW, iwm_sysctl_root_num, iwm_nodenum, CTL_CREATE,
8135 CTL_EOL)) != 0) {
8136 aprint_normal_dev(sc->sc_dev,
8137 "couldn't create load_fw sysctl node\n");
8138 }
8139 }
8140
8141 /*
8142 * Attach interface
8143 */
8144 ic->ic_ifp = ifp;
8145 	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only OFDM, but unused */
8146 ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */
8147 ic->ic_state = IEEE80211_S_INIT;
8148
8149 /* Set device capabilities. */
8150 ic->ic_caps =
8151 IEEE80211_C_WEP | /* WEP */
8152 IEEE80211_C_WPA | /* 802.11i */
8153 #ifdef notyet
8154 IEEE80211_C_SCANALL | /* device scans all channels at once */
8155 IEEE80211_C_SCANALLBAND | /* device scans all bands at once */
8156 #endif
8157 IEEE80211_C_SHSLOT | /* short slot time supported */
8158 IEEE80211_C_SHPREAMBLE; /* short preamble supported */
8159
8160 #ifndef IEEE80211_NO_HT
8161 ic->ic_htcaps = IEEE80211_HTCAP_SGI20;
8162 ic->ic_htxcaps = 0;
8163 ic->ic_txbfcaps = 0;
8164 ic->ic_aselcaps = 0;
8165 ic->ic_ampdu_params = (IEEE80211_AMPDU_PARAM_SS_4 | 0x3 /* 64k */);
8166 #endif
8167
8168 /* all hardware can do 2.4GHz band */
8169 ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b;
8170 ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g;
8171
8172 for (int i = 0; i < __arraycount(sc->sc_phyctxt); i++) {
8173 sc->sc_phyctxt[i].id = i;
8174 }
8175
8176 sc->sc_amrr.amrr_min_success_threshold = 1;
8177 sc->sc_amrr.amrr_max_success_threshold = 15;
8178
8179 /* IBSS channel undefined for now. */
8180 ic->ic_ibss_chan = &ic->ic_channels[1];
8181
8182 #if 0
8183 ic->ic_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
8184 #endif
8185
8186 ifp->if_softc = sc;
8187 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
8188 ifp->if_init = iwm_init;
8189 ifp->if_stop = iwm_stop;
8190 ifp->if_ioctl = iwm_ioctl;
8191 ifp->if_start = iwm_start;
8192 ifp->if_watchdog = iwm_watchdog;
8193 IFQ_SET_READY(&ifp->if_snd);
8194 memcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
8195
8196 if_attach(ifp);
8197 #if 0
8198 ieee80211_ifattach(ic);
8199 #else
8200 ether_ifattach(ifp, ic->ic_myaddr); /* XXX */
8201 #endif
8202
8203 callout_init(&sc->sc_calib_to, 0);
8204 callout_setfunc(&sc->sc_calib_to, iwm_calib_timeout, sc);
8205 callout_init(&sc->sc_led_blink_to, 0);
8206 callout_setfunc(&sc->sc_led_blink_to, iwm_led_blink_timeout, sc);
8207 #ifndef IEEE80211_NO_HT
8208 if (workqueue_create(&sc->sc_setratewq, "iwmsr",
8209 iwm_setrates_task, sc, PRI_NONE, IPL_NET, 0))
8210 panic("%s: could not create workqueue: setrates",
8211 device_xname(self));
8212 if (workqueue_create(&sc->sc_bawq, "iwmba",
8213 iwm_ba_task, sc, PRI_NONE, IPL_NET, 0))
8214 panic("%s: could not create workqueue: blockack",
8215 device_xname(self));
8216 if (workqueue_create(&sc->sc_htprowq, "iwmhtpro",
8217 iwm_htprot_task, sc, PRI_NONE, IPL_NET, 0))
8218 panic("%s: could not create workqueue: htprot",
8219 device_xname(self));
8220 #endif
8221
8222 if (pmf_device_register(self, NULL, NULL))
8223 pmf_class_network_register(self, ifp);
8224 else
8225 aprint_error_dev(self, "couldn't establish power handler\n");
8226
8227 /*
8228 * We can't do normal attach before the file system is mounted
8229 * because we cannot read the MAC address without loading the
8230 * firmware from disk. So we postpone until mountroot is done.
8231 * Notably, this will require a full driver unload/load cycle
8232 * (or reboot) in case the firmware is not present when the
8233 * hook runs.
8234 */
8235 config_mountroot(self, iwm_attach_hook);
8236
8237 return;
8238
8239 fail4: while (--txq_i >= 0)
8240 iwm_free_tx_ring(sc, &sc->txq[txq_i]);
8241 iwm_free_rx_ring(sc, &sc->rxq);
8242 iwm_dma_contig_free(&sc->sched_dma);
8243 fail3: if (sc->ict_dma.vaddr != NULL)
8244 iwm_dma_contig_free(&sc->ict_dma);
8245 fail2: iwm_dma_contig_free(&sc->kw_dma);
8246 fail1: iwm_dma_contig_free(&sc->fw_dma);
8247 }
8248
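/*
 * Attach the interface to bpf(4) for radiotap capture and prefill the
 * static parts of the RX and TX radiotap headers.
 */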
8249 void
8250 iwm_radiotap_attach(struct iwm_softc *sc)
8251 {
8252 struct ifnet *ifp = IC2IFP(&sc->sc_ic);
8253
8254 bpf_attach2(ifp, DLT_IEEE802_11_RADIO,
8255 sizeof (struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN,
8256 &sc->sc_drvbpf);
8257
8258 sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
8259 sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
8260 sc->sc_rxtap.wr_ihdr.it_present = htole32(IWM_RX_RADIOTAP_PRESENT);
8261
8262 sc->sc_txtap_len = sizeof sc->sc_txtapu;
8263 sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
8264 sc->sc_txtap.wt_ihdr.it_present = htole32(IWM_TX_RADIOTAP_PRESENT);
8265 }
8266
8267 #if 0
8268 static void
8269 iwm_init_task(void *arg)
8270 {
8271 struct iwm_softc *sc = arg;
8272 struct ifnet *ifp = IC2IFP(&sc->sc_ic);
8273 int s;
8274
8275 rw_enter_write(&sc->ioctl_rwl);
8276 s = splnet();
8277
8278 iwm_stop(ifp, 0);
8279 if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP)
8280 iwm_init(ifp);
8281
8282 splx(s);
8283 rw_exit(&sc->ioctl_rwl);
8284 }
8285
8286 static void
8287 iwm_wakeup(struct iwm_softc *sc)
8288 {
8289 pcireg_t reg;
8290
8291 /* Clear device-specific "PCI retry timeout" register (41h). */
8292 reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
8293 pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
8294
8295 iwm_init_task(sc);
8296 }
8297
8298 static int
8299 iwm_activate(device_t self, enum devact act)
8300 {
8301 struct iwm_softc *sc = device_private(self);
8302 struct ifnet *ifp = IC2IFP(&sc->sc_ic);
8303
8304 switch (act) {
8305 case DVACT_DEACTIVATE:
8306 if (ifp->if_flags & IFF_RUNNING)
8307 iwm_stop(ifp, 0);
8308 return 0;
8309 default:
8310 return EOPNOTSUPP;
8311 }
8312 }
8313 #endif
8314
8315 CFATTACH_DECL_NEW(iwm, sizeof(struct iwm_softc), iwm_match, iwm_attach,
8316 NULL, NULL);
8317
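/*
 * Handler for the per-device "fw_loaded" sysctl (e.g.
 * hw.iwm.iwm0.fw_loaded): reading it reports whether firmware is
 * resident; writing 0 clears IWM_FLAG_FW_LOADED so the firmware is
 * read from disk again on the next init.
 */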
8318 static int
8319 iwm_sysctl_fw_loaded_handler(SYSCTLFN_ARGS)
8320 {
8321 struct sysctlnode node;
8322 struct iwm_softc *sc;
8323 int err, t;
8324
8325 node = *rnode;
8326 sc = node.sysctl_data;
8327 t = ISSET(sc->sc_flags, IWM_FLAG_FW_LOADED) ? 1 : 0;
8328 node.sysctl_data = &t;
8329 err = sysctl_lookup(SYSCTLFN_CALL(&node));
8330 if (err || newp == NULL)
8331 return err;
8332
8333 if (t == 0)
8334 CLR(sc->sc_flags, IWM_FLAG_FW_LOADED);
8335 return 0;
8336 }
8337
8338 SYSCTL_SETUP(sysctl_iwm, "sysctl iwm(4) subtree setup")
8339 {
8340 const struct sysctlnode *rnode;
8341 #ifdef IWM_DEBUG
8342 const struct sysctlnode *cnode;
8343 #endif /* IWM_DEBUG */
8344 int rc;
8345
8346 if ((rc = sysctl_createv(clog, 0, NULL, &rnode,
8347 CTLFLAG_PERMANENT, CTLTYPE_NODE, "iwm",
8348 SYSCTL_DESCR("iwm global controls"),
8349 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
8350 goto err;
8351
8352 iwm_sysctl_root_num = rnode->sysctl_num;
8353
8354 #ifdef IWM_DEBUG
8355 /* control debugging printfs */
8356 if ((rc = sysctl_createv(clog, 0, &rnode, &cnode,
8357 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT,
8358 "debug", SYSCTL_DESCR("Enable debugging output"),
8359 NULL, 0, &iwm_debug, 0, CTL_CREATE, CTL_EOL)) != 0)
8360 goto err;
8361 #endif /* IWM_DEBUG */
8362
8363 return;
8364
8365 err:
8366 aprint_error("%s: sysctl_createv failed (rc = %d)\n", __func__, rc);
8367 }
8368