/*	$NetBSD: if_iwm.c,v 1.80 2018/06/06 01:49:08 maya Exp $	*/
/*	OpenBSD: if_iwm.c,v 1.148 2016/11/19 21:07:08 stsp Exp	*/
#define IEEE80211_NO_HT
/*
 * Copyright (c) 2014, 2016 genua gmbh <info@genua.de>
 *   Author: Stefan Sperling <stsp@openbsd.org>
 * Copyright (c) 2014 Fixup Software Ltd.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*-
 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
 * which were used as the reference documentation for this implementation.
 *
 ***********************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_iwm.c,v 1.80 2018/06/06 01:49:08 maya Exp $");

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <sys/cpu.h>
#include <sys/bus.h>
#include <sys/workqueue.h>
#include <machine/endian.h>
#include <sys/intr.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>
#include <dev/firmload.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <netinet/in.h>
#include <netinet/ip.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_amrr.h>
#include <net80211/ieee80211_radiotap.h>

#define DEVNAME(_s)	device_xname((_s)->sc_dev)
#define IC2IFP(_ic_)	((_ic_)->ic_ifp)

#define le16_to_cpup(_a_)	(le16toh(*(const uint16_t *)(_a_)))
#define le32_to_cpup(_a_)	(le32toh(*(const uint32_t *)(_a_)))

#ifdef IWM_DEBUG
#define DPRINTF(x)	do { if (iwm_debug > 0) printf x; } while (0)
#define DPRINTFN(n, x)	do { if (iwm_debug >= (n)) printf x; } while (0)
int iwm_debug = 0;
#else
#define DPRINTF(x)	do { ; } while (0)
#define DPRINTFN(n, x)	do { ; } while (0)
#endif

#include <dev/pci/if_iwmreg.h>
#include <dev/pci/if_iwmvar.h>

static const uint8_t iwm_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};

static const uint8_t iwm_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};

#define IWM_NUM_2GHZ_CHANNELS	14

static const struct iwm_rate {
	uint8_t rate;
	uint8_t plcp;
	uint8_t ht_plcp;
} iwm_rates[] = {
		/* Legacy */		/* HT */
	{   2,	IWM_RATE_1M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{   4,	IWM_RATE_2M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{  11,	IWM_RATE_5M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{  22,	IWM_RATE_11M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{  12,	IWM_RATE_6M_PLCP,	IWM_RATE_HT_SISO_MCS_0_PLCP },
	{  18,	IWM_RATE_9M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{  24,	IWM_RATE_12M_PLCP,	IWM_RATE_HT_SISO_MCS_1_PLCP },
	{  36,	IWM_RATE_18M_PLCP,	IWM_RATE_HT_SISO_MCS_2_PLCP },
	{  48,	IWM_RATE_24M_PLCP,	IWM_RATE_HT_SISO_MCS_3_PLCP },
	{  72,	IWM_RATE_36M_PLCP,	IWM_RATE_HT_SISO_MCS_4_PLCP },
	{  96,	IWM_RATE_48M_PLCP,	IWM_RATE_HT_SISO_MCS_5_PLCP },
	{ 108,	IWM_RATE_54M_PLCP,	IWM_RATE_HT_SISO_MCS_6_PLCP },
	{ 128,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_SISO_MCS_7_PLCP },
};
#define IWM_RIDX_CCK	0
#define IWM_RIDX_OFDM	4
#define IWM_RIDX_MAX	(__arraycount(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_)	((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_)	((_i_) >= IWM_RIDX_OFDM)

#ifndef IEEE80211_NO_HT
/* Convert an MCS index into an iwm_rates[] index. */
static const int iwm_mcs2ridx[] = {
	IWM_RATE_MCS_0_INDEX,
	IWM_RATE_MCS_1_INDEX,
	IWM_RATE_MCS_2_INDEX,
	IWM_RATE_MCS_3_INDEX,
	IWM_RATE_MCS_4_INDEX,
	IWM_RATE_MCS_5_INDEX,
	IWM_RATE_MCS_6_INDEX,
	IWM_RATE_MCS_7_INDEX,
};
#endif

struct iwm_nvm_section {
	uint16_t length;
	uint8_t *data;
};

struct iwm_newstate_state {
	struct work ns_wk;
	enum ieee80211_state ns_nstate;
	int ns_arg;
	int ns_generation;
};

static int	iwm_store_cscheme(struct iwm_softc *, uint8_t *, size_t);
static int	iwm_firmware_store_section(struct iwm_softc *,
		    enum iwm_ucode_type, uint8_t *, size_t);
static int	iwm_set_default_calib(struct iwm_softc *, const void *);
static int	iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
static uint32_t iwm_read_prph(struct iwm_softc *, uint32_t);
static void	iwm_write_prph(struct iwm_softc *, uint32_t, uint32_t);
#ifdef IWM_DEBUG
static int	iwm_read_mem(struct iwm_softc *, uint32_t, void *, int);
#endif
static int	iwm_write_mem(struct iwm_softc *, uint32_t, const void *, int);
static int	iwm_write_mem32(struct iwm_softc *, uint32_t, uint32_t);
static int	iwm_poll_bit(struct iwm_softc *, int, uint32_t, uint32_t, int);
static int	iwm_nic_lock(struct iwm_softc *);
static void	iwm_nic_unlock(struct iwm_softc *);
static void	iwm_set_bits_mask_prph(struct iwm_softc *, uint32_t, uint32_t,
		    uint32_t);
static void	iwm_set_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
static void	iwm_clear_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
static int	iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *,
		    bus_size_t, bus_size_t);
static void	iwm_dma_contig_free(struct iwm_dma_info *);
static int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static void	iwm_disable_rx_dma(struct iwm_softc *);
static void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
		    int);
static void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
static void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
static void	iwm_enable_rfkill_int(struct iwm_softc *);
static int	iwm_check_rfkill(struct iwm_softc *);
static void	iwm_enable_interrupts(struct iwm_softc *);
static void	iwm_restore_interrupts(struct iwm_softc *);
static void	iwm_disable_interrupts(struct iwm_softc *);
static void	iwm_ict_reset(struct iwm_softc *);
static int	iwm_set_hw_ready(struct iwm_softc *);
static int	iwm_prepare_card_hw(struct iwm_softc *);
static void	iwm_apm_config(struct iwm_softc *);
static int	iwm_apm_init(struct iwm_softc *);
static void	iwm_apm_stop(struct iwm_softc *);
static int	iwm_allow_mcast(struct iwm_softc *);
static int	iwm_start_hw(struct iwm_softc *);
static void	iwm_stop_device(struct iwm_softc *);
static void	iwm_nic_config(struct iwm_softc *);
static int	iwm_nic_rx_init(struct iwm_softc *);
static int	iwm_nic_tx_init(struct iwm_softc *);
static int	iwm_nic_init(struct iwm_softc *);
static int	iwm_enable_txq(struct iwm_softc *, int, int, int);
static int	iwm_post_alive(struct iwm_softc *);
static struct iwm_phy_db_entry *
		iwm_phy_db_get_section(struct iwm_softc *,
		    enum iwm_phy_db_section_type, uint16_t);
static int	iwm_phy_db_set_section(struct iwm_softc *,
		    struct iwm_calib_res_notif_phy_db *, uint16_t);
static int	iwm_is_valid_channel(uint16_t);
static uint8_t	iwm_ch_id_to_ch_index(uint16_t);
static uint16_t iwm_channel_id_to_papd(uint16_t);
static uint16_t iwm_channel_id_to_txp(struct iwm_softc *, uint16_t);
static int	iwm_phy_db_get_section_data(struct iwm_softc *, uint32_t,
		    uint8_t **, uint16_t *, uint16_t);
static int	iwm_send_phy_db_cmd(struct iwm_softc *, uint16_t, uint16_t,
		    void *);
static int	iwm_phy_db_send_all_channel_groups(struct iwm_softc *,
		    enum iwm_phy_db_section_type, uint8_t);
static int	iwm_send_phy_db_data(struct iwm_softc *);
static void	iwm_te_v2_to_v1(const struct iwm_time_event_cmd_v2 *,
		    struct iwm_time_event_cmd_v1 *);
static int	iwm_send_time_event_cmd(struct iwm_softc *,
		    const struct iwm_time_event_cmd_v2 *);
static void	iwm_protect_session(struct iwm_softc *, struct iwm_node *,
		    uint32_t, uint32_t);
static int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
		    uint16_t, uint8_t *, uint16_t *);
static int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
		    uint16_t *, size_t);
static void	iwm_init_channel_map(struct iwm_softc *, const uint16_t * const,
		    const uint8_t *, size_t);
#ifndef IEEE80211_NO_HT
static void	iwm_setup_ht_rates(struct iwm_softc *);
static void	iwm_htprot_task(void *);
static void	iwm_update_htprot(struct ieee80211com *,
		    struct ieee80211_node *);
static int	iwm_ampdu_rx_start(struct ieee80211com *,
		    struct ieee80211_node *, uint8_t);
static void	iwm_ampdu_rx_stop(struct ieee80211com *,
		    struct ieee80211_node *, uint8_t);
static void	iwm_sta_rx_agg(struct iwm_softc *, struct ieee80211_node *,
		    uint8_t, uint16_t, int);
#ifdef notyet
static int	iwm_ampdu_tx_start(struct ieee80211com *,
		    struct ieee80211_node *, uint8_t);
static void	iwm_ampdu_tx_stop(struct ieee80211com *,
		    struct ieee80211_node *, uint8_t);
#endif
static void	iwm_ba_task(void *);
#endif

static int	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
		    const uint16_t *, const uint16_t *, const uint16_t *,
		    const uint16_t *, const uint16_t *);
static void	iwm_set_hw_address_8000(struct iwm_softc *,
		    struct iwm_nvm_data *, const uint16_t *, const uint16_t *);
static int	iwm_parse_nvm_sections(struct iwm_softc *,
		    struct iwm_nvm_section *);
static int	iwm_nvm_init(struct iwm_softc *);
static int	iwm_firmware_load_sect(struct iwm_softc *, uint32_t,
		    const uint8_t *, uint32_t);
static int	iwm_firmware_load_chunk(struct iwm_softc *, uint32_t,
		    const uint8_t *, uint32_t);
static int	iwm_load_cpu_sections_7000(struct iwm_softc *,
		    struct iwm_fw_sects *, int, int *);
static int	iwm_load_firmware_7000(struct iwm_softc *, enum iwm_ucode_type);
static int	iwm_load_cpu_sections_8000(struct iwm_softc *,
		    struct iwm_fw_sects *, int, int *);
static int	iwm_load_firmware_8000(struct iwm_softc *, enum iwm_ucode_type);
static int	iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
static int	iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
static int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
static int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
static int	iwm_load_ucode_wait_alive(struct iwm_softc *,
		    enum iwm_ucode_type);
static int	iwm_run_init_mvm_ucode(struct iwm_softc *, int);
static int	iwm_rx_addbuf(struct iwm_softc *, int, int);
static int	iwm_calc_rssi(struct iwm_softc *, struct iwm_rx_phy_info *);
static int	iwm_get_signal_strength(struct iwm_softc *,
		    struct iwm_rx_phy_info *);
static void	iwm_rx_rx_phy_cmd(struct iwm_softc *,
		    struct iwm_rx_packet *, struct iwm_rx_data *);
static int	iwm_get_noise(const struct iwm_statistics_rx_non_phy *);
static void	iwm_rx_rx_mpdu(struct iwm_softc *, struct iwm_rx_packet *,
		    struct iwm_rx_data *);
static void	iwm_rx_tx_cmd_single(struct iwm_softc *,
		    struct iwm_rx_packet *, struct iwm_node *);
static void	iwm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
		    struct iwm_rx_data *);
static int	iwm_binding_cmd(struct iwm_softc *, struct iwm_node *,
		    uint32_t);
#if 0
static int	iwm_binding_update(struct iwm_softc *, struct iwm_node *, int);
static int	iwm_binding_add_vif(struct iwm_softc *, struct iwm_node *);
#endif
static void	iwm_phy_ctxt_cmd_hdr(struct iwm_softc *, struct iwm_phy_ctxt *,
		    struct iwm_phy_context_cmd *, uint32_t, uint32_t);
static void	iwm_phy_ctxt_cmd_data(struct iwm_softc *,
		    struct iwm_phy_context_cmd *, struct ieee80211_channel *,
		    uint8_t, uint8_t);
static int	iwm_phy_ctxt_cmd(struct iwm_softc *, struct iwm_phy_ctxt *,
		    uint8_t, uint8_t, uint32_t, uint32_t);
static int	iwm_send_cmd(struct iwm_softc *, struct iwm_host_cmd *);
static int	iwm_send_cmd_pdu(struct iwm_softc *, uint32_t, uint32_t,
		    uint16_t, const void *);
static int	iwm_send_cmd_status(struct iwm_softc *, struct iwm_host_cmd *,
		    uint32_t *);
static int	iwm_send_cmd_pdu_status(struct iwm_softc *, uint32_t, uint16_t,
		    const void *, uint32_t *);
static void	iwm_free_resp(struct iwm_softc *, struct iwm_host_cmd *);
static void	iwm_cmd_done(struct iwm_softc *, int qid, int idx);
#if 0
static void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
		    uint16_t);
#endif
static const struct iwm_rate *
		iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
		    struct ieee80211_frame *, struct iwm_tx_cmd *);
static int	iwm_tx(struct iwm_softc *, struct mbuf *,
		    struct ieee80211_node *, int);
static void	iwm_led_enable(struct iwm_softc *);
static void	iwm_led_disable(struct iwm_softc *);
static int	iwm_led_is_enabled(struct iwm_softc *);
static void	iwm_led_blink_timeout(void *);
static void	iwm_led_blink_start(struct iwm_softc *);
static void	iwm_led_blink_stop(struct iwm_softc *);
static int	iwm_beacon_filter_send_cmd(struct iwm_softc *,
		    struct iwm_beacon_filter_cmd *);
static void	iwm_beacon_filter_set_cqm_params(struct iwm_softc *,
		    struct iwm_node *, struct iwm_beacon_filter_cmd *);
static int	iwm_update_beacon_abort(struct iwm_softc *, struct iwm_node *,
		    int);
static void	iwm_power_build_cmd(struct iwm_softc *, struct iwm_node *,
		    struct iwm_mac_power_cmd *);
static int	iwm_power_mac_update_mode(struct iwm_softc *,
		    struct iwm_node *);
static int	iwm_power_update_device(struct iwm_softc *);
#ifdef notyet
static int	iwm_enable_beacon_filter(struct iwm_softc *, struct iwm_node *);
#endif
static int	iwm_disable_beacon_filter(struct iwm_softc *);
static int	iwm_add_sta_cmd(struct iwm_softc *, struct iwm_node *, int);
static int	iwm_add_aux_sta(struct iwm_softc *);
static uint16_t iwm_scan_rx_chain(struct iwm_softc *);
static uint32_t iwm_scan_rate_n_flags(struct iwm_softc *, int, int);
#ifdef notyet
static uint16_t iwm_get_active_dwell(struct iwm_softc *, int, int);
static uint16_t iwm_get_passive_dwell(struct iwm_softc *, int);
#endif
static uint8_t	iwm_lmac_scan_fill_channels(struct iwm_softc *,
		    struct iwm_scan_channel_cfg_lmac *, int);
static int	iwm_fill_probe_req(struct iwm_softc *,
		    struct iwm_scan_probe_req *);
static int	iwm_lmac_scan(struct iwm_softc *);
static int	iwm_config_umac_scan(struct iwm_softc *);
static int	iwm_umac_scan(struct iwm_softc *);
static uint8_t	iwm_ridx2rate(struct ieee80211_rateset *, int);
static void	iwm_ack_rates(struct iwm_softc *, struct iwm_node *, int *,
		    int *);
static void	iwm_mac_ctxt_cmd_common(struct iwm_softc *, struct iwm_node *,
		    struct iwm_mac_ctx_cmd *, uint32_t, int);
static void	iwm_mac_ctxt_cmd_fill_sta(struct iwm_softc *, struct iwm_node *,
		    struct iwm_mac_data_sta *, int);
static int	iwm_mac_ctxt_cmd(struct iwm_softc *, struct iwm_node *,
		    uint32_t, int);
static int	iwm_update_quotas(struct iwm_softc *, struct iwm_node *);
static int	iwm_auth(struct iwm_softc *);
static int	iwm_assoc(struct iwm_softc *);
static void	iwm_calib_timeout(void *);
#ifndef IEEE80211_NO_HT
static void	iwm_setrates_task(void *);
static int	iwm_setrates(struct iwm_node *);
#endif
static int	iwm_media_change(struct ifnet *);
static int	iwm_do_newstate(struct ieee80211com *, enum ieee80211_state,
		    int);
static void	iwm_newstate_cb(struct work *, void *);
static int	iwm_newstate(struct ieee80211com *, enum ieee80211_state, int);
static void	iwm_endscan(struct iwm_softc *);
static void	iwm_fill_sf_command(struct iwm_softc *, struct iwm_sf_cfg_cmd *,
		    struct ieee80211_node *);
static int	iwm_sf_config(struct iwm_softc *, int);
static int	iwm_send_bt_init_conf(struct iwm_softc *);
static int	iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
static void	iwm_tt_tx_backoff(struct iwm_softc *, uint32_t);
static int	iwm_init_hw(struct iwm_softc *);
static int	iwm_init(struct ifnet *);
static void	iwm_start(struct ifnet *);
static void	iwm_stop(struct ifnet *, int);
static void	iwm_watchdog(struct ifnet *);
static int	iwm_ioctl(struct ifnet *, u_long, void *);
#ifdef IWM_DEBUG
static const char *iwm_desc_lookup(uint32_t);
static void	iwm_nic_error(struct iwm_softc *);
static void	iwm_nic_umac_error(struct iwm_softc *);
#endif
static void	iwm_notif_intr(struct iwm_softc *);
static int	iwm_intr(void *);
static void	iwm_softintr(void *);
static int	iwm_preinit(struct iwm_softc *);
static void	iwm_attach_hook(device_t);
static void	iwm_attach(device_t, device_t, void *);
#if 0
static void	iwm_init_task(void *);
static int	iwm_activate(device_t, enum devact);
static void	iwm_wakeup(struct iwm_softc *);
#endif
static void	iwm_radiotap_attach(struct iwm_softc *);
static int	iwm_sysctl_fw_loaded_handler(SYSCTLFN_PROTO);

static int iwm_sysctl_root_num;
static int iwm_lar_disable;

#ifndef IWM_DEFAULT_MCC
#define IWM_DEFAULT_MCC "ZZ"
#endif
static char iwm_default_mcc[3] = IWM_DEFAULT_MCC;

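/*
 * Fetch the firmware image via firmload(9) and keep the raw image in
 * fw->fw_rawdata / fw->fw_rawsize for the TLV parser.  On success
 * IWM_FLAG_FW_LOADED is set so that subsequent calls become no-ops.
 */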
static int
iwm_firmload(struct iwm_softc *sc)
{
	struct iwm_fw_info *fw = &sc->sc_fw;
	firmware_handle_t fwh;
	int err;

	if (ISSET(sc->sc_flags, IWM_FLAG_FW_LOADED))
		return 0;

	/* Open firmware image. */
	err = firmware_open("if_iwm", sc->sc_fwname, &fwh);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not get firmware handle %s\n", sc->sc_fwname);
		return err;
	}

	if (fw->fw_rawdata != NULL && fw->fw_rawsize > 0) {
		kmem_free(fw->fw_rawdata, fw->fw_rawsize);
		fw->fw_rawdata = NULL;
	}

	fw->fw_rawsize = firmware_get_size(fwh);
	/*
	 * Well, this is how the Linux driver checks it ....
	 */
	if (fw->fw_rawsize < sizeof(uint32_t)) {
		aprint_error_dev(sc->sc_dev,
		    "firmware too short: %zd bytes\n", fw->fw_rawsize);
		err = EINVAL;
		goto out;
	}

	/* Read the firmware. */
	fw->fw_rawdata = kmem_alloc(fw->fw_rawsize, KM_SLEEP);
	err = firmware_read(fwh, 0, fw->fw_rawdata, fw->fw_rawsize);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not read firmware %s\n", sc->sc_fwname);
		goto out;
	}

	SET(sc->sc_flags, IWM_FLAG_FW_LOADED);
 out:
	/* caller will release memory, if necessary */

	firmware_close(fwh);
	return err;
}

/*
 * Just maintaining the status quo: fix up ic_curchan from the channel
 * recorded in the most recent RX PHY info, for beacon and probe
 * response frames received while scanning.
 */
static void
iwm_fix_channel(struct iwm_softc *sc, struct mbuf *m)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_frame *wh;
	uint8_t subtype;

	wh = mtod(m, struct ieee80211_frame *);

	if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_MGT)
		return;

	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

	if (subtype != IEEE80211_FC0_SUBTYPE_BEACON &&
	    subtype != IEEE80211_FC0_SUBTYPE_PROBE_RESP)
		return;

	int chan = le32toh(sc->sc_last_phy_info.channel);
	if (chan < __arraycount(ic->ic_channels))
		ic->ic_curchan = &ic->ic_channels[chan];
}

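/*
 * Validate the crypto scheme TLV; only the length is sanity-checked,
 * since nothing is stored and software crypto is always used.
 */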
static int
iwm_store_cscheme(struct iwm_softc *sc, uint8_t *data, size_t dlen)
{
	struct iwm_fw_cscheme_list *l = (struct iwm_fw_cscheme_list *)data;

	if (dlen < sizeof(*l) ||
	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
		return EINVAL;

	/* we don't actually store anything for now, always use s/w crypto */

	return 0;
}

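/*
 * Store one firmware section of the given ucode type in fw_sects.
 * The payload is not copied: fws_data points into the raw firmware
 * image, past the 32-bit device load offset that leads each section.
 */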
static int
iwm_firmware_store_section(struct iwm_softc *sc, enum iwm_ucode_type type,
    uint8_t *data, size_t dlen)
{
	struct iwm_fw_sects *fws;
	struct iwm_fw_onesect *fwone;

	if (type >= IWM_UCODE_TYPE_MAX)
		return EINVAL;
	if (dlen < sizeof(uint32_t))
		return EINVAL;

	fws = &sc->sc_fw.fw_sects[type];
	if (fws->fw_count >= IWM_UCODE_SECT_MAX)
		return EINVAL;

	fwone = &fws->fw_sect[fws->fw_count];

	/* The first 32 bits are the device load offset. */
	memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));

	/* rest is data */
	fwone->fws_data = data + sizeof(uint32_t);
	fwone->fws_len = dlen - sizeof(uint32_t);

	/* for freeing the buffer during driver unload */
	fwone->fws_alloc = data;
	fwone->fws_allocsize = dlen;

	fws->fw_count++;
	fws->fw_totlen += fwone->fws_len;

	return 0;
}

struct iwm_tlv_calib_data {
	uint32_t ucode_type;
	struct iwm_tlv_calib_ctrl calib;
} __packed;

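/*
 * Record the default calibration triggers carried in an
 * IWM_UCODE_TLV_DEF_CALIB section for the given ucode type; they are
 * used later when the PHY configuration is sent to the firmware.
 */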
static int
iwm_set_default_calib(struct iwm_softc *sc, const void *data)
{
	const struct iwm_tlv_calib_data *def_calib = data;
	uint32_t ucode_type = le32toh(def_calib->ucode_type);

	if (ucode_type >= IWM_UCODE_TYPE_MAX) {
		DPRINTF(("%s: Wrong ucode_type %u for default calibration.\n",
		    DEVNAME(sc), ucode_type));
		return EINVAL;
	}

	sc->sc_default_calib[ucode_type].flow_trigger =
	    def_calib->calib.flow_trigger;
	sc->sc_default_calib[ucode_type].event_trigger =
	    def_calib->calib.event_trigger;

	return 0;
}

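/*
 * Parse the TLV firmware image: check the ucode header magic, then walk
 * the 32-bit-aligned TLV list, recording sections, capabilities and API
 * flags in the softc.  Concurrent callers sleep on &sc->sc_fw until the
 * first one finishes parsing.
 */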
static int
iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
{
	struct iwm_fw_info *fw = &sc->sc_fw;
	struct iwm_tlv_ucode_header *uhdr;
	struct iwm_ucode_tlv tlv;
	enum iwm_ucode_tlv_type tlv_type;
	uint8_t *data;
	int err, status;
	size_t len;

	if (ucode_type != IWM_UCODE_TYPE_INIT &&
	    fw->fw_status == IWM_FW_STATUS_DONE)
		return 0;

	if (fw->fw_status == IWM_FW_STATUS_NONE) {
		fw->fw_status = IWM_FW_STATUS_INPROGRESS;
	} else {
		while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
			tsleep(&sc->sc_fw, 0, "iwmfwp", 0);
	}
	status = fw->fw_status;

	if (status == IWM_FW_STATUS_DONE)
		return 0;

	err = iwm_firmload(sc);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not read firmware %s (error %d)\n",
		    sc->sc_fwname, err);
		goto out;
	}

	sc->sc_capaflags = 0;
	sc->sc_capa_n_scan_channels = IWM_MAX_NUM_SCAN_CHANNELS;
	memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
	memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));

	uhdr = (void *)fw->fw_rawdata;
	if (*(uint32_t *)fw->fw_rawdata != 0
	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
		aprint_error_dev(sc->sc_dev, "invalid firmware %s\n",
		    sc->sc_fwname);
		err = EINVAL;
		goto out;
	}

	snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%d.%d (API ver %d)",
	    IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
	    IWM_UCODE_MINOR(le32toh(uhdr->ver)),
	    IWM_UCODE_API(le32toh(uhdr->ver)));
	data = uhdr->data;
	len = fw->fw_rawsize - sizeof(*uhdr);

	while (len >= sizeof(tlv)) {
		size_t tlv_len;
		void *tlv_data;

		memcpy(&tlv, data, sizeof(tlv));
		tlv_len = le32toh(tlv.length);
		tlv_type = le32toh(tlv.type);

		len -= sizeof(tlv);
		data += sizeof(tlv);
		tlv_data = data;

		if (len < tlv_len) {
			aprint_error_dev(sc->sc_dev,
			    "firmware too short: %zu bytes\n", len);
			err = EINVAL;
			goto parse_out;
		}

		switch (tlv_type) {
		case IWM_UCODE_TLV_PROBE_MAX_LEN:
			if (tlv_len < sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capa_max_probe_len
			    = le32toh(*(uint32_t *)tlv_data);
			/* limit it to something sensible */
			if (sc->sc_capa_max_probe_len >
			    IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
				err = EINVAL;
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_PAN:
			if (tlv_len) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
			break;
		case IWM_UCODE_TLV_FLAGS:
			if (tlv_len < sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			if (tlv_len % sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			/*
			 * Apparently there can be many flags, but the Linux
			 * driver parses only the first one, and so do we.
			 *
			 * XXX: why does this override IWM_UCODE_TLV_PAN?
			 * Intentional or a bug?  Observations from the
			 * current firmware file:
			 *  1) TLV_PAN is parsed first
			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
			 * ==> so the assignment below merely sets TLV_PAN
			 *     again.
			 */
			sc->sc_capaflags = le32toh(*(uint32_t *)tlv_data);
			break;
		case IWM_UCODE_TLV_CSCHEME:
			err = iwm_store_cscheme(sc, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_NUM_OF_CPU: {
			uint32_t num_cpu;
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			num_cpu = le32toh(*(uint32_t *)tlv_data);
			if (num_cpu == 2) {
				fw->fw_sects[IWM_UCODE_TYPE_REGULAR].is_dual_cpus =
				    true;
				fw->fw_sects[IWM_UCODE_TYPE_INIT].is_dual_cpus =
				    true;
				fw->fw_sects[IWM_UCODE_TYPE_WOW].is_dual_cpus =
				    true;
			} else if (num_cpu < 1 || num_cpu > 2) {
				err = EINVAL;
				goto parse_out;
			}
			break;
		}
		case IWM_UCODE_TLV_SEC_RT:
			err = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_SEC_INIT:
			err = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_INIT, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_SEC_WOWLAN:
			err = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_WOW, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_DEF_CALIB:
			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
				err = EINVAL;
				goto parse_out;
			}
			err = iwm_set_default_calib(sc, tlv_data);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_PHY_SKU:
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_fw_phy_config = le32toh(*(uint32_t *)tlv_data);
			break;

		case IWM_UCODE_TLV_API_CHANGES_SET: {
			struct iwm_ucode_api *api;
			uint32_t idx, bits;
			int i;
			if (tlv_len != sizeof(*api)) {
				err = EINVAL;
				goto parse_out;
			}
			api = (struct iwm_ucode_api *)tlv_data;
			idx = le32toh(api->api_index);
			bits = le32toh(api->api_flags);
			if (idx >= howmany(IWM_NUM_UCODE_TLV_API, 32)) {
				err = EINVAL;
				goto parse_out;
			}
			for (i = 0; i < 32; i++) {
				if (!ISSET(bits, __BIT(i)))
					continue;
				setbit(sc->sc_ucode_api, i + (32 * idx));
			}
			break;
		}

		case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
			struct iwm_ucode_capa *capa;
			uint32_t idx, bits;
			int i;
			if (tlv_len != sizeof(*capa)) {
				err = EINVAL;
				goto parse_out;
			}
			capa = (struct iwm_ucode_capa *)tlv_data;
			idx = le32toh(capa->api_index);
			bits = le32toh(capa->api_capa);
			if (idx >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
				err = EINVAL;
				goto parse_out;
			}
			for (i = 0; i < 32; i++) {
				if (!ISSET(bits, __BIT(i)))
					continue;
				setbit(sc->sc_enabled_capa, i + (32 * idx));
			}
			break;
		}

		case IWM_UCODE_TLV_FW_UNDOCUMENTED1:
		case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
		case IWM_UCODE_TLV_FW_GSCAN_CAPA:
		case IWM_UCODE_TLV_FW_MEM_SEG:
			/* ignore, not used by current driver */
			break;

		case IWM_UCODE_TLV_SEC_RT_USNIFFER:
			err = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
			    tlv_len);
			if (err)
				goto parse_out;
			break;

		case IWM_UCODE_TLV_PAGING: {
			uint32_t paging_mem_size;
			if (tlv_len != sizeof(paging_mem_size)) {
				err = EINVAL;
				goto parse_out;
			}
			paging_mem_size = le32toh(*(uint32_t *)tlv_data);
			if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
				err = EINVAL;
				goto parse_out;
			}
			if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
				err = EINVAL;
				goto parse_out;
			}
			fw->fw_sects[IWM_UCODE_TYPE_REGULAR].paging_mem_size =
			    paging_mem_size;
			fw->fw_sects[IWM_UCODE_TYPE_REGULAR_USNIFFER].paging_mem_size =
			    paging_mem_size;
			break;
		}

		case IWM_UCODE_TLV_N_SCAN_CHANNELS:
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capa_n_scan_channels =
			    le32toh(*(uint32_t *)tlv_data);
			break;

		case IWM_UCODE_TLV_FW_VERSION:
			if (tlv_len != sizeof(uint32_t) * 3) {
				err = EINVAL;
				goto parse_out;
			}
			snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
			    "%d.%d.%d",
			    le32toh(((uint32_t *)tlv_data)[0]),
			    le32toh(((uint32_t *)tlv_data)[1]),
			    le32toh(((uint32_t *)tlv_data)[2]));
			break;

		default:
			DPRINTF(("%s: unknown firmware section %d, abort\n",
			    DEVNAME(sc), tlv_type));
			err = EINVAL;
			goto parse_out;
		}

		len -= roundup(tlv_len, 4);
		data += roundup(tlv_len, 4);
	}

	KASSERT(err == 0);

 parse_out:
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "firmware parse error, section type %d\n", tlv_type);
	}

	if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
		aprint_error_dev(sc->sc_dev,
		    "device uses unsupported power ops\n");
		err = ENOTSUP;
	}

 out:
	if (err)
		fw->fw_status = IWM_FW_STATUS_NONE;
	else
		fw->fw_status = IWM_FW_STATUS_DONE;
	wakeup(&sc->sc_fw);

	if (err && fw->fw_rawdata != NULL) {
		kmem_free(fw->fw_rawdata, fw->fw_rawsize);
		fw->fw_rawdata = NULL;
		CLR(sc->sc_flags, IWM_FLAG_FW_LOADED);
		/* don't touch fw->fw_status */
		memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
	}
	return err;
}

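/*
 * Indirect access to "periphery" (PRPH) registers: the target address
 * is written to HBUS_TARG_PRPH_{R,W}ADDR and the data then moves
 * through HBUS_TARG_PRPH_{R,W}DAT.  Callers are expected to hold the
 * NIC lock, as in this minimal sketch:
 *
 *	if (iwm_nic_lock(sc)) {
 *		val = iwm_read_prph(sc, reg);
 *		iwm_nic_unlock(sc);
 *	}
 */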
static uint32_t
iwm_read_prph(struct iwm_softc *sc, uint32_t addr)
{
	IWM_WRITE(sc,
	    IWM_HBUS_TARG_PRPH_RADDR, ((addr & 0x000fffff) | (3 << 24)));
	IWM_BARRIER_READ_WRITE(sc);
	return IWM_READ(sc, IWM_HBUS_TARG_PRPH_RDAT);
}

static void
iwm_write_prph(struct iwm_softc *sc, uint32_t addr, uint32_t val)
{
	IWM_WRITE(sc,
	    IWM_HBUS_TARG_PRPH_WADDR, ((addr & 0x000fffff) | (3 << 24)));
	IWM_BARRIER_WRITE(sc);
	IWM_WRITE(sc, IWM_HBUS_TARG_PRPH_WDAT, val);
}

#ifdef IWM_DEBUG
static int
iwm_read_mem(struct iwm_softc *sc, uint32_t addr, void *buf, int dwords)
{
	int offs;
	uint32_t *vals = buf;

	if (iwm_nic_lock(sc)) {
		IWM_WRITE(sc, IWM_HBUS_TARG_MEM_RADDR, addr);
		for (offs = 0; offs < dwords; offs++)
			vals[offs] = IWM_READ(sc, IWM_HBUS_TARG_MEM_RDAT);
		iwm_nic_unlock(sc);
		return 0;
	}
	return EBUSY;
}
#endif

static int
iwm_write_mem(struct iwm_softc *sc, uint32_t addr, const void *buf, int dwords)
{
	int offs;
	const uint32_t *vals = buf;

	if (iwm_nic_lock(sc)) {
		IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WADDR, addr);
		/* WADDR auto-increments */
		for (offs = 0; offs < dwords; offs++) {
			uint32_t val = vals ? vals[offs] : 0;
			IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WDAT, val);
		}
		iwm_nic_unlock(sc);
		return 0;
	}
	return EBUSY;
}

static int
iwm_write_mem32(struct iwm_softc *sc, uint32_t addr, uint32_t val)
{
	return iwm_write_mem(sc, addr, &val, 1);
}

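/*
 * Busy-wait, in 10us steps, until the bits selected by 'mask' in 'reg'
 * match 'bits', or until 'timo' microseconds have passed.  Returns
 * nonzero on success and zero on timeout.  For example (taken from
 * iwm_apm_stop below):
 *
 *	if (!iwm_poll_bit(sc, IWM_CSR_RESET,
 *	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED,
 *	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100))
 *		... the busmaster did not stop in time ...
 */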
static int
iwm_poll_bit(struct iwm_softc *sc, int reg, uint32_t bits, uint32_t mask,
    int timo)
{
	for (;;) {
		if ((IWM_READ(sc, reg) & mask) == (bits & mask)) {
			return 1;
		}
		if (timo < 10) {
			return 0;
		}
		timo -= 10;
		DELAY(10);
	}
}

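/*
 * Ask the NIC to stay awake (MAC_ACCESS_REQ) and wait until its clocks
 * are ready, so that device-internal (PRPH/SRAM) registers can be
 * accessed; returns nonzero on success.  Paired with iwm_nic_unlock().
 */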
static int
iwm_nic_lock(struct iwm_softc *sc)
{
	int rv = 0;

	if (sc->sc_cmd_hold_nic_awake)
		return 1;

	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
		DELAY(2);

	if (iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
	    | IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP, 15000)) {
		rv = 1;
	} else {
		DPRINTF(("%s: resetting device via NMI\n", DEVNAME(sc)));
		IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_FORCE_NMI);
	}

	return rv;
}

static void
iwm_nic_unlock(struct iwm_softc *sc)
{

	if (sc->sc_cmd_hold_nic_awake)
		return;

	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}

static void
iwm_set_bits_mask_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits,
    uint32_t mask)
{
	uint32_t val;

	/* XXX: no error path? */
	if (iwm_nic_lock(sc)) {
		val = iwm_read_prph(sc, reg) & mask;
		val |= bits;
		iwm_write_prph(sc, reg, val);
		iwm_nic_unlock(sc);
	}
}

static void
iwm_set_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
{
	iwm_set_bits_mask_prph(sc, reg, bits, ~0);
}

static void
iwm_clear_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
{
	iwm_set_bits_mask_prph(sc, reg, 0, ~bits);
}

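/*
 * Allocate, map, load and zero a physically contiguous DMA area of
 * 'size' bytes with the given alignment; map, vaddr and paddr are
 * recorded in 'dma'.  Undone by iwm_dma_contig_free().
 */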
static int
iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
    bus_size_t size, bus_size_t alignment)
{
	int nsegs, err;
	void *va;

	dma->tag = tag;
	dma->size = size;

	err = bus_dmamap_create(tag, size, 1, size, 0, BUS_DMA_NOWAIT,
	    &dma->map);
	if (err)
		goto fail;

	err = bus_dmamem_alloc(tag, size, alignment, 0, &dma->seg, 1, &nsegs,
	    BUS_DMA_NOWAIT);
	if (err)
		goto fail;

	err = bus_dmamem_map(tag, &dma->seg, 1, size, &va, BUS_DMA_NOWAIT);
	if (err)
		goto fail;
	dma->vaddr = va;

	err = bus_dmamap_load(tag, dma->map, dma->vaddr, size, NULL,
	    BUS_DMA_NOWAIT);
	if (err)
		goto fail;

	memset(dma->vaddr, 0, size);
	bus_dmamap_sync(tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE);
	dma->paddr = dma->map->dm_segs[0].ds_addr;

	return 0;

 fail:	iwm_dma_contig_free(dma);
	return err;
}

static void
iwm_dma_contig_free(struct iwm_dma_info *dma)
{
	if (dma->map != NULL) {
		if (dma->vaddr != NULL) {
			bus_dmamap_sync(dma->tag, dma->map, 0, dma->size,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(dma->tag, dma->map);
			bus_dmamem_unmap(dma->tag, dma->vaddr, dma->size);
			bus_dmamem_free(dma->tag, &dma->seg, 1);
			dma->vaddr = NULL;
		}
		bus_dmamap_destroy(dma->tag, dma->map);
		dma->map = NULL;
	}
}

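/*
 * Allocate the RX ring: IWM_RX_RING_COUNT descriptors (32-bit DMA
 * addresses, 256-byte aligned), the RX status area (16-byte aligned),
 * and a DMA map plus receive buffer for every ring slot.
 */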
static int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	bus_size_t size;
	int i, err;

	ring->cur = 0;

	/* Allocate RX descriptors (256-byte aligned). */
	size = IWM_RX_RING_COUNT * sizeof(uint32_t);
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate RX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/* Allocate RX status area (16-byte aligned). */
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
	    sizeof(*ring->stat), 16);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate RX status DMA memory\n");
		goto fail;
	}
	ring->stat = ring->stat_dma.vaddr;

	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];

		memset(data, 0, sizeof(*data));
		err = bus_dmamap_create(sc->sc_dmat, IWM_RBUF_SIZE, 1,
		    IWM_RBUF_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &data->map);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could not create RX buf DMA map\n");
			goto fail;
		}

		err = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i);
		if (err)
			goto fail;
	}
	return 0;

 fail:	iwm_free_rx_ring(sc, ring);
	return err;
}

static void
iwm_disable_rx_dma(struct iwm_softc *sc)
{
	int ntries;

	if (iwm_nic_lock(sc)) {
		IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
		for (ntries = 0; ntries < 1000; ntries++) {
			if (IWM_READ(sc, IWM_FH_MEM_RSSR_RX_STATUS_REG) &
			    IWM_FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE)
				break;
			DELAY(10);
		}
		iwm_nic_unlock(sc);
	}
}

static void
iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	ring->cur = 0;
	memset(ring->stat, 0, sizeof(*ring->stat));
	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
	    ring->stat_dma.size, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

static void
iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	int i;

	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->stat_dma);

	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(sc->sc_dmat, data->map);
			data->map = NULL;
		}
	}
}

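/*
 * Allocate TX ring 'qid': the TFD descriptor array (256-byte aligned)
 * and, for rings up to and including the command queue, per-slot device
 * command buffers and DMA maps.  The command queue gets larger,
 * single-segment maps because firmware commands may need more mapped
 * space than a packet cluster.
 */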
static int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	int i, err, nsegs;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate TX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/*
	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
	 * to allocate commands space for other rings.
	 */
	if (qid > IWM_CMD_QUEUE)
		return 0;

	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate TX cmd DMA memory\n");
		goto fail;
	}
	ring->cmd = ring->cmd_dma.vaddr;

	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];
		size_t mapsize;

		data->cmd_paddr = paddr;
		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
		    + offsetof(struct iwm_tx_cmd, scratch);
		paddr += sizeof(struct iwm_device_cmd);

		/* FW commands may require more mapped space than packets. */
		if (qid == IWM_CMD_QUEUE) {
			mapsize = IWM_RBUF_SIZE;
			nsegs = 1;
		} else {
			mapsize = MCLBYTES;
			nsegs = IWM_NUM_OF_TBS - 2;
		}
		err = bus_dmamap_create(sc->sc_dmat, mapsize, nsegs, mapsize,
		    0, BUS_DMA_NOWAIT, &data->map);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could not create TX buf DMA map\n");
			goto fail;
		}
	}
	KASSERT(paddr == ring->cmd_dma.paddr + size);
	return 0;

 fail:	iwm_free_tx_ring(sc, ring);
	return err;
}

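/*
 * On NICs with the APMG wake-up workaround (apmg_wake_up_wa), the
 * device must be kept awake explicitly while host commands are in
 * flight; these two helpers clear and set that state.
 */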
static void
iwm_clear_cmd_in_flight(struct iwm_softc *sc)
{

	if (!sc->apmg_wake_up_wa)
		return;

	if (!sc->sc_cmd_hold_nic_awake) {
		aprint_error_dev(sc->sc_dev,
		    "cmd_hold_nic_awake not set\n");
		return;
	}

	sc->sc_cmd_hold_nic_awake = 0;
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}

static int
iwm_set_cmd_in_flight(struct iwm_softc *sc)
{
	int ret;

	/*
	 * wake up the NIC to make sure that the firmware will see the host
	 * command - we will let the NIC sleep once all the host commands
	 * returned. This needs to be done only on NICs that have
	 * apmg_wake_up_wa set.
	 */
	if (sc->apmg_wake_up_wa && !sc->sc_cmd_hold_nic_awake) {

		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
		    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

		ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
		    IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
		    (IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
		     IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP),
		    15000);
		if (ret == 0) {
			IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
			    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			aprint_error_dev(sc->sc_dev,
			    "failed to wake NIC for hcmd\n");
			return EIO;
		}
		sc->sc_cmd_hold_nic_awake = 1;
	}

	return 0;
}

static void
iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	/* Clear TX descriptors. */
	memset(ring->desc, 0, ring->desc_dma.size);
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0,
	    ring->desc_dma.size, BUS_DMASYNC_PREWRITE);
	sc->qfullmsk &= ~(1 << ring->qid);
	ring->queued = 0;
	ring->cur = 0;

	if (ring->qid == IWM_CMD_QUEUE && sc->sc_cmd_hold_nic_awake)
		iwm_clear_cmd_in_flight(sc);
}

static void
iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->cmd_dma);

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(sc->sc_dmat, data->map);
			data->map = NULL;
		}
	}
}

static void
iwm_enable_rfkill_int(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INT_BIT_RF_KILL;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}

static int
iwm_check_rfkill(struct iwm_softc *sc)
{
	uint32_t v;
	int s;
	int rv;

	s = splnet();

	/*
	 * "documentation" is not really helpful here:
	 *  27:	HW_RF_KILL_SW
	 *	Indicates state of (platform's) hardware RF-Kill switch
	 *
	 * But apparently when it's off, it's on ...
	 */
	v = IWM_READ(sc, IWM_CSR_GP_CNTRL);
	rv = (v & IWM_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) == 0;
	if (rv) {
		sc->sc_flags |= IWM_FLAG_RFKILL;
	} else {
		sc->sc_flags &= ~IWM_FLAG_RFKILL;
	}

	splx(s);
	return rv;
}

static void
iwm_enable_interrupts(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}

static void
iwm_restore_interrupts(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}

static void
iwm_disable_interrupts(struct iwm_softc *sc)
{
	int s = splnet();

	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	/* acknowledge all interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);

	splx(s);
}

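/*
 * Reset the interrupt cause table (ICT): zero it, hand its (4KB-aligned)
 * physical address to the device, and re-enable interrupts in ICT mode,
 * in which the interrupt handler reads causes from this table in DRAM
 * rather than from device registers.
 */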
static void
iwm_ict_reset(struct iwm_softc *sc)
{
	iwm_disable_interrupts(sc);

	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
	bus_dmamap_sync(sc->sc_dmat, sc->ict_dma.map, 0, IWM_ICT_SIZE,
	    BUS_DMASYNC_PREWRITE);
	sc->ict_cur = 0;

	/* Set physical address of ICT (4KB aligned). */
	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
	    IWM_CSR_DRAM_INT_TBL_ENABLE
	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWM_FLAG_USE_ICT;

	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);
}

#define IWM_HW_READY_TIMEOUT 50
static int
iwm_set_hw_ready(struct iwm_softc *sc)
{
	int ready;

	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	ready = iwm_poll_bit(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
	    IWM_HW_READY_TIMEOUT);
	if (ready)
		IWM_SETBITS(sc, IWM_CSR_MBOX_SET_REG,
		    IWM_CSR_MBOX_SET_REG_OS_ALIVE);

	return ready;
}
#undef IWM_HW_READY_TIMEOUT

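/*
 * Wait for the HW to become ready, setting the PREPARE bit and polling
 * again if the first check fails; gives up after roughly 150ms.
 */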
static int
iwm_prepare_card_hw(struct iwm_softc *sc)
{
	int t = 0;

	if (iwm_set_hw_ready(sc))
		return 0;

	DELAY(100);

	/* If HW is not ready, prepare the conditions to check again */
	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_PREPARE);

	do {
		if (iwm_set_hw_ready(sc))
			return 0;
		DELAY(200);
		t += 200;
	} while (t < 150000);

	return ETIMEDOUT;
}

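/*
 * Propagate the PCIe ASPM L1 link setting into the device's GIO
 * register.  The bit name is misleading; the Linux driver sets this
 * same bit in the L1-enabled case while logging "Disabling L0S".
 */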
static void
iwm_apm_config(struct iwm_softc *sc)
{
	pcireg_t reg;

	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
	    sc->sc_cap_off + PCIE_LCSR);
	if (reg & PCIE_LCSR_ASPM_L1) {
		/* The Linux driver logs "Disabling L0S" in this case ... */
		IWM_SETBITS(sc, IWM_CSR_GIO_REG,
		    IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
	} else {
		/* ... and "Enabling L0S" in this one. */
		IWM_CLRBITS(sc, IWM_CSR_GIO_REG,
		    IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
	}
}

/*
 * Start up NIC's basic functionality after it has been reset
 * e.g. after platform boot or shutdown.
 * NOTE:  This does not load uCode nor start the embedded processor
 */
static int
iwm_apm_init(struct iwm_softc *sc)
{
	int err = 0;

	/* Disable L0S exit timer (platform NMI workaround) */
	if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000) {
		IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
		    IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
	}

	/*
	 * Disable L0s without affecting L1;
	 * don't wait for ICH L0s (ICH bug W/A)
	 */
	IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
	    IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	IWM_SETBITS(sc, IWM_CSR_DBG_HPET_MEM_REG, IWM_CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 */
	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	iwm_apm_config(sc);

#if 0 /* not for 7k/8k */
	/* Configure analog phase-lock-loop before activating to D0A */
	if (trans->cfg->base_params->pll_cfg_val)
		IWM_SETBITS(trans, IWM_CSR_ANA_PLL_CFG,
		    trans->cfg->base_params->pll_cfg_val);
#endif

	/*
	 * Set "initialization complete" bit to move adapter from
	 * D0U* --> D0A* (powered-up active) state.
	 */
	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL, IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * Wait for clock stabilization; once stabilized, access to
	 * device-internal resources is supported, e.g. iwm_write_prph()
	 * and accesses to uCode SRAM.
	 */
	if (!iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
		aprint_error_dev(sc->sc_dev,
		    "timeout waiting for clock stabilization\n");
		err = ETIMEDOUT;
		goto out;
	}

	if (sc->host_interrupt_operation_mode) {
		/*
		 * This is a bit of an abuse: the workaround is needed for
		 * 7260 / 3160 only, so we key it off
		 * host_interrupt_operation_mode even though it is not
		 * otherwise related to that mode.
		 *
		 * Enable the oscillator to count wake up time for L1 exit.
		 * This consumes slightly more power (100uA) - but allows to
		 * be sure that we wake up from L1 on time.
		 *
		 * This looks weird: read twice the same register, discard
		 * the value, set a bit, and yet again, read that same
		 * register just to discard the value.  But that's the way
		 * the hardware seems to like it.
		 */
		iwm_read_prph(sc, IWM_OSC_CLK);
		iwm_read_prph(sc, IWM_OSC_CLK);
		iwm_set_bits_prph(sc, IWM_OSC_CLK, IWM_OSC_CLK_FORCE_CONTROL);
		iwm_read_prph(sc, IWM_OSC_CLK);
		iwm_read_prph(sc, IWM_OSC_CLK);
	}

	/*
	 * Enable DMA clock and wait for it to stabilize.
	 *
	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
	 * do not disable clocks.  This preserves any hardware bits already
	 * set by default in "CLK_CTRL_REG" after reset.
	 */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
		iwm_write_prph(sc, IWM_APMG_CLK_EN_REG,
		    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
		DELAY(20);

		/* Disable L1-Active */
		iwm_set_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

		/* Clear the interrupt in APMG if the NIC is in RFKILL */
		iwm_write_prph(sc, IWM_APMG_RTC_INT_STT_REG,
		    IWM_APMG_RTC_INT_STT_RFKILL);
	}
 out:
	if (err)
		aprint_error_dev(sc->sc_dev, "apm init error %d\n", err);
	return err;
}

static void
iwm_apm_stop(struct iwm_softc *sc)
{
	/* stop device's busmaster DMA activity */
	IWM_SETBITS(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_STOP_MASTER);

	if (!iwm_poll_bit(sc, IWM_CSR_RESET,
	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED,
	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100))
		aprint_error_dev(sc->sc_dev, "timeout waiting for master\n");
	DPRINTF(("iwm apm stop\n"));
}

static int
iwm_start_hw(struct iwm_softc *sc)
{
	int err;

	err = iwm_prepare_card_hw(sc);
	if (err)
		return err;

	/* Reset the entire device */
	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
	DELAY(10);

	err = iwm_apm_init(sc);
	if (err)
		return err;

	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);

	return 0;
}

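/*
 * Bring the device to a full stop: mask interrupts, halt the TX
 * scheduler and the DMA channels, reset all rings, stop the APM and
 * reset the on-board processor.  The RF-kill interrupt is re-armed so
 * that switch changes are still seen while the device is down.
 */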
1746 static void
1747 iwm_stop_device(struct iwm_softc *sc)
1748 {
1749 int chnl, ntries;
1750 int qid;
1751
1752 iwm_disable_interrupts(sc);
1753 sc->sc_flags &= ~IWM_FLAG_USE_ICT;
1754
1755 /* Deactivate TX scheduler. */
1756 iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1757
1758 /* Stop all DMA channels. */
1759 if (iwm_nic_lock(sc)) {
1760 for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1761 IWM_WRITE(sc,
1762 IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
1763 for (ntries = 0; ntries < 200; ntries++) {
1764 uint32_t r;
1765
1766 r = IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG);
1767 if (r & IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(
1768 chnl))
1769 break;
1770 DELAY(20);
1771 }
1772 }
1773 iwm_nic_unlock(sc);
1774 }
1775 iwm_disable_rx_dma(sc);
1776
1777 iwm_reset_rx_ring(sc, &sc->rxq);
1778
1779 for (qid = 0; qid < __arraycount(sc->txq); qid++)
1780 iwm_reset_tx_ring(sc, &sc->txq[qid]);
1781
1782 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
1783 /* Power-down device's busmaster DMA clocks */
1784 if (iwm_nic_lock(sc)) {
1785 iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
1786 IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1787 DELAY(5);
1788 iwm_nic_unlock(sc);
1789 }
1790 }
1791
1792 	/* Make sure (redundantly) that we've released our request to stay awake */
1793 IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1794 IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1795
1796 /* Stop the device, and put it in low power state */
1797 iwm_apm_stop(sc);
1798
1799 /*
1800 * Upon stop, the APM issues an interrupt if HW RF kill is set.
1801 	 * Clear the interrupt again here.
1802 */
1803 iwm_disable_interrupts(sc);
1804
1805 /* Reset the on-board processor. */
1806 IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
1807
1808 /* Even though we stop the HW we still want the RF kill interrupt. */
1809 iwm_enable_rfkill_int(sc);
1810 iwm_check_rfkill(sc);
1811 }
1812
1813 static void
1814 iwm_nic_config(struct iwm_softc *sc)
1815 {
1816 uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
1817 uint32_t reg_val = 0;
1818
1819 radio_cfg_type = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
1820 IWM_FW_PHY_CFG_RADIO_TYPE_POS;
1821 radio_cfg_step = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
1822 IWM_FW_PHY_CFG_RADIO_STEP_POS;
1823 radio_cfg_dash = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
1824 IWM_FW_PHY_CFG_RADIO_DASH_POS;
1825
1826 reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
1827 IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
1828 reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
1829 IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
1830
1831 /* radio configuration */
1832 reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
1833 reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
1834 reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
1835
1836 IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);
1837
1838 DPRINTF(("Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
1839 radio_cfg_step, radio_cfg_dash));
1840
1841 /*
1842 	 * W/A: the NIC gets stuck in a reset state after an early PCIe
1843 	 * power-off (PCIe power is lost before PERST# is asserted), causing
1844 	 * the ME firmware to lose ownership and be unable to reclaim it.
1845 */
1846 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
1847 iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
1848 IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
1849 ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
1850 }
1851 }
1852
1853 static int
1854 iwm_nic_rx_init(struct iwm_softc *sc)
1855 {
1856 if (!iwm_nic_lock(sc))
1857 return EBUSY;
1858
1859 memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1860 bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
1861 0, sc->rxq.stat_dma.size,
1862 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1863
1864 iwm_disable_rx_dma(sc);
1865 IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
1866 IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
1867 IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
1868 IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
1869
1870 /* Set physical address of RX ring (256-byte aligned). */
1871 IWM_WRITE(sc,
1872 IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);
1873
1874 /* Set physical address of RX status (16-byte aligned). */
1875 IWM_WRITE(sc,
1876 IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);
1877
1878 /* Enable RX. */
1879 IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
1880 IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
1881 IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY | /* HW bug */
1882 IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
1883 IWM_FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
1884 IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
1885 (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
1886 IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);
1887
1888 IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
1889
1890 /* W/A for interrupt coalescing bug in 7260 and 3160 */
1891 if (sc->host_interrupt_operation_mode)
1892 IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);
1893
1894 /*
1895 * This value should initially be 0 (before preparing any RBs),
1896 * and should be 8 after preparing the first 8 RBs (for example).
1897 */
1898 IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);
1899
1900 iwm_nic_unlock(sc);
1901
1902 return 0;
1903 }
1904
1905 static int
1906 iwm_nic_tx_init(struct iwm_softc *sc)
1907 {
1908 int qid;
1909
1910 if (!iwm_nic_lock(sc))
1911 return EBUSY;
1912
1913 /* Deactivate TX scheduler. */
1914 iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1915
1916 /* Set physical address of "keep warm" page (16-byte aligned). */
1917 IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);
1918
1919 for (qid = 0; qid < __arraycount(sc->txq); qid++) {
1920 struct iwm_tx_ring *txq = &sc->txq[qid];
1921
1922 /* Set physical address of TX ring (256-byte aligned). */
1923 IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
1924 txq->desc_dma.paddr >> 8);
1925 DPRINTF(("loading ring %d descriptors (%p) at %"PRIxMAX"\n",
1926 qid, txq->desc, (uintmax_t)(txq->desc_dma.paddr >> 8)));
1927 }
1928
1929 iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);
1930
1931 iwm_nic_unlock(sc);
1932
1933 return 0;
1934 }
1935
1936 static int
1937 iwm_nic_init(struct iwm_softc *sc)
1938 {
1939 int err;
1940
1941 iwm_apm_init(sc);
1942 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
1943 iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
1944 IWM_APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
1945 ~IWM_APMG_PS_CTRL_MSK_PWR_SRC);
1946 }
1947
1948 iwm_nic_config(sc);
1949
1950 err = iwm_nic_rx_init(sc);
1951 if (err)
1952 return err;
1953
1954 err = iwm_nic_tx_init(sc);
1955 if (err)
1956 return err;
1957
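	/*
	 * Enable "shadow registers", which let the host update ring
	 * pointers without first waking up the MAC.
	 */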
1958 DPRINTF(("shadow registers enabled\n"));
1959 IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
1960
1961 return 0;
1962 }
1963
1964 static const uint8_t iwm_ac_to_tx_fifo[] = {
1965 IWM_TX_FIFO_VO,
1966 IWM_TX_FIFO_VI,
1967 IWM_TX_FIFO_BE,
1968 IWM_TX_FIFO_BK,
1969 };
1970
1971 static int
1972 iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
1973 {
1974 if (!iwm_nic_lock(sc)) {
1975 DPRINTF(("%s: cannot enable txq %d\n", DEVNAME(sc), qid));
1976 return EBUSY;
1977 }
1978
1979 IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
1980
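	/*
	 * The command queue must work before the firmware is fully up, so
	 * it is configured directly via scheduler registers and SRAM
	 * writes; all other queues are configured by the firmware itself
	 * through an IWM_SCD_QUEUE_CFG host command (the else branch).
	 */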
1981 if (qid == IWM_CMD_QUEUE) {
1982 iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1983 (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
1984 | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
1985
1986 iwm_nic_unlock(sc);
1987
1988 iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));
1989
1990 if (!iwm_nic_lock(sc))
1991 return EBUSY;
1992 iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
1993 iwm_nic_unlock(sc);
1994
1995 iwm_write_mem32(sc,
1996 sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
1997
1998 /* Set scheduler window size and frame limit. */
1999 iwm_write_mem32(sc,
2000 sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
2001 sizeof(uint32_t),
2002 ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
2003 IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
2004 ((IWM_FRAME_LIMIT
2005 << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
2006 IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
2007
2008 if (!iwm_nic_lock(sc))
2009 return EBUSY;
2010 iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
2011 (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
2012 (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
2013 (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
2014 IWM_SCD_QUEUE_STTS_REG_MSK);
2015 } else {
2016 struct iwm_scd_txq_cfg_cmd cmd;
2017 int err;
2018
2019 iwm_nic_unlock(sc);
2020
2021 memset(&cmd, 0, sizeof(cmd));
2022 cmd.scd_queue = qid;
2023 cmd.enable = 1;
2024 cmd.sta_id = sta_id;
2025 cmd.tx_fifo = fifo;
2026 cmd.aggregate = 0;
2027 cmd.window = IWM_FRAME_LIMIT;
2028
2029 err = iwm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, 0, sizeof(cmd),
2030 &cmd);
2031 if (err)
2032 return err;
2033
2034 if (!iwm_nic_lock(sc))
2035 return EBUSY;
2036 }
2037
2038 	iwm_write_prph(sc, IWM_SCD_EN_CTRL,
2039 	    iwm_read_prph(sc, IWM_SCD_EN_CTRL) | (1 << qid)); /* queue bitmask */
2040
2041 iwm_nic_unlock(sc);
2042
2043 DPRINTF(("enabled txq %d FIFO %d\n", qid, fifo));
2044
2045 return 0;
2046 }
2047
2048 static int
2049 iwm_post_alive(struct iwm_softc *sc)
2050 {
2051 int nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
2052 IWM_SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(uint32_t);
2053 int err, chnl;
2054 uint32_t base;
2055
2056 if (!iwm_nic_lock(sc))
2057 return EBUSY;
2058
2059 base = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
2060 if (sc->sched_base != base) {
2061 DPRINTF(("%s: sched addr mismatch: 0x%08x != 0x%08x\n",
2062 DEVNAME(sc), sc->sched_base, base));
2063 sc->sched_base = base;
2064 }
2065
2066 iwm_nic_unlock(sc);
2067
2068 iwm_ict_reset(sc);
2069
2070 /* Clear TX scheduler state in SRAM. */
2071 err = iwm_write_mem(sc,
2072 sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND, NULL, nwords);
2073 if (err)
2074 return err;
2075
2076 if (!iwm_nic_lock(sc))
2077 return EBUSY;
2078
2079 /* Set physical address of TX scheduler rings (1KB aligned). */
2080 iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);
2081
2082 iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);
2083
2084 iwm_nic_unlock(sc);
2085
2086 /* enable command channel */
2087 err = iwm_enable_txq(sc, 0 /* unused */, IWM_CMD_QUEUE, 7);
2088 if (err)
2089 return err;
2090
2091 if (!iwm_nic_lock(sc))
2092 return EBUSY;
2093
2094 /* Activate TX scheduler. */
2095 iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
2096
2097 /* Enable DMA channels. */
2098 for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
2099 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
2100 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
2101 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
2102 }
2103
2104 IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
2105 IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
2106
2107 /* Enable L1-Active */
2108 if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000) {
2109 iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
2110 IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
2111 }
2112
2113 iwm_nic_unlock(sc);
2114
2115 return 0;
2116 }
2117
2118 static struct iwm_phy_db_entry *
2119 iwm_phy_db_get_section(struct iwm_softc *sc, enum iwm_phy_db_section_type type,
2120 uint16_t chg_id)
2121 {
2122 struct iwm_phy_db *phy_db = &sc->sc_phy_db;
2123
2124 if (type >= IWM_PHY_DB_MAX)
2125 return NULL;
2126
2127 switch (type) {
2128 case IWM_PHY_DB_CFG:
2129 return &phy_db->cfg;
2130 case IWM_PHY_DB_CALIB_NCH:
2131 return &phy_db->calib_nch;
2132 case IWM_PHY_DB_CALIB_CHG_PAPD:
2133 if (chg_id >= IWM_NUM_PAPD_CH_GROUPS)
2134 return NULL;
2135 return &phy_db->calib_ch_group_papd[chg_id];
2136 case IWM_PHY_DB_CALIB_CHG_TXP:
2137 if (chg_id >= IWM_NUM_TXP_CH_GROUPS)
2138 return NULL;
2139 return &phy_db->calib_ch_group_txp[chg_id];
2140 default:
2141 return NULL;
2142 }
2143 return NULL;
2144 }
2145
2146 static int
2147 iwm_phy_db_set_section(struct iwm_softc *sc,
2148 struct iwm_calib_res_notif_phy_db *phy_db_notif, uint16_t size)
2149 {
2150 struct iwm_phy_db_entry *entry;
2151 enum iwm_phy_db_section_type type = le16toh(phy_db_notif->type);
2152 uint16_t chg_id = 0;
2153
2154 if (type == IWM_PHY_DB_CALIB_CHG_PAPD ||
2155 type == IWM_PHY_DB_CALIB_CHG_TXP)
2156 chg_id = le16toh(*(uint16_t *)phy_db_notif->data);
2157
2158 entry = iwm_phy_db_get_section(sc, type, chg_id);
2159 if (!entry)
2160 return EINVAL;
2161
2162 if (entry->data)
2163 kmem_intr_free(entry->data, entry->size);
2164 entry->data = kmem_intr_alloc(size, KM_NOSLEEP);
2165 if (!entry->data) {
2166 entry->size = 0;
2167 return ENOMEM;
2168 }
2169 memcpy(entry->data, phy_db_notif->data, size);
2170 entry->size = size;
2171
2172 DPRINTFN(10, ("%s(%d): [PHYDB]SET: Type %d, Size: %d, data: %p\n",
2173 __func__, __LINE__, type, size, entry->data));
2174
2175 return 0;
2176 }
2177
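/*
 * Valid channels are 1-14 (2GHz) plus the 5GHz sets 36-64 and
 * 100-140 (multiples of 4) and 145-165 (145 plus a multiple of 4).
 */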
2178 static int
2179 iwm_is_valid_channel(uint16_t ch_id)
2180 {
2181 if (ch_id <= 14 ||
2182 (36 <= ch_id && ch_id <= 64 && ch_id % 4 == 0) ||
2183 (100 <= ch_id && ch_id <= 140 && ch_id % 4 == 0) ||
2184 (145 <= ch_id && ch_id <= 165 && ch_id % 4 == 1))
2185 return 1;
2186 return 0;
2187 }
2188
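/*
 * Map a channel number to its index in the NVM channel list, e.g.
 * channel 1 -> 0, 14 -> 13, 36 -> 14, 64 -> 21, 100 -> 22,
 * 140 -> 32, 149 -> 34 and 165 -> 38.
 */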
2189 static uint8_t
2190 iwm_ch_id_to_ch_index(uint16_t ch_id)
2191 {
2192 if (!iwm_is_valid_channel(ch_id))
2193 return 0xff;
2194
2195 if (ch_id <= 14)
2196 return ch_id - 1;
2197 if (ch_id <= 64)
2198 return (ch_id + 20) / 4;
2199 if (ch_id <= 140)
2200 return (ch_id - 12) / 4;
2201 return (ch_id - 13) / 4;
2202 }
2203
2204
2205 static uint16_t
2206 iwm_channel_id_to_papd(uint16_t ch_id)
2207 {
2208 if (!iwm_is_valid_channel(ch_id))
2209 return 0xff;
2210
2211 if (1 <= ch_id && ch_id <= 14)
2212 return 0;
2213 if (36 <= ch_id && ch_id <= 64)
2214 return 1;
2215 if (100 <= ch_id && ch_id <= 140)
2216 return 2;
2217 return 3;
2218 }
2219
2220 static uint16_t
2221 iwm_channel_id_to_txp(struct iwm_softc *sc, uint16_t ch_id)
2222 {
2223 struct iwm_phy_db *phy_db = &sc->sc_phy_db;
2224 struct iwm_phy_db_chg_txp *txp_chg;
2225 int i;
2226 uint8_t ch_index = iwm_ch_id_to_ch_index(ch_id);
2227
2228 if (ch_index == 0xff)
2229 return 0xff;
2230
2231 for (i = 0; i < IWM_NUM_TXP_CH_GROUPS; i++) {
2232 txp_chg = (void *)phy_db->calib_ch_group_txp[i].data;
2233 if (!txp_chg)
2234 return 0xff;
2235 /*
2236 		 * Look for the first channel group whose maximum channel
2237 		 * index is not below the requested channel's index.
2238 */
2239 if (le16toh(txp_chg->max_channel_idx) >= ch_index)
2240 return i;
2241 }
2242 return 0xff;
2243 }
2244
2245 static int
2246 iwm_phy_db_get_section_data(struct iwm_softc *sc, uint32_t type, uint8_t **data,
2247 uint16_t *size, uint16_t ch_id)
2248 {
2249 struct iwm_phy_db_entry *entry;
2250 uint16_t ch_group_id = 0;
2251
2252 if (type == IWM_PHY_DB_CALIB_CHG_PAPD)
2253 ch_group_id = iwm_channel_id_to_papd(ch_id);
2254 else if (type == IWM_PHY_DB_CALIB_CHG_TXP)
2255 ch_group_id = iwm_channel_id_to_txp(sc, ch_id);
2256
2257 entry = iwm_phy_db_get_section(sc, type, ch_group_id);
2258 if (!entry)
2259 return EINVAL;
2260
2261 *data = entry->data;
2262 *size = entry->size;
2263
2264 DPRINTFN(10, ("%s(%d): [PHYDB] GET: Type %d , Size: %d\n",
2265 __func__, __LINE__, type, *size));
2266
2267 return 0;
2268 }
2269
2270 static int
2271 iwm_send_phy_db_cmd(struct iwm_softc *sc, uint16_t type, uint16_t length,
2272 void *data)
2273 {
2274 struct iwm_phy_db_cmd phy_db_cmd;
2275 struct iwm_host_cmd cmd = {
2276 .id = IWM_PHY_DB_CMD,
2277 .flags = IWM_CMD_ASYNC,
2278 };
2279
2280 DPRINTFN(10, ("Sending PHY-DB hcmd of type %d, of length %d\n",
2281 type, length));
2282
2283 	phy_db_cmd.type = htole16(type);
2284 	phy_db_cmd.length = htole16(length);
2285
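	/* Fragment 0 carries the fixed command header, fragment 1 the payload. */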
2286 cmd.data[0] = &phy_db_cmd;
2287 cmd.len[0] = sizeof(struct iwm_phy_db_cmd);
2288 cmd.data[1] = data;
2289 cmd.len[1] = length;
2290
2291 return iwm_send_cmd(sc, &cmd);
2292 }
2293
2294 static int
2295 iwm_phy_db_send_all_channel_groups(struct iwm_softc *sc,
2296 enum iwm_phy_db_section_type type, uint8_t max_ch_groups)
2297 {
2298 uint16_t i;
2299 int err;
2300 struct iwm_phy_db_entry *entry;
2301
2302 /* Send all the channel-specific groups to operational fw */
2303 for (i = 0; i < max_ch_groups; i++) {
2304 entry = iwm_phy_db_get_section(sc, type, i);
2305 if (!entry)
2306 return EINVAL;
2307
2308 if (!entry->size)
2309 continue;
2310
2311 err = iwm_send_phy_db_cmd(sc, type, entry->size, entry->data);
2312 if (err) {
2313 DPRINTF(("%s: Can't SEND phy_db section %d (%d), "
2314 "err %d\n", DEVNAME(sc), type, i, err));
2315 return err;
2316 }
2317
2318 DPRINTFN(10, ("%s: Sent PHY_DB HCMD, type = %d num = %d\n",
2319 DEVNAME(sc), type, i));
2320
2321 DELAY(1000);
2322 }
2323
2324 return 0;
2325 }
2326
2327 static int
2328 iwm_send_phy_db_data(struct iwm_softc *sc)
2329 {
2330 uint8_t *data = NULL;
2331 uint16_t size = 0;
2332 int err;
2333
2334 err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CFG, &data, &size, 0);
2335 if (err)
2336 return err;
2337
2338 err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CFG, size, data);
2339 if (err)
2340 return err;
2341
2342 err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CALIB_NCH,
2343 &data, &size, 0);
2344 if (err)
2345 return err;
2346
2347 err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CALIB_NCH, size, data);
2348 if (err)
2349 return err;
2350
2351 err = iwm_phy_db_send_all_channel_groups(sc,
2352 IWM_PHY_DB_CALIB_CHG_PAPD, IWM_NUM_PAPD_CH_GROUPS);
2353 if (err)
2354 return err;
2355
2356 err = iwm_phy_db_send_all_channel_groups(sc,
2357 IWM_PHY_DB_CALIB_CHG_TXP, IWM_NUM_TXP_CH_GROUPS);
2358 if (err)
2359 return err;
2360
2361 return 0;
2362 }
2363
2364 /*
2365 * For the high priority TE use a time event type that has similar priority to
2366 * the FW's action scan priority.
2367 */
2368 #define IWM_ROC_TE_TYPE_NORMAL IWM_TE_P2P_DEVICE_DISCOVERABLE
2369 #define IWM_ROC_TE_TYPE_MGMT_TX IWM_TE_P2P_CLIENT_ASSOC
2370
2371 /* used to convert from time event API v2 to v1 */
2372 #define IWM_TE_V2_DEP_POLICY_MSK (IWM_TE_V2_DEP_OTHER | IWM_TE_V2_DEP_TSF |\
2373 IWM_TE_V2_EVENT_SOCIOPATHIC)
2374 static inline uint16_t
2375 iwm_te_v2_get_notify(uint16_t policy)
2376 {
2377 return le16toh(policy) & IWM_TE_V2_NOTIF_MSK;
2378 }
2379
2380 static inline uint16_t
2381 iwm_te_v2_get_dep_policy(uint16_t policy)
2382 {
2383 return (le16toh(policy) & IWM_TE_V2_DEP_POLICY_MSK) >>
2384 IWM_TE_V2_PLACEMENT_POS;
2385 }
2386
2387 static inline uint16_t
2388 iwm_te_v2_get_absence(uint16_t policy)
2389 {
2390 return (le16toh(policy) & IWM_TE_V2_ABSENCE) >> IWM_TE_V2_ABSENCE_POS;
2391 }
2392
2393 static void
2394 iwm_te_v2_to_v1(const struct iwm_time_event_cmd_v2 *cmd_v2,
2395 struct iwm_time_event_cmd_v1 *cmd_v1)
2396 {
2397 cmd_v1->id_and_color = cmd_v2->id_and_color;
2398 cmd_v1->action = cmd_v2->action;
2399 cmd_v1->id = cmd_v2->id;
2400 cmd_v1->apply_time = cmd_v2->apply_time;
2401 cmd_v1->max_delay = cmd_v2->max_delay;
2402 cmd_v1->depends_on = cmd_v2->depends_on;
2403 cmd_v1->interval = cmd_v2->interval;
2404 cmd_v1->duration = cmd_v2->duration;
2405 if (cmd_v2->repeat == IWM_TE_V2_REPEAT_ENDLESS)
2406 cmd_v1->repeat = htole32(IWM_TE_V1_REPEAT_ENDLESS);
2407 else
2408 cmd_v1->repeat = htole32(cmd_v2->repeat);
2409 cmd_v1->max_frags = htole32(cmd_v2->max_frags);
2410 cmd_v1->interval_reciprocal = 0; /* unused */
2411
2412 cmd_v1->dep_policy = htole32(iwm_te_v2_get_dep_policy(cmd_v2->policy));
2413 cmd_v1->is_present = htole32(!iwm_te_v2_get_absence(cmd_v2->policy));
2414 cmd_v1->notify = htole32(iwm_te_v2_get_notify(cmd_v2->policy));
2415 }
2416
2417 static int
2418 iwm_send_time_event_cmd(struct iwm_softc *sc,
2419 const struct iwm_time_event_cmd_v2 *cmd)
2420 {
2421 struct iwm_time_event_cmd_v1 cmd_v1;
2422
2423 if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_TIME_EVENT_API_V2)
2424 return iwm_send_cmd_pdu(sc, IWM_TIME_EVENT_CMD, 0, sizeof(*cmd),
2425 cmd);
2426
2427 iwm_te_v2_to_v1(cmd, &cmd_v1);
2428 return iwm_send_cmd_pdu(sc, IWM_TIME_EVENT_CMD, 0, sizeof(cmd_v1),
2429 &cmd_v1);
2430 }
2431
2432 static void
2433 iwm_protect_session(struct iwm_softc *sc, struct iwm_node *in,
2434 uint32_t duration, uint32_t max_delay)
2435 {
2436 struct iwm_time_event_cmd_v2 time_cmd;
2437
2438 memset(&time_cmd, 0, sizeof(time_cmd));
2439
2440 time_cmd.action = htole32(IWM_FW_CTXT_ACTION_ADD);
2441 time_cmd.id_and_color =
2442 htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
2443 time_cmd.id = htole32(IWM_TE_BSS_STA_AGGRESSIVE_ASSOC);
2444
2445 time_cmd.apply_time = htole32(0);
2446
2447 time_cmd.max_frags = IWM_TE_V2_FRAG_NONE;
2448 time_cmd.max_delay = htole32(max_delay);
2449 	/* TODO: why do we need interval = beacon interval if it is not periodic? */
2450 time_cmd.interval = htole32(1);
2451 time_cmd.duration = htole32(duration);
2452 time_cmd.repeat = 1;
2453 time_cmd.policy
2454 = htole16(IWM_TE_V2_NOTIF_HOST_EVENT_START |
2455 IWM_TE_V2_NOTIF_HOST_EVENT_END |
2456 IWM_T2_V2_START_IMMEDIATELY);
2457
2458 iwm_send_time_event_cmd(sc, &time_cmd);
2459 }
2460
2461 /*
2462 * NVM read access and content parsing. We do not support
2463 * external NVM or writing NVM.
2464 */
2465
2466 /* list of NVM sections we are allowed/need to read */
2467 static const int iwm_nvm_to_read[] = {
2468 IWM_NVM_SECTION_TYPE_HW,
2469 IWM_NVM_SECTION_TYPE_SW,
2470 IWM_NVM_SECTION_TYPE_REGULATORY,
2471 IWM_NVM_SECTION_TYPE_CALIBRATION,
2472 IWM_NVM_SECTION_TYPE_PRODUCTION,
2473 IWM_NVM_SECTION_TYPE_HW_8000,
2474 IWM_NVM_SECTION_TYPE_MAC_OVERRIDE,
2475 IWM_NVM_SECTION_TYPE_PHY_SKU,
2476 };
2477
2478 /* Default NVM chunk size to read */
2479 #define IWM_NVM_DEFAULT_CHUNK_SIZE (2*1024)
2480 #define IWM_MAX_NVM_SECTION_SIZE_7000 (16 * 512 * sizeof(uint16_t)) /*16 KB*/
2481 #define IWM_MAX_NVM_SECTION_SIZE_8000 (32 * 512 * sizeof(uint16_t)) /*32 KB*/
2482
2483 #define IWM_NVM_WRITE_OPCODE 1
2484 #define IWM_NVM_READ_OPCODE 0
2485
2486 static int
2487 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section, uint16_t offset,
2488 uint16_t length, uint8_t *data, uint16_t *len)
2489 {
2491 struct iwm_nvm_access_cmd nvm_access_cmd = {
2492 .offset = htole16(offset),
2493 .length = htole16(length),
2494 .type = htole16(section),
2495 .op_code = IWM_NVM_READ_OPCODE,
2496 };
2497 struct iwm_nvm_access_resp *nvm_resp;
2498 struct iwm_rx_packet *pkt;
2499 struct iwm_host_cmd cmd = {
2500 .id = IWM_NVM_ACCESS_CMD,
2501 .flags = (IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL),
2502 .data = { &nvm_access_cmd, },
2503 };
2504 int err, offset_read;
2505 size_t bytes_read;
2506 uint8_t *resp_data;
2507
2508 cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
2509
2510 err = iwm_send_cmd(sc, &cmd);
2511 if (err) {
2512 DPRINTF(("%s: Could not send NVM_ACCESS command (error=%d)\n",
2513 DEVNAME(sc), err));
2514 return err;
2515 }
2516
2517 pkt = cmd.resp_pkt;
2518 if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
2519 err = EIO;
2520 goto exit;
2521 }
2522
2523 /* Extract NVM response */
2524 nvm_resp = (void *)pkt->data;
2525
2526 err = le16toh(nvm_resp->status);
2527 bytes_read = le16toh(nvm_resp->length);
2528 offset_read = le16toh(nvm_resp->offset);
2529 resp_data = nvm_resp->data;
2530 if (err) {
2531 err = EINVAL;
2532 goto exit;
2533 }
2534
2535 if (offset_read != offset) {
2536 err = EINVAL;
2537 goto exit;
2538 }
2539 if (bytes_read > length) {
2540 err = EINVAL;
2541 goto exit;
2542 }
2543
2544 memcpy(data + offset, resp_data, bytes_read);
2545 *len = bytes_read;
2546
2547 exit:
2548 iwm_free_resp(sc, &cmd);
2549 return err;
2550 }
2551
2552 /*
2553 * Reads an NVM section completely.
2554  * NICs prior to the 7000 family don't have a real NVM; they just read
2555  * section 0, which is the EEPROM. Because EEPROM reads are not bounded
2556  * by the uCode, in that case we must check ourselves that we don't
2557  * overflow and read more than the EEPROM size.
2558 */
2559 static int
2560 iwm_nvm_read_section(struct iwm_softc *sc, uint16_t section, uint8_t *data,
2561 uint16_t *len, size_t max_len)
2562 {
2563 uint16_t chunklen, seglen;
2564 int err;
2565
2566 chunklen = seglen = IWM_NVM_DEFAULT_CHUNK_SIZE;
2567 *len = 0;
2568
2569 /* Read NVM chunks until exhausted (reading less than requested) */
2570 while (seglen == chunklen && *len < max_len) {
2571 err = iwm_nvm_read_chunk(sc, section, *len, chunklen, data,
2572 &seglen);
2573 if (err) {
2574 DPRINTF(("%s: Cannot read NVM from section %d "
2575 "offset %d, length %d\n",
2576 DEVNAME(sc), section, *len, chunklen));
2577 return err;
2578 }
2579 *len += seglen;
2580 }
2581
2582 DPRINTFN(4, ("NVM section %d read completed\n", section));
2583 return 0;
2584 }
2585
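/*
 * The usable antenna set is the set supported by the firmware image
 * (from sc_fw_phy_config) intersected with the set the NVM marks
 * valid, as a bitmask with one bit per RF chain (e.g. 0x3 on a
 * two-chain device).
 */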
2586 static uint8_t
2587 iwm_fw_valid_tx_ant(struct iwm_softc *sc)
2588 {
2589 uint8_t tx_ant;
2590
2591 tx_ant = ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_TX_CHAIN)
2592 >> IWM_FW_PHY_CFG_TX_CHAIN_POS);
2593
2594 if (sc->sc_nvm.valid_tx_ant)
2595 tx_ant &= sc->sc_nvm.valid_tx_ant;
2596
2597 return tx_ant;
2598 }
2599
2600 static uint8_t
2601 iwm_fw_valid_rx_ant(struct iwm_softc *sc)
2602 {
2603 uint8_t rx_ant;
2604
2605 rx_ant = ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RX_CHAIN)
2606 >> IWM_FW_PHY_CFG_RX_CHAIN_POS);
2607
2608 if (sc->sc_nvm.valid_rx_ant)
2609 rx_ant &= sc->sc_nvm.valid_rx_ant;
2610
2611 return rx_ant;
2612 }
2613
2614 static void
2615 iwm_init_channel_map(struct iwm_softc *sc, const uint16_t * const nvm_ch_flags,
2616 const uint8_t *nvm_channels, size_t nchan)
2617 {
2618 struct ieee80211com *ic = &sc->sc_ic;
2619 struct iwm_nvm_data *data = &sc->sc_nvm;
2620 int ch_idx;
2621 struct ieee80211_channel *channel;
2622 uint16_t ch_flags;
2623 int is_5ghz;
2624 int flags, hw_value;
2625
2626 for (ch_idx = 0; ch_idx < nchan; ch_idx++) {
2627 ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
2628 aprint_debug_dev(sc->sc_dev,
2629 "Ch. %d: %svalid %cibss %s %cradar %cdfs"
2630 " %cwide %c40MHz %c80MHz %c160MHz\n",
2631 nvm_channels[ch_idx],
2632 ch_flags & IWM_NVM_CHANNEL_VALID ? "" : "in",
2633 ch_flags & IWM_NVM_CHANNEL_IBSS ? '+' : '-',
2634 ch_flags & IWM_NVM_CHANNEL_ACTIVE ? "active" : "passive",
2635 ch_flags & IWM_NVM_CHANNEL_RADAR ? '+' : '-',
2636 ch_flags & IWM_NVM_CHANNEL_DFS ? '+' : '-',
2637 ch_flags & IWM_NVM_CHANNEL_WIDE ? '+' : '-',
2638 ch_flags & IWM_NVM_CHANNEL_40MHZ ? '+' : '-',
2639 ch_flags & IWM_NVM_CHANNEL_80MHZ ? '+' : '-',
2640 ch_flags & IWM_NVM_CHANNEL_160MHZ ? '+' : '-');
2641
2642 if (ch_idx >= IWM_NUM_2GHZ_CHANNELS &&
2643 !data->sku_cap_band_52GHz_enable)
2644 ch_flags &= ~IWM_NVM_CHANNEL_VALID;
2645
2646 if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
2647 DPRINTF(("Ch. %d Flags %x [%sGHz] - No traffic\n",
2648 nvm_channels[ch_idx], ch_flags,
2649 (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ? "5" : "2.4"));
2650 continue;
2651 }
2652
2653 hw_value = nvm_channels[ch_idx];
2654 channel = &ic->ic_channels[hw_value];
2655
2656 is_5ghz = ch_idx >= IWM_NUM_2GHZ_CHANNELS;
2657 if (!is_5ghz) {
2658 flags = IEEE80211_CHAN_2GHZ;
2659 channel->ic_flags
2660 = IEEE80211_CHAN_CCK
2661 | IEEE80211_CHAN_OFDM
2662 | IEEE80211_CHAN_DYN
2663 | IEEE80211_CHAN_2GHZ;
2664 } else {
2665 flags = IEEE80211_CHAN_5GHZ;
2666 channel->ic_flags =
2667 IEEE80211_CHAN_A;
2668 }
2669 channel->ic_freq = ieee80211_ieee2mhz(hw_value, flags);
2670
2671 if (!(ch_flags & IWM_NVM_CHANNEL_ACTIVE))
2672 channel->ic_flags |= IEEE80211_CHAN_PASSIVE;
2673
2674 #ifndef IEEE80211_NO_HT
2675 if (data->sku_cap_11n_enable)
2676 channel->ic_flags |= IEEE80211_CHAN_HT;
2677 #endif
2678 }
2679 }
2680
2681 #ifndef IEEE80211_NO_HT
2682 static void
2683 iwm_setup_ht_rates(struct iwm_softc *sc)
2684 {
2685 struct ieee80211com *ic = &sc->sc_ic;
2686
2687 /* TX is supported with the same MCS as RX. */
2688 ic->ic_tx_mcs_set = IEEE80211_TX_MCS_SET_DEFINED;
2689
2690 ic->ic_sup_mcs[0] = 0xff; /* MCS 0-7 */
2691
2692 #ifdef notyet
2693 if (sc->sc_nvm.sku_cap_mimo_disable)
2694 return;
2695
2696 if (iwm_fw_valid_rx_ant(sc) > 1)
2697 ic->ic_sup_mcs[1] = 0xff; /* MCS 8-15 */
2698 if (iwm_fw_valid_rx_ant(sc) > 2)
2699 ic->ic_sup_mcs[2] = 0xff; /* MCS 16-23 */
2700 #endif
2701 }
2702
2703 #define IWM_MAX_RX_BA_SESSIONS 16
2704
2705 static void
2706 iwm_sta_rx_agg(struct iwm_softc *sc, struct ieee80211_node *ni, uint8_t tid,
2707 uint16_t ssn, int start)
2708 {
2709 struct ieee80211com *ic = &sc->sc_ic;
2710 struct iwm_add_sta_cmd_v7 cmd;
2711 struct iwm_node *in = (struct iwm_node *)ni;
2712 int err, s;
2713 uint32_t status;
2714
2715 if (start && sc->sc_rx_ba_sessions >= IWM_MAX_RX_BA_SESSIONS) {
2716 ieee80211_addba_req_refuse(ic, ni, tid);
2717 return;
2718 }
2719
2720 memset(&cmd, 0, sizeof(cmd));
2721
2722 cmd.sta_id = IWM_STATION_ID;
2723 cmd.mac_id_n_color
2724 = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
2725 cmd.add_modify = IWM_STA_MODE_MODIFY;
2726
2727 if (start) {
2728 cmd.add_immediate_ba_tid = (uint8_t)tid;
2729 cmd.add_immediate_ba_ssn = ssn;
2730 } else {
2731 cmd.remove_immediate_ba_tid = (uint8_t)tid;
2732 }
2733 cmd.modify_mask = start ? IWM_STA_MODIFY_ADD_BA_TID :
2734 IWM_STA_MODIFY_REMOVE_BA_TID;
2735
2736 status = IWM_ADD_STA_SUCCESS;
2737 err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(cmd), &cmd,
2738 &status);
2739
2740 s = splnet();
2741 if (err == 0 && status == IWM_ADD_STA_SUCCESS) {
2742 if (start) {
2743 sc->sc_rx_ba_sessions++;
2744 ieee80211_addba_req_accept(ic, ni, tid);
2745 } else if (sc->sc_rx_ba_sessions > 0)
2746 sc->sc_rx_ba_sessions--;
2747 } else if (start)
2748 ieee80211_addba_req_refuse(ic, ni, tid);
2749 splx(s);
2750 }
2751
2752 static void
2753 iwm_htprot_task(void *arg)
2754 {
2755 struct iwm_softc *sc = arg;
2756 struct ieee80211com *ic = &sc->sc_ic;
2757 struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
2758 int err;
2759
2760 /* This call updates HT protection based on in->in_ni.ni_htop1. */
2761 err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 1);
2762 if (err)
2763 aprint_error_dev(sc->sc_dev,
2764 "could not change HT protection: error %d\n", err);
2765 }
2766
2767 /*
2768  * This function is called by the upper layer when HT protection settings in
2769 * beacons have changed.
2770 */
2771 static void
2772 iwm_update_htprot(struct ieee80211com *ic, struct ieee80211_node *ni)
2773 {
2774 struct iwm_softc *sc = ic->ic_softc;
2775
2776 /* assumes that ni == ic->ic_bss */
2777 task_add(systq, &sc->htprot_task);
2778 }
2779
2780 static void
2781 iwm_ba_task(void *arg)
2782 {
2783 struct iwm_softc *sc = arg;
2784 struct ieee80211com *ic = &sc->sc_ic;
2785 struct ieee80211_node *ni = ic->ic_bss;
2786
2787 if (sc->ba_start)
2788 iwm_sta_rx_agg(sc, ni, sc->ba_tid, sc->ba_ssn, 1);
2789 else
2790 iwm_sta_rx_agg(sc, ni, sc->ba_tid, 0, 0);
2791 }
2792
2793 /*
2794  * This function is called by the upper layer when an ADDBA request is received
2795 * from another STA and before the ADDBA response is sent.
2796 */
2797 static int
2798 iwm_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
2799 uint8_t tid)
2800 {
2801 struct ieee80211_rx_ba *ba = &ni->ni_rx_ba[tid];
2802 struct iwm_softc *sc = IC2IFP(ic)->if_softc;
2803
2804 if (sc->sc_rx_ba_sessions >= IWM_MAX_RX_BA_SESSIONS)
2805 return ENOSPC;
2806
2807 sc->ba_start = 1;
2808 sc->ba_tid = tid;
2809 sc->ba_ssn = htole16(ba->ba_winstart);
2810 task_add(systq, &sc->ba_task);
2811
2812 return EBUSY;
2813 }
2814
2815 /*
2816  * This function is called by the upper layer on teardown of an HT-immediate
2817  * Block Ack agreement (e.g. upon receipt of a DELBA frame).
2818 */
2819 static void
2820 iwm_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
2821 uint8_t tid)
2822 {
2823 struct iwm_softc *sc = IC2IFP(ic)->if_softc;
2824
2825 sc->ba_start = 0;
2826 sc->ba_tid = tid;
2827 task_add(systq, &sc->ba_task);
2828 }
2829 #endif
2830
2831 static void
2832 iwm_free_fw_paging(struct iwm_softc *sc)
2833 {
2834 int i;
2835
2836 if (sc->fw_paging_db[0].fw_paging_block.vaddr == NULL)
2837 return;
2838
2839 for (i = 0; i < IWM_NUM_OF_FW_PAGING_BLOCKS; i++) {
2840 iwm_dma_contig_free(&sc->fw_paging_db[i].fw_paging_block);
2841 }
2842
2843 memset(sc->fw_paging_db, 0, sizeof(sc->fw_paging_db));
2844 }
2845
2846 static int
2847 iwm_fill_paging_mem(struct iwm_softc *sc, const struct iwm_fw_sects *fws)
2848 {
2849 int sec_idx, idx;
2850 uint32_t offset = 0;
2851
2852 /*
2853 	 * Find where the paging image starts. If CPU2 exists and uses the
2854 	 * paging format, the image looks like this:
2855 	 * CPU1 sections (2 or more)
2856 	 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates CPU1 from CPU2
2857 	 * CPU2 sections (not paged)
2858 	 * PAGING_SEPARATOR_SECTION delimiter - separates the non-paged
2859 	 * CPU2 sections from the CPU2 paging sections
2860 	 * CPU2 paging CSS
2861 	 * CPU2 paging image (instructions and data)
2862 */
2863 for (sec_idx = 0; sec_idx < IWM_UCODE_SECT_MAX; sec_idx++) {
2864 if (fws->fw_sect[sec_idx].fws_devoff ==
2865 IWM_PAGING_SEPARATOR_SECTION) {
2866 sec_idx++;
2867 break;
2868 }
2869 }
2870
2871 /*
2872 * If paging is enabled there should be at least 2 more sections left
2873 * (one for CSS and one for Paging data)
2874 */
2875 if (sec_idx >= __arraycount(fws->fw_sect) - 1) {
2876 aprint_verbose_dev(sc->sc_dev,
2877 "Paging: Missing CSS and/or paging sections\n");
2878 iwm_free_fw_paging(sc);
2879 return EINVAL;
2880 }
2881
2882 	/* Copy the CSS block to DRAM. */
2883 DPRINTF(("%s: Paging: load paging CSS to FW, sec = %d\n", DEVNAME(sc),
2884 sec_idx));
2885
2886 memcpy(sc->fw_paging_db[0].fw_paging_block.vaddr,
2887 fws->fw_sect[sec_idx].fws_data, sc->fw_paging_db[0].fw_paging_size);
2888
2889 DPRINTF(("%s: Paging: copied %d CSS bytes to first block\n",
2890 DEVNAME(sc), sc->fw_paging_db[0].fw_paging_size));
2891
2892 sec_idx++;
2893
2894 /*
2895 	 * Copy the paging blocks to DRAM. The loop starts at index 1
2896 	 * because the CSS block (index 0) was already copied above, and
2897 	 * it stops at num_of_paging_blk because the last block may not
2898 	 * be full and is copied separately below.
2899 */
2900 for (idx = 1; idx < sc->num_of_paging_blk; idx++) {
2901 memcpy(sc->fw_paging_db[idx].fw_paging_block.vaddr,
2902 (const char *)fws->fw_sect[sec_idx].fws_data + offset,
2903 sc->fw_paging_db[idx].fw_paging_size);
2904
2905 DPRINTF(("%s: Paging: copied %d paging bytes to block %d\n",
2906 DEVNAME(sc), sc->fw_paging_db[idx].fw_paging_size, idx));
2907
2908 offset += sc->fw_paging_db[idx].fw_paging_size;
2909 }
2910
2911 /* copy the last paging block */
2912 if (sc->num_of_pages_in_last_blk > 0) {
2913 memcpy(sc->fw_paging_db[idx].fw_paging_block.vaddr,
2914 (const char *)fws->fw_sect[sec_idx].fws_data + offset,
2915 IWM_FW_PAGING_SIZE * sc->num_of_pages_in_last_blk);
2916
2917 DPRINTF(("%s: Paging: copied %d pages in the last block %d\n",
2918 DEVNAME(sc), sc->num_of_pages_in_last_blk, idx));
2919 }
2920
2921 return 0;
2922 }
2923
2924 static int
2925 iwm_alloc_fw_paging_mem(struct iwm_softc *sc, const struct iwm_fw_sects *fws)
2926 {
2927 int blk_idx = 0;
2928 int error, num_of_pages;
2929 bus_dmamap_t dmap;
2930
2931 if (sc->fw_paging_db[0].fw_paging_block.vaddr != NULL) {
2932 int i;
2933 /* Device got reset, and we setup firmware paging again */
2934 for (i = 0; i < sc->num_of_paging_blk + 1; i++) {
2935 dmap = sc->fw_paging_db[i].fw_paging_block.map;
2936 bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
2937 BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
2938 }
2939 return 0;
2940 }
2941
2942 	/* Ensure that IWM_PAGING_BLOCK_SIZE == 2^IWM_BLOCK_2_EXP_SIZE. */
2943 CTASSERT(__BIT(IWM_BLOCK_2_EXP_SIZE) == IWM_PAGING_BLOCK_SIZE);
2944
2945 num_of_pages = fws->paging_mem_size / IWM_FW_PAGING_SIZE;
2946 sc->num_of_paging_blk =
2947 howmany(num_of_pages, IWM_NUM_OF_PAGE_PER_GROUP);
2948 sc->num_of_pages_in_last_blk = num_of_pages -
2949 IWM_NUM_OF_PAGE_PER_GROUP * (sc->num_of_paging_blk - 1);
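	/*
	 * Example: a 35-page image yields howmany(35, 8) = 5 blocks, with
	 * 35 - 8 * 4 = 3 pages left over for the last block.
	 */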
2950
2951 DPRINTF(("%s: Paging: allocating mem for %d paging blocks, "
2952 "each block holds 8 pages, last block holds %d pages\n",
2953 DEVNAME(sc), sc->num_of_paging_blk, sc->num_of_pages_in_last_blk));
2954
2955 	/* Allocate a 4 KB block for the paging CSS. */
2956 error = iwm_dma_contig_alloc(sc->sc_dmat,
2957 &sc->fw_paging_db[blk_idx].fw_paging_block, IWM_FW_PAGING_SIZE,
2958 4096);
2959 if (error) {
2960 /* free all the previous pages since we failed */
2961 iwm_free_fw_paging(sc);
2962 return ENOMEM;
2963 }
2964
2965 sc->fw_paging_db[blk_idx].fw_paging_size = IWM_FW_PAGING_SIZE;
2966
2967 DPRINTF(("%s: Paging: allocated 4K(CSS) bytes for firmware paging.\n",
2968 DEVNAME(sc)));
2969
2970 /*
2971 	 * Allocate the data blocks in DRAM. The loop starts at index 1
2972 	 * because the CSS block was already allocated in fw_paging_db[0].
2973 */
2974 for (blk_idx = 1; blk_idx < sc->num_of_paging_blk + 1; blk_idx++) {
2975 		/* Allocate a block of IWM_PAGING_BLOCK_SIZE (32 KB). */
2977 error = iwm_dma_contig_alloc(sc->sc_dmat,
2978 &sc->fw_paging_db[blk_idx].fw_paging_block,
2979 IWM_PAGING_BLOCK_SIZE, 4096);
2980 if (error) {
2981 /* free all the previous pages since we failed */
2982 iwm_free_fw_paging(sc);
2983 return ENOMEM;
2984 }
2985
2986 sc->fw_paging_db[blk_idx].fw_paging_size =
2987 IWM_PAGING_BLOCK_SIZE;
2988
2989 DPRINTF(("%s: Paging: allocated 32K bytes for firmware "
2990 "paging.\n", DEVNAME(sc)));
2991 }
2992
2993 return 0;
2994 }
2995
2996 static int
2997 iwm_save_fw_paging(struct iwm_softc *sc, const struct iwm_fw_sects *fws)
2998 {
2999 int err;
3000
3001 err = iwm_alloc_fw_paging_mem(sc, fws);
3002 if (err)
3003 return err;
3004
3005 return iwm_fill_paging_mem(sc, fws);
3006 }
3007
3008 static bool
3009 iwm_has_new_tx_api(struct iwm_softc *sc)
3010 {
3011 /* XXX */
3012 return false;
3013 }
3014
3015 /* Send the paging command to the firmware if CPU2 has a paging image. */
3016 static int
3017 iwm_send_paging_cmd(struct iwm_softc *sc, const struct iwm_fw_sects *fws)
3018 {
3019 struct iwm_fw_paging_cmd fw_paging_cmd = {
3020 .flags = htole32(IWM_PAGING_CMD_IS_SECURED |
3021 IWM_PAGING_CMD_IS_ENABLED |
3022 (sc->num_of_pages_in_last_blk <<
3023 IWM_PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS)),
3024 .block_size = htole32(IWM_BLOCK_2_EXP_SIZE),
3025 .block_num = htole32(sc->num_of_paging_blk),
3026 };
3027 size_t size = sizeof(fw_paging_cmd);
3028 int blk_idx;
3029 bus_dmamap_t dmap;
3030
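	/*
	 * Older firmware interfaces take 32-bit block addresses, so trim
	 * the unused upper halves of the 64-bit address slots off the
	 * command size.
	 */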
3031 if (!iwm_has_new_tx_api(sc))
3032 size -= (sizeof(uint64_t) - sizeof(uint32_t)) *
3033 IWM_NUM_OF_FW_PAGING_BLOCKS;
3034
3035 	/* Loop over all paging blocks plus the CSS block. */
3036 for (blk_idx = 0; blk_idx < sc->num_of_paging_blk + 1; blk_idx++) {
3037 bus_addr_t dev_phy_addr =
3038 sc->fw_paging_db[blk_idx].fw_paging_block.paddr;
3039 if (iwm_has_new_tx_api(sc)) {
3040 fw_paging_cmd.device_phy_addr.addr64[blk_idx] =
3041 htole64(dev_phy_addr);
3042 } else {
3043 dev_phy_addr = dev_phy_addr >> IWM_PAGE_2_EXP_SIZE;
3044 fw_paging_cmd.device_phy_addr.addr32[blk_idx] =
3045 htole32(dev_phy_addr);
3046 }
3047 dmap = sc->fw_paging_db[blk_idx].fw_paging_block.map;
3048 bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
3049 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
3050 }
3051
3052 return iwm_send_cmd_pdu(sc,
3053 iwm_cmd_id(IWM_FW_PAGING_BLOCK_CMD, IWM_ALWAYS_LONG_GROUP, 0),
3054 0, size, &fw_paging_cmd);
3055 }
3056
3057 static void
3058 iwm_set_hw_address_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
3059 const uint16_t *mac_override, const uint16_t *nvm_hw)
3060 {
3061 static const uint8_t reserved_mac[ETHER_ADDR_LEN] = {
3062 0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
3063 };
3064 static const u_int8_t etheranyaddr[ETHER_ADDR_LEN] = {
3065 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
3066 };
3067 const uint8_t *hw_addr;
3068
3069 if (mac_override) {
3070 hw_addr = (const uint8_t *)(mac_override +
3071 IWM_MAC_ADDRESS_OVERRIDE_8000);
3072
3073 /*
3074 		 * Store the MAC address from the MAO section.
3075 		 * No byte swapping is required in the MAO section.
3076 */
3077 memcpy(data->hw_addr, hw_addr, ETHER_ADDR_LEN);
3078
3079 /*
3080 * Force the use of the OTP MAC address in case of reserved MAC
3081 		 * address in the NVM, or if the address is given but invalid.
3082 */
3083 if (memcmp(reserved_mac, hw_addr, ETHER_ADDR_LEN) != 0 &&
3084 (memcmp(etherbroadcastaddr, data->hw_addr,
3085 sizeof(etherbroadcastaddr)) != 0) &&
3086 (memcmp(etheranyaddr, data->hw_addr,
3087 sizeof(etheranyaddr)) != 0) &&
3088 !ETHER_IS_MULTICAST(data->hw_addr))
3089 return;
3090 }
3091
3092 if (nvm_hw) {
3093 /* Read the mac address from WFMP registers. */
3094 uint32_t mac_addr0 =
3095 htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
3096 uint32_t mac_addr1 =
3097 htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));
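
		/*
		 * Each WFMP word holds its octets most-significant first:
		 * e.g. IWM_WFMP_MAC_ADDR_0 == 0xaabbccdd yields the first
		 * four octets aa:bb:cc:dd, and the low 16 bits of
		 * IWM_WFMP_MAC_ADDR_1 yield the last two; hence the byte
		 * reversal below.
		 */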
3098
3099 hw_addr = (const uint8_t *)&mac_addr0;
3100 data->hw_addr[0] = hw_addr[3];
3101 data->hw_addr[1] = hw_addr[2];
3102 data->hw_addr[2] = hw_addr[1];
3103 data->hw_addr[3] = hw_addr[0];
3104
3105 hw_addr = (const uint8_t *)&mac_addr1;
3106 data->hw_addr[4] = hw_addr[1];
3107 data->hw_addr[5] = hw_addr[0];
3108
3109 return;
3110 }
3111
3112 aprint_error_dev(sc->sc_dev, "mac address not found\n");
3113 memset(data->hw_addr, 0, sizeof(data->hw_addr));
3114 }
3115
3116 static int
3117 iwm_parse_nvm_data(struct iwm_softc *sc, const uint16_t *nvm_hw,
3118 const uint16_t *nvm_sw, const uint16_t *nvm_calib,
3119 const uint16_t *mac_override, const uint16_t *phy_sku,
3120 const uint16_t *regulatory)
3121 {
3122 struct iwm_nvm_data *data = &sc->sc_nvm;
3123 uint8_t hw_addr[ETHER_ADDR_LEN];
3124 uint32_t sku;
3125
3126 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
3127 uint16_t radio_cfg = le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
3128 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
3129 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
3130 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
3131 data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
3132
3133 data->nvm_version = le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
3134 sku = le16_to_cpup(nvm_sw + IWM_SKU);
3135 } else {
3136 uint32_t radio_cfg = le32_to_cpup(phy_sku + IWM_RADIO_CFG_8000);
3137 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
3138 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
3139 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
3140 data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK_8000(radio_cfg);
3141 data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
3142 data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
3143
3144 data->nvm_version = le32_to_cpup(nvm_sw + IWM_NVM_VERSION_8000);
3145 sku = le32_to_cpup(phy_sku + IWM_SKU_8000);
3146 }
3147
3148 data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
3149 data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
3150 data->sku_cap_11n_enable = sku & IWM_NVM_SKU_CAP_11N_ENABLE;
3151 data->sku_cap_mimo_disable = sku & IWM_NVM_SKU_CAP_MIMO_DISABLE;
3152
3153 data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
3154
3155 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
3156 memcpy(hw_addr, nvm_hw + IWM_HW_ADDR, ETHER_ADDR_LEN);
3157 data->hw_addr[0] = hw_addr[1];
3158 data->hw_addr[1] = hw_addr[0];
3159 data->hw_addr[2] = hw_addr[3];
3160 data->hw_addr[3] = hw_addr[2];
3161 data->hw_addr[4] = hw_addr[5];
3162 data->hw_addr[5] = hw_addr[4];
3163 } else
3164 iwm_set_hw_address_8000(sc, data, mac_override, nvm_hw);
3165
3166 if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
3167 uint16_t lar_offset, lar_config;
3168 lar_offset = data->nvm_version < 0xE39 ?
3169 IWM_NVM_LAR_OFFSET_8000_OLD : IWM_NVM_LAR_OFFSET_8000;
3170 lar_config = le16_to_cpup(regulatory + lar_offset);
3171 data->lar_enabled = !!(lar_config & IWM_NVM_LAR_ENABLED_8000);
3172 }
3173
3174 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
3175 iwm_init_channel_map(sc, &nvm_sw[IWM_NVM_CHANNELS],
3176 iwm_nvm_channels, __arraycount(iwm_nvm_channels));
3177 else
3178 		iwm_init_channel_map(sc, &regulatory[IWM_NVM_CHANNELS_8000],
3179 iwm_nvm_channels_8000, __arraycount(iwm_nvm_channels_8000));
3180
3181 	data->calib_version = 255;	/* TODO:
3182 					   this value will prevent some checks
3183 					   from failing; we need to check whether
3184 					   this field is still needed, and if it
3185 					   is, where it lives in the NVM */
3186
3187 return 0;
3188 }
3189
3190 static int
3191 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
3192 {
3193 const uint16_t *hw, *sw, *calib, *mac_override = NULL, *phy_sku = NULL;
3194 const uint16_t *regulatory = NULL;
3195
3196 	/* Check for the required sections. */
3197 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
3198 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
3199 !sections[IWM_NVM_SECTION_TYPE_HW].data) {
3200 return ENOENT;
3201 }
3202
3203 hw = (const uint16_t *) sections[IWM_NVM_SECTION_TYPE_HW].data;
3204 } else if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
3205 /* SW and REGULATORY sections are mandatory */
3206 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
3207 !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
3208 return ENOENT;
3209 }
3210 /* MAC_OVERRIDE or at least HW section must exist */
3211 if (!sections[IWM_NVM_SECTION_TYPE_HW_8000].data &&
3212 !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
3213 return ENOENT;
3214 }
3215
3216 /* PHY_SKU section is mandatory in B0 */
3217 if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
3218 return ENOENT;
3219 }
3220
3221 regulatory = (const uint16_t *)
3222 sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
3223 hw = (const uint16_t *)
3224 sections[IWM_NVM_SECTION_TYPE_HW_8000].data;
3225 mac_override =
3226 (const uint16_t *)
3227 sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
3228 phy_sku = (const uint16_t *)
3229 sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
3230 } else {
3231 panic("unknown device family %d\n", sc->sc_device_family);
3232 }
3233
3234 sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
3235 calib = (const uint16_t *)
3236 sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
3237
3238 return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
3239 phy_sku, regulatory);
3240 }
3241
3242 static int
3243 iwm_nvm_init(struct iwm_softc *sc)
3244 {
3245 struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
3246 int i, section, err;
3247 uint16_t len;
3248 uint8_t *buf;
3249 const size_t bufsz = (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) ?
3250 IWM_MAX_NVM_SECTION_SIZE_8000 : IWM_MAX_NVM_SECTION_SIZE_7000;
3251
3252 	/* Read the NVM from the firmware. */
3253 DPRINTF(("Read NVM\n"));
3254
3255 memset(nvm_sections, 0, sizeof(nvm_sections));
3256
3257 buf = kmem_alloc(bufsz, KM_SLEEP);
3258
3259 for (i = 0; i < __arraycount(iwm_nvm_to_read); i++) {
3260 section = iwm_nvm_to_read[i];
3261 		KASSERT(section < IWM_NVM_NUM_OF_SECTIONS);
3262
3263 err = iwm_nvm_read_section(sc, section, buf, &len, bufsz);
3264 if (err) {
3265 err = 0;
3266 continue;
3267 }
3268 nvm_sections[section].data = kmem_alloc(len, KM_SLEEP);
3269 memcpy(nvm_sections[section].data, buf, len);
3270 nvm_sections[section].length = len;
3271 }
3272 kmem_free(buf, bufsz);
3273 if (err == 0)
3274 err = iwm_parse_nvm_sections(sc, nvm_sections);
3275
3276 for (i = 0; i < IWM_NVM_NUM_OF_SECTIONS; i++) {
3277 if (nvm_sections[i].data != NULL)
3278 kmem_free(nvm_sections[i].data, nvm_sections[i].length);
3279 }
3280
3281 return err;
3282 }
3283
3284 static int
3285 iwm_firmware_load_sect(struct iwm_softc *sc, uint32_t dst_addr,
3286 const uint8_t *section, uint32_t byte_cnt)
3287 {
3288 int err = EINVAL;
3289 uint32_t chunk_sz, offset;
3290
3291 chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, byte_cnt);
3292
3293 for (offset = 0; offset < byte_cnt; offset += chunk_sz) {
3294 uint32_t addr, len;
3295 const uint8_t *data;
3296 bool is_extended = false;
3297
3298 addr = dst_addr + offset;
3299 len = MIN(chunk_sz, byte_cnt - offset);
3300 data = section + offset;
3301
3302 if (addr >= IWM_FW_MEM_EXTENDED_START &&
3303 addr <= IWM_FW_MEM_EXTENDED_END)
3304 is_extended = true;
3305
3306 if (is_extended)
3307 iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
3308 IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
3309
3310 err = iwm_firmware_load_chunk(sc, addr, data, len);
3311
3312 if (is_extended)
3313 iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
3314 IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
3315
3316 if (err)
3317 break;
3318 }
3319
3320 return err;
3321 }
3322
3323 static int
3324 iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
3325 const uint8_t *section, uint32_t byte_cnt)
3326 {
3327 struct iwm_dma_info *dma = &sc->fw_dma;
3328 int err;
3329
3330 /* Copy firmware chunk into pre-allocated DMA-safe memory. */
3331 memcpy(dma->vaddr, section, byte_cnt);
3332 bus_dmamap_sync(sc->sc_dmat, dma->map, 0, byte_cnt,
3333 BUS_DMASYNC_PREWRITE);
3334
3335 sc->sc_fw_chunk_done = 0;
3336
3337 if (!iwm_nic_lock(sc))
3338 return EBUSY;
3339
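	/*
	 * Program the service DMA channel: pause it, point it at the
	 * destination SRAM address and at the chunk in host memory, then
	 * re-enable it so that it interrupts the host when the transfer
	 * completes.
	 */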
3340 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
3341 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
3342 IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
3343 dst_addr);
3344 IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
3345 dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
3346 IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
3347 (iwm_get_dma_hi_addr(dma->paddr)
3348 << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
3349 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
3350 1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
3351 1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
3352 IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
3353 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
3354 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
3355 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
3356 IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
3357
3358 iwm_nic_unlock(sc);
3359
3360 /* Wait for this segment to load. */
3361 err = 0;
3362 while (!sc->sc_fw_chunk_done) {
3363 err = tsleep(&sc->sc_fw, 0, "iwmfw", mstohz(5000));
3364 if (err)
3365 break;
3366 }
3367 if (!sc->sc_fw_chunk_done) {
3368 DPRINTF(("%s: fw chunk addr 0x%x len %d failed to load\n",
3369 DEVNAME(sc), dst_addr, byte_cnt));
3370 }
3371
3372 return err;
3373 }
3374
3375 static int
3376 iwm_load_cpu_sections_7000(struct iwm_softc *sc, struct iwm_fw_sects *fws,
3377 int cpu, int *first_ucode_section)
3378 {
3379 int i, err = 0;
3380 uint32_t last_read_idx = 0;
3381 void *data;
3382 uint32_t dlen;
3383 uint32_t offset;
3384
3385 if (cpu == 1) {
3386 *first_ucode_section = 0;
3387 } else {
3388 (*first_ucode_section)++;
3389 }
3390
3391 for (i = *first_ucode_section; i < IWM_UCODE_SECT_MAX; i++) {
3392 last_read_idx = i;
3393 data = fws->fw_sect[i].fws_data;
3394 dlen = fws->fw_sect[i].fws_len;
3395 offset = fws->fw_sect[i].fws_devoff;
3396
3397 /*
3398 		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates CPU1
3399 		 * sections from CPU2 sections.
3400 		 * PAGING_SEPARATOR_SECTION delimiter - separates non-paged
3401 		 * CPU2 sections from CPU2 paging sections.
3402 */
3403 if (!data || offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
3404 offset == IWM_PAGING_SEPARATOR_SECTION)
3405 break;
3406
3407 if (dlen > sc->sc_fwdmasegsz) {
3408 err = EFBIG;
3409 } else
3410 err = iwm_firmware_load_sect(sc, offset, data, dlen);
3411 if (err) {
3412 DPRINTF(("%s: could not load firmware chunk %d "
3413 "(error %d)\n", DEVNAME(sc), i, err));
3414 return err;
3415 }
3416 }
3417
3418 *first_ucode_section = last_read_idx;
3419
3420 return 0;
3421 }
3422
3423 static int
3424 iwm_load_firmware_7000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3425 {
3426 struct iwm_fw_sects *fws;
3427 int err = 0;
3428 int first_ucode_section;
3429
3430 fws = &sc->sc_fw.fw_sects[ucode_type];
3431
3432 DPRINTF(("%s: working with %s CPU\n", DEVNAME(sc),
3433 fws->is_dual_cpus ? "dual" : "single"));
3434
3435 	/* Load the secured binary sections of CPU1 into the firmware. */
3436 err = iwm_load_cpu_sections_7000(sc, fws, 1, &first_ucode_section);
3437 if (err)
3438 return err;
3439
3440 if (fws->is_dual_cpus) {
3441 /* set CPU2 header address */
3442 if (iwm_nic_lock(sc)) {
3443 iwm_write_prph(sc,
3444 IWM_LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
3445 IWM_LMPM_SECURE_CPU2_HDR_MEM_SPACE);
3446 iwm_nic_unlock(sc);
3447 }
3448
3449 		/* Load the binary sections of CPU2 into the firmware. */
3450 err = iwm_load_cpu_sections_7000(sc, fws, 2,
3451 &first_ucode_section);
3452 if (err)
3453 return err;
3454 }
3455
3456 /* release CPU reset */
3457 IWM_WRITE(sc, IWM_CSR_RESET, 0);
3458
3459 return 0;
3460 }
3461
3462 static int
3463 iwm_load_cpu_sections_8000(struct iwm_softc *sc, struct iwm_fw_sects *fws,
3464 int cpu, int *first_ucode_section)
3465 {
3466 int shift_param;
3467 int i, err = 0, sec_num = 0x1;
3468 uint32_t val, last_read_idx = 0;
3469 void *data;
3470 uint32_t dlen;
3471 uint32_t offset;
3472
3473 if (cpu == 1) {
3474 shift_param = 0;
3475 *first_ucode_section = 0;
3476 } else {
3477 shift_param = 16;
3478 (*first_ucode_section)++;
3479 }
3480
3481 for (i = *first_ucode_section; i < IWM_UCODE_SECT_MAX; i++) {
3482 last_read_idx = i;
3483 data = fws->fw_sect[i].fws_data;
3484 dlen = fws->fw_sect[i].fws_len;
3485 offset = fws->fw_sect[i].fws_devoff;
3486
3487 /*
3488 		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates CPU1
3489 		 * sections from CPU2 sections.
3490 		 * PAGING_SEPARATOR_SECTION delimiter - separates non-paged
3491 		 * CPU2 sections from CPU2 paging sections.
3492 */
3493 if (!data || offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
3494 offset == IWM_PAGING_SEPARATOR_SECTION)
3495 break;
3496
3497 if (dlen > sc->sc_fwdmasegsz) {
3498 err = EFBIG;
3499 } else
3500 err = iwm_firmware_load_sect(sc, offset, data, dlen);
3501 if (err) {
3502 DPRINTF(("%s: could not load firmware chunk %d "
3503 "(error %d)\n", DEVNAME(sc), i, err));
3504 return err;
3505 }
3506
3507 		/* Notify the ucode of the loaded section: a bitmask accumulated in IWM_FH_UCODE_LOAD_STATUS, with CPU2's bits in the upper half-word. */
3508 if (iwm_nic_lock(sc)) {
3509 val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
3510 val = val | (sec_num << shift_param);
3511 IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
3512 sec_num = (sec_num << 1) | 0x1;
3513 iwm_nic_unlock(sc);
3514
3515 /*
3516 * The firmware won't load correctly without this delay.
3517 */
3518 DELAY(8000);
3519 }
3520 }
3521
3522 *first_ucode_section = last_read_idx;
3523
3524 if (iwm_nic_lock(sc)) {
3525 if (cpu == 1)
3526 IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
3527 else
3528 IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
3529 iwm_nic_unlock(sc);
3530 }
3531
3532 return 0;
3533 }
3534
3535 static int
3536 iwm_load_firmware_8000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3537 {
3538 struct iwm_fw_sects *fws;
3539 int err = 0;
3540 int first_ucode_section;
3541
3542 fws = &sc->sc_fw.fw_sects[ucode_type];
3543
3544 /* configure the ucode to be ready to get the secured image */
3545 /* release CPU reset */
3546 if (iwm_nic_lock(sc)) {
3547 iwm_write_prph(sc, IWM_RELEASE_CPU_RESET,
3548 IWM_RELEASE_CPU_RESET_BIT);
3549 iwm_nic_unlock(sc);
3550 }
3551
3552 	/* Load the secured binary sections of CPU1 into the firmware. */
3553 err = iwm_load_cpu_sections_8000(sc, fws, 1, &first_ucode_section);
3554 if (err)
3555 return err;
3556
3557 	/* Load the binary sections of CPU2 into the firmware. */
3558 return iwm_load_cpu_sections_8000(sc, fws, 2, &first_ucode_section);
3559 }
3560
3561 static int
3562 iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3563 {
3564 int err, w;
3565
3566 sc->sc_uc.uc_intr = 0;
3567
3568 if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
3569 err = iwm_load_firmware_8000(sc, ucode_type);
3570 else
3571 err = iwm_load_firmware_7000(sc, ucode_type);
3572 if (err)
3573 return err;
3574
3575 /* wait for the firmware to load */
3576 for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++)
3577 err = tsleep(&sc->sc_uc, 0, "iwmuc", mstohz(100));
3578 if (err || !sc->sc_uc.uc_ok) {
3579 aprint_error_dev(sc->sc_dev,
3580 "could not load firmware (error %d, ok %d)\n",
3581 err, sc->sc_uc.uc_ok);
3582 if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
3583 aprint_error_dev(sc->sc_dev, "cpu1 status: 0x%x\n",
3584 iwm_read_prph(sc, IWM_SB_CPU_1_STATUS));
3585 aprint_error_dev(sc->sc_dev, "cpu2 status: 0x%x\n",
3586 iwm_read_prph(sc, IWM_SB_CPU_2_STATUS));
3587 }
3588 }
3589
3590 return err;
3591 }
3592
3593 static int
3594 iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3595 {
3596 int err;
3597
3598 IWM_WRITE(sc, IWM_CSR_INT, ~0);
3599
3600 err = iwm_nic_init(sc);
3601 if (err) {
3602 aprint_error_dev(sc->sc_dev, "Unable to init nic\n");
3603 return err;
3604 }
3605
3606 /* make sure rfkill handshake bits are cleared */
3607 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
3608 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
3609 IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
3610
3611 /* clear (again), then enable host interrupts */
3612 IWM_WRITE(sc, IWM_CSR_INT, ~0);
3613 iwm_enable_interrupts(sc);
3614
3615 /* really make sure rfkill handshake bits are cleared */
3616 	/* maybe we should write a few more times? just to make sure */
3617 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
3618 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
3619
3620 return iwm_load_firmware(sc, ucode_type);
3621 }
3622
3623 static int
3624 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
3625 {
3626 struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
3627 .valid = htole32(valid_tx_ant),
3628 };
3629
3630 return iwm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD, 0,
3631 sizeof(tx_ant_cmd), &tx_ant_cmd);
3632 }
3633
3634 static int
3635 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
3636 {
3637 struct iwm_phy_cfg_cmd phy_cfg_cmd;
3638 enum iwm_ucode_type ucode_type = sc->sc_uc_current;
3639
3640 phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
3641 phy_cfg_cmd.calib_control.event_trigger =
3642 sc->sc_default_calib[ucode_type].event_trigger;
3643 phy_cfg_cmd.calib_control.flow_trigger =
3644 sc->sc_default_calib[ucode_type].flow_trigger;
3645
3646 DPRINTFN(10, ("Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg));
3647 return iwm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, 0,
3648 sizeof(phy_cfg_cmd), &phy_cfg_cmd);
3649 }
3650
3651 static int
3652 iwm_load_ucode_wait_alive(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3653 {
3654 struct iwm_fw_sects *fws;
3655 enum iwm_ucode_type old_type = sc->sc_uc_current;
3656 int err;
3657
3658 err = iwm_read_firmware(sc, ucode_type);
3659 if (err)
3660 return err;
3661
3662 sc->sc_uc_current = ucode_type;
3663 err = iwm_start_fw(sc, ucode_type);
3664 if (err) {
3665 sc->sc_uc_current = old_type;
3666 return err;
3667 }
3668
3669 err = iwm_post_alive(sc);
3670 if (err)
3671 return err;
3672
3673 fws = &sc->sc_fw.fw_sects[ucode_type];
3674 if (fws->paging_mem_size) {
3675 err = iwm_save_fw_paging(sc, fws);
3676 if (err)
3677 return err;
3678
3679 err = iwm_send_paging_cmd(sc, fws);
3680 if (err) {
3681 iwm_free_fw_paging(sc);
3682 return err;
3683 }
3684 }
3685
3686 return 0;
3687 }
3688
3689 static int
3690 iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
3691 {
3692 int err;
3693
3694 if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
3695 aprint_error_dev(sc->sc_dev,
3696 "radio is disabled by hardware switch\n");
3697 return EPERM;
3698 }
3699
3700 sc->sc_init_complete = 0;
3701 err = iwm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_INIT);
3702 if (err) {
3703 DPRINTF(("%s: failed to load init firmware\n", DEVNAME(sc)));
3704 return err;
3705 }
3706
3707 if (justnvm) {
3708 err = iwm_nvm_init(sc);
3709 if (err) {
3710 aprint_error_dev(sc->sc_dev, "failed to read nvm\n");
3711 return err;
3712 }
3713
3714 memcpy(&sc->sc_ic.ic_myaddr, &sc->sc_nvm.hw_addr,
3715 ETHER_ADDR_LEN);
3716 return 0;
3717 }
3718
3719 err = iwm_send_bt_init_conf(sc);
3720 if (err)
3721 return err;
3722
3723 err = iwm_sf_config(sc, IWM_SF_INIT_OFF);
3724 if (err)
3725 return err;
3726
3727 err = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc));
3728 if (err)
3729 return err;
3730
3731 	/*
3732 	 * Send the phy configuration command to the init uCode to start
3733 	 * the 16.0 uCode init image's internal calibrations.
3734 	 */
3735 err = iwm_send_phy_cfg_cmd(sc);
3736 if (err)
3737 return err;
3738
3739 /*
3740 * Nothing to do but wait for the init complete notification
3741 * from the firmware
3742 */
3743 while (!sc->sc_init_complete) {
3744 err = tsleep(&sc->sc_init_complete, 0, "iwminit", mstohz(2000));
3745 if (err)
3746 break;
3747 }
3748
3749 return err;
3750 }
3751
3752 static int
3753 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
3754 {
3755 struct iwm_rx_ring *ring = &sc->rxq;
3756 struct iwm_rx_data *data = &ring->data[idx];
3757 struct mbuf *m;
3758 int err;
3759 int fatal = 0;
3760
3761 m = m_gethdr(M_DONTWAIT, MT_DATA);
3762 if (m == NULL)
3763 return ENOBUFS;
3764
3765 if (size <= MCLBYTES) {
3766 MCLGET(m, M_DONTWAIT);
3767 } else {
3768 MEXTMALLOC(m, IWM_RBUF_SIZE, M_DONTWAIT);
3769 }
3770 if ((m->m_flags & M_EXT) == 0) {
3771 m_freem(m);
3772 return ENOBUFS;
3773 }
3774
3775 if (data->m != NULL) {
3776 bus_dmamap_unload(sc->sc_dmat, data->map);
3777 fatal = 1;
3778 }
3779
3780 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3781 err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
3782 BUS_DMA_READ|BUS_DMA_NOWAIT);
3783 if (err) {
3784 /* XXX */
3785 if (fatal)
3786 panic("iwm: could not load RX mbuf");
3787 m_freem(m);
3788 return err;
3789 }
3790 data->m = m;
3791 bus_dmamap_sync(sc->sc_dmat, data->map, 0, size, BUS_DMASYNC_PREREAD);
3792
3793 /* Update RX descriptor. */
3794 ring->desc[idx] = htole32(data->map->dm_segs[0].ds_addr >> 8);
3795 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
3796 idx * sizeof(uint32_t), sizeof(uint32_t), BUS_DMASYNC_PREWRITE);
3797
3798 return 0;
3799 }
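/*
 * Note that the RX descriptor written above holds the buffer's DMA
 * address shifted right by 8 bits: the hardware expects 256-byte
 * aligned receive buffers, so only the upper address bits are kept.
 */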
3800
3801 #define IWM_RSSI_OFFSET 50
3802 static int
3803 iwm_calc_rssi(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3804 {
3805 int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
3806 uint32_t agc_a, agc_b;
3807 uint32_t val;
3808
3809 val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_AGC_IDX]);
3810 agc_a = (val & IWM_OFDM_AGC_A_MSK) >> IWM_OFDM_AGC_A_POS;
3811 agc_b = (val & IWM_OFDM_AGC_B_MSK) >> IWM_OFDM_AGC_B_POS;
3812
3813 val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_RSSI_AB_IDX]);
3814 rssi_a = (val & IWM_OFDM_RSSI_INBAND_A_MSK) >> IWM_OFDM_RSSI_A_POS;
3815 rssi_b = (val & IWM_OFDM_RSSI_INBAND_B_MSK) >> IWM_OFDM_RSSI_B_POS;
3816
3817 /*
3818 * dBm = rssi dB - agc dB - constant.
3819 * Higher AGC (higher radio gain) means lower signal.
3820 */
3821 rssi_a_dbm = rssi_a - IWM_RSSI_OFFSET - agc_a;
3822 rssi_b_dbm = rssi_b - IWM_RSSI_OFFSET - agc_b;
3823 max_rssi_dbm = MAX(rssi_a_dbm, rssi_b_dbm);
3824
3825 DPRINTF(("Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
3826 rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b));
3827
3828 return max_rssi_dbm;
3829 }
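/*
 * Illustrative example of the formula above: with rssi_a = 30 and
 * agc_a = 25, the estimate is 30 - 50 (IWM_RSSI_OFFSET) - 25 =
 * -45 dBm; a larger AGC value (more gain applied by the radio)
 * lowers the estimate further.
 */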
3830
3831 /*
3832  * RSSI values are reported by the FW as positive values and must be
3833  * negated to obtain dBm. Account for missing antennas by replacing 0
3834  * values with -256 dBm: practically 0 power and an infeasible 8-bit value.
3835  */
3836 static int
3837 iwm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3838 {
3839 int energy_a, energy_b, energy_c, max_energy;
3840 uint32_t val;
3841
3842 val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
3843 energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3844 IWM_RX_INFO_ENERGY_ANT_A_POS;
3845 energy_a = energy_a ? -energy_a : -256;
3846 energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3847 IWM_RX_INFO_ENERGY_ANT_B_POS;
3848 energy_b = energy_b ? -energy_b : -256;
3849 energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3850 IWM_RX_INFO_ENERGY_ANT_C_POS;
3851 energy_c = energy_c ? -energy_c : -256;
3852 max_energy = MAX(energy_a, energy_b);
3853 max_energy = MAX(max_energy, energy_c);
3854
3855 DPRINTFN(12, ("energy In A %d B %d C %d, and max %d\n",
3856 energy_a, energy_b, energy_c, max_energy));
3857
3858 return max_energy;
3859 }
3860
3861 static void
3862 iwm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3863 struct iwm_rx_data *data)
3864 {
3865 struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
3866
3867 DPRINTFN(20, ("received PHY stats\n"));
3868 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
3869 sizeof(*phy_info), BUS_DMASYNC_POSTREAD);
3870
3871 memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3872 }
3873
3874 /*
3875 * Retrieve the average noise (in dBm) among receivers.
3876 */
3877 static int
3878 iwm_get_noise(const struct iwm_statistics_rx_non_phy *stats)
3879 {
3880 int i, total, nbant, noise;
3881
3882 total = nbant = noise = 0;
3883 for (i = 0; i < 3; i++) {
3884 noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
3885 if (noise) {
3886 total += noise;
3887 nbant++;
3888 }
3889 }
3890
3891 /* There should be at least one antenna but check anyway. */
3892 return (nbant == 0) ? -127 : (total / nbant) - 107;
3893 }
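/*
 * Illustrative example: beacon_silence_rssi values of { 40, 42, 0 }
 * give total = 82 over nbant = 2 (the zero entry counts as a missing
 * antenna), so the returned noise floor is 82 / 2 - 107 = -66 dBm.
 */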
3894
3895 static void
3896 iwm_rx_rx_mpdu(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3897 struct iwm_rx_data *data)
3898 {
3899 struct ieee80211com *ic = &sc->sc_ic;
3900 struct ieee80211_frame *wh;
3901 struct ieee80211_node *ni;
3902 struct ieee80211_channel *c = NULL;
3903 struct mbuf *m;
3904 struct iwm_rx_phy_info *phy_info;
3905 struct iwm_rx_mpdu_res_start *rx_res;
3906 int device_timestamp;
3907 uint32_t len;
3908 uint32_t rx_pkt_status;
3909 int rssi;
3910 int s;
3911
3912 bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
3913 BUS_DMASYNC_POSTREAD);
3914
3915 phy_info = &sc->sc_last_phy_info;
3916 rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
3917 wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
3918 len = le16toh(rx_res->byte_count);
3919 rx_pkt_status = le32toh(*(uint32_t *)(pkt->data +
3920 sizeof(*rx_res) + len));
3921
3922 m = data->m;
3923 m->m_data = pkt->data + sizeof(*rx_res);
3924 m->m_pkthdr.len = m->m_len = len;
3925
3926 if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
3927 DPRINTF(("dsp size out of range [0,20]: %d\n",
3928 phy_info->cfg_phy_cnt));
3929 return;
3930 }
3931
3932 if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
3933 !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
3934 DPRINTF(("Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status));
3935 return; /* drop */
3936 }
3937
3938 device_timestamp = le32toh(phy_info->system_timestamp);
3939
3940 if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_RX_ENERGY_API) {
3941 rssi = iwm_get_signal_strength(sc, phy_info);
3942 } else {
3943 rssi = iwm_calc_rssi(sc, phy_info);
3944 }
3945 rssi = -rssi;
3946
3947 if (ic->ic_state == IEEE80211_S_SCAN)
3948 iwm_fix_channel(sc, m);
3949
3950 if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0)
3951 return;
3952
3953 m_set_rcvif(m, IC2IFP(ic));
3954
3955 if (le32toh(phy_info->channel) < __arraycount(ic->ic_channels))
3956 c = &ic->ic_channels[le32toh(phy_info->channel)];
3957
3958 s = splnet();
3959
3960 ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3961 if (c)
3962 ni->ni_chan = c;
3963
3964 if (__predict_false(sc->sc_drvbpf != NULL)) {
3965 struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3966
3967 tap->wr_flags = 0;
3968 if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3969 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3970 tap->wr_chan_freq =
3971 htole16(ic->ic_channels[phy_info->channel].ic_freq);
3972 tap->wr_chan_flags =
3973 htole16(ic->ic_channels[phy_info->channel].ic_flags);
3974 tap->wr_dbm_antsignal = (int8_t)rssi;
3975 tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3976 tap->wr_tsft = phy_info->system_timestamp;
3977 if (phy_info->phy_flags &
3978 htole16(IWM_RX_RES_PHY_FLAGS_OFDM_HT)) {
3979 uint8_t mcs = (phy_info->rate_n_flags &
3980 htole32(IWM_RATE_HT_MCS_RATE_CODE_MSK |
3981 IWM_RATE_HT_MCS_NSS_MSK));
3982 tap->wr_rate = (0x80 | mcs);
3983 } else {
3984 uint8_t rate = (phy_info->rate_n_flags &
3985 htole32(IWM_RATE_LEGACY_RATE_MSK));
3986 switch (rate) {
3987 /* CCK rates. */
3988 case 10: tap->wr_rate = 2; break;
3989 case 20: tap->wr_rate = 4; break;
3990 case 55: tap->wr_rate = 11; break;
3991 case 110: tap->wr_rate = 22; break;
3992 /* OFDM rates. */
3993 case 0xd: tap->wr_rate = 12; break;
3994 case 0xf: tap->wr_rate = 18; break;
3995 case 0x5: tap->wr_rate = 24; break;
3996 case 0x7: tap->wr_rate = 36; break;
3997 case 0x9: tap->wr_rate = 48; break;
3998 case 0xb: tap->wr_rate = 72; break;
3999 case 0x1: tap->wr_rate = 96; break;
4000 case 0x3: tap->wr_rate = 108; break;
4001 /* Unknown rate: should not happen. */
4002 default: tap->wr_rate = 0;
4003 }
4004 }
4005
4006 bpf_mtap2(sc->sc_drvbpf, tap, sc->sc_rxtap_len, m);
4007 }
4008 ieee80211_input(ic, m, ni, rssi, device_timestamp);
4009 ieee80211_free_node(ni);
4010
4011 splx(s);
4012 }
4013
4014 static void
4015 iwm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
4016 struct iwm_node *in)
4017 {
4018 struct ieee80211com *ic = &sc->sc_ic;
4019 struct ifnet *ifp = IC2IFP(ic);
4020 struct iwm_tx_resp *tx_resp = (void *)pkt->data;
4021 int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
4022 int failack = tx_resp->failure_frame;
4023
4024 KASSERT(tx_resp->frame_count == 1);
4025
4026 /* Update rate control statistics. */
4027 in->in_amn.amn_txcnt++;
4028 if (failack > 0) {
4029 in->in_amn.amn_retrycnt++;
4030 }
4031
4032 if (status != IWM_TX_STATUS_SUCCESS &&
4033 status != IWM_TX_STATUS_DIRECT_DONE)
4034 ifp->if_oerrors++;
4035 else
4036 ifp->if_opackets++;
4037 }
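/*
 * The counters updated above feed the net80211 AMRR rate control
 * state for this node: amn_txcnt and amn_retrycnt are sampled
 * periodically to decide whether the data TX rate should be raised
 * or lowered.
 */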
4038
4039 static void
4040 iwm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
4041 struct iwm_rx_data *data)
4042 {
4043 struct ieee80211com *ic = &sc->sc_ic;
4044 struct ifnet *ifp = IC2IFP(ic);
4045 struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
4046 int idx = cmd_hdr->idx;
4047 int qid = cmd_hdr->qid;
4048 struct iwm_tx_ring *ring = &sc->txq[qid];
4049 struct iwm_tx_data *txd = &ring->data[idx];
4050 struct iwm_node *in = txd->in;
4051 int s;
4052
4053 s = splnet();
4054
4055 if (txd->done) {
4056 DPRINTF(("%s: got tx interrupt that's already been handled!\n",
4057 DEVNAME(sc)));
4058 splx(s);
4059 return;
4060 }
4061
4062 bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
4063 BUS_DMASYNC_POSTREAD);
4064
4065 sc->sc_tx_timer = 0;
4066
4067 iwm_rx_tx_cmd_single(sc, pkt, in);
4068
4069 bus_dmamap_sync(sc->sc_dmat, txd->map, 0, txd->map->dm_mapsize,
4070 BUS_DMASYNC_POSTWRITE);
4071 bus_dmamap_unload(sc->sc_dmat, txd->map);
4072 m_freem(txd->m);
4073
4074 DPRINTFN(8, ("free txd %p, in %p\n", txd, txd->in));
4075 KASSERT(txd->done == 0);
4076 txd->done = 1;
4077 KASSERT(txd->in);
4078
4079 txd->m = NULL;
4080 txd->in = NULL;
4081 ieee80211_free_node(&in->in_ni);
4082
4083 if (--ring->queued < IWM_TX_RING_LOMARK) {
4084 sc->qfullmsk &= ~(1 << qid);
4085 if (sc->qfullmsk == 0 && (ifp->if_flags & IFF_OACTIVE)) {
4086 ifp->if_flags &= ~IFF_OACTIVE;
4087 KASSERT(KERNEL_LOCKED_P());
4088 iwm_start(ifp);
4089 }
4090 }
4091
4092 splx(s);
4093 }
4094
4095 static int
4096 iwm_binding_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action)
4097 {
4098 struct iwm_binding_cmd cmd;
4099 struct iwm_phy_ctxt *phyctxt = in->in_phyctxt;
4100 int i, err;
4101 uint32_t status;
4102
4103 memset(&cmd, 0, sizeof(cmd));
4104
4105 cmd.id_and_color
4106 = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
4107 cmd.action = htole32(action);
4108 cmd.phy = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
4109
4110 cmd.macs[0] = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
4111 for (i = 1; i < IWM_MAX_MACS_IN_BINDING; i++)
4112 cmd.macs[i] = htole32(IWM_FW_CTXT_INVALID);
4113
4114 status = 0;
4115 err = iwm_send_cmd_pdu_status(sc, IWM_BINDING_CONTEXT_CMD,
4116 sizeof(cmd), &cmd, &status);
4117 if (err == 0 && status != 0)
4118 err = EIO;
4119
4120 return err;
4121 }
4122
4123 static void
4124 iwm_phy_ctxt_cmd_hdr(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
4125 struct iwm_phy_context_cmd *cmd, uint32_t action, uint32_t apply_time)
4126 {
4127 memset(cmd, 0, sizeof(struct iwm_phy_context_cmd));
4128
4129 cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(ctxt->id,
4130 ctxt->color));
4131 cmd->action = htole32(action);
4132 cmd->apply_time = htole32(apply_time);
4133 }
4134
4135 static void
4136 iwm_phy_ctxt_cmd_data(struct iwm_softc *sc, struct iwm_phy_context_cmd *cmd,
4137 struct ieee80211_channel *chan, uint8_t chains_static,
4138 uint8_t chains_dynamic)
4139 {
4140 struct ieee80211com *ic = &sc->sc_ic;
4141 uint8_t active_cnt, idle_cnt;
4142
4143 cmd->ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
4144 IWM_PHY_BAND_24 : IWM_PHY_BAND_5;
4145
4146 cmd->ci.channel = ieee80211_chan2ieee(ic, chan);
4147 cmd->ci.width = IWM_PHY_VHT_CHANNEL_MODE20;
4148 cmd->ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
4149
4150 	/* Set the RX chains */
4151 idle_cnt = chains_static;
4152 active_cnt = chains_dynamic;
4153
4154 cmd->rxchain_info = htole32(iwm_fw_valid_rx_ant(sc) <<
4155 IWM_PHY_RX_CHAIN_VALID_POS);
4156 cmd->rxchain_info |= htole32(idle_cnt << IWM_PHY_RX_CHAIN_CNT_POS);
4157 cmd->rxchain_info |= htole32(active_cnt <<
4158 IWM_PHY_RX_CHAIN_MIMO_CNT_POS);
4159
4160 cmd->txchain_info = htole32(iwm_fw_valid_tx_ant(sc));
4161 }
4162
4163 static int
4164 iwm_phy_ctxt_cmd(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
4165 uint8_t chains_static, uint8_t chains_dynamic, uint32_t action,
4166 uint32_t apply_time)
4167 {
4168 struct iwm_phy_context_cmd cmd;
4169
4170 iwm_phy_ctxt_cmd_hdr(sc, ctxt, &cmd, action, apply_time);
4171
4172 iwm_phy_ctxt_cmd_data(sc, &cmd, ctxt->channel,
4173 chains_static, chains_dynamic);
4174
4175 return iwm_send_cmd_pdu(sc, IWM_PHY_CONTEXT_CMD, 0,
4176 sizeof(struct iwm_phy_context_cmd), &cmd);
4177 }
4178
4179 static int
4180 iwm_send_cmd(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
4181 {
4182 struct iwm_tx_ring *ring = &sc->txq[IWM_CMD_QUEUE];
4183 struct iwm_tfd *desc;
4184 struct iwm_tx_data *txdata;
4185 struct iwm_device_cmd *cmd;
4186 struct mbuf *m;
4187 bus_addr_t paddr;
4188 uint32_t addr_lo;
4189 int err = 0, i, paylen, off, s;
4190 int code;
4191 int async, wantresp;
4192 int group_id;
4193 size_t hdrlen, datasz;
4194 uint8_t *data;
4195
4196 code = hcmd->id;
4197 async = hcmd->flags & IWM_CMD_ASYNC;
4198 wantresp = hcmd->flags & IWM_CMD_WANT_SKB;
4199
4200 for (i = 0, paylen = 0; i < __arraycount(hcmd->len); i++) {
4201 paylen += hcmd->len[i];
4202 }
4203
4204 /* if the command wants an answer, busy sc_cmd_resp */
4205 if (wantresp) {
4206 KASSERT(!async);
4207 while (sc->sc_wantresp != IWM_CMD_RESP_IDLE)
4208 tsleep(&sc->sc_wantresp, 0, "iwmcmdsl", 0);
4209 sc->sc_wantresp = ring->qid << 16 | ring->cur;
4210 }
4211
4212 	/*
4213 	 * Is the hardware still available (e.g. after the wait above)?
4214 	 */
4215 s = splnet();
4216 if (sc->sc_flags & IWM_FLAG_STOPPED) {
4217 err = ENXIO;
4218 goto out;
4219 }
4220
4221 desc = &ring->desc[ring->cur];
4222 txdata = &ring->data[ring->cur];
4223
4224 group_id = iwm_cmd_groupid(code);
4225 if (group_id != 0) {
4226 hdrlen = sizeof(cmd->hdr_wide);
4227 datasz = sizeof(cmd->data_wide);
4228 } else {
4229 hdrlen = sizeof(cmd->hdr);
4230 datasz = sizeof(cmd->data);
4231 }
4232
4233 if (paylen > datasz) {
4234 /* Command is too large to fit in pre-allocated space. */
4235 size_t totlen = hdrlen + paylen;
4236 if (paylen > IWM_MAX_CMD_PAYLOAD_SIZE) {
4237 aprint_error_dev(sc->sc_dev,
4238 "firmware command too long (%zd bytes)\n", totlen);
4239 err = EINVAL;
4240 goto out;
4241 }
4242 m = m_gethdr(M_DONTWAIT, MT_DATA);
4243 if (m == NULL) {
4244 err = ENOMEM;
4245 goto out;
4246 }
4247 MEXTMALLOC(m, IWM_RBUF_SIZE, M_DONTWAIT);
4248 if (!(m->m_flags & M_EXT)) {
4249 aprint_error_dev(sc->sc_dev,
4250 "could not get fw cmd mbuf (%zd bytes)\n", totlen);
4251 m_freem(m);
4252 err = ENOMEM;
4253 goto out;
4254 }
4255 cmd = mtod(m, struct iwm_device_cmd *);
4256 err = bus_dmamap_load(sc->sc_dmat, txdata->map, cmd,
4257 totlen, NULL, BUS_DMA_NOWAIT | BUS_DMA_WRITE);
4258 if (err) {
4259 aprint_error_dev(sc->sc_dev,
4260 "could not load fw cmd mbuf (%zd bytes)\n", totlen);
4261 m_freem(m);
4262 goto out;
4263 }
4264 txdata->m = m;
4265 paddr = txdata->map->dm_segs[0].ds_addr;
4266 } else {
4267 cmd = &ring->cmd[ring->cur];
4268 paddr = txdata->cmd_paddr;
4269 }
4270
4271 if (group_id != 0) {
4272 cmd->hdr_wide.opcode = iwm_cmd_opcode(code);
4273 cmd->hdr_wide.group_id = group_id;
4274 cmd->hdr_wide.qid = ring->qid;
4275 cmd->hdr_wide.idx = ring->cur;
4276 cmd->hdr_wide.length = htole16(paylen);
4277 cmd->hdr_wide.version = iwm_cmd_version(code);
4278 data = cmd->data_wide;
4279 } else {
4280 cmd->hdr.code = code;
4281 cmd->hdr.flags = 0;
4282 cmd->hdr.qid = ring->qid;
4283 cmd->hdr.idx = ring->cur;
4284 data = cmd->data;
4285 }
4286
4287 for (i = 0, off = 0; i < __arraycount(hcmd->data); i++) {
4288 if (hcmd->len[i] == 0)
4289 continue;
4290 memcpy(data + off, hcmd->data[i], hcmd->len[i]);
4291 off += hcmd->len[i];
4292 }
4293 KASSERT(off == paylen);
4294
4295 /* lo field is not aligned */
4296 addr_lo = htole32((uint32_t)paddr);
4297 memcpy(&desc->tbs[0].lo, &addr_lo, sizeof(uint32_t));
4298 desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(paddr)
4299 | ((hdrlen + paylen) << 4));
4300 desc->num_tbs = 1;
4301
4302 	DPRINTFN(8, ("iwm_send_cmd 0x%x size=%zu%s\n",
4303 	    code, hdrlen + paylen, async ? " (async)" : ""));
4304
4305 if (paylen > datasz) {
4306 bus_dmamap_sync(sc->sc_dmat, txdata->map, 0, hdrlen + paylen,
4307 BUS_DMASYNC_PREWRITE);
4308 } else {
4309 bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
4310 (uint8_t *)cmd - (uint8_t *)ring->cmd, hdrlen + paylen,
4311 BUS_DMASYNC_PREWRITE);
4312 }
4313 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
4314 (uint8_t *)desc - (uint8_t *)ring->desc, sizeof(*desc),
4315 BUS_DMASYNC_PREWRITE);
4316
4317 err = iwm_set_cmd_in_flight(sc);
4318 if (err)
4319 goto out;
4320 ring->queued++;
4321
4322 #if 0
4323 iwm_update_sched(sc, ring->qid, ring->cur, 0, 0);
4324 #endif
4325 DPRINTF(("sending command 0x%x qid %d, idx %d\n",
4326 code, ring->qid, ring->cur));
4327
4328 /* Kick command ring. */
4329 ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
4330 IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
4331
4332 if (!async) {
4333 int generation = sc->sc_generation;
4334 err = tsleep(desc, PCATCH, "iwmcmd", mstohz(2000));
4335 if (err == 0) {
4336 /* if hardware is no longer up, return error */
4337 if (generation != sc->sc_generation) {
4338 err = ENXIO;
4339 } else {
4340 hcmd->resp_pkt = (void *)sc->sc_cmd_resp;
4341 }
4342 }
4343 }
4344 out:
4345 if (wantresp && err) {
4346 iwm_free_resp(sc, hcmd);
4347 }
4348 splx(s);
4349
4350 return err;
4351 }
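/*
 * A note on the TFD programmed above: hi_n_len packs the upper bits
 * of the DMA address into its low four bits and the byte count into
 * the remaining twelve (hence the "<< 4"); a host command always
 * fits in a single transfer buffer covering header plus payload.
 */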
4352
4353 static int
4354 iwm_send_cmd_pdu(struct iwm_softc *sc, uint32_t id, uint32_t flags,
4355 uint16_t len, const void *data)
4356 {
4357 struct iwm_host_cmd cmd = {
4358 .id = id,
4359 .len = { len, },
4360 .data = { data, },
4361 .flags = flags,
4362 };
4363
4364 return iwm_send_cmd(sc, &cmd);
4365 }
4366
4367 static int
4368 iwm_send_cmd_status(struct iwm_softc *sc, struct iwm_host_cmd *cmd,
4369 uint32_t *status)
4370 {
4371 struct iwm_rx_packet *pkt;
4372 struct iwm_cmd_response *resp;
4373 int err, resp_len;
4374
4375 KASSERT((cmd->flags & IWM_CMD_WANT_SKB) == 0);
4376 cmd->flags |= IWM_CMD_WANT_SKB;
4377
4378 err = iwm_send_cmd(sc, cmd);
4379 if (err)
4380 return err;
4381 pkt = cmd->resp_pkt;
4382
4383 /* Can happen if RFKILL is asserted */
4384 if (!pkt) {
4385 err = 0;
4386 goto out_free_resp;
4387 }
4388
4389 if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
4390 err = EIO;
4391 goto out_free_resp;
4392 }
4393
4394 resp_len = iwm_rx_packet_payload_len(pkt);
4395 if (resp_len != sizeof(*resp)) {
4396 err = EIO;
4397 goto out_free_resp;
4398 }
4399
4400 resp = (void *)pkt->data;
4401 *status = le32toh(resp->status);
4402 out_free_resp:
4403 iwm_free_resp(sc, cmd);
4404 return err;
4405 }
4406
4407 static int
4408 iwm_send_cmd_pdu_status(struct iwm_softc *sc, uint32_t id, uint16_t len,
4409 const void *data, uint32_t *status)
4410 {
4411 struct iwm_host_cmd cmd = {
4412 .id = id,
4413 .len = { len, },
4414 .data = { data, },
4415 };
4416
4417 return iwm_send_cmd_status(sc, &cmd, status);
4418 }
4419
4420 static void
4421 iwm_free_resp(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
4422 {
4423 KASSERT(sc->sc_wantresp != IWM_CMD_RESP_IDLE);
4424 KASSERT((hcmd->flags & IWM_CMD_WANT_SKB) == IWM_CMD_WANT_SKB);
4425 sc->sc_wantresp = IWM_CMD_RESP_IDLE;
4426 wakeup(&sc->sc_wantresp);
4427 }
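/*
 * sc_wantresp thus acts as a single-slot lock on the synchronous
 * command response: iwm_send_cmd() claims it by storing the queue id
 * and ring index of the pending command, and iwm_free_resp() releases
 * it and wakes up any other thread waiting to issue a synchronous
 * command.
 */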
4428
4429 static void
4430 iwm_cmd_done(struct iwm_softc *sc, int qid, int idx)
4431 {
4432 struct iwm_tx_ring *ring = &sc->txq[IWM_CMD_QUEUE];
4433 struct iwm_tx_data *data;
4434 int s;
4435
4436 if (qid != IWM_CMD_QUEUE) {
4437 return; /* Not a command ack. */
4438 }
4439
4440 s = splnet();
4441
4442 data = &ring->data[idx];
4443
4444 if (data->m != NULL) {
4445 bus_dmamap_sync(sc->sc_dmat, data->map, 0,
4446 data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
4447 bus_dmamap_unload(sc->sc_dmat, data->map);
4448 m_freem(data->m);
4449 data->m = NULL;
4450 }
4451 wakeup(&ring->desc[idx]);
4452
4453 if (((idx + ring->queued) % IWM_TX_RING_COUNT) != ring->cur) {
4454 aprint_error_dev(sc->sc_dev,
4455 "Some HCMDs skipped?: idx=%d queued=%d cur=%d\n",
4456 idx, ring->queued, ring->cur);
4457 }
4458
4459 KASSERT(ring->queued > 0);
4460 if (--ring->queued == 0)
4461 iwm_clear_cmd_in_flight(sc);
4462
4463 splx(s);
4464 }
4465
4466 #if 0
4467 /*
4468 * necessary only for block ack mode
4469 */
4470 void
4471 iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
4472 uint16_t len)
4473 {
4474 struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
4475 	uint16_t w_val, *w;
4476
4477 scd_bc_tbl = sc->sched_dma.vaddr;
4478
4479 len += 8; /* magic numbers came naturally from paris */
4480 if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
4481 len = roundup(len, 4) / 4;
4482
4483 w_val = htole16(sta_id << 12 | len);
4484
4485 	/* Update TX scheduler. */
4486 	w = &scd_bc_tbl[qid].tfd_offset[idx];
 	*w = w_val;
4487 bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
4488 (char *)(void *)w - (char *)(void *)sc->sched_dma.vaddr,
4489 sizeof(uint16_t), BUS_DMASYNC_PREWRITE);
4490
4491 	/* The first BC_DUP entries are mirrored past the end of the
 	 * table, presumably so the scheduler can read across the ring
 	 * wrap-around. */
4492 if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
4493 scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
4494 bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
4495 (char *)(void *)(w + IWM_TFD_QUEUE_SIZE_MAX) -
4496 (char *)(void *)sc->sched_dma.vaddr,
4497 sizeof (uint16_t), BUS_DMASYNC_PREWRITE);
4498 }
4499 }
4500 #endif
4501
4502 /*
4503  * Fill in various bits for management frames, and leave them
4504  * unfilled for data frames (the firmware takes care of those).
4505 * Return the selected TX rate.
4506 */
4507 static const struct iwm_rate *
4508 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
4509 struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
4510 {
4511 struct ieee80211com *ic = &sc->sc_ic;
4512 struct ieee80211_node *ni = &in->in_ni;
4513 const struct iwm_rate *rinfo;
4514 int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4515 int ridx, rate_flags, i, ind;
4516 int nrates = ni->ni_rates.rs_nrates;
4517
4518 tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
4519 tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
4520
4521 if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
4522 type != IEEE80211_FC0_TYPE_DATA) {
4523 /* for non-data, use the lowest supported rate */
4524 ridx = (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) ?
4525 IWM_RIDX_OFDM : IWM_RIDX_CCK;
4526 tx->data_retry_limit = IWM_MGMT_DFAULT_RETRY_LIMIT;
4527 #ifndef IEEE80211_NO_HT
4528 } else if (ic->ic_fixed_mcs != -1) {
4529 ridx = sc->sc_fixed_ridx;
4530 #endif
4531 } else if (ic->ic_fixed_rate != -1) {
4532 ridx = sc->sc_fixed_ridx;
4533 } else {
4534 /* for data frames, use RS table */
4535 tx->initial_rate_index = 0;
4536 tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
4537 DPRINTFN(12, ("start with txrate %d\n",
4538 tx->initial_rate_index));
4539 #ifndef IEEE80211_NO_HT
4540 if (ni->ni_flags & IEEE80211_NODE_HT) {
4541 ridx = iwm_mcs2ridx[ni->ni_txmcs];
4542 return &iwm_rates[ridx];
4543 }
4544 #endif
4545 ridx = (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) ?
4546 IWM_RIDX_OFDM : IWM_RIDX_CCK;
4547 for (i = 0; i < nrates; i++) {
4548 if (iwm_rates[i].rate == (ni->ni_txrate &
4549 IEEE80211_RATE_VAL)) {
4550 ridx = i;
4551 break;
4552 }
4553 }
4554 return &iwm_rates[ridx];
4555 }
4556
4557 rinfo = &iwm_rates[ridx];
4558 for (i = 0, ind = sc->sc_mgmt_last_antenna;
4559 i < IWM_RATE_MCS_ANT_NUM; i++) {
4560 ind = (ind + 1) % IWM_RATE_MCS_ANT_NUM;
4561 if (iwm_fw_valid_tx_ant(sc) & (1 << ind)) {
4562 sc->sc_mgmt_last_antenna = ind;
4563 break;
4564 }
4565 }
4566 rate_flags = (1 << sc->sc_mgmt_last_antenna) << IWM_RATE_MCS_ANT_POS;
4567 if (IWM_RIDX_IS_CCK(ridx))
4568 rate_flags |= IWM_RATE_MCS_CCK_MSK;
4569 #ifndef IEEE80211_NO_HT
4570 if ((ni->ni_flags & IEEE80211_NODE_HT) &&
4571 rinfo->ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP) {
4572 rate_flags |= IWM_RATE_MCS_HT_MSK;
4573 tx->rate_n_flags = htole32(rate_flags | rinfo->ht_plcp);
4574 } else
4575 #endif
4576 tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
4577
4578 return rinfo;
4579 }
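/*
 * Illustrative example of the antenna rotation above: with a valid
 * TX antenna mask of 0x3 (antennas A and B) and sc_mgmt_last_antenna
 * currently 0, the loop picks antenna 1 for this frame and antenna 0
 * again for the next one, spreading management frames across the
 * available chains.
 */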
4580
4581 #define TB0_SIZE 16
4582 static int
4583 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
4584 {
4585 struct ieee80211com *ic = &sc->sc_ic;
4586 struct iwm_node *in = (struct iwm_node *)ni;
4587 struct iwm_tx_ring *ring;
4588 struct iwm_tx_data *data;
4589 struct iwm_tfd *desc;
4590 struct iwm_device_cmd *cmd;
4591 struct iwm_tx_cmd *tx;
4592 struct ieee80211_frame *wh;
4593 struct ieee80211_key *k = NULL;
4594 struct mbuf *m1;
4595 const struct iwm_rate *rinfo;
4596 uint32_t flags;
4597 u_int hdrlen;
4598 bus_dma_segment_t *seg;
4599 uint8_t tid, type;
4600 int i, totlen, err, pad;
4601
4602 wh = mtod(m, struct ieee80211_frame *);
4603 hdrlen = ieee80211_anyhdrsize(wh);
4604 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4605
4606 tid = 0;
4607
4608 ring = &sc->txq[ac];
4609 desc = &ring->desc[ring->cur];
4610 memset(desc, 0, sizeof(*desc));
4611 data = &ring->data[ring->cur];
4612
4613 cmd = &ring->cmd[ring->cur];
4614 cmd->hdr.code = IWM_TX_CMD;
4615 cmd->hdr.flags = 0;
4616 cmd->hdr.qid = ring->qid;
4617 cmd->hdr.idx = ring->cur;
4618
4619 tx = (void *)cmd->data;
4620 memset(tx, 0, sizeof(*tx));
4621
4622 rinfo = iwm_tx_fill_cmd(sc, in, wh, tx);
4623
4624 if (__predict_false(sc->sc_drvbpf != NULL)) {
4625 struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
4626
4627 tap->wt_flags = 0;
4628 tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
4629 tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
4630 #ifndef IEEE80211_NO_HT
4631 if ((ni->ni_flags & IEEE80211_NODE_HT) &&
4632 !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
4633 type == IEEE80211_FC0_TYPE_DATA &&
4634 rinfo->plcp == IWM_RATE_INVM_PLCP) {
4635 tap->wt_rate = (0x80 | rinfo->ht_plcp);
4636 } else
4637 #endif
4638 tap->wt_rate = rinfo->rate;
4639 tap->wt_hwqueue = ac;
4640 if (wh->i_fc[1] & IEEE80211_FC1_WEP)
4641 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
4642
4643 bpf_mtap2(sc->sc_drvbpf, tap, sc->sc_txtap_len, m);
4644 }
4645
4646 /* Encrypt the frame if need be. */
4647 if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
4648 k = ieee80211_crypto_encap(ic, ni, m);
4649 if (k == NULL) {
4650 m_freem(m);
4651 return ENOBUFS;
4652 }
4653 /* Packet header may have moved, reset our local pointer. */
4654 wh = mtod(m, struct ieee80211_frame *);
4655 }
4656 totlen = m->m_pkthdr.len;
4657
4658 flags = 0;
4659 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
4660 flags |= IWM_TX_CMD_FLG_ACK;
4661 }
4662
4663 if (type == IEEE80211_FC0_TYPE_DATA &&
4664 !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
4665 (totlen + IEEE80211_CRC_LEN > ic->ic_rtsthreshold ||
4666 (ic->ic_flags & IEEE80211_F_USEPROT)))
4667 flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
4668
4669 if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
4670 type != IEEE80211_FC0_TYPE_DATA)
4671 tx->sta_id = IWM_AUX_STA_ID;
4672 else
4673 tx->sta_id = IWM_STATION_ID;
4674
4675 if (type == IEEE80211_FC0_TYPE_MGT) {
4676 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4677
4678 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
4679 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
4680 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
4681 else
4682 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
4683 } else {
4684 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
4685 }
4686
4687 if (hdrlen & 3) {
4688 /* First segment length must be a multiple of 4. */
4689 flags |= IWM_TX_CMD_FLG_MH_PAD;
4690 pad = 4 - (hdrlen & 3);
4691 } else
4692 pad = 0;
4693
4694 tx->driver_txop = 0;
4695 tx->next_frame_len = 0;
4696
4697 tx->len = htole16(totlen);
4698 tx->tid_tspec = tid;
4699 tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
4700
4701 /* Set physical address of "scratch area". */
4702 tx->dram_lsb_ptr = htole32(data->scratch_paddr);
4703 tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
4704
4705 /* Copy 802.11 header in TX command. */
4706 memcpy(tx + 1, wh, hdrlen);
4707
4708 flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;
4709
4710 tx->sec_ctl = 0;
4711 tx->tx_flags |= htole32(flags);
4712
4713 /* Trim 802.11 header. */
4714 m_adj(m, hdrlen);
4715
4716 err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
4717 BUS_DMA_NOWAIT | BUS_DMA_WRITE);
4718 if (err) {
4719 if (err != EFBIG) {
4720 aprint_error_dev(sc->sc_dev,
4721 "can't map mbuf (error %d)\n", err);
4722 m_freem(m);
4723 return err;
4724 }
4725 /* Too many DMA segments, linearize mbuf. */
4726 MGETHDR(m1, M_DONTWAIT, MT_DATA);
4727 if (m1 == NULL) {
4728 m_freem(m);
4729 return ENOBUFS;
4730 }
4731 if (m->m_pkthdr.len > MHLEN) {
4732 MCLGET(m1, M_DONTWAIT);
4733 if (!(m1->m_flags & M_EXT)) {
4734 m_freem(m);
4735 m_freem(m1);
4736 return ENOBUFS;
4737 }
4738 }
4739 m_copydata(m, 0, m->m_pkthdr.len, mtod(m1, void *));
4740 m1->m_pkthdr.len = m1->m_len = m->m_pkthdr.len;
4741 m_freem(m);
4742 m = m1;
4743
4744 err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
4745 BUS_DMA_NOWAIT | BUS_DMA_WRITE);
4746 if (err) {
4747 aprint_error_dev(sc->sc_dev,
4748 "can't map mbuf (error %d)\n", err);
4749 m_freem(m);
4750 return err;
4751 }
4752 }
4753 data->m = m;
4754 data->in = in;
4755 data->done = 0;
4756
4757 DPRINTFN(8, ("sending txd %p, in %p\n", data, data->in));
4758 KASSERT(data->in != NULL);
4759
4760 DPRINTFN(8, ("sending data: qid=%d idx=%d len=%d nsegs=%d type=%d "
4761 "subtype=%x tx_flags=%08x init_rateidx=%08x rate_n_flags=%08x\n",
4762 ring->qid, ring->cur, totlen, data->map->dm_nsegs, type,
4763 (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) >> 4,
4764 le32toh(tx->tx_flags), le32toh(tx->initial_rate_index),
4765 le32toh(tx->rate_n_flags)));
4766
4767 /* Fill TX descriptor. */
4768 desc->num_tbs = 2 + data->map->dm_nsegs;
4769
4770 desc->tbs[0].lo = htole32(data->cmd_paddr);
4771 desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
4772 (TB0_SIZE << 4);
4773 desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
4774 desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
4775 ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
4776 + hdrlen + pad - TB0_SIZE) << 4);
4777
4778 /* Other DMA segments are for data payload. */
4779 seg = data->map->dm_segs;
4780 for (i = 0; i < data->map->dm_nsegs; i++, seg++) {
4781 desc->tbs[i+2].lo = htole32(seg->ds_addr);
4782 desc->tbs[i+2].hi_n_len =
4783 htole16(iwm_get_dma_hi_addr(seg->ds_addr))
4784 | ((seg->ds_len) << 4);
4785 }
4786
4787 bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize,
4788 BUS_DMASYNC_PREWRITE);
4789 bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
4790 (uint8_t *)cmd - (uint8_t *)ring->cmd, sizeof(*cmd),
4791 BUS_DMASYNC_PREWRITE);
4792 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
4793 (uint8_t *)desc - (uint8_t *)ring->desc, sizeof(*desc),
4794 BUS_DMASYNC_PREWRITE);
4795
4796 #if 0
4797 iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id,
4798 le16toh(tx->len));
4799 #endif
4800
4801 /* Kick TX ring. */
4802 ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
4803 IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
4804
4805 /* Mark TX ring as full if we reach a certain threshold. */
4806 if (++ring->queued > IWM_TX_RING_HIMARK) {
4807 sc->qfullmsk |= 1 << ring->qid;
4808 }
4809
4810 return 0;
4811 }
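/*
 * To summarize the TX descriptor built above: TB0 maps the first
 * TB0_SIZE (16) bytes of the command, TB1 the rest of the TX command
 * plus the (possibly padded) 802.11 header, and the remaining TBs
 * map the frame payload's DMA segments directly.
 */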
4812
4813 #if 0
4814 /* not necessary? */
4815 static int
4816 iwm_flush_tx_path(struct iwm_softc *sc, int tfd_msk, int sync)
4817 {
4818 struct iwm_tx_path_flush_cmd flush_cmd = {
4819 .queues_ctl = htole32(tfd_msk),
4820 .flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
4821 };
4822 int err;
4823
4824 err = iwm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, sync ? 0 : IWM_CMD_ASYNC,
4825 sizeof(flush_cmd), &flush_cmd);
4826 if (err)
4827 aprint_error_dev(sc->sc_dev, "Flushing tx queue failed: %d\n",
4828 err);
4829 return err;
4830 }
4831 #endif
4832
4833 static void
4834 iwm_led_enable(struct iwm_softc *sc)
4835 {
4836 IWM_WRITE(sc, IWM_CSR_LED_REG, IWM_CSR_LED_REG_TURN_ON);
4837 }
4838
4839 static void
4840 iwm_led_disable(struct iwm_softc *sc)
4841 {
4842 IWM_WRITE(sc, IWM_CSR_LED_REG, IWM_CSR_LED_REG_TURN_OFF);
4843 }
4844
4845 static int
4846 iwm_led_is_enabled(struct iwm_softc *sc)
4847 {
4848 return (IWM_READ(sc, IWM_CSR_LED_REG) == IWM_CSR_LED_REG_TURN_ON);
4849 }
4850
4851 static void
4852 iwm_led_blink_timeout(void *arg)
4853 {
4854 struct iwm_softc *sc = arg;
4855
4856 if (iwm_led_is_enabled(sc))
4857 iwm_led_disable(sc);
4858 else
4859 iwm_led_enable(sc);
4860
4861 callout_schedule(&sc->sc_led_blink_to, mstohz(200));
4862 }
4863
4864 static void
4865 iwm_led_blink_start(struct iwm_softc *sc)
4866 {
4867 callout_schedule(&sc->sc_led_blink_to, mstohz(200));
4868 }
4869
4870 static void
4871 iwm_led_blink_stop(struct iwm_softc *sc)
4872 {
4873 callout_stop(&sc->sc_led_blink_to);
4874 iwm_led_disable(sc);
4875 }
4876
4877 #define IWM_POWER_KEEP_ALIVE_PERIOD_SEC 25
4878
4879 static int
4880 iwm_beacon_filter_send_cmd(struct iwm_softc *sc,
4881 struct iwm_beacon_filter_cmd *cmd)
4882 {
4883 return iwm_send_cmd_pdu(sc, IWM_REPLY_BEACON_FILTERING_CMD,
4884 0, sizeof(struct iwm_beacon_filter_cmd), cmd);
4885 }
4886
4887 static void
4888 iwm_beacon_filter_set_cqm_params(struct iwm_softc *sc, struct iwm_node *in,
4889 struct iwm_beacon_filter_cmd *cmd)
4890 {
4891 cmd->ba_enable_beacon_abort = htole32(sc->sc_bf.ba_enabled);
4892 }
4893
4894 static int
4895 iwm_update_beacon_abort(struct iwm_softc *sc, struct iwm_node *in, int enable)
4896 {
4897 struct iwm_beacon_filter_cmd cmd = {
4898 IWM_BF_CMD_CONFIG_DEFAULTS,
4899 .bf_enable_beacon_filter = htole32(1),
4900 .ba_enable_beacon_abort = htole32(enable),
4901 };
4902
4903 if (!sc->sc_bf.bf_enabled)
4904 return 0;
4905
4906 sc->sc_bf.ba_enabled = enable;
4907 iwm_beacon_filter_set_cqm_params(sc, in, &cmd);
4908 return iwm_beacon_filter_send_cmd(sc, &cmd);
4909 }
4910
4911 static void
4912 iwm_power_build_cmd(struct iwm_softc *sc, struct iwm_node *in,
4913 struct iwm_mac_power_cmd *cmd)
4914 {
4915 struct ieee80211_node *ni = &in->in_ni;
4916 int dtim_period, dtim_msec, keep_alive;
4917
4918 cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
4919 in->in_color));
4920 if (ni->ni_dtim_period)
4921 dtim_period = ni->ni_dtim_period;
4922 else
4923 dtim_period = 1;
4924
4925 /*
4926 * Regardless of power management state the driver must set
4927 * keep alive period. FW will use it for sending keep alive NDPs
4928 * immediately after association. Check that keep alive period
4929 * is at least 3 * DTIM.
4930 */
4931 dtim_msec = dtim_period * ni->ni_intval;
4932 keep_alive = MAX(3 * dtim_msec, 1000 * IWM_POWER_KEEP_ALIVE_PERIOD_SEC);
4933 keep_alive = roundup(keep_alive, 1000) / 1000;
4934 cmd->keep_alive_seconds = htole16(keep_alive);
4935
4936 #ifdef notyet
4937 cmd->flags = htole16(IWM_POWER_FLAGS_POWER_SAVE_ENA_MSK);
4938 cmd->rx_data_timeout = IWM_DEFAULT_PS_RX_DATA_TIMEOUT;
4939 cmd->tx_data_timeout = IWM_DEFAULT_PS_TX_DATA_TIMEOUT;
4940 #endif
4941 }
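/*
 * Illustrative example of the keep-alive computation above: with a
 * DTIM period of 1 and a beacon interval of 100, dtim_msec is 100,
 * so 3 * dtim_msec = 300 loses to the 25000ms floor and keep_alive
 * rounds to 25 seconds.
 */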
4942
4943 static int
4944 iwm_power_mac_update_mode(struct iwm_softc *sc, struct iwm_node *in)
4945 {
4946 int err;
4947 int ba_enable;
4948 struct iwm_mac_power_cmd cmd;
4949
4950 memset(&cmd, 0, sizeof(cmd));
4951
4952 iwm_power_build_cmd(sc, in, &cmd);
4953
4954 err = iwm_send_cmd_pdu(sc, IWM_MAC_PM_POWER_TABLE, 0,
4955 sizeof(cmd), &cmd);
4956 if (err)
4957 return err;
4958
4959 ba_enable = !!(cmd.flags &
4960 htole16(IWM_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
4961 return iwm_update_beacon_abort(sc, in, ba_enable);
4962 }
4963
4964 static int
4965 iwm_power_update_device(struct iwm_softc *sc)
4966 {
4967 struct iwm_device_power_cmd cmd = {
4968 #ifdef notyet
4969 .flags = htole16(IWM_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK),
4970 #else
4971 .flags = 0,
4972 #endif
4973 };
4974
4975 if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
4976 return 0;
4977
4978 cmd.flags |= htole16(IWM_DEVICE_POWER_FLAGS_CAM_MSK);
4979 DPRINTF(("Sending device power command with flags = 0x%X\n",
4980 cmd.flags));
4981
4982 return iwm_send_cmd_pdu(sc, IWM_POWER_TABLE_CMD, 0, sizeof(cmd), &cmd);
4983 }
4984
4985 #ifdef notyet
4986 static int
4987 iwm_enable_beacon_filter(struct iwm_softc *sc, struct iwm_node *in)
4988 {
4989 struct iwm_beacon_filter_cmd cmd = {
4990 IWM_BF_CMD_CONFIG_DEFAULTS,
4991 .bf_enable_beacon_filter = htole32(1),
4992 };
4993 int err;
4994
4995 iwm_beacon_filter_set_cqm_params(sc, in, &cmd);
4996 err = iwm_beacon_filter_send_cmd(sc, &cmd);
4997
4998 if (err == 0)
4999 sc->sc_bf.bf_enabled = 1;
5000
5001 return err;
5002 }
5003 #endif
5004
5005 static int
5006 iwm_disable_beacon_filter(struct iwm_softc *sc)
5007 {
5008 struct iwm_beacon_filter_cmd cmd;
5009 int err;
5010
5011 memset(&cmd, 0, sizeof(cmd));
5012 if ((sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_BF_UPDATED) == 0)
5013 return 0;
5014
5015 err = iwm_beacon_filter_send_cmd(sc, &cmd);
5016 if (err == 0)
5017 sc->sc_bf.bf_enabled = 0;
5018
5019 return err;
5020 }
5021
5022 static int
5023 iwm_add_sta_cmd(struct iwm_softc *sc, struct iwm_node *in, int update)
5024 {
5025 struct iwm_add_sta_cmd_v7 add_sta_cmd;
5026 int err;
5027 uint32_t status;
5028
5029 memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
5030
5031 add_sta_cmd.sta_id = IWM_STATION_ID;
5032 add_sta_cmd.mac_id_n_color
5033 = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
5034 if (!update) {
5035 int ac;
5036 for (ac = 0; ac < WME_NUM_AC; ac++) {
5037 add_sta_cmd.tfd_queue_msk |=
5038 htole32(__BIT(iwm_ac_to_tx_fifo[ac]));
5039 }
5040 IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
5041 }
5042 add_sta_cmd.add_modify = update ? 1 : 0;
5043 add_sta_cmd.station_flags_msk
5044 |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
5045 add_sta_cmd.tid_disable_tx = htole16(0xffff);
5046 if (update)
5047 add_sta_cmd.modify_mask |= (IWM_STA_MODIFY_TID_DISABLE_TX);
5048
5049 #ifndef IEEE80211_NO_HT
5050 if (in->in_ni.ni_flags & IEEE80211_NODE_HT) {
5051 add_sta_cmd.station_flags_msk
5052 |= htole32(IWM_STA_FLG_MAX_AGG_SIZE_MSK |
5053 IWM_STA_FLG_AGG_MPDU_DENS_MSK);
5054
5055 add_sta_cmd.station_flags
5056 |= htole32(IWM_STA_FLG_MAX_AGG_SIZE_64K);
5057 		switch (sc->sc_ic.ic_ampdu_params & IEEE80211_AMPDU_PARAM_SS) {
5058 case IEEE80211_AMPDU_PARAM_SS_2:
5059 add_sta_cmd.station_flags
5060 |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_2US);
5061 break;
5062 case IEEE80211_AMPDU_PARAM_SS_4:
5063 add_sta_cmd.station_flags
5064 |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_4US);
5065 break;
5066 case IEEE80211_AMPDU_PARAM_SS_8:
5067 add_sta_cmd.station_flags
5068 |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_8US);
5069 break;
5070 case IEEE80211_AMPDU_PARAM_SS_16:
5071 add_sta_cmd.station_flags
5072 |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_16US);
5073 break;
5074 default:
5075 break;
5076 }
5077 }
5078 #endif
5079
5080 status = IWM_ADD_STA_SUCCESS;
5081 err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(add_sta_cmd),
5082 &add_sta_cmd, &status);
5083 if (err == 0 && status != IWM_ADD_STA_SUCCESS)
5084 err = EIO;
5085
5086 return err;
5087 }
5088
5089 static int
5090 iwm_add_aux_sta(struct iwm_softc *sc)
5091 {
5092 struct iwm_add_sta_cmd_v7 cmd;
5093 int err;
5094 uint32_t status;
5095
5096 err = iwm_enable_txq(sc, 0, IWM_AUX_QUEUE, IWM_TX_FIFO_MCAST);
5097 if (err)
5098 return err;
5099
5100 memset(&cmd, 0, sizeof(cmd));
5101 cmd.sta_id = IWM_AUX_STA_ID;
5102 cmd.mac_id_n_color =
5103 htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_MAC_INDEX_AUX, 0));
5104 cmd.tfd_queue_msk = htole32(1 << IWM_AUX_QUEUE);
5105 cmd.tid_disable_tx = htole16(0xffff);
5106
5107 status = IWM_ADD_STA_SUCCESS;
5108 err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(cmd), &cmd,
5109 &status);
5110 if (err == 0 && status != IWM_ADD_STA_SUCCESS)
5111 err = EIO;
5112
5113 return err;
5114 }
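/*
 * The auxiliary station added here is the one referenced by
 * IWM_AUX_STA_ID elsewhere in this file: management and multicast
 * frames, as well as scan probe requests, are transmitted through
 * it rather than through the BSS station at IWM_STATION_ID.
 */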
5115
5116 #define IWM_PLCP_QUIET_THRESH 1
5117 #define IWM_ACTIVE_QUIET_TIME 10
5118 #define LONG_OUT_TIME_PERIOD 600
5119 #define SHORT_OUT_TIME_PERIOD 200
5120 #define SUSPEND_TIME_PERIOD 100
5121
5122 static uint16_t
5123 iwm_scan_rx_chain(struct iwm_softc *sc)
5124 {
5125 uint16_t rx_chain;
5126 uint8_t rx_ant;
5127
5128 rx_ant = iwm_fw_valid_rx_ant(sc);
5129 rx_chain = rx_ant << IWM_PHY_RX_CHAIN_VALID_POS;
5130 rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
5131 rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_SEL_POS;
5132 rx_chain |= 0x1 << IWM_PHY_RX_CHAIN_DRIVER_FORCE_POS;
5133 return htole16(rx_chain);
5134 }
5135
5136 static uint32_t
5137 iwm_scan_rate_n_flags(struct iwm_softc *sc, int flags, int no_cck)
5138 {
5139 uint32_t tx_ant;
5140 int i, ind;
5141
5142 for (i = 0, ind = sc->sc_scan_last_antenna;
5143 i < IWM_RATE_MCS_ANT_NUM; i++) {
5144 ind = (ind + 1) % IWM_RATE_MCS_ANT_NUM;
5145 if (iwm_fw_valid_tx_ant(sc) & (1 << ind)) {
5146 sc->sc_scan_last_antenna = ind;
5147 break;
5148 }
5149 }
5150 tx_ant = (1 << sc->sc_scan_last_antenna) << IWM_RATE_MCS_ANT_POS;
5151
5152 if ((flags & IEEE80211_CHAN_2GHZ) && !no_cck)
5153 return htole32(IWM_RATE_1M_PLCP | IWM_RATE_MCS_CCK_MSK |
5154 tx_ant);
5155 else
5156 return htole32(IWM_RATE_6M_PLCP | tx_ant);
5157 }
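/*
 * In other words, scan probes go out at 1 Mbit/s CCK on 2GHz
 * channels (unless CCK is suppressed via no_cck) and at 6 Mbit/s
 * OFDM otherwise, with the TX antenna rotated across the valid
 * chains in the same round-robin fashion as for management frames.
 */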
5158
5159 #ifdef notyet
5160 /*
5161 * If req->n_ssids > 0, it means we should do an active scan.
5162 * In case of active scan w/o directed scan, we receive a zero-length SSID
5163 * just to notify that this scan is active and not passive.
5164 * In order to notify the FW of the number of SSIDs we wish to scan (including
5165 * the zero-length one), we need to set the corresponding bits in chan->type,
5166 * one for each SSID, and set the active bit (first). Since the first SSID
5167 * is already included in the probe template, we need to set only
5168 * req->n_ssids - 1 bits in addition to the first bit.
5169 */
5170 static uint16_t
5171 iwm_get_active_dwell(struct iwm_softc *sc, int flags, int n_ssids)
5172 {
5173 if (flags & IEEE80211_CHAN_2GHZ)
5174 return 30 + 3 * (n_ssids + 1);
5175 return 20 + 2 * (n_ssids + 1);
5176 }
5177
5178 static uint16_t
5179 iwm_get_passive_dwell(struct iwm_softc *sc, int flags)
5180 {
5181 return (flags & IEEE80211_CHAN_2GHZ) ? 100 + 20 : 100 + 10;
5182 }
5183 #endif
5184
5185 static uint8_t
5186 iwm_lmac_scan_fill_channels(struct iwm_softc *sc,
5187 struct iwm_scan_channel_cfg_lmac *chan, int n_ssids)
5188 {
5189 struct ieee80211com *ic = &sc->sc_ic;
5190 struct ieee80211_channel *c;
5191 uint8_t nchan;
5192
5193 for (nchan = 0, c = &ic->ic_channels[1];
5194 c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
5195 nchan < sc->sc_capa_n_scan_channels;
5196 c++) {
5197 if (c->ic_flags == 0)
5198 continue;
5199
5200 chan->channel_num = htole16(ieee80211_mhz2ieee(c->ic_freq, 0));
5201 chan->iter_count = htole16(1);
5202 chan->iter_interval = htole32(0);
5203 chan->flags = htole32(IWM_UNIFIED_SCAN_CHANNEL_PARTIAL);
5204 chan->flags |= htole32(IWM_SCAN_CHANNEL_NSSIDS(n_ssids));
5205 if (!IEEE80211_IS_CHAN_PASSIVE(c) && n_ssids != 0)
5206 chan->flags |= htole32(IWM_SCAN_CHANNEL_TYPE_ACTIVE);
5207 chan++;
5208 nchan++;
5209 }
5210
5211 return nchan;
5212 }
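/*
 * Note the logic above: the NSSIDS bits tell the firmware how many
 * of the configured SSIDs to probe for on each channel, and the
 * ACTIVE flag is only set when there is at least one SSID and the
 * channel is not passive-only.
 */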
5213
5214 static uint8_t
5215 iwm_umac_scan_fill_channels(struct iwm_softc *sc,
5216 struct iwm_scan_channel_cfg_umac *chan, int n_ssids)
5217 {
5218 struct ieee80211com *ic = &sc->sc_ic;
5219 struct ieee80211_channel *c;
5220 uint8_t nchan;
5221
5222 for (nchan = 0, c = &ic->ic_channels[1];
5223 c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
5224 nchan < sc->sc_capa_n_scan_channels;
5225 c++) {
5226 if (c->ic_flags == 0)
5227 continue;
5228
5229 chan->channel_num = ieee80211_mhz2ieee(c->ic_freq, 0);
5230 chan->iter_count = 1;
5231 chan->iter_interval = htole16(0);
5232 chan->flags = htole32(IWM_SCAN_CHANNEL_UMAC_NSSIDS(n_ssids));
5233 chan++;
5234 nchan++;
5235 }
5236
5237 return nchan;
5238 }
5239
5240 static int
5241 iwm_fill_probe_req(struct iwm_softc *sc, struct iwm_scan_probe_req *preq)
5242 {
5243 struct ieee80211com *ic = &sc->sc_ic;
5244 struct ieee80211_frame *wh = (struct ieee80211_frame *)preq->buf;
5245 struct ieee80211_rateset *rs;
5246 size_t remain = sizeof(preq->buf);
5247 uint8_t *frm, *pos;
5248
5249 memset(preq, 0, sizeof(*preq));
5250
5251 KASSERT(ic->ic_des_esslen < sizeof(ic->ic_des_essid));
5252 if (remain < sizeof(*wh) + 2 + ic->ic_des_esslen)
5253 return ENOBUFS;
5254
5255 /*
5256 * Build a probe request frame. Most of the following code is a
5257 * copy & paste of what is done in net80211.
5258 */
5259 wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
5260 IEEE80211_FC0_SUBTYPE_PROBE_REQ;
5261 wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
5262 IEEE80211_ADDR_COPY(wh->i_addr1, etherbroadcastaddr);
5263 IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_myaddr);
5264 IEEE80211_ADDR_COPY(wh->i_addr3, etherbroadcastaddr);
5265 *(uint16_t *)&wh->i_dur[0] = 0; /* filled by HW */
5266 *(uint16_t *)&wh->i_seq[0] = 0; /* filled by HW */
5267
5268 frm = (uint8_t *)(wh + 1);
5269 frm = ieee80211_add_ssid(frm, ic->ic_des_essid, ic->ic_des_esslen);
5270
5271 /* Tell the firmware where the MAC header is. */
5272 preq->mac_header.offset = 0;
5273 preq->mac_header.len = htole16(frm - (uint8_t *)wh);
5274 remain -= frm - (uint8_t *)wh;
5275
5276 /* Fill in 2GHz IEs and tell firmware where they are. */
5277 rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
5278 if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
5279 if (remain < 4 + rs->rs_nrates)
5280 return ENOBUFS;
5281 } else if (remain < 2 + rs->rs_nrates)
5282 return ENOBUFS;
5283 preq->band_data[0].offset = htole16(frm - (uint8_t *)wh);
5284 pos = frm;
5285 frm = ieee80211_add_rates(frm, rs);
5286 if (rs->rs_nrates > IEEE80211_RATE_SIZE)
5287 frm = ieee80211_add_xrates(frm, rs);
5288 preq->band_data[0].len = htole16(frm - pos);
5289 remain -= frm - pos;
5290
5291 if (isset(sc->sc_enabled_capa,
5292 IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)) {
5293 if (remain < 3)
5294 return ENOBUFS;
5295 *frm++ = IEEE80211_ELEMID_DSPARMS;
5296 *frm++ = 1;
5297 *frm++ = 0;
5298 remain -= 3;
5299 }
5300
5301 if (sc->sc_nvm.sku_cap_band_52GHz_enable) {
5302 /* Fill in 5GHz IEs. */
5303 rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
5304 if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
5305 if (remain < 4 + rs->rs_nrates)
5306 return ENOBUFS;
5307 } else if (remain < 2 + rs->rs_nrates)
5308 return ENOBUFS;
5309 preq->band_data[1].offset = htole16(frm - (uint8_t *)wh);
5310 pos = frm;
5311 frm = ieee80211_add_rates(frm, rs);
5312 if (rs->rs_nrates > IEEE80211_RATE_SIZE)
5313 frm = ieee80211_add_xrates(frm, rs);
5314 preq->band_data[1].len = htole16(frm - pos);
5315 remain -= frm - pos;
5316 }
5317
5318 #ifndef IEEE80211_NO_HT
5319 /* Send 11n IEs on both 2GHz and 5GHz bands. */
5320 preq->common_data.offset = htole16(frm - (uint8_t *)wh);
5321 pos = frm;
5322 if (ic->ic_flags & IEEE80211_F_HTON) {
5323 if (remain < 28)
5324 return ENOBUFS;
5325 frm = ieee80211_add_htcaps(frm, ic);
5326 /* XXX add WME info? */
5327 }
5328 #endif
5329
5330 preq->common_data.len = htole16(frm - pos);
5331
5332 return 0;
5333 }
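/*
 * All offsets recorded above are relative to the start of the MAC
 * header; the firmware is expected to stitch the shared header, the
 * per-band rate IEs and the common IEs back together when it
 * transmits the probe request on each scanned channel.
 */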
5334
5335 static int
5336 iwm_lmac_scan(struct iwm_softc *sc)
5337 {
5338 struct ieee80211com *ic = &sc->sc_ic;
5339 struct iwm_host_cmd hcmd = {
5340 .id = IWM_SCAN_OFFLOAD_REQUEST_CMD,
5341 .len = { 0, },
5342 .data = { NULL, },
5343 .flags = 0,
5344 };
5345 struct iwm_scan_req_lmac *req;
5346 size_t req_len;
5347 int err;
5348
5349 DPRINTF(("%s: %s\n", DEVNAME(sc), __func__));
5350
5351 req_len = sizeof(struct iwm_scan_req_lmac) +
5352 (sizeof(struct iwm_scan_channel_cfg_lmac) *
5353 sc->sc_capa_n_scan_channels) + sizeof(struct iwm_scan_probe_req);
5354 if (req_len > IWM_MAX_CMD_PAYLOAD_SIZE)
5355 return ENOMEM;
5356 req = kmem_zalloc(req_len, KM_SLEEP);
5357 hcmd.len[0] = (uint16_t)req_len;
5358 hcmd.data[0] = (void *)req;
5359
5360 /* These timings correspond to iwlwifi's UNASSOC scan. */
5361 req->active_dwell = 10;
5362 req->passive_dwell = 110;
5363 req->fragmented_dwell = 44;
5364 req->extended_dwell = 90;
5365 req->max_out_time = 0;
5366 req->suspend_time = 0;
5367
5368 req->scan_prio = htole32(IWM_SCAN_PRIORITY_HIGH);
5369 req->rx_chain_select = iwm_scan_rx_chain(sc);
5370 req->iter_num = htole32(1);
5371 req->delay = 0;
5372
5373 req->scan_flags = htole32(IWM_LMAC_SCAN_FLAG_PASS_ALL |
5374 IWM_LMAC_SCAN_FLAG_ITER_COMPLETE |
5375 IWM_LMAC_SCAN_FLAG_EXTENDED_DWELL);
5376 if (ic->ic_des_esslen == 0)
5377 req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAG_PASSIVE);
5378 else
5379 req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAG_PRE_CONNECTION);
5380 if (isset(sc->sc_enabled_capa,
5381 IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
5382 req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAGS_RRM_ENABLED);
5383
5384 req->flags = htole32(IWM_PHY_BAND_24);
5385 if (sc->sc_nvm.sku_cap_band_52GHz_enable)
5386 req->flags |= htole32(IWM_PHY_BAND_5);
5387 req->filter_flags =
5388 htole32(IWM_MAC_FILTER_ACCEPT_GRP | IWM_MAC_FILTER_IN_BEACON);
5389
5390 /* Tx flags 2 GHz. */
5391 req->tx_cmd[0].tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
5392 IWM_TX_CMD_FLG_BT_DIS);
5393 req->tx_cmd[0].rate_n_flags =
5394 iwm_scan_rate_n_flags(sc, IEEE80211_CHAN_2GHZ, 1/*XXX*/);
5395 req->tx_cmd[0].sta_id = IWM_AUX_STA_ID;
5396
5397 /* Tx flags 5 GHz. */
5398 req->tx_cmd[1].tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
5399 IWM_TX_CMD_FLG_BT_DIS);
5400 req->tx_cmd[1].rate_n_flags =
5401 iwm_scan_rate_n_flags(sc, IEEE80211_CHAN_5GHZ, 1/*XXX*/);
5402 req->tx_cmd[1].sta_id = IWM_AUX_STA_ID;
5403
5404 /* Check if we're doing an active directed scan. */
5405 if (ic->ic_des_esslen != 0) {
5406 req->direct_scan[0].id = IEEE80211_ELEMID_SSID;
5407 req->direct_scan[0].len = ic->ic_des_esslen;
5408 memcpy(req->direct_scan[0].ssid, ic->ic_des_essid,
5409 ic->ic_des_esslen);
5410 }
5411
5412 req->n_channels = iwm_lmac_scan_fill_channels(sc,
5413 (struct iwm_scan_channel_cfg_lmac *)req->data,
5414 ic->ic_des_esslen != 0);
5415
5416 err = iwm_fill_probe_req(sc,
5417 (struct iwm_scan_probe_req *)(req->data +
5418 (sizeof(struct iwm_scan_channel_cfg_lmac) *
5419 sc->sc_capa_n_scan_channels)));
5420 if (err) {
5421 kmem_free(req, req_len);
5422 return err;
5423 }
5424
5425 /* Specify the scan plan: We'll do one iteration. */
5426 req->schedule[0].iterations = 1;
5427 req->schedule[0].full_scan_mul = 1;
5428
5429 /* Disable EBS. */
5430 req->channel_opt[0].non_ebs_ratio = 1;
5431 req->channel_opt[1].non_ebs_ratio = 1;
5432
5433 err = iwm_send_cmd(sc, &hcmd);
5434 kmem_free(req, req_len);
5435 return err;
5436 }
5437
5438 static int
5439 iwm_config_umac_scan(struct iwm_softc *sc)
5440 {
5441 struct ieee80211com *ic = &sc->sc_ic;
5442 struct iwm_scan_config *scan_config;
5443 int err, nchan;
5444 size_t cmd_size;
5445 struct ieee80211_channel *c;
5446 struct iwm_host_cmd hcmd = {
5447 .id = iwm_cmd_id(IWM_SCAN_CFG_CMD, IWM_ALWAYS_LONG_GROUP, 0),
5448 .flags = 0,
5449 };
5450 static const uint32_t rates = (IWM_SCAN_CONFIG_RATE_1M |
5451 IWM_SCAN_CONFIG_RATE_2M | IWM_SCAN_CONFIG_RATE_5M |
5452 IWM_SCAN_CONFIG_RATE_11M | IWM_SCAN_CONFIG_RATE_6M |
5453 IWM_SCAN_CONFIG_RATE_9M | IWM_SCAN_CONFIG_RATE_12M |
5454 IWM_SCAN_CONFIG_RATE_18M | IWM_SCAN_CONFIG_RATE_24M |
5455 IWM_SCAN_CONFIG_RATE_36M | IWM_SCAN_CONFIG_RATE_48M |
5456 IWM_SCAN_CONFIG_RATE_54M);
5457
5458 cmd_size = sizeof(*scan_config) + sc->sc_capa_n_scan_channels;
5459
5460 scan_config = kmem_zalloc(cmd_size, KM_SLEEP);
5461 scan_config->tx_chains = htole32(iwm_fw_valid_tx_ant(sc));
5462 scan_config->rx_chains = htole32(iwm_fw_valid_rx_ant(sc));
5463 scan_config->legacy_rates = htole32(rates |
5464 IWM_SCAN_CONFIG_SUPPORTED_RATE(rates));
5465
5466 /* These timings correspond to iwlwifi's UNASSOC scan. */
5467 scan_config->dwell_active = 10;
5468 scan_config->dwell_passive = 110;
5469 scan_config->dwell_fragmented = 44;
5470 scan_config->dwell_extended = 90;
5471 scan_config->out_of_channel_time = htole32(0);
5472 scan_config->suspend_time = htole32(0);
5473
5474 IEEE80211_ADDR_COPY(scan_config->mac_addr, sc->sc_ic.ic_myaddr);
5475
5476 scan_config->bcast_sta_id = IWM_AUX_STA_ID;
5477 scan_config->channel_flags = IWM_CHANNEL_FLAG_EBS |
5478 IWM_CHANNEL_FLAG_ACCURATE_EBS | IWM_CHANNEL_FLAG_EBS_ADD |
5479 IWM_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE;
5480
5481 for (c = &ic->ic_channels[1], nchan = 0;
5482 c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
5483 nchan < sc->sc_capa_n_scan_channels; c++) {
5484 if (c->ic_flags == 0)
5485 continue;
5486 scan_config->channel_array[nchan++] =
5487 ieee80211_mhz2ieee(c->ic_freq, 0);
5488 }
5489
5490 scan_config->flags = htole32(IWM_SCAN_CONFIG_FLAG_ACTIVATE |
5491 IWM_SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
5492 IWM_SCAN_CONFIG_FLAG_SET_TX_CHAINS |
5493 IWM_SCAN_CONFIG_FLAG_SET_RX_CHAINS |
5494 IWM_SCAN_CONFIG_FLAG_SET_AUX_STA_ID |
5495 IWM_SCAN_CONFIG_FLAG_SET_ALL_TIMES |
5496 IWM_SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
5497 IWM_SCAN_CONFIG_FLAG_SET_MAC_ADDR |
5498 	    IWM_SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS |
5499 IWM_SCAN_CONFIG_N_CHANNELS(nchan) |
5500 IWM_SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED);
5501
5502 hcmd.data[0] = scan_config;
5503 hcmd.len[0] = cmd_size;
5504
5505 err = iwm_send_cmd(sc, &hcmd);
5506 kmem_free(scan_config, cmd_size);
5507 return err;
5508 }
5509
5510 static int
5511 iwm_umac_scan(struct iwm_softc *sc)
5512 {
5513 struct ieee80211com *ic = &sc->sc_ic;
5514 struct iwm_host_cmd hcmd = {
5515 .id = iwm_cmd_id(IWM_SCAN_REQ_UMAC, IWM_ALWAYS_LONG_GROUP, 0),
5516 .len = { 0, },
5517 .data = { NULL, },
5518 .flags = 0,
5519 };
5520 struct iwm_scan_req_umac *req;
5521 struct iwm_scan_req_umac_tail *tail;
5522 size_t req_len;
5523 int err;
5524
5525 DPRINTF(("%s: %s\n", DEVNAME(sc), __func__));
5526
5527 req_len = sizeof(struct iwm_scan_req_umac) +
5528 (sizeof(struct iwm_scan_channel_cfg_umac) *
5529 sc->sc_capa_n_scan_channels) +
5530 sizeof(struct iwm_scan_req_umac_tail);
5531 if (req_len > IWM_MAX_CMD_PAYLOAD_SIZE)
5532 return ENOMEM;
5533 req = kmem_zalloc(req_len, KM_SLEEP);
5534
5535 hcmd.len[0] = (uint16_t)req_len;
5536 hcmd.data[0] = (void *)req;
5537
5538 /* These timings correspond to iwlwifi's UNASSOC scan. */
5539 req->active_dwell = 10;
5540 req->passive_dwell = 110;
5541 req->fragmented_dwell = 44;
5542 req->extended_dwell = 90;
5543 req->max_out_time = 0;
5544 req->suspend_time = 0;
5545
5546 req->scan_priority = htole32(IWM_SCAN_PRIORITY_HIGH);
5547 req->ooc_priority = htole32(IWM_SCAN_PRIORITY_HIGH);
5548
5549 req->n_channels = iwm_umac_scan_fill_channels(sc,
5550 (struct iwm_scan_channel_cfg_umac *)req->data,
5551 ic->ic_des_esslen != 0);
5552
5553 req->general_flags = htole32(IWM_UMAC_SCAN_GEN_FLAGS_PASS_ALL |
5554 IWM_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE |
5555 IWM_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL);
5556
5557 tail = (struct iwm_scan_req_umac_tail *)(req->data +
5558 sizeof(struct iwm_scan_channel_cfg_umac) *
5559 sc->sc_capa_n_scan_channels);
5560
5561 /* Check if we're doing an active directed scan. */
5562 if (ic->ic_des_esslen != 0) {
5563 tail->direct_scan[0].id = IEEE80211_ELEMID_SSID;
5564 tail->direct_scan[0].len = ic->ic_des_esslen;
5565 memcpy(tail->direct_scan[0].ssid, ic->ic_des_essid,
5566 ic->ic_des_esslen);
5567 req->general_flags |=
5568 htole32(IWM_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT);
5569 } else
5570 req->general_flags |= htole32(IWM_UMAC_SCAN_GEN_FLAGS_PASSIVE);
5571
5572 if (isset(sc->sc_enabled_capa,
5573 IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
5574 req->general_flags |=
5575 htole32(IWM_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED);
5576
5577 err = iwm_fill_probe_req(sc, &tail->preq);
5578 if (err) {
5579 kmem_free(req, req_len);
5580 return err;
5581 }
5582
5583 /* Specify the scan plan: We'll do one iteration. */
5584 tail->schedule[0].interval = 0;
5585 tail->schedule[0].iter_count = 1;
5586
5587 err = iwm_send_cmd(sc, &hcmd);
5588 kmem_free(req, req_len);
5589 return err;
5590 }
5591
5592 static uint8_t
5593 iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
5594 {
5595 int i;
5596 uint8_t rval;
5597
5598 for (i = 0; i < rs->rs_nrates; i++) {
5599 rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
5600 if (rval == iwm_rates[ridx].rate)
5601 return rs->rs_rates[i];
5602 }
5603 return 0;
5604 }
5605
5606 static void
5607 iwm_ack_rates(struct iwm_softc *sc, struct iwm_node *in, int *cck_rates,
5608 int *ofdm_rates)
5609 {
5610 struct ieee80211_node *ni = &in->in_ni;
5611 struct ieee80211_rateset *rs = &ni->ni_rates;
5612 int lowest_present_ofdm = -1;
5613 int lowest_present_cck = -1;
5614 uint8_t cck = 0;
5615 uint8_t ofdm = 0;
5616 int i;
5617
5618 if (ni->ni_chan == IEEE80211_CHAN_ANYC ||
5619 IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) {
5620 for (i = IWM_FIRST_CCK_RATE; i < IWM_FIRST_OFDM_RATE; i++) {
5621 if ((iwm_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
5622 continue;
5623 cck |= (1 << i);
5624 if (lowest_present_cck == -1 || lowest_present_cck > i)
5625 lowest_present_cck = i;
5626 }
5627 }
5628 for (i = IWM_FIRST_OFDM_RATE; i <= IWM_LAST_NON_HT_RATE; i++) {
5629 if ((iwm_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
5630 continue;
5631 ofdm |= (1 << (i - IWM_FIRST_OFDM_RATE));
5632 if (lowest_present_ofdm == -1 || lowest_present_ofdm > i)
5633 lowest_present_ofdm = i;
5634 }
5635
5636 /*
5637 * Now we've got the basic rates as bitmaps in the ofdm and cck
5638 * variables. This isn't sufficient though, as there might not
5639 * be all the right rates in the bitmap. E.g. if the only basic
5640 * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
5641 * and 6 Mbps because the 802.11-2007 standard says in 9.6:
5642 *
5643 * [...] a STA responding to a received frame shall transmit
5644 * its Control Response frame [...] at the highest rate in the
5645 * BSSBasicRateSet parameter that is less than or equal to the
5646 * rate of the immediately previous frame in the frame exchange
5647 * sequence ([...]) and that is of the same modulation class
5648 * ([...]) as the received frame. If no rate contained in the
5649 * BSSBasicRateSet parameter meets these conditions, then the
5650 * control frame sent in response to a received frame shall be
5651 * transmitted at the highest mandatory rate of the PHY that is
5652 * less than or equal to the rate of the received frame, and
5653 * that is of the same modulation class as the received frame.
5654 *
5655 * As a consequence, we need to add all mandatory rates that are
5656 * lower than all of the basic rates to these bitmaps.
5657 */
5658
5659 if (IWM_RATE_24M_INDEX < lowest_present_ofdm)
5660 ofdm |= IWM_RATE_BIT_MSK(24) >> IWM_FIRST_OFDM_RATE;
5661 if (IWM_RATE_12M_INDEX < lowest_present_ofdm)
5662 ofdm |= IWM_RATE_BIT_MSK(12) >> IWM_FIRST_OFDM_RATE;
5663 /* 6M already there or needed so always add */
5664 ofdm |= IWM_RATE_BIT_MSK(6) >> IWM_FIRST_OFDM_RATE;
5665
5666 /*
5667 * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
5668 * Note, however:
5669 * - if no CCK rates are basic, it must be ERP since there must
5670 * be some basic rates at all, so they're OFDM => ERP PHY
5671 * (or we're in 5 GHz, and the cck bitmap will never be used)
5672 * - if 11M is a basic rate, it must be ERP as well, so add 5.5M
5673 * - if 5.5M is basic, 1M and 2M are mandatory
5674 * - if 2M is basic, 1M is mandatory
5675 * - if 1M is basic, that's the only valid ACK rate.
5676 * As a consequence, it's not as complicated as it sounds, just add
5677 * any lower rates to the ACK rate bitmap.
5678 */
5679 if (IWM_RATE_11M_INDEX < lowest_present_cck)
5680 cck |= IWM_RATE_BIT_MSK(11) >> IWM_FIRST_CCK_RATE;
5681 if (IWM_RATE_5M_INDEX < lowest_present_cck)
5682 cck |= IWM_RATE_BIT_MSK(5) >> IWM_FIRST_CCK_RATE;
5683 if (IWM_RATE_2M_INDEX < lowest_present_cck)
5684 cck |= IWM_RATE_BIT_MSK(2) >> IWM_FIRST_CCK_RATE;
5685 /* 1M already there or needed so always add */
5686 cck |= IWM_RATE_BIT_MSK(1) >> IWM_FIRST_CCK_RATE;
5687
5688 *cck_rates = cck;
5689 *ofdm_rates = ofdm;
5690 }
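#if 0	/* illustrative sketch, not part of the driver */
/*
 * Worked example of the mandatory-rate fill above: if the BSS only
 * advertises 5.5M and 11M as basic rates, the resulting CCK bitmap
 * must also contain 2M and 1M.  Standalone demo; the index values
 * are stand-ins for the driver's IWM_RATE_*_INDEX constants.
 */
#include <stdio.h>

enum { R1M, R2M, R5M, R11M };		/* CCK indices, lowest first */

int
main(void)
{
	/* Basic rates 5.5M and 11M; lowest present index is R5M. */
	unsigned cck = (1 << R5M) | (1 << R11M);
	int lowest_present_cck = R5M;

	/* Same cascade as iwm_ack_rates(). */
	if (R11M < lowest_present_cck)
		cck |= 1 << R11M;
	if (R5M < lowest_present_cck)
		cck |= 1 << R5M;
	if (R2M < lowest_present_cck)
		cck |= 1 << R2M;
	cck |= 1 << R1M;		/* 1M is always a valid ACK rate */

	printf("cck bitmap = 0x%x\n", cck);	/* 0xf: 1M 2M 5.5M 11M */
	return 0;
}
#endif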
5691
5692 static void
5693 iwm_mac_ctxt_cmd_common(struct iwm_softc *sc, struct iwm_node *in,
5694 struct iwm_mac_ctx_cmd *cmd, uint32_t action, int assoc)
5695 {
5696 #define IWM_EXP2(x) ((1 << (x)) - 1) /* CWmin = 2^ECWmin - 1 */
5697 struct ieee80211com *ic = &sc->sc_ic;
5698 struct ieee80211_node *ni = ic->ic_bss;
5699 int cck_ack_rates, ofdm_ack_rates;
5700 int i;
5701
5702 cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
5703 in->in_color));
5704 cmd->action = htole32(action);
5705
5706 cmd->mac_type = htole32(IWM_FW_MAC_TYPE_BSS_STA);
5707 cmd->tsf_id = htole32(IWM_TSF_ID_A);
5708
5709 IEEE80211_ADDR_COPY(cmd->node_addr, ic->ic_myaddr);
5710 IEEE80211_ADDR_COPY(cmd->bssid_addr, ni->ni_bssid);
5711
5712 iwm_ack_rates(sc, in, &cck_ack_rates, &ofdm_ack_rates);
5713 cmd->cck_rates = htole32(cck_ack_rates);
5714 cmd->ofdm_rates = htole32(ofdm_ack_rates);
5715
5716 cmd->cck_short_preamble
5717 = htole32((ic->ic_flags & IEEE80211_F_SHPREAMBLE)
5718 ? IWM_MAC_FLG_SHORT_PREAMBLE : 0);
5719 cmd->short_slot
5720 = htole32((ic->ic_flags & IEEE80211_F_SHSLOT)
5721 ? IWM_MAC_FLG_SHORT_SLOT : 0);
5722
5723 for (i = 0; i < WME_NUM_AC; i++) {
5724 struct wmeParams *wmep = &ic->ic_wme.wme_params[i];
5725 int txf = iwm_ac_to_tx_fifo[i];
5726
5727 cmd->ac[txf].cw_min = htole16(IWM_EXP2(wmep->wmep_logcwmin));
5728 cmd->ac[txf].cw_max = htole16(IWM_EXP2(wmep->wmep_logcwmax));
5729 cmd->ac[txf].aifsn = wmep->wmep_aifsn;
5730 cmd->ac[txf].fifos_mask = (1 << txf);
5731 cmd->ac[txf].edca_txop = htole16(wmep->wmep_txopLimit * 32);
5732 }
5733 if (ni->ni_flags & IEEE80211_NODE_QOS)
5734 cmd->qos_flags |= htole32(IWM_MAC_QOS_FLG_UPDATE_EDCA);
5735
5736 #ifndef IEEE80211_NO_HT
5737 if (ni->ni_flags & IEEE80211_NODE_HT) {
5738 enum ieee80211_htprot htprot =
5739 (ni->ni_htop1 & IEEE80211_HTOP1_PROT_MASK);
5740 switch (htprot) {
5741 case IEEE80211_HTPROT_NONE:
5742 break;
5743 case IEEE80211_HTPROT_NONMEMBER:
5744 case IEEE80211_HTPROT_NONHT_MIXED:
5745 cmd->protection_flags |=
5746 htole32(IWM_MAC_PROT_FLG_HT_PROT);
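			/* FALLTHROUGH */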
5747 case IEEE80211_HTPROT_20MHZ:
5748 cmd->protection_flags |=
5749 htole32(IWM_MAC_PROT_FLG_HT_PROT |
5750 IWM_MAC_PROT_FLG_FAT_PROT);
5751 break;
5752 default:
5753 break;
5754 }
5755
5756 cmd->qos_flags |= htole32(IWM_MAC_QOS_FLG_TGN);
5757 }
5758 #endif
5759
5760 if (ic->ic_flags & IEEE80211_F_USEPROT)
5761 cmd->protection_flags |= htole32(IWM_MAC_PROT_FLG_TGG_PROTECT);
5762
5763 cmd->filter_flags = htole32(IWM_MAC_FILTER_ACCEPT_GRP);
5764 #undef IWM_EXP2
5765 }
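#if 0	/* illustrative sketch, not part of the driver */
/*
 * Quick check of the IWM_EXP2() conversion used above: net80211
 * stores contention windows in exponent form (ECW), while the
 * firmware wants the expanded value CW = 2^ECW - 1.  The ECW
 * values below are typical 802.11 best-effort defaults, assumed
 * here for illustration.
 */
#include <stdio.h>

#define EXP2(x)	((1 << (x)) - 1)

int
main(void)
{
	printf("ECWmin 4  -> CWmin %d\n", EXP2(4));	/* 15 */
	printf("ECWmax 10 -> CWmax %d\n", EXP2(10));	/* 1023 */
	/* The TXOP limit is scaled similarly: net80211 keeps it in
	 * units of 32 usec, hence the "* 32" in the loop above. */
	return 0;
}
#endif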
5766
5767 static void
5768 iwm_mac_ctxt_cmd_fill_sta(struct iwm_softc *sc, struct iwm_node *in,
5769 struct iwm_mac_data_sta *sta, int assoc)
5770 {
5771 struct ieee80211_node *ni = &in->in_ni;
5772 uint32_t dtim_off;
5773 uint64_t tsf;
5774
5775 dtim_off = ni->ni_dtim_count * ni->ni_intval * IEEE80211_DUR_TU;
5776 tsf = le64toh(ni->ni_tstamp.tsf);
5777
5778 sta->is_assoc = htole32(assoc);
5779 sta->dtim_time = htole32(ni->ni_rstamp + dtim_off);
5780 sta->dtim_tsf = htole64(tsf + dtim_off);
5781 sta->bi = htole32(ni->ni_intval);
5782 sta->bi_reciprocal = htole32(iwm_reciprocal(ni->ni_intval));
5783 sta->dtim_interval = htole32(ni->ni_intval * ni->ni_dtim_period);
5784 sta->dtim_reciprocal = htole32(iwm_reciprocal(sta->dtim_interval));
5785 sta->listen_interval = htole32(10);
5786 sta->assoc_id = htole32(ni->ni_associd);
5787 sta->assoc_beacon_arrive_time = htole32(ni->ni_rstamp);
5788 }
5789
5790 static int
5791 iwm_mac_ctxt_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action,
5792 int assoc)
5793 {
5794 struct ieee80211_node *ni = &in->in_ni;
5795 struct iwm_mac_ctx_cmd cmd;
5796
5797 memset(&cmd, 0, sizeof(cmd));
5798
5799 iwm_mac_ctxt_cmd_common(sc, in, &cmd, action, assoc);
5800
	/*
	 * Allow beacons to pass through as long as we are not associated
	 * or we do not have DTIM period information.
	 */
5803 if (!assoc || !ni->ni_associd || !ni->ni_dtim_period)
5804 cmd.filter_flags |= htole32(IWM_MAC_FILTER_IN_BEACON);
5805 else
5806 iwm_mac_ctxt_cmd_fill_sta(sc, in, &cmd.sta, assoc);
5807
5808 return iwm_send_cmd_pdu(sc, IWM_MAC_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
5809 }
5810
5811 #define IWM_MISSED_BEACONS_THRESHOLD 8
5812
5813 static void
5814 iwm_rx_missed_beacons_notif(struct iwm_softc *sc,
5815 struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
5816 {
5817 struct iwm_missed_beacons_notif *mb = (void *)pkt->data;
5818 int s;
5819
5820 DPRINTF(("missed bcn mac_id=%u, consecutive=%u (%u, %u, %u)\n",
5821 le32toh(mb->mac_id),
5822 le32toh(mb->consec_missed_beacons),
5823 le32toh(mb->consec_missed_beacons_since_last_rx),
5824 le32toh(mb->num_recvd_beacons),
5825 le32toh(mb->num_expected_beacons)));
5826
5827 /*
5828 * TODO: the threshold should be adjusted based on latency conditions,
5829 * and/or in case of a CS flow on one of the other AP vifs.
5830 */
5831 if (le32toh(mb->consec_missed_beacons_since_last_rx) >
5832 IWM_MISSED_BEACONS_THRESHOLD) {
5833 s = splnet();
5834 ieee80211_beacon_miss(&sc->sc_ic);
5835 splx(s);
5836 }
5837 }
5838
5839 static int
5840 iwm_update_quotas(struct iwm_softc *sc, struct iwm_node *in)
5841 {
5842 struct iwm_time_quota_cmd cmd;
5843 int i, idx, num_active_macs, quota, quota_rem;
5844 int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
5845 int n_ifs[IWM_MAX_BINDINGS] = {0, };
5846 uint16_t id;
5847
5848 memset(&cmd, 0, sizeof(cmd));
5849
5850 /* currently, PHY ID == binding ID */
5851 if (in) {
5852 id = in->in_phyctxt->id;
5853 KASSERT(id < IWM_MAX_BINDINGS);
5854 colors[id] = in->in_phyctxt->color;
5855
		n_ifs[id] = 1;
5858 }
5859
5860 /*
	 * The FW's scheduling session consists of IWM_MAX_QUOTA
	 * fragments. Divide these fragments equally between all
	 * the bindings that require quota.
5864 */
5865 num_active_macs = 0;
5866 for (i = 0; i < IWM_MAX_BINDINGS; i++) {
5867 cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
5868 num_active_macs += n_ifs[i];
5869 }
5870
5871 quota = 0;
5872 quota_rem = 0;
5873 if (num_active_macs) {
5874 quota = IWM_MAX_QUOTA / num_active_macs;
5875 quota_rem = IWM_MAX_QUOTA % num_active_macs;
5876 }
5877
5878 for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
5879 if (colors[i] < 0)
5880 continue;
5881
5882 cmd.quotas[idx].id_and_color =
5883 htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
5884
5885 if (n_ifs[i] <= 0) {
5886 cmd.quotas[idx].quota = htole32(0);
5887 cmd.quotas[idx].max_duration = htole32(0);
5888 } else {
5889 cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
5890 cmd.quotas[idx].max_duration = htole32(0);
5891 }
5892 idx++;
5893 }
5894
5895 /* Give the remainder of the session to the first binding */
5896 cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
5897
5898 return iwm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, 0, sizeof(cmd), &cmd);
5899 }
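#if 0	/* illustrative sketch, not part of the driver */
/*
 * Arithmetic behind the quota split above: IWM_MAX_QUOTA scheduler
 * fragments are divided evenly among the active MACs and the
 * remainder goes to the first binding.  Standalone demo; the value
 * 128 for the quota pool is an assumption for illustration only.
 */
#include <stdio.h>

#define MAX_QUOTA	128		/* assumed stand-in */

int
main(void)
{
	int num_active_macs = 3;
	int quota = MAX_QUOTA / num_active_macs;	/* 42 */
	int quota_rem = MAX_QUOTA % num_active_macs;	/* 2 */

	/* First binding receives quota + remainder; the rest, quota. */
	printf("binding 0: %d, others: %d\n", quota + quota_rem, quota);
	return 0;
}
#endif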
5900
5901 static int
5902 iwm_auth(struct iwm_softc *sc)
5903 {
5904 struct ieee80211com *ic = &sc->sc_ic;
5905 struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
5906 uint32_t duration;
5907 int err;
5908
5909 err = iwm_sf_config(sc, IWM_SF_FULL_ON);
5910 if (err)
5911 return err;
5912
5913 err = iwm_allow_mcast(sc);
5914 if (err)
5915 return err;
5916
5917 sc->sc_phyctxt[0].channel = in->in_ni.ni_chan;
5918 err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[0], 1, 1,
5919 IWM_FW_CTXT_ACTION_MODIFY, 0);
5920 if (err)
5921 return err;
5922 in->in_phyctxt = &sc->sc_phyctxt[0];
5923
5924 err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD, 0);
5925 if (err) {
5926 aprint_error_dev(sc->sc_dev,
5927 "could not add MAC context (error %d)\n", err);
5928 return err;
5929 }
5930
5931 err = iwm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD);
5932 if (err)
5933 return err;
5934
5935 err = iwm_add_sta_cmd(sc, in, 0);
5936 if (err)
5937 return err;
5938
5939 err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 0);
5940 if (err) {
5941 aprint_error_dev(sc->sc_dev, "failed to update MAC\n");
5942 return err;
5943 }
5944
5945 /*
5946 * Prevent the FW from wandering off channel during association
5947 * by "protecting" the session with a time event.
5948 */
5949 if (in->in_ni.ni_intval)
5950 duration = in->in_ni.ni_intval * 2;
5951 else
5952 duration = IEEE80211_DUR_TU;
5953 iwm_protect_session(sc, in, duration, in->in_ni.ni_intval / 2);
5954 DELAY(100);
5955
5956 return 0;
5957 }
5958
5959 static int
5960 iwm_assoc(struct iwm_softc *sc)
5961 {
5962 struct ieee80211com *ic = &sc->sc_ic;
5963 struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
5964 int err;
5965
5966 err = iwm_add_sta_cmd(sc, in, 1);
5967 if (err)
5968 return err;
5969
5970 return 0;
5971 }
5972
5973 static struct ieee80211_node *
5974 iwm_node_alloc(struct ieee80211_node_table *nt)
5975 {
5976 return malloc(sizeof(struct iwm_node), M_80211_NODE, M_NOWAIT | M_ZERO);
5977 }
5978
5979 static void
5980 iwm_calib_timeout(void *arg)
5981 {
5982 struct iwm_softc *sc = arg;
5983 struct ieee80211com *ic = &sc->sc_ic;
5984 struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
5985 #ifndef IEEE80211_NO_HT
5986 struct ieee80211_node *ni = &in->in_ni;
5987 int otxrate;
5988 #endif
5989 int s;
5990
5991 s = splnet();
5992 if ((ic->ic_fixed_rate == -1
5993 #ifndef IEEE80211_NO_HT
5994 || ic->ic_fixed_mcs == -1
5995 #endif
5996 ) &&
5997 ic->ic_opmode == IEEE80211_M_STA && ic->ic_bss) {
5998 #ifndef IEEE80211_NO_HT
5999 if (ni->ni_flags & IEEE80211_NODE_HT)
6000 otxrate = ni->ni_txmcs;
6001 else
6002 otxrate = ni->ni_txrate;
6003 #endif
6004 ieee80211_amrr_choose(&sc->sc_amrr, &in->in_ni, &in->in_amn);
6005
6006 #ifndef IEEE80211_NO_HT
6007 /*
6008 * If AMRR has chosen a new TX rate we must update
		 * the firmware's LQ rate table from process context.
6010 */
6011 if ((ni->ni_flags & IEEE80211_NODE_HT) &&
6012 otxrate != ni->ni_txmcs)
6013 softint_schedule(sc->setrates_task);
6014 else if (otxrate != ni->ni_txrate)
6015 softint_schedule(sc->setrates_task);
6016 #endif
6017 }
6018 splx(s);
6019
6020 callout_schedule(&sc->sc_calib_to, mstohz(500));
6021 }
6022
6023 #ifndef IEEE80211_NO_HT
6024 static void
6025 iwm_setrates_task(void *arg)
6026 {
6027 struct iwm_softc *sc = arg;
6028 struct ieee80211com *ic = &sc->sc_ic;
6029 struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
6030
6031 /* Update rates table based on new TX rate determined by AMRR. */
6032 iwm_setrates(in);
6033 }
6034
6035 static int
6036 iwm_setrates(struct iwm_node *in)
6037 {
6038 struct ieee80211_node *ni = &in->in_ni;
6039 struct ieee80211com *ic = ni->ni_ic;
6040 struct iwm_softc *sc = IC2IFP(ic)->if_softc;
6041 struct iwm_lq_cmd *lq = &in->in_lq;
6042 struct ieee80211_rateset *rs = &ni->ni_rates;
6043 int i, j, ridx, ridx_min, tab = 0;
6044 #ifndef IEEE80211_NO_HT
6045 int sgi_ok;
6046 #endif
6047 struct iwm_host_cmd cmd = {
6048 .id = IWM_LQ_CMD,
6049 .len = { sizeof(in->in_lq), },
6050 };
6051
6052 memset(lq, 0, sizeof(*lq));
6053 lq->sta_id = IWM_STATION_ID;
6054
6055 if (ic->ic_flags & IEEE80211_F_USEPROT)
6056 lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;
6057
6058 #ifndef IEEE80211_NO_HT
6059 sgi_ok = ((ni->ni_flags & IEEE80211_NODE_HT) &&
6060 (ni->ni_htcaps & IEEE80211_HTCAP_SGI20));
6061 #endif
6064 /*
6065 * Fill the LQ rate selection table with legacy and/or HT rates
6066 * in descending order, i.e. with the node's current TX rate first.
6067 * In cases where throughput of an HT rate corresponds to a legacy
6068 * rate it makes no sense to add both. We rely on the fact that
6069 * iwm_rates is laid out such that equivalent HT/legacy rates share
6070 * the same IWM_RATE_*_INDEX value. Also, rates not applicable to
6071 * legacy/HT are assumed to be marked with an 'invalid' PLCP value.
6072 */
6073 j = 0;
6074 ridx_min = (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) ?
6075 IWM_RIDX_OFDM : IWM_RIDX_CCK;
6076 for (ridx = IWM_RIDX_MAX; ridx >= ridx_min; ridx--) {
6077 if (j >= __arraycount(lq->rs_table))
6078 break;
6079 tab = 0;
6080 #ifndef IEEE80211_NO_HT
6081 if ((ni->ni_flags & IEEE80211_NODE_HT) &&
6082 iwm_rates[ridx].ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP) {
6083 for (i = ni->ni_txmcs; i >= 0; i--) {
6084 if (isclr(ni->ni_rxmcs, i))
6085 continue;
6086 if (ridx == iwm_mcs2ridx[i]) {
6087 tab = iwm_rates[ridx].ht_plcp;
6088 tab |= IWM_RATE_MCS_HT_MSK;
6089 if (sgi_ok)
6090 tab |= IWM_RATE_MCS_SGI_MSK;
6091 break;
6092 }
6093 }
6094 }
6095 #endif
6096 if (tab == 0 && iwm_rates[ridx].plcp != IWM_RATE_INVM_PLCP) {
6097 for (i = ni->ni_txrate; i >= 0; i--) {
6098 if (iwm_rates[ridx].rate == (rs->rs_rates[i] &
6099 IEEE80211_RATE_VAL)) {
6100 tab = iwm_rates[ridx].plcp;
6101 break;
6102 }
6103 }
6104 }
6105
6106 if (tab == 0)
6107 continue;
6108
6109 tab |= 1 << IWM_RATE_MCS_ANT_POS;
6110 if (IWM_RIDX_IS_CCK(ridx))
6111 tab |= IWM_RATE_MCS_CCK_MSK;
6112 DPRINTFN(2, ("station rate %d %x\n", i, tab));
6113 lq->rs_table[j++] = htole32(tab);
6114 }
6115
6116 /* Fill the rest with the lowest possible rate */
6117 i = j > 0 ? j - 1 : 0;
6118 while (j < __arraycount(lq->rs_table))
6119 lq->rs_table[j++] = lq->rs_table[i];
6120
6121 lq->single_stream_ant_msk = IWM_ANT_A;
6122 lq->dual_stream_ant_msk = IWM_ANT_AB;
6123
6124 lq->agg_time_limit = htole16(4000); /* 4ms */
6125 lq->agg_disable_start_th = 3;
6126 #ifdef notyet
6127 lq->agg_frame_cnt_limit = 0x3f;
6128 #else
6129 lq->agg_frame_cnt_limit = 1; /* tx agg disabled */
6130 #endif
6131
6132 cmd.data[0] = &in->in_lq;
6133 return iwm_send_cmd(sc, &cmd);
6134 }
6135 #endif
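#if 0	/* illustrative sketch, not part of the driver */
/*
 * Shape of the LQ rate table built by iwm_setrates() above: entries
 * are written in descending rate order starting from the node's
 * current TX rate, and the remaining slots are padded with the
 * lowest rate written.  Standalone demo with made-up PLCP values
 * and a shortened 4-entry table (the real rs_table is larger).
 */
#include <stdio.h>

int
main(void)
{
	unsigned rs_table[4];
	unsigned rates_desc[] = { 0x0d, 0x09, 0x05 };	/* assumed PLCPs */
	unsigned i, j = 0;

	for (i = 0; i < 3 && j < 4; i++)
		rs_table[j++] = rates_desc[i];

	/* Pad the rest with the last (lowest) rate, as the driver does. */
	i = j > 0 ? j - 1 : 0;
	while (j < 4)
		rs_table[j++] = rs_table[i];

	for (j = 0; j < 4; j++)
		printf("rs_table[%u] = 0x%02x\n", j, rs_table[j]);
	return 0;
}
#endif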
6136
6137 static int
6138 iwm_media_change(struct ifnet *ifp)
6139 {
6140 struct iwm_softc *sc = ifp->if_softc;
6141 struct ieee80211com *ic = &sc->sc_ic;
6142 uint8_t rate, ridx;
6143 int err;
6144
6145 err = ieee80211_media_change(ifp);
6146 if (err != ENETRESET)
6147 return err;
6148
6149 #ifndef IEEE80211_NO_HT
6150 if (ic->ic_fixed_mcs != -1)
6151 sc->sc_fixed_ridx = iwm_mcs2ridx[ic->ic_fixed_mcs];
6152 else
6153 #endif
6154 if (ic->ic_fixed_rate != -1) {
6155 rate = ic->ic_sup_rates[ic->ic_curmode].
6156 rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL;
6157 /* Map 802.11 rate to HW rate index. */
6158 for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
6159 if (iwm_rates[ridx].rate == rate)
6160 break;
6161 sc->sc_fixed_ridx = ridx;
6162 }
6163
6164 if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
6165 (IFF_UP | IFF_RUNNING)) {
6166 iwm_stop(ifp, 0);
6167 err = iwm_init(ifp);
6168 }
6169 return err;
6170 }
6171
6172 static int
6173 iwm_do_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
6174 {
6175 struct ifnet *ifp = IC2IFP(ic);
6176 struct iwm_softc *sc = ifp->if_softc;
6177 enum ieee80211_state ostate = ic->ic_state;
6178 struct iwm_node *in;
6179 int err;
6180
6181 DPRINTF(("switching state %s->%s\n", ieee80211_state_name[ostate],
6182 ieee80211_state_name[nstate]));
6183
6184 if (ostate == IEEE80211_S_SCAN && nstate != ostate)
6185 iwm_led_blink_stop(sc);
6186
6187 if (ostate == IEEE80211_S_RUN && nstate != ostate)
6188 iwm_disable_beacon_filter(sc);
6189
6190 /* Reset the device if moving out of AUTH, ASSOC, or RUN. */
6191 /* XXX Is there a way to switch states without a full reset? */
6192 if (ostate > IEEE80211_S_SCAN && nstate < ostate) {
6193 /*
		 * Upon receiving a deauth frame from the AP, the net80211
		 * stack puts the driver into AUTH state.  This driver cannot
		 * handle that transition directly, so force the FSM through
		 * INIT instead; iwm_init() will start a new scan.
6197 */
6198 if (nstate != IEEE80211_S_INIT) {
6199 DPRINTF(("Force transition to INIT; MGT=%d\n", arg));
6200 /* Always pass arg as -1 since we can't Tx right now. */
6201 sc->sc_newstate(ic, IEEE80211_S_INIT, -1);
6202 iwm_stop(ifp, 0);
6203 iwm_init(ifp);
6204 return 0;
6205 }
6206
6207 iwm_stop_device(sc);
6208 iwm_init_hw(sc);
6209 }
6210
6211 switch (nstate) {
6212 case IEEE80211_S_INIT:
6213 break;
6214
6215 case IEEE80211_S_SCAN:
6216 if (ostate == nstate &&
6217 ISSET(sc->sc_flags, IWM_FLAG_SCANNING))
6218 return 0;
6219 if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
6220 err = iwm_umac_scan(sc);
6221 else
6222 err = iwm_lmac_scan(sc);
6223 if (err) {
6224 DPRINTF(("%s: could not initiate scan: %d\n",
6225 DEVNAME(sc), err));
6226 return err;
6227 }
6228 SET(sc->sc_flags, IWM_FLAG_SCANNING);
6229 ic->ic_state = nstate;
6230 iwm_led_blink_start(sc);
6231 return 0;
6232
6233 case IEEE80211_S_AUTH:
6234 err = iwm_auth(sc);
6235 if (err) {
6236 DPRINTF(("%s: could not move to auth state: %d\n",
6237 DEVNAME(sc), err));
6238 return err;
6239 }
6240 break;
6241
6242 case IEEE80211_S_ASSOC:
6243 err = iwm_assoc(sc);
6244 if (err) {
6245 DPRINTF(("%s: failed to associate: %d\n", DEVNAME(sc),
6246 err));
6247 return err;
6248 }
6249 break;
6250
6251 case IEEE80211_S_RUN:
6252 in = (struct iwm_node *)ic->ic_bss;
6253
6254 /* We have now been assigned an associd by the AP. */
6255 err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 1);
6256 if (err) {
6257 aprint_error_dev(sc->sc_dev, "failed to update MAC\n");
6258 return err;
6259 }
6260
6261 err = iwm_power_update_device(sc);
6262 if (err) {
6263 aprint_error_dev(sc->sc_dev,
			    "could not send power command (error %d)\n", err);
6265 return err;
6266 }
6267 #ifdef notyet
6268 /*
6269 * Disabled for now. Default beacon filter settings
6270 * prevent net80211 from getting ERP and HT protection
6271 * updates from beacons.
6272 */
6273 err = iwm_enable_beacon_filter(sc, in);
6274 if (err) {
6275 aprint_error_dev(sc->sc_dev,
6276 "could not enable beacon filter\n");
6277 return err;
6278 }
6279 #endif
6280 err = iwm_power_mac_update_mode(sc, in);
6281 if (err) {
6282 aprint_error_dev(sc->sc_dev,
6283 "could not update MAC power (error %d)\n", err);
6284 return err;
6285 }
6286
6287 err = iwm_update_quotas(sc, in);
6288 if (err) {
6289 aprint_error_dev(sc->sc_dev,
6290 "could not update quotas (error %d)\n", err);
6291 return err;
6292 }
6293
6294 ieee80211_amrr_node_init(&sc->sc_amrr, &in->in_amn);
6295
		/* Start at the lowest available bit-rate; AMRR will raise it. */
6297 in->in_ni.ni_txrate = 0;
6298 #ifndef IEEE80211_NO_HT
6299 in->in_ni.ni_txmcs = 0;
6300 iwm_setrates(in);
6301 #endif
6302
6303 callout_schedule(&sc->sc_calib_to, mstohz(500));
6304 iwm_led_enable(sc);
6305 break;
6306
6307 default:
6308 break;
6309 }
6310
6311 return sc->sc_newstate(ic, nstate, arg);
6312 }
6313
6314 static void
6315 iwm_newstate_cb(struct work *wk, void *v)
6316 {
6317 struct iwm_softc *sc = v;
6318 struct ieee80211com *ic = &sc->sc_ic;
6319 struct iwm_newstate_state *iwmns = (struct iwm_newstate_state *)wk;
6320 enum ieee80211_state nstate = iwmns->ns_nstate;
6321 int generation = iwmns->ns_generation;
6322 int arg = iwmns->ns_arg;
6323 int s;
6324
6325 kmem_intr_free(iwmns, sizeof(*iwmns));
6326
6327 s = splnet();
6328
6329 DPRINTF(("Prepare to switch state %d->%d\n", ic->ic_state, nstate));
6330 if (sc->sc_generation != generation) {
6331 DPRINTF(("newstate_cb: someone pulled the plug meanwhile\n"));
6332 if (nstate == IEEE80211_S_INIT) {
6333 DPRINTF(("newstate_cb: nstate == IEEE80211_S_INIT: "
6334 "calling sc_newstate()\n"));
6335 (void) sc->sc_newstate(ic, nstate, arg);
6336 }
6337 } else
6338 (void) iwm_do_newstate(ic, nstate, arg);
6339
6340 splx(s);
6341 }
6342
6343 static int
6344 iwm_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
6345 {
6346 struct iwm_newstate_state *iwmns;
6347 struct ifnet *ifp = IC2IFP(ic);
6348 struct iwm_softc *sc = ifp->if_softc;
6349
6350 callout_stop(&sc->sc_calib_to);
6351
6352 iwmns = kmem_intr_alloc(sizeof(*iwmns), KM_NOSLEEP);
6353 if (!iwmns) {
6354 DPRINTF(("%s: allocating state cb mem failed\n", DEVNAME(sc)));
6355 return ENOMEM;
6356 }
6357
6358 iwmns->ns_nstate = nstate;
6359 iwmns->ns_arg = arg;
6360 iwmns->ns_generation = sc->sc_generation;
6361
6362 workqueue_enqueue(sc->sc_nswq, &iwmns->ns_wk, NULL);
6363
6364 return 0;
6365 }
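#if 0	/* illustrative sketch, not part of the driver */
/*
 * The generation counter checked in iwm_newstate_cb() above is a
 * stale-work guard: each queued state change snapshots
 * sc_generation at enqueue time, and the callback drops the work
 * if the device was stopped or reinitialized (generation bumped)
 * before the workqueue ran.  Minimal sketch of the pattern, with
 * hypothetical names:
 */
struct work_item {
	int	wi_generation;	/* snapshot taken at enqueue time */
};

static int current_generation;

static void
work_cb(struct work_item *wi)
{
	if (wi->wi_generation != current_generation)
		return;		/* device was reset; drop stale work */
	/* ... otherwise perform the deferred state change ... */
}
#endif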
6366
6367 static void
6368 iwm_endscan(struct iwm_softc *sc)
6369 {
6370 struct ieee80211com *ic = &sc->sc_ic;
6371 int s;
6372
6373 DPRINTF(("%s: scan ended\n", DEVNAME(sc)));
6374
6375 s = splnet();
6376 if (ic->ic_state == IEEE80211_S_SCAN)
6377 ieee80211_end_scan(ic);
6378 splx(s);
6379 }
6380
6381 /*
6382 * Aging and idle timeouts for the different possible scenarios
6383 * in default configuration
6384 */
6385 static const uint32_t
6386 iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
6387 {
6388 htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
6389 htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
6390 },
6391 {
6392 htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF),
6393 htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)
6394 },
6395 {
6396 htole32(IWM_SF_MCAST_AGING_TIMER_DEF),
6397 htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)
6398 },
6399 {
6400 htole32(IWM_SF_BA_AGING_TIMER_DEF),
6401 htole32(IWM_SF_BA_IDLE_TIMER_DEF)
6402 },
6403 {
6404 htole32(IWM_SF_TX_RE_AGING_TIMER_DEF),
6405 htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)
6406 },
6407 };
6408
6409 /*
6410 * Aging and idle timeouts for the different possible scenarios
6411 * in single BSS MAC configuration.
6412 */
6413 static const uint32_t
6414 iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
6415 {
6416 htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER),
6417 htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)
6418 },
6419 {
6420 htole32(IWM_SF_AGG_UNICAST_AGING_TIMER),
6421 htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)
6422 },
6423 {
6424 htole32(IWM_SF_MCAST_AGING_TIMER),
6425 htole32(IWM_SF_MCAST_IDLE_TIMER)
6426 },
6427 {
6428 htole32(IWM_SF_BA_AGING_TIMER),
6429 htole32(IWM_SF_BA_IDLE_TIMER)
6430 },
6431 {
6432 htole32(IWM_SF_TX_RE_AGING_TIMER),
6433 htole32(IWM_SF_TX_RE_IDLE_TIMER)
6434 },
6435 };
6436
6437 static void
6438 iwm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
6439 struct ieee80211_node *ni)
6440 {
6441 int i, j, watermark;
6442
6443 sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN);
6444
6445 /*
	 * If we are in the association flow, check the antenna
	 * configuration capabilities of the AP station and choose
	 * the watermark accordingly.
6448 */
6449 if (ni) {
6450 #ifndef IEEE80211_NO_HT
6451 if (ni->ni_flags & IEEE80211_NODE_HT) {
6452 #ifdef notyet
6453 if (ni->ni_rxmcs[2] != 0)
6454 watermark = IWM_SF_W_MARK_MIMO3;
6455 else if (ni->ni_rxmcs[1] != 0)
6456 watermark = IWM_SF_W_MARK_MIMO2;
6457 else
6458 #endif
6459 watermark = IWM_SF_W_MARK_SISO;
6460 } else
6461 #endif
6462 watermark = IWM_SF_W_MARK_LEGACY;
	} else {
		/* Default watermark value for unassociated mode. */
		watermark = IWM_SF_W_MARK_MIMO2;
	}
6467 sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark);
6468
6469 for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) {
6470 for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) {
6471 sf_cmd->long_delay_timeouts[i][j] =
6472 htole32(IWM_SF_LONG_DELAY_AGING_TIMER);
6473 }
6474 }
6475
6476 if (ni) {
6477 memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
6478 sizeof(iwm_sf_full_timeout));
6479 } else {
6480 memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
6481 sizeof(iwm_sf_full_timeout_def));
6482 }
6483 }
6484
6485 static int
6486 iwm_sf_config(struct iwm_softc *sc, int new_state)
6487 {
6488 struct ieee80211com *ic = &sc->sc_ic;
6489 struct iwm_sf_cfg_cmd sf_cmd = {
6490 .state = htole32(IWM_SF_FULL_ON),
6491 };
6492
6493 if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
6494 sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF);
6495
6496 switch (new_state) {
6497 case IWM_SF_UNINIT:
6498 case IWM_SF_INIT_OFF:
6499 iwm_fill_sf_command(sc, &sf_cmd, NULL);
6500 break;
6501 case IWM_SF_FULL_ON:
6502 iwm_fill_sf_command(sc, &sf_cmd, ic->ic_bss);
6503 break;
6504 default:
6505 return EINVAL;
6506 }
6507
6508 return iwm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
6509 sizeof(sf_cmd), &sf_cmd);
6510 }
6511
6512 static int
6513 iwm_send_bt_init_conf(struct iwm_softc *sc)
6514 {
6515 struct iwm_bt_coex_cmd bt_cmd;
6516
6517 bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
6518 bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
6519
6520 return iwm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd), &bt_cmd);
6521 }
6522
6523 static bool
6524 iwm_is_lar_supported(struct iwm_softc *sc)
6525 {
6526 bool nvm_lar = sc->sc_nvm.lar_enabled;
6527 bool tlv_lar = isset(sc->sc_enabled_capa,
6528 IWM_UCODE_TLV_CAPA_LAR_SUPPORT);
6529
6530 if (iwm_lar_disable)
6531 return false;
6532
6533 /*
6534 * Enable LAR only if it is supported by the FW (TLV) &&
6535 * enabled in the NVM
6536 */
6537 if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
6538 return nvm_lar && tlv_lar;
6539 else
6540 return tlv_lar;
6541 }
6542
6543 static int
6544 iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
6545 {
6546 struct iwm_mcc_update_cmd mcc_cmd;
6547 struct iwm_host_cmd hcmd = {
6548 .id = IWM_MCC_UPDATE_CMD,
6549 .flags = IWM_CMD_WANT_SKB,
6550 .data = { &mcc_cmd },
6551 };
6552 int err;
6553 int resp_v2 = isset(sc->sc_enabled_capa,
6554 IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
6555
6556 if (!iwm_is_lar_supported(sc)) {
6557 DPRINTF(("%s: no LAR support\n", __func__));
6558 return 0;
6559 }
6560
6561 memset(&mcc_cmd, 0, sizeof(mcc_cmd));
6562 mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
6563 if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
6564 isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC))
6565 mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
6566 else
6567 mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
6568
6569 if (resp_v2)
6570 hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
6571 else
6572 hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
6573
6574 err = iwm_send_cmd(sc, &hcmd);
6575 if (err)
6576 return err;
6577
6578 iwm_free_resp(sc, &hcmd);
6579
6580 return 0;
6581 }
6582
6583 static void
6584 iwm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
6585 {
6586 struct iwm_host_cmd cmd = {
6587 .id = IWM_REPLY_THERMAL_MNG_BACKOFF,
6588 .len = { sizeof(uint32_t), },
6589 .data = { &backoff, },
6590 };
6591
6592 iwm_send_cmd(sc, &cmd);
6593 }
6594
6595 static int
6596 iwm_init_hw(struct iwm_softc *sc)
6597 {
6598 struct ieee80211com *ic = &sc->sc_ic;
6599 int err, i, ac;
6600
6601 err = iwm_preinit(sc);
6602 if (err)
6603 return err;
6604
6605 err = iwm_start_hw(sc);
6606 if (err) {
6607 aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
6608 return err;
6609 }
6610
6611 err = iwm_run_init_mvm_ucode(sc, 0);
6612 if (err)
6613 return err;
6614
6615 /* Should stop and start HW since INIT image just loaded. */
6616 iwm_stop_device(sc);
6617 err = iwm_start_hw(sc);
6618 if (err) {
6619 aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
6620 return err;
6621 }
6622
6623 /* Restart, this time with the regular firmware */
6624 err = iwm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_REGULAR);
6625 if (err) {
6626 aprint_error_dev(sc->sc_dev,
6627 "could not load firmware (error %d)\n", err);
6628 goto err;
6629 }
6630
6631 err = iwm_send_bt_init_conf(sc);
6632 if (err) {
6633 aprint_error_dev(sc->sc_dev,
6634 "could not init bt coex (error %d)\n", err);
6635 goto err;
6636 }
6637
6638 err = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc));
6639 if (err) {
6640 aprint_error_dev(sc->sc_dev,
6641 "could not init tx ant config (error %d)\n", err);
6642 goto err;
6643 }
6644
	/* Send phy db control command and then phy db calibration. */
6646 err = iwm_send_phy_db_data(sc);
6647 if (err) {
6648 aprint_error_dev(sc->sc_dev,
6649 "could not init phy db (error %d)\n", err);
6650 goto err;
6651 }
6652
6653 err = iwm_send_phy_cfg_cmd(sc);
6654 if (err) {
6655 aprint_error_dev(sc->sc_dev,
6656 "could not send phy config (error %d)\n", err);
6657 goto err;
6658 }
6659
6660 /* Add auxiliary station for scanning */
6661 err = iwm_add_aux_sta(sc);
6662 if (err) {
6663 aprint_error_dev(sc->sc_dev,
6664 "could not add aux station (error %d)\n", err);
6665 goto err;
6666 }
6667
6668 for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
6669 /*
6670 * The channel used here isn't relevant as it's
6671 * going to be overwritten in the other flows.
6672 * For now use the first channel we have.
6673 */
6674 sc->sc_phyctxt[i].channel = &ic->ic_channels[1];
6675 err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[i], 1, 1,
6676 IWM_FW_CTXT_ACTION_ADD, 0);
6677 if (err) {
6678 aprint_error_dev(sc->sc_dev,
6679 "could not add phy context %d (error %d)\n",
6680 i, err);
6681 goto err;
6682 }
6683 }
6684
6685 /* Initialize tx backoffs to the minimum. */
6686 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
6687 iwm_tt_tx_backoff(sc, 0);
6688
6689 err = iwm_power_update_device(sc);
6690 if (err) {
6691 aprint_error_dev(sc->sc_dev,
		    "could not send power command (error %d)\n", err);
6693 goto err;
6694 }
6695
6696 err = iwm_send_update_mcc_cmd(sc, iwm_default_mcc);
6697 if (err) {
6698 aprint_error_dev(sc->sc_dev,
6699 "could not init LAR (error %d)\n", err);
6700 goto err;
6701 }
6702
6703 if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
6704 err = iwm_config_umac_scan(sc);
6705 if (err) {
6706 aprint_error_dev(sc->sc_dev,
6707 "could not configure scan (error %d)\n", err);
6708 goto err;
6709 }
6710 }
6711
6712 for (ac = 0; ac < WME_NUM_AC; ac++) {
6713 err = iwm_enable_txq(sc, IWM_STATION_ID, ac,
6714 iwm_ac_to_tx_fifo[ac]);
6715 if (err) {
6716 aprint_error_dev(sc->sc_dev,
6717 "could not enable Tx queue %d (error %d)\n",
			    ac, err);
6719 goto err;
6720 }
6721 }
6722
6723 err = iwm_disable_beacon_filter(sc);
6724 if (err) {
6725 aprint_error_dev(sc->sc_dev,
6726 "could not disable beacon filter (error %d)\n", err);
6727 goto err;
6728 }
6729
6730 return 0;
6731
6732 err:
6733 iwm_stop_device(sc);
6734 return err;
6735 }
6736
6737 /* Allow multicast from our BSSID. */
6738 static int
6739 iwm_allow_mcast(struct iwm_softc *sc)
6740 {
6741 struct ieee80211com *ic = &sc->sc_ic;
6742 struct ieee80211_node *ni = ic->ic_bss;
6743 struct iwm_mcast_filter_cmd *cmd;
6744 size_t size;
6745 int err;
6746
6747 size = roundup(sizeof(*cmd), 4);
6748 cmd = kmem_intr_zalloc(size, KM_NOSLEEP);
6749 if (cmd == NULL)
6750 return ENOMEM;
6751 cmd->filter_own = 1;
6752 cmd->port_id = 0;
6753 cmd->count = 0;
6754 cmd->pass_all = 1;
6755 IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
6756
6757 err = iwm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD, 0, size, cmd);
6758 kmem_intr_free(cmd, size);
6759 return err;
6760 }
6761
6762 static int
6763 iwm_init(struct ifnet *ifp)
6764 {
6765 struct iwm_softc *sc = ifp->if_softc;
6766 int err;
6767
6768 if (ISSET(sc->sc_flags, IWM_FLAG_HW_INITED))
6769 return 0;
6770
6771 sc->sc_generation++;
6772 sc->sc_flags &= ~IWM_FLAG_STOPPED;
6773
6774 err = iwm_init_hw(sc);
6775 if (err) {
6776 iwm_stop(ifp, 1);
6777 return err;
6778 }
6779
6780 ifp->if_flags &= ~IFF_OACTIVE;
6781 ifp->if_flags |= IFF_RUNNING;
6782
6783 ieee80211_begin_scan(&sc->sc_ic, 0);
6784 SET(sc->sc_flags, IWM_FLAG_HW_INITED);
6785
6786 return 0;
6787 }
6788
6789 static void
6790 iwm_start(struct ifnet *ifp)
6791 {
6792 struct iwm_softc *sc = ifp->if_softc;
6793 struct ieee80211com *ic = &sc->sc_ic;
6794 struct ieee80211_node *ni;
6795 struct ether_header *eh;
6796 struct mbuf *m;
6797 int ac;
6798
6799 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
6800 return;
6801
6802 for (;;) {
6803 /* why isn't this done per-queue? */
6804 if (sc->qfullmsk != 0) {
6805 ifp->if_flags |= IFF_OACTIVE;
6806 break;
6807 }
6808
6809 /* need to send management frames even if we're not RUNning */
6810 IF_DEQUEUE(&ic->ic_mgtq, m);
6811 if (m) {
6812 ni = M_GETCTX(m, struct ieee80211_node *);
6813 M_CLEARCTX(m);
6814 ac = WME_AC_BE;
6815 goto sendit;
6816 }
6817 if (ic->ic_state != IEEE80211_S_RUN) {
6818 break;
6819 }
6820
6821 IFQ_DEQUEUE(&ifp->if_snd, m);
6822 if (m == NULL)
6823 break;
6824
6825 if (m->m_len < sizeof (*eh) &&
6826 (m = m_pullup(m, sizeof (*eh))) == NULL) {
6827 ifp->if_oerrors++;
6828 continue;
6829 }
6830
6831 eh = mtod(m, struct ether_header *);
6832 ni = ieee80211_find_txnode(ic, eh->ether_dhost);
6833 if (ni == NULL) {
6834 m_freem(m);
6835 ifp->if_oerrors++;
6836 continue;
6837 }
6838
6839 /* classify mbuf so we can find which tx ring to use */
6840 if (ieee80211_classify(ic, m, ni) != 0) {
6841 m_freem(m);
6842 ieee80211_free_node(ni);
6843 ifp->if_oerrors++;
6844 continue;
6845 }
6846
6847 /* No QoS encapsulation for EAPOL frames. */
6848 ac = (eh->ether_type != htons(ETHERTYPE_PAE)) ?
6849 M_WME_GETAC(m) : WME_AC_BE;
6850
6851 bpf_mtap(ifp, m);
6852
6853 if ((m = ieee80211_encap(ic, m, ni)) == NULL) {
6854 ieee80211_free_node(ni);
6855 ifp->if_oerrors++;
6856 continue;
6857 }
6858
6859 sendit:
6860 bpf_mtap3(ic->ic_rawbpf, m);
6861
6862 if (iwm_tx(sc, m, ni, ac) != 0) {
6863 ieee80211_free_node(ni);
6864 ifp->if_oerrors++;
6865 continue;
6866 }
6867
6868 if (ifp->if_flags & IFF_UP) {
6869 sc->sc_tx_timer = 15;
6870 ifp->if_timer = 1;
6871 }
6872 }
6873 }
6874
6875 static void
6876 iwm_stop(struct ifnet *ifp, int disable)
6877 {
6878 struct iwm_softc *sc = ifp->if_softc;
6879 struct ieee80211com *ic = &sc->sc_ic;
6880 struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
6881
6882 sc->sc_flags &= ~IWM_FLAG_HW_INITED;
6883 sc->sc_flags |= IWM_FLAG_STOPPED;
6884 sc->sc_generation++;
6885 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
6886
6887 if (in)
6888 in->in_phyctxt = NULL;
6889
6890 if (ic->ic_state != IEEE80211_S_INIT)
6891 ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
6892
6893 callout_stop(&sc->sc_calib_to);
6894 iwm_led_blink_stop(sc);
6895 ifp->if_timer = sc->sc_tx_timer = 0;
6896 iwm_stop_device(sc);
6897 }
6898
6899 static void
6900 iwm_watchdog(struct ifnet *ifp)
6901 {
6902 struct iwm_softc *sc = ifp->if_softc;
6903
6904 ifp->if_timer = 0;
6905 if (sc->sc_tx_timer > 0) {
6906 if (--sc->sc_tx_timer == 0) {
6907 aprint_error_dev(sc->sc_dev, "device timeout\n");
6908 #ifdef IWM_DEBUG
6909 iwm_nic_error(sc);
6910 #endif
6911 ifp->if_flags &= ~IFF_UP;
6912 iwm_stop(ifp, 1);
6913 ifp->if_oerrors++;
6914 return;
6915 }
6916 ifp->if_timer = 1;
6917 }
6918
6919 ieee80211_watchdog(&sc->sc_ic);
6920 }
6921
6922 static int
6923 iwm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
6924 {
6925 struct iwm_softc *sc = ifp->if_softc;
6926 struct ieee80211com *ic = &sc->sc_ic;
6927 const struct sockaddr *sa;
6928 int s, err = 0;
6929
6930 s = splnet();
6931
6932 switch (cmd) {
6933 case SIOCSIFADDR:
6934 ifp->if_flags |= IFF_UP;
6935 /* FALLTHROUGH */
6936 case SIOCSIFFLAGS:
6937 err = ifioctl_common(ifp, cmd, data);
6938 if (err)
6939 break;
6940 if (ifp->if_flags & IFF_UP) {
6941 if (!(ifp->if_flags & IFF_RUNNING)) {
6942 err = iwm_init(ifp);
6943 if (err)
6944 ifp->if_flags &= ~IFF_UP;
6945 }
6946 } else {
6947 if (ifp->if_flags & IFF_RUNNING)
6948 iwm_stop(ifp, 1);
6949 }
6950 break;
6951
6952 case SIOCADDMULTI:
6953 case SIOCDELMULTI:
6954 if (!ISSET(sc->sc_flags, IWM_FLAG_ATTACHED)) {
6955 err = ENXIO;
6956 break;
6957 }
6958 sa = ifreq_getaddr(SIOCADDMULTI, (struct ifreq *)data);
6959 err = (cmd == SIOCADDMULTI) ?
6960 ether_addmulti(sa, &sc->sc_ec) :
6961 ether_delmulti(sa, &sc->sc_ec);
6962 if (err == ENETRESET)
6963 err = 0;
6964 break;
6965
6966 default:
6967 if (!ISSET(sc->sc_flags, IWM_FLAG_ATTACHED)) {
6968 err = ether_ioctl(ifp, cmd, data);
6969 break;
6970 }
6971 err = ieee80211_ioctl(ic, cmd, data);
6972 break;
6973 }
6974
6975 if (err == ENETRESET) {
6976 err = 0;
6977 if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
6978 (IFF_UP | IFF_RUNNING)) {
6979 iwm_stop(ifp, 0);
6980 err = iwm_init(ifp);
6981 }
6982 }
6983
6984 splx(s);
6985 return err;
6986 }
6987
6988 /*
6989 * Note: This structure is read from the device with IO accesses,
6990 * and the reading already does the endian conversion. As it is
6991 * read with uint32_t-sized accesses, any members with a different size
6992 * need to be ordered correctly though!
6993 */
6994 struct iwm_error_event_table {
6995 uint32_t valid; /* (nonzero) valid, (0) log is empty */
6996 uint32_t error_id; /* type of error */
6997 uint32_t trm_hw_status0; /* TRM HW status */
6998 uint32_t trm_hw_status1; /* TRM HW status */
6999 uint32_t blink2; /* branch link */
7000 uint32_t ilink1; /* interrupt link */
7001 uint32_t ilink2; /* interrupt link */
7002 uint32_t data1; /* error-specific data */
7003 uint32_t data2; /* error-specific data */
7004 uint32_t data3; /* error-specific data */
7005 uint32_t bcon_time; /* beacon timer */
7006 uint32_t tsf_low; /* network timestamp function timer */
7007 uint32_t tsf_hi; /* network timestamp function timer */
7008 uint32_t gp1; /* GP1 timer register */
7009 uint32_t gp2; /* GP2 timer register */
7010 uint32_t fw_rev_type; /* firmware revision type */
7011 uint32_t major; /* uCode version major */
7012 uint32_t minor; /* uCode version minor */
7013 uint32_t hw_ver; /* HW Silicon version */
7014 uint32_t brd_ver; /* HW board version */
7015 uint32_t log_pc; /* log program counter */
7016 uint32_t frame_ptr; /* frame pointer */
7017 uint32_t stack_ptr; /* stack pointer */
7018 uint32_t hcmd; /* last host command header */
7019 uint32_t isr0; /* isr status register LMPM_NIC_ISR0:
7020 * rxtx_flag */
7021 uint32_t isr1; /* isr status register LMPM_NIC_ISR1:
7022 * host_flag */
7023 uint32_t isr2; /* isr status register LMPM_NIC_ISR2:
7024 * enc_flag */
7025 uint32_t isr3; /* isr status register LMPM_NIC_ISR3:
7026 * time_flag */
7027 uint32_t isr4; /* isr status register LMPM_NIC_ISR4:
7028 * wico interrupt */
7029 uint32_t last_cmd_id; /* last HCMD id handled by the firmware */
7030 uint32_t wait_event; /* wait event() caller address */
7031 uint32_t l2p_control; /* L2pControlField */
7032 uint32_t l2p_duration; /* L2pDurationField */
7033 uint32_t l2p_mhvalid; /* L2pMhValidBits */
7034 uint32_t l2p_addr_match; /* L2pAddrMatchStat */
7035 uint32_t lmpm_pmg_sel; /* indicate which clocks are turned on
7036 * (LMPM_PMG_SEL) */
	uint32_t u_timestamp;	/* indicates the date and time of the
				 * compilation */
7039 uint32_t flow_handler; /* FH read/write pointers, RX credit */
7040 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
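#if 0	/* illustrative sketch, not part of the driver */
/*
 * Why the member-size note above matters: the table is copied out
 * of device memory as a sequence of uint32_t words, so the struct
 * must contain only 32-bit members for the words to line up.  A
 * compile-time check of that assumption might look like:
 */
#include <stdint.h>

struct demo_table {
	uint32_t valid;
	uint32_t error_id;
};

/* One word per member, no padding. */
typedef char demo_table_size_ok[
    sizeof(struct demo_table) == 2 * sizeof(uint32_t) ? 1 : -1];
#endif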
7041
7042 /*
7043 * UMAC error struct - relevant starting from family 8000 chip.
7044 * Note: This structure is read from the device with IO accesses,
7045 * and the reading already does the endian conversion. As it is
 * read with uint32_t-sized accesses, any members with a different size
7047 * need to be ordered correctly though!
7048 */
7049 struct iwm_umac_error_event_table {
7050 uint32_t valid; /* (nonzero) valid, (0) log is empty */
7051 uint32_t error_id; /* type of error */
7052 uint32_t blink1; /* branch link */
7053 uint32_t blink2; /* branch link */
7054 uint32_t ilink1; /* interrupt link */
7055 uint32_t ilink2; /* interrupt link */
7056 uint32_t data1; /* error-specific data */
7057 uint32_t data2; /* error-specific data */
7058 uint32_t data3; /* error-specific data */
7059 uint32_t umac_major;
7060 uint32_t umac_minor;
7061 uint32_t frame_pointer; /* core register 27 */
7062 uint32_t stack_pointer; /* core register 28 */
7063 uint32_t cmd_header; /* latest host cmd sent to UMAC */
7064 uint32_t nic_isr_pref; /* ISR status register */
7065 } __packed;
7066
7067 #define ERROR_START_OFFSET (1 * sizeof(uint32_t))
7068 #define ERROR_ELEM_SIZE (7 * sizeof(uint32_t))
7069
7070 #ifdef IWM_DEBUG
7071 static const struct {
7072 const char *name;
7073 uint8_t num;
7074 } advanced_lookup[] = {
7075 { "NMI_INTERRUPT_WDG", 0x34 },
7076 { "SYSASSERT", 0x35 },
7077 { "UCODE_VERSION_MISMATCH", 0x37 },
7078 { "BAD_COMMAND", 0x38 },
7079 { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
7080 { "FATAL_ERROR", 0x3D },
7081 { "NMI_TRM_HW_ERR", 0x46 },
7082 { "NMI_INTERRUPT_TRM", 0x4C },
7083 { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
7084 { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
7085 { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
7086 { "NMI_INTERRUPT_HOST", 0x66 },
7087 { "NMI_INTERRUPT_ACTION_PT", 0x7C },
7088 { "NMI_INTERRUPT_UNKNOWN", 0x84 },
7089 { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
7090 { "ADVANCED_SYSASSERT", 0 },
7091 };
7092
7093 static const char *
7094 iwm_desc_lookup(uint32_t num)
7095 {
7096 int i;
7097
7098 for (i = 0; i < __arraycount(advanced_lookup) - 1; i++)
7099 if (advanced_lookup[i].num == num)
7100 return advanced_lookup[i].name;
7101
7102 /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
7103 return advanced_lookup[i].name;
7104 }
7105
7106 /*
7107 * Support for dumping the error log seemed like a good idea ...
7108 * but it's mostly hex junk and the only sensible thing is the
7109 * hw/ucode revision (which we know anyway). Since it's here,
7110 * I'll just leave it in, just in case e.g. the Intel guys want to
7111 * help us decipher some "ADVANCED_SYSASSERT" later.
7112 */
7113 static void
7114 iwm_nic_error(struct iwm_softc *sc)
7115 {
7116 struct iwm_error_event_table t;
7117 uint32_t base;
7118
7119 aprint_error_dev(sc->sc_dev, "dumping device error log\n");
7120 base = sc->sc_uc.uc_error_event_table;
7121 if (base < 0x800000) {
7122 aprint_error_dev(sc->sc_dev,
7123 "Invalid error log pointer 0x%08x\n", base);
7124 return;
7125 }
7126
7127 if (iwm_read_mem(sc, base, &t, sizeof(t)/sizeof(uint32_t))) {
7128 aprint_error_dev(sc->sc_dev, "reading errlog failed\n");
7129 return;
7130 }
7131
7132 if (!t.valid) {
7133 aprint_error_dev(sc->sc_dev, "errlog not found, skipping\n");
7134 return;
7135 }
7136
7137 if (ERROR_START_OFFSET <= t.valid * ERROR_ELEM_SIZE) {
7138 aprint_error_dev(sc->sc_dev, "Start Error Log Dump:\n");
7139 aprint_error_dev(sc->sc_dev, "Status: 0x%x, count: %d\n",
7140 sc->sc_flags, t.valid);
7141 }
7142
7143 aprint_error_dev(sc->sc_dev, "%08X | %-28s\n", t.error_id,
7144 iwm_desc_lookup(t.error_id));
7145 aprint_error_dev(sc->sc_dev, "%08X | trm_hw_status0\n",
7146 t.trm_hw_status0);
7147 aprint_error_dev(sc->sc_dev, "%08X | trm_hw_status1\n",
7148 t.trm_hw_status1);
7149 aprint_error_dev(sc->sc_dev, "%08X | branchlink2\n", t.blink2);
7150 aprint_error_dev(sc->sc_dev, "%08X | interruptlink1\n", t.ilink1);
7151 aprint_error_dev(sc->sc_dev, "%08X | interruptlink2\n", t.ilink2);
7152 aprint_error_dev(sc->sc_dev, "%08X | data1\n", t.data1);
7153 aprint_error_dev(sc->sc_dev, "%08X | data2\n", t.data2);
7154 aprint_error_dev(sc->sc_dev, "%08X | data3\n", t.data3);
7155 aprint_error_dev(sc->sc_dev, "%08X | beacon time\n", t.bcon_time);
7156 aprint_error_dev(sc->sc_dev, "%08X | tsf low\n", t.tsf_low);
7157 aprint_error_dev(sc->sc_dev, "%08X | tsf hi\n", t.tsf_hi);
7158 aprint_error_dev(sc->sc_dev, "%08X | time gp1\n", t.gp1);
7159 aprint_error_dev(sc->sc_dev, "%08X | time gp2\n", t.gp2);
7160 aprint_error_dev(sc->sc_dev, "%08X | uCode revision type\n",
7161 t.fw_rev_type);
7162 aprint_error_dev(sc->sc_dev, "%08X | uCode version major\n",
7163 t.major);
7164 aprint_error_dev(sc->sc_dev, "%08X | uCode version minor\n",
7165 t.minor);
7166 aprint_error_dev(sc->sc_dev, "%08X | hw version\n", t.hw_ver);
7167 aprint_error_dev(sc->sc_dev, "%08X | board version\n", t.brd_ver);
7168 aprint_error_dev(sc->sc_dev, "%08X | hcmd\n", t.hcmd);
7169 aprint_error_dev(sc->sc_dev, "%08X | isr0\n", t.isr0);
7170 aprint_error_dev(sc->sc_dev, "%08X | isr1\n", t.isr1);
7171 aprint_error_dev(sc->sc_dev, "%08X | isr2\n", t.isr2);
7172 aprint_error_dev(sc->sc_dev, "%08X | isr3\n", t.isr3);
7173 aprint_error_dev(sc->sc_dev, "%08X | isr4\n", t.isr4);
7174 aprint_error_dev(sc->sc_dev, "%08X | last cmd Id\n", t.last_cmd_id);
7175 aprint_error_dev(sc->sc_dev, "%08X | wait_event\n", t.wait_event);
7176 aprint_error_dev(sc->sc_dev, "%08X | l2p_control\n", t.l2p_control);
7177 aprint_error_dev(sc->sc_dev, "%08X | l2p_duration\n", t.l2p_duration);
7178 aprint_error_dev(sc->sc_dev, "%08X | l2p_mhvalid\n", t.l2p_mhvalid);
7179 aprint_error_dev(sc->sc_dev, "%08X | l2p_addr_match\n",
7180 t.l2p_addr_match);
7181 aprint_error_dev(sc->sc_dev, "%08X | lmpm_pmg_sel\n", t.lmpm_pmg_sel);
7182 aprint_error_dev(sc->sc_dev, "%08X | timestamp\n", t.u_timestamp);
7183 aprint_error_dev(sc->sc_dev, "%08X | flow_handler\n", t.flow_handler);
7184
7185 if (sc->sc_uc.uc_umac_error_event_table)
7186 iwm_nic_umac_error(sc);
7187 }
7188
7189 static void
7190 iwm_nic_umac_error(struct iwm_softc *sc)
7191 {
7192 struct iwm_umac_error_event_table t;
7193 uint32_t base;
7194
7195 base = sc->sc_uc.uc_umac_error_event_table;
7196
7197 if (base < 0x800000) {
7198 aprint_error_dev(sc->sc_dev,
7199 "Invalid error log pointer 0x%08x\n", base);
7200 return;
7201 }
7202
7203 if (iwm_read_mem(sc, base, &t, sizeof(t)/sizeof(uint32_t))) {
7204 aprint_error_dev(sc->sc_dev, "reading errlog failed\n");
7205 return;
7206 }
7207
7208 if (ERROR_START_OFFSET <= t.valid * ERROR_ELEM_SIZE) {
7209 aprint_error_dev(sc->sc_dev, "Start UMAC Error Log Dump:\n");
7210 aprint_error_dev(sc->sc_dev, "Status: 0x%x, count: %d\n",
7211 sc->sc_flags, t.valid);
7212 }
7213
7214 aprint_error_dev(sc->sc_dev, "0x%08X | %s\n", t.error_id,
7215 iwm_desc_lookup(t.error_id));
7216 aprint_error_dev(sc->sc_dev, "0x%08X | umac branchlink1\n", t.blink1);
7217 aprint_error_dev(sc->sc_dev, "0x%08X | umac branchlink2\n", t.blink2);
7218 aprint_error_dev(sc->sc_dev, "0x%08X | umac interruptlink1\n",
7219 t.ilink1);
7220 aprint_error_dev(sc->sc_dev, "0x%08X | umac interruptlink2\n",
7221 t.ilink2);
7222 aprint_error_dev(sc->sc_dev, "0x%08X | umac data1\n", t.data1);
7223 aprint_error_dev(sc->sc_dev, "0x%08X | umac data2\n", t.data2);
7224 aprint_error_dev(sc->sc_dev, "0x%08X | umac data3\n", t.data3);
7225 aprint_error_dev(sc->sc_dev, "0x%08X | umac major\n", t.umac_major);
7226 aprint_error_dev(sc->sc_dev, "0x%08X | umac minor\n", t.umac_minor);
7227 aprint_error_dev(sc->sc_dev, "0x%08X | frame pointer\n",
7228 t.frame_pointer);
7229 aprint_error_dev(sc->sc_dev, "0x%08X | stack pointer\n",
7230 t.stack_pointer);
7231 aprint_error_dev(sc->sc_dev, "0x%08X | last host cmd\n", t.cmd_header);
7232 aprint_error_dev(sc->sc_dev, "0x%08X | isr status reg\n",
7233 t.nic_isr_pref);
7234 }
7235 #endif
7236
7237 #define SYNC_RESP_STRUCT(_var_, _pkt_) \
7238 do { \
7239 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)), \
7240 sizeof(*(_var_)), BUS_DMASYNC_POSTREAD); \
7241 _var_ = (void *)((_pkt_)+1); \
7242 } while (/*CONSTCOND*/0)
7243
7244 #define SYNC_RESP_PTR(_ptr_, _len_, _pkt_) \
7245 do { \
7246 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)), \
	    (_len_), BUS_DMASYNC_POSTREAD);				\
7248 _ptr_ = (void *)((_pkt_)+1); \
7249 } while (/*CONSTCOND*/0)
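/*
 * Buffer layout assumed by the two macros above: the response
 * payload sits immediately after the iwm_rx_packet header in the
 * same DMA buffer, so "(pkt + 1)" points at the payload:
 *
 *	+----------------------+---------------------------+
 *	| struct iwm_rx_packet |  payload (resp/notif)     |
 *	+----------------------+---------------------------+
 *	^ pkt                   ^ (void *)(pkt + 1)
 */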
7250
#define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % IWM_RX_RING_COUNT)
7252
7253 static void
7254 iwm_notif_intr(struct iwm_softc *sc)
7255 {
7256 uint16_t hw;
7257
7258 bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
7259 0, sc->rxq.stat_dma.size, BUS_DMASYNC_POSTREAD);
7260
7261 hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
7262 while (sc->rxq.cur != hw) {
7263 struct iwm_rx_data *data = &sc->rxq.data[sc->rxq.cur];
7264 struct iwm_rx_packet *pkt;
7265 struct iwm_cmd_response *cresp;
7266 int orig_qid, qid, idx, code;
7267
7268 bus_dmamap_sync(sc->sc_dmat, data->map, 0, sizeof(*pkt),
7269 BUS_DMASYNC_POSTREAD);
7270 pkt = mtod(data->m, struct iwm_rx_packet *);
7271
7272 orig_qid = pkt->hdr.qid;
7273 qid = orig_qid & ~0x80;
7274 idx = pkt->hdr.idx;
7275
7276 code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
7277
7278 /*
		 * We randomly get these from the firmware; no idea why.
		 * They at least seem harmless, so just ignore them for now.
7281 */
7282 if (__predict_false((pkt->hdr.code == 0 && qid == 0 && idx == 0)
7283 || pkt->len_n_flags == htole32(0x55550000))) {
7284 ADVANCE_RXQ(sc);
7285 continue;
7286 }
7287
7288 switch (code) {
7289 case IWM_REPLY_RX_PHY_CMD:
7290 iwm_rx_rx_phy_cmd(sc, pkt, data);
7291 break;
7292
7293 case IWM_REPLY_RX_MPDU_CMD:
7294 iwm_rx_rx_mpdu(sc, pkt, data);
7295 break;
7296
7297 case IWM_TX_CMD:
7298 iwm_rx_tx_cmd(sc, pkt, data);
7299 break;
7300
7301 case IWM_MISSED_BEACONS_NOTIFICATION:
7302 iwm_rx_missed_beacons_notif(sc, pkt, data);
7303 break;
7304
7305 case IWM_MFUART_LOAD_NOTIFICATION:
7306 break;
7307
7308 case IWM_ALIVE: {
7309 struct iwm_alive_resp_v1 *resp1;
7310 struct iwm_alive_resp_v2 *resp2;
7311 struct iwm_alive_resp_v3 *resp3;
7312
7313 if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp1)) {
7314 SYNC_RESP_STRUCT(resp1, pkt);
7315 sc->sc_uc.uc_error_event_table
7316 = le32toh(resp1->error_event_table_ptr);
7317 sc->sc_uc.uc_log_event_table
7318 = le32toh(resp1->log_event_table_ptr);
7319 sc->sched_base = le32toh(resp1->scd_base_ptr);
7320 if (resp1->status == IWM_ALIVE_STATUS_OK)
7321 sc->sc_uc.uc_ok = 1;
7322 else
7323 sc->sc_uc.uc_ok = 0;
7324 }
7325 if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp2)) {
7326 SYNC_RESP_STRUCT(resp2, pkt);
7327 sc->sc_uc.uc_error_event_table
7328 = le32toh(resp2->error_event_table_ptr);
7329 sc->sc_uc.uc_log_event_table
7330 = le32toh(resp2->log_event_table_ptr);
7331 sc->sched_base = le32toh(resp2->scd_base_ptr);
7332 sc->sc_uc.uc_umac_error_event_table
7333 = le32toh(resp2->error_info_addr);
7334 if (resp2->status == IWM_ALIVE_STATUS_OK)
7335 sc->sc_uc.uc_ok = 1;
7336 else
7337 sc->sc_uc.uc_ok = 0;
7338 }
7339 if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp3)) {
7340 SYNC_RESP_STRUCT(resp3, pkt);
7341 sc->sc_uc.uc_error_event_table
7342 = le32toh(resp3->error_event_table_ptr);
7343 sc->sc_uc.uc_log_event_table
7344 = le32toh(resp3->log_event_table_ptr);
7345 sc->sched_base = le32toh(resp3->scd_base_ptr);
7346 sc->sc_uc.uc_umac_error_event_table
7347 = le32toh(resp3->error_info_addr);
7348 if (resp3->status == IWM_ALIVE_STATUS_OK)
7349 sc->sc_uc.uc_ok = 1;
7350 else
7351 sc->sc_uc.uc_ok = 0;
7352 }
7353
7354 sc->sc_uc.uc_intr = 1;
7355 wakeup(&sc->sc_uc);
7356 break;
7357 }
7358
7359 case IWM_CALIB_RES_NOTIF_PHY_DB: {
7360 struct iwm_calib_res_notif_phy_db *phy_db_notif;
7361 SYNC_RESP_STRUCT(phy_db_notif, pkt);
7362 uint16_t size = le16toh(phy_db_notif->length);
7363 bus_dmamap_sync(sc->sc_dmat, data->map,
7364 sizeof(*pkt) + sizeof(*phy_db_notif),
7365 size, BUS_DMASYNC_POSTREAD);
7366 iwm_phy_db_set_section(sc, phy_db_notif, size);
7367 break;
7368 }
7369
7370 case IWM_STATISTICS_NOTIFICATION: {
7371 struct iwm_notif_statistics *stats;
7372 SYNC_RESP_STRUCT(stats, pkt);
7373 memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
7374 sc->sc_noise = iwm_get_noise(&stats->rx.general);
7375 break;
7376 }
7377
7378 case IWM_NVM_ACCESS_CMD:
7379 case IWM_MCC_UPDATE_CMD:
7380 if (sc->sc_wantresp == ((qid << 16) | idx)) {
7381 bus_dmamap_sync(sc->sc_dmat, data->map, 0,
7382 sizeof(sc->sc_cmd_resp),
7383 BUS_DMASYNC_POSTREAD);
7384 memcpy(sc->sc_cmd_resp,
7385 pkt, sizeof(sc->sc_cmd_resp));
7386 }
7387 break;
7388
7389 case IWM_MCC_CHUB_UPDATE_CMD: {
7390 struct iwm_mcc_chub_notif *notif;
7391 SYNC_RESP_STRUCT(notif, pkt);
7392
7393 sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
7394 sc->sc_fw_mcc[1] = notif->mcc & 0xff;
7395 sc->sc_fw_mcc[2] = '\0';
7396 break;
7397 }
7398
7399 case IWM_DTS_MEASUREMENT_NOTIFICATION:
7400 case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
7401 IWM_DTS_MEASUREMENT_NOTIF_WIDE): {
7402 struct iwm_dts_measurement_notif_v1 *notif1;
7403 struct iwm_dts_measurement_notif_v2 *notif2;
7404
7405 if (iwm_rx_packet_payload_len(pkt) == sizeof(*notif1)) {
7406 SYNC_RESP_STRUCT(notif1, pkt);
7407 DPRINTF(("%s: DTS temp=%d \n",
7408 DEVNAME(sc), notif1->temp));
7409 break;
7410 }
7411 if (iwm_rx_packet_payload_len(pkt) == sizeof(*notif2)) {
7412 SYNC_RESP_STRUCT(notif2, pkt);
7413 DPRINTF(("%s: DTS temp=%d \n",
7414 DEVNAME(sc), notif2->temp));
7415 break;
7416 }
7417 break;
7418 }
7419
		case IWM_PHY_CONFIGURATION_CMD:
		case IWM_TX_ANT_CONFIGURATION_CMD:
		case IWM_ADD_STA:
		case IWM_MAC_CONTEXT_CMD:
		case IWM_REPLY_SF_CFG_CMD:
		case IWM_POWER_TABLE_CMD:
		case IWM_PHY_CONTEXT_CMD:
		case IWM_BINDING_CONTEXT_CMD:
		case IWM_TIME_EVENT_CMD:
		case IWM_SCAN_REQUEST_CMD:
		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_ABORT_UMAC):
		case IWM_SCAN_OFFLOAD_REQUEST_CMD:
		case IWM_SCAN_OFFLOAD_ABORT_CMD:
		case IWM_REPLY_BEACON_FILTERING_CMD:
		case IWM_MAC_PM_POWER_TABLE:
		case IWM_TIME_QUOTA_CMD:
		case IWM_REMOVE_STA:
		case IWM_TXPATH_FLUSH:
		case IWM_LQ_CMD:
		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_FW_PAGING_BLOCK_CMD):
		case IWM_BT_CONFIG:
		case IWM_REPLY_THERMAL_MNG_BACKOFF:
			SYNC_RESP_STRUCT(cresp, pkt);
			if (sc->sc_wantresp == ((qid << 16) | idx)) {
				memcpy(sc->sc_cmd_resp,
				    pkt, sizeof(*pkt) + sizeof(*cresp));
			}
			break;

		/* ignore */
		case IWM_PHY_DB_CMD:
			break;

		case IWM_INIT_COMPLETE_NOTIF:
			sc->sc_init_complete = 1;
			wakeup(&sc->sc_init_complete);
			break;

		case IWM_SCAN_OFFLOAD_COMPLETE: {
			struct iwm_periodic_scan_complete *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			break;
		}

		case IWM_SCAN_ITERATION_COMPLETE: {
			struct iwm_lmac_scan_complete_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			if (ISSET(sc->sc_flags, IWM_FLAG_SCANNING)) {
				CLR(sc->sc_flags, IWM_FLAG_SCANNING);
				iwm_endscan(sc);
			}
			break;
		}

		case IWM_SCAN_COMPLETE_UMAC: {
			struct iwm_umac_scan_complete *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			if (ISSET(sc->sc_flags, IWM_FLAG_SCANNING)) {
				CLR(sc->sc_flags, IWM_FLAG_SCANNING);
				iwm_endscan(sc);
			}
			break;
		}

		case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
			struct iwm_umac_scan_iter_complete_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			if (ISSET(sc->sc_flags, IWM_FLAG_SCANNING)) {
				CLR(sc->sc_flags, IWM_FLAG_SCANNING);
				iwm_endscan(sc);
			}
			break;
		}

		case IWM_REPLY_ERROR: {
			struct iwm_error_resp *resp;
			SYNC_RESP_STRUCT(resp, pkt);
			aprint_error_dev(sc->sc_dev,
			    "firmware error 0x%x, cmd 0x%x\n",
			    le32toh(resp->error_type), resp->cmd_id);
			break;
		}

		case IWM_TIME_EVENT_NOTIFICATION: {
			struct iwm_time_event_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			break;
		}

		case IWM_DEBUG_LOG_MSG:
			break;

		case IWM_MCAST_FILTER_CMD:
			break;

		case IWM_SCD_QUEUE_CFG: {
			struct iwm_scd_txq_cfg_rsp *rsp;
			SYNC_RESP_STRUCT(rsp, pkt);
			break;
		}

		default:
			aprint_error_dev(sc->sc_dev,
			    "unhandled firmware response 0x%x 0x%x/0x%x "
			    "rx ring %d[%d]\n",
			    code, pkt->hdr.code, pkt->len_n_flags, qid, idx);
			break;
		}

		/*
		 * uCode sets bit 0x80 when it originates the notification,
		 * i.e. when the notification is not a direct response to a
		 * command sent by the driver.
		 * For example, uCode issues IWM_REPLY_RX when it sends a
		 * received frame to the driver.
		 */
		if (!(orig_qid & (1 << 7))) {
			iwm_cmd_done(sc, qid, idx);
		}

		ADVANCE_RXQ(sc);
	}

	/*
	 * Tell the firmware what we have processed.  The write pointer
	 * apparently must be a multiple of 8, so round it down.
	 */
	hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
}

static int
iwm_intr(void *arg)
{
	struct iwm_softc *sc = arg;

	/*
	 * Mask all interrupts and defer the real work to the softint;
	 * iwm_softintr() re-enables them via iwm_restore_interrupts().
	 */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	softint_schedule(sc->sc_soft_ih);
	return 1;
}

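/*
 * Interrupt handling proper.  With ICT (interrupt cause table) enabled,
 * the device DMAs interrupt causes into a table in host memory instead
 * of requiring a slow register read, and the handler reconstructs the
 * equivalent IWM_CSR_INT value from the table entries.
 */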
static void
iwm_softintr(void *arg)
{
	struct iwm_softc *sc = arg;
	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
	uint32_t r1, r2;
	int isperiodic = 0, s;

	if (__predict_true(sc->sc_flags & IWM_FLAG_USE_ICT)) {
		uint32_t *ict = sc->ict_dma.vaddr;
		int tmp;

		bus_dmamap_sync(sc->sc_dmat, sc->ict_dma.map,
		    0, sc->ict_dma.size, BUS_DMASYNC_POSTREAD);
		tmp = le32toh(ict[sc->ict_cur]);	/* entries are LE */
		if (tmp == 0)
			goto out_ena; /* Interrupt not for us. */

		/*
		 * Collect all pending causes, acknowledging each table
		 * entry as we go, until we hit an empty slot.
		 */
		r1 = r2 = 0;
		while (tmp) {
			r1 |= tmp;
			ict[sc->ict_cur] = 0; /* Acknowledge. */
			sc->ict_cur = (sc->ict_cur + 1) % IWM_ICT_COUNT;
			tmp = le32toh(ict[sc->ict_cur]);
		}

		bus_dmamap_sync(sc->sc_dmat, sc->ict_dma.map,
		    0, sc->ict_dma.size, BUS_DMASYNC_PREWRITE);

		/* An all-ones value is not a valid interrupt cause. */
		if (r1 == 0xffffffff)
			r1 = 0;

		/*
		 * Workaround for a hardware bug: the RX bit (bit 15
		 * before expansion) may clear spuriously when interrupt
		 * coalescing is used, but bits 18 and 19 remain set in
		 * that case, so treat either of them as RX pending.
		 */
		if (r1 & 0xc0000)
			r1 |= 0x8000;

		/*
		 * ICT entries pack CSR_INT bits 0-7 and 24-31 into the
		 * low 16 bits; expand them back out.
		 */
		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
	} else {
		r1 = IWM_READ(sc, IWM_CSR_INT);
		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
			return; /* Hardware gone! */
		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
	}
	if (r1 == 0 && r2 == 0) {
		goto out_ena; /* Interrupt not for us. */
	}

	/* Acknowledge interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
	if (__predict_false(!(sc->sc_flags & IWM_FLAG_USE_ICT)))
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, r2);

	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
#ifdef IWM_DEBUG
		int i;

		iwm_nic_error(sc);

		/* Dump driver status (TX and RX rings) while we're here. */
		DPRINTF(("driver status:\n"));
		for (i = 0; i < IWM_MAX_QUEUES; i++) {
			struct iwm_tx_ring *ring = &sc->txq[i];
			DPRINTF((" tx ring %2d: qid=%-2d cur=%-3d "
			    "queued=%-3d\n",
			    i, ring->qid, ring->cur, ring->queued));
		}
		DPRINTF((" rx ring: cur=%d\n", sc->rxq.cur));
		DPRINTF((" 802.11 state %s\n",
		    ieee80211_state_name[sc->sc_ic.ic_state]));
#endif

		aprint_error_dev(sc->sc_dev, "fatal firmware error\n");
fatal:
		s = splnet();
		ifp->if_flags &= ~IFF_UP;
		iwm_stop(ifp, 1);
		splx(s);
		/* Don't restore the interrupt mask. */
		return;
	}

	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
		aprint_error_dev(sc->sc_dev,
		    "hardware error, stopping device\n");
		goto fatal;
	}

	/* Firmware chunk loaded. */
	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
		sc->sc_fw_chunk_done = 1;
		wakeup(&sc->sc_fw);
	}

	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
		if (iwm_check_rfkill(sc) && (ifp->if_flags & IFF_UP))
			goto fatal;
	}

	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
			IWM_WRITE_1(sc,
			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
		isperiodic = 1;
	}

	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) ||
	    isperiodic) {
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);

		iwm_notif_intr(sc);

		/* Re-enable the periodic interrupt, see above. */
		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) &&
		    !isperiodic)
			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
			    IWM_CSR_INT_PERIODIC_ENA);
	}

out_ena:
	iwm_restore_interrupts(sc);
}

7691
7692 /*
7693 * Autoconf glue-sniffing
7694 */
7695
7696 static const pci_product_id_t iwm_devices[] = {
7697 PCI_PRODUCT_INTEL_WIFI_LINK_7260_1,
7698 PCI_PRODUCT_INTEL_WIFI_LINK_7260_2,
7699 PCI_PRODUCT_INTEL_WIFI_LINK_3160_1,
7700 PCI_PRODUCT_INTEL_WIFI_LINK_3160_2,
7701 PCI_PRODUCT_INTEL_WIFI_LINK_7265_1,
7702 PCI_PRODUCT_INTEL_WIFI_LINK_7265_2,
7703 PCI_PRODUCT_INTEL_WIFI_LINK_3165_1,
7704 PCI_PRODUCT_INTEL_WIFI_LINK_3165_2,
7705 PCI_PRODUCT_INTEL_WIFI_LINK_3168,
7706 PCI_PRODUCT_INTEL_WIFI_LINK_8260_1,
7707 PCI_PRODUCT_INTEL_WIFI_LINK_8260_2,
7708 PCI_PRODUCT_INTEL_WIFI_LINK_4165_1,
7709 PCI_PRODUCT_INTEL_WIFI_LINK_4165_2,
7710 PCI_PRODUCT_INTEL_WIFI_LINK_8265,
7711 };
7712
7713 static int
7714 iwm_match(device_t parent, cfdata_t match __unused, void *aux)
7715 {
7716 struct pci_attach_args *pa = aux;
7717
7718 if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_INTEL)
7719 return 0;
7720
7721 for (size_t i = 0; i < __arraycount(iwm_devices); i++)
7722 if (PCI_PRODUCT(pa->pa_id) == iwm_devices[i])
7723 return 1;
7724
7725 return 0;
7726 }

static int
iwm_preinit(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
	int err;

	if (ISSET(sc->sc_flags, IWM_FLAG_ATTACHED))
		return 0;

	err = iwm_start_hw(sc);
	if (err) {
		aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
		return err;
	}

	err = iwm_run_init_mvm_ucode(sc, 1);
	iwm_stop_device(sc);
	if (err)
		return err;

	sc->sc_flags |= IWM_FLAG_ATTACHED;

	aprint_normal_dev(sc->sc_dev, "hw rev 0x%x, fw ver %s, address %s\n",
	    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK, sc->sc_fwver,
	    ether_sprintf(sc->sc_nvm.hw_addr));

#ifndef IEEE80211_NO_HT
	if (sc->sc_nvm.sku_cap_11n_enable)
		iwm_setup_ht_rates(sc);
#endif

	/* Not all hardware can do the 5GHz band. */
	if (sc->sc_nvm.sku_cap_band_52GHz_enable)
		ic->ic_sup_rates[IEEE80211_MODE_11A] = ieee80211_std_rateset_11a;

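	/*
	 * iwm_attach() attached us as a plain Ethernet interface because
	 * the MAC address was not yet known; now that the firmware has
	 * provided it, detach that personality and re-attach as 802.11.
	 */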
	ether_ifdetach(ifp);
	ieee80211_ifattach(ic);

	ic->ic_node_alloc = iwm_node_alloc;

	/* Override 802.11 state transition machine. */
	sc->sc_newstate = ic->ic_newstate;
	ic->ic_newstate = iwm_newstate;
	ieee80211_media_init(ic, iwm_media_change, ieee80211_media_status);
	ieee80211_announce(ic);

	iwm_radiotap_attach(sc);

	return 0;
}

static void
iwm_attach_hook(device_t dev)
{
	struct iwm_softc *sc = device_private(dev);

	iwm_preinit(sc);
}

static void
iwm_attach(device_t parent, device_t self, void *aux)
{
	struct iwm_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	pcireg_t reg, memtype;
	char intrbuf[PCI_INTRSTR_LEN];
	const char *intrstr;
	int err;
	int txq_i;
	const struct sysctlnode *node;

	sc->sc_dev = self;
	sc->sc_pct = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;
	sc->sc_dmat = pa->pa_dmat;
	sc->sc_pciid = pa->pa_id;

	pci_aprint_devinfo(pa, NULL);

	if (workqueue_create(&sc->sc_nswq, "iwmns",
	    iwm_newstate_cb, sc, PRI_NONE, IPL_NET, 0))
		panic("%s: could not create workqueue: newstate",
		    device_xname(self));
	sc->sc_soft_ih = softint_establish(SOFTINT_NET, iwm_softintr, sc);
	if (sc->sc_soft_ih == NULL)
		panic("%s: could not establish softint", device_xname(self));

	/*
	 * Get the offset of the PCI Express Capability Structure in PCI
	 * Configuration Space.  pci_get_capability() returns non-zero on
	 * success, so a zero return means the capability is missing.
	 */
	err = pci_get_capability(sc->sc_pct, sc->sc_pcitag,
	    PCI_CAP_PCIEXPRESS, &sc->sc_cap_off, NULL);
	if (err == 0) {
		aprint_error_dev(self,
		    "PCIe capability structure not found!\n");
		return;
	}

	/* Clear device-specific "PCI retry timeout" register (41h). */
	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);

	/* Enable bus-mastering. */
	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
	reg |= PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, reg);

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START);
	err = pci_mapreg_map(pa, PCI_MAPREG_START, memtype, 0,
	    &sc->sc_st, &sc->sc_sh, NULL, &sc->sc_sz);
	if (err) {
		aprint_error_dev(self, "can't map mem space\n");
		return;
	}

	/* Install interrupt handler. */
	err = pci_intr_alloc(pa, &sc->sc_pihp, NULL, 0);
	if (err) {
		aprint_error_dev(self, "can't allocate interrupt\n");
		return;
	}
	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
	if (pci_intr_type(sc->sc_pct, sc->sc_pihp[0]) == PCI_INTR_TYPE_INTX)
		CLR(reg, PCI_COMMAND_INTERRUPT_DISABLE);
	else
		SET(reg, PCI_COMMAND_INTERRUPT_DISABLE);
	pci_conf_write(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, reg);
	intrstr = pci_intr_string(sc->sc_pct, sc->sc_pihp[0], intrbuf,
	    sizeof(intrbuf));
	sc->sc_ih = pci_intr_establish_xname(sc->sc_pct, sc->sc_pihp[0],
	    IPL_NET, iwm_intr, sc, device_xname(self));
	if (sc->sc_ih == NULL) {
		aprint_error_dev(self, "can't establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(self, "interrupting at %s\n", intrstr);

	sc->sc_wantresp = IWM_CMD_RESP_IDLE;

	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
	switch (PCI_PRODUCT(sc->sc_pciid)) {
	case PCI_PRODUCT_INTEL_WIFI_LINK_3160_1:
	case PCI_PRODUCT_INTEL_WIFI_LINK_3160_2:
		sc->sc_fwname = "iwlwifi-3160-17.ucode";
		sc->host_interrupt_operation_mode = 1;
		sc->apmg_wake_up_wa = 1;
		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
		break;
	case PCI_PRODUCT_INTEL_WIFI_LINK_3165_1:
	case PCI_PRODUCT_INTEL_WIFI_LINK_3165_2:
		sc->sc_fwname = "iwlwifi-7265D-22.ucode";
		sc->host_interrupt_operation_mode = 0;
		sc->apmg_wake_up_wa = 1;
		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
		break;
	case PCI_PRODUCT_INTEL_WIFI_LINK_3168:
		sc->sc_fwname = "iwlwifi-3168-22.ucode";
		sc->host_interrupt_operation_mode = 0;
		sc->apmg_wake_up_wa = 1;
		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
		break;
	case PCI_PRODUCT_INTEL_WIFI_LINK_7260_1:
	case PCI_PRODUCT_INTEL_WIFI_LINK_7260_2:
		sc->sc_fwname = "iwlwifi-7260-17.ucode";
		sc->host_interrupt_operation_mode = 1;
		sc->apmg_wake_up_wa = 1;
		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
		break;
	case PCI_PRODUCT_INTEL_WIFI_LINK_7265_1:
	case PCI_PRODUCT_INTEL_WIFI_LINK_7265_2:
		sc->sc_fwname = (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) ==
		    IWM_CSR_HW_REV_TYPE_7265D ?
		    "iwlwifi-7265D-22.ucode" : "iwlwifi-7265-17.ucode";
		sc->host_interrupt_operation_mode = 0;
		sc->apmg_wake_up_wa = 1;
		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
		break;
	case PCI_PRODUCT_INTEL_WIFI_LINK_8260_1:
	case PCI_PRODUCT_INTEL_WIFI_LINK_8260_2:
	case PCI_PRODUCT_INTEL_WIFI_LINK_4165_1:
	case PCI_PRODUCT_INTEL_WIFI_LINK_4165_2:
		sc->sc_fwname = "iwlwifi-8000C-22.ucode";
		sc->host_interrupt_operation_mode = 0;
		sc->apmg_wake_up_wa = 0;
		sc->sc_device_family = IWM_DEVICE_FAMILY_8000;
		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
		break;
	case PCI_PRODUCT_INTEL_WIFI_LINK_8265:
		sc->sc_fwname = "iwlwifi-8265-22.ucode";
		sc->host_interrupt_operation_mode = 0;
		sc->apmg_wake_up_wa = 0;
		sc->sc_device_family = IWM_DEVICE_FAMILY_8000;
		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
		break;
	default:
		aprint_error_dev(self, "unknown product %#x\n",
		    PCI_PRODUCT(sc->sc_pciid));
		return;
	}
	DPRINTF(("%s: firmware=%s\n", DEVNAME(sc), sc->sc_fwname));

	/*
	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV has
	 * changed: the revision step now also includes bits 0-1 (there is
	 * no more "dash" value).  To keep hw_rev backwards compatible,
	 * store it in the old format.
	 */
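	/*
	 * A sketch of the repacking below, assuming IWM_CSR_HW_REV_STEP()
	 * extracts bits 2-3: the new-format step held in bits 0-1 is moved
	 * into the old-format step field, i.e. effectively
	 *	hw_rev = (hw_rev & 0xfff0) | ((hw_rev & 0x3) << 2);
	 */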
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
		    (IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);

	if (iwm_prepare_card_hw(sc) != 0) {
		aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
		return;
	}

	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
		uint32_t hw_step;

		/*
		 * In order to recognize C step the driver should read the
		 * chip version id located at the AUX bus MISC address.
		 */
		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
		    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
		DELAY(2);

		err = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
		    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
		    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
		    25000);
		if (!err) {
			aprint_error_dev(sc->sc_dev,
			    "failed to wake up the NIC\n");
			return;
		}

		if (iwm_nic_lock(sc)) {
			hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
			hw_step |= IWM_ENABLE_WFPM;
			iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
			hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
			hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
			if (hw_step == 0x3)
				sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
				    (IWM_SILICON_C_STEP << 2);
			iwm_nic_unlock(sc);
		} else {
			aprint_error_dev(sc->sc_dev,
			    "failed to lock the NIC\n");
			return;
		}
	}

	/*
	 * Allocate DMA memory for firmware transfers.
	 * Must be aligned on a 16-byte boundary.
	 */
	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma, sc->sc_fwdmasegsz,
	    16);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate memory for firmware\n");
		return;
	}

	/* Allocate "Keep Warm" page, used internally by the card. */
	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate keep warm page\n");
		goto fail1;
	}

	/*
	 * Allocate the interrupt cause table (ICT).  The alignment
	 * presumably matches the ICT base register, which only holds
	 * address bits above IWM_ICT_PADDR_SHIFT.
	 */
	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma, IWM_ICT_SIZE,
	    1 << IWM_ICT_PADDR_SHIFT);
	if (err) {
		aprint_error_dev(sc->sc_dev, "could not allocate ICT table\n");
		goto fail2;
	}

	/* TX scheduler rings must be aligned on a 1KB boundary. */
	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
	    __arraycount(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate TX scheduler rings\n");
		goto fail3;
	}

	for (txq_i = 0; txq_i < __arraycount(sc->txq); txq_i++) {
		err = iwm_alloc_tx_ring(sc, &sc->txq[txq_i], txq_i);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate TX ring %d\n", txq_i);
			/* Free the rings allocated so far, then sched_dma. */
			goto fail5;
		}
	}

	err = iwm_alloc_rx_ring(sc, &sc->rxq);
	if (err) {
		aprint_error_dev(sc->sc_dev, "could not allocate RX ring\n");
		goto fail5;
	}

	/* Clear pending interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);

	if ((err = sysctl_createv(&sc->sc_clog, 0, NULL, &node,
	    0, CTLTYPE_NODE, device_xname(sc->sc_dev),
	    SYSCTL_DESCR("iwm per-controller controls"),
	    NULL, 0, NULL, 0,
	    CTL_HW, iwm_sysctl_root_num, CTL_CREATE,
	    CTL_EOL)) != 0) {
		aprint_normal_dev(sc->sc_dev,
		    "couldn't create iwm per-controller sysctl node\n");
	}
	if (err == 0) {
		int iwm_nodenum = node->sysctl_num;

		/* Reload firmware sysctl node */
		if ((err = sysctl_createv(&sc->sc_clog, 0, NULL, &node,
		    CTLFLAG_READWRITE, CTLTYPE_INT, "fw_loaded",
		    SYSCTL_DESCR("Reload firmware"),
		    iwm_sysctl_fw_loaded_handler, 0, (void *)sc, 0,
		    CTL_HW, iwm_sysctl_root_num, iwm_nodenum, CTL_CREATE,
		    CTL_EOL)) != 0) {
			aprint_normal_dev(sc->sc_dev,
			    "couldn't create fw_loaded sysctl node\n");
		}
	}

	/*
	 * Attach interface
	 */
	ic->ic_ifp = ifp;
	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
	ic->ic_state = IEEE80211_S_INIT;

	/* Set device capabilities. */
	ic->ic_caps =
	    IEEE80211_C_WEP |		/* WEP */
	    IEEE80211_C_WPA |		/* 802.11i */
#ifdef notyet
	    IEEE80211_C_SCANALL |	/* device scans all channels at once */
	    IEEE80211_C_SCANALLBAND |	/* device scans all bands at once */
#endif
	    IEEE80211_C_SHSLOT |	/* short slot time supported */
	    IEEE80211_C_SHPREAMBLE;	/* short preamble supported */

#ifndef IEEE80211_NO_HT
	ic->ic_htcaps = IEEE80211_HTCAP_SGI20;
	ic->ic_htxcaps = 0;
	ic->ic_txbfcaps = 0;
	ic->ic_aselcaps = 0;
	ic->ic_ampdu_params = (IEEE80211_AMPDU_PARAM_SS_4 | 0x3 /* 64k */);
#endif

	/* All hardware can do the 2.4GHz band. */
	ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b;
	ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g;

	for (int i = 0; i < __arraycount(sc->sc_phyctxt); i++) {
		sc->sc_phyctxt[i].id = i;
	}

	sc->sc_amrr.amrr_min_success_threshold = 1;
	sc->sc_amrr.amrr_max_success_threshold = 15;

	/* IBSS channel undefined for now. */
	ic->ic_ibss_chan = &ic->ic_channels[1];

#if 0
	ic->ic_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
#endif

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = iwm_init;
	ifp->if_stop = iwm_stop;
	ifp->if_ioctl = iwm_ioctl;
	ifp->if_start = iwm_start;
	ifp->if_watchdog = iwm_watchdog;
	IFQ_SET_READY(&ifp->if_snd);
	memcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);

	err = if_initialize(ifp);
	if (err != 0) {
		aprint_error_dev(sc->sc_dev, "if_initialize failed(%d)\n",
		    err);
		goto fail6;
	}
#if 0
	ieee80211_ifattach(ic);
#else
	ether_ifattach(ifp, NULL);	/* XXX */
#endif
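	/*
	 * For now the interface is attached as plain Ethernet (the #if 0
	 * branch above is the eventual goal); iwm_preinit() re-attaches
	 * it as 802.11 once the firmware has been loaded and the MAC
	 * address is known.
	 */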
	/* Use common softint-based if_input */
	ifp->if_percpuq = if_percpuq_create(ifp);
	if_register(ifp);

	callout_init(&sc->sc_calib_to, 0);
	callout_setfunc(&sc->sc_calib_to, iwm_calib_timeout, sc);
	callout_init(&sc->sc_led_blink_to, 0);
	callout_setfunc(&sc->sc_led_blink_to, iwm_led_blink_timeout, sc);
#ifndef IEEE80211_NO_HT
	if (workqueue_create(&sc->sc_setratewq, "iwmsr",
	    iwm_setrates_task, sc, PRI_NONE, IPL_NET, 0))
		panic("%s: could not create workqueue: setrates",
		    device_xname(self));
	if (workqueue_create(&sc->sc_bawq, "iwmba",
	    iwm_ba_task, sc, PRI_NONE, IPL_NET, 0))
		panic("%s: could not create workqueue: blockack",
		    device_xname(self));
	if (workqueue_create(&sc->sc_htprowq, "iwmhtpro",
	    iwm_htprot_task, sc, PRI_NONE, IPL_NET, 0))
		panic("%s: could not create workqueue: htprot",
		    device_xname(self));
#endif

	if (pmf_device_register(self, NULL, NULL))
		pmf_class_network_register(self, ifp);
	else
		aprint_error_dev(self, "couldn't establish power handler\n");

	/*
	 * We can't do normal attach before the file system is mounted
	 * because we cannot read the MAC address without loading the
	 * firmware from disk.  So we postpone until mountroot is done.
	 * Notably, this will require a full driver unload/load cycle
	 * (or reboot) in case the firmware is not present when the
	 * hook runs.
	 */
	config_mountroot(self, iwm_attach_hook);

	return;

fail6:	iwm_free_rx_ring(sc, &sc->rxq);
fail5:	while (--txq_i >= 0)
		iwm_free_tx_ring(sc, &sc->txq[txq_i]);
fail4:	iwm_dma_contig_free(&sc->sched_dma);
fail3:	if (sc->ict_dma.vaddr != NULL)
		iwm_dma_contig_free(&sc->ict_dma);
fail2:	iwm_dma_contig_free(&sc->kw_dma);
fail1:	iwm_dma_contig_free(&sc->fw_dma);
}

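/*
 * Attach the interface to 802.11 radiotap via bpf and initialize the
 * RX/TX tap headers with the fields this driver reports.
 */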
void
iwm_radiotap_attach(struct iwm_softc *sc)
{
	struct ifnet *ifp = IC2IFP(&sc->sc_ic);

	bpf_attach2(ifp, DLT_IEEE802_11_RADIO,
	    sizeof (struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN,
	    &sc->sc_drvbpf);

	sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
	sc->sc_rxtap.wr_ihdr.it_present = htole32(IWM_RX_RADIOTAP_PRESENT);

	sc->sc_txtap_len = sizeof sc->sc_txtapu;
	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
	sc->sc_txtap.wt_ihdr.it_present = htole32(IWM_TX_RADIOTAP_PRESENT);
}

#if 0
static void
iwm_init_task(void *arg)
{
	struct iwm_softc *sc = arg;
	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
	int s;

	rw_enter_write(&sc->ioctl_rwl);
	s = splnet();

	iwm_stop(ifp, 0);
	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP)
		iwm_init(ifp);

	splx(s);
	rw_exit(&sc->ioctl_rwl);
}

static void
iwm_wakeup(struct iwm_softc *sc)
{
	pcireg_t reg;

	/* Clear device-specific "PCI retry timeout" register (41h). */
	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);

	iwm_init_task(sc);
}

static int
iwm_activate(device_t self, enum devact act)
{
	struct iwm_softc *sc = device_private(self);
	struct ifnet *ifp = IC2IFP(&sc->sc_ic);

	switch (act) {
	case DVACT_DEACTIVATE:
		if (ifp->if_flags & IFF_RUNNING)
			iwm_stop(ifp, 0);
		return 0;
	default:
		return EOPNOTSUPP;
	}
}
#endif

CFATTACH_DECL_NEW(iwm, sizeof(struct iwm_softc), iwm_match, iwm_attach,
    NULL, NULL);

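/*
 * hw.iwm.<device>.fw_loaded: reading reports whether the firmware is
 * loaded; writing 0 clears IWM_FLAG_FW_LOADED so the firmware will be
 * reloaded on the next interface init.  Example (unit number
 * hypothetical):
 *
 *	sysctl -w hw.iwm.iwm0.fw_loaded=0
 */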
static int
iwm_sysctl_fw_loaded_handler(SYSCTLFN_ARGS)
{
	struct sysctlnode node;
	struct iwm_softc *sc;
	int err, t;

	node = *rnode;
	sc = node.sysctl_data;
	t = ISSET(sc->sc_flags, IWM_FLAG_FW_LOADED) ? 1 : 0;
	node.sysctl_data = &t;
	err = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (err || newp == NULL)
		return err;

	if (t == 0)
		CLR(sc->sc_flags, IWM_FLAG_FW_LOADED);
	return 0;
}

SYSCTL_SETUP(sysctl_iwm, "sysctl iwm(4) subtree setup")
{
	const struct sysctlnode *rnode;
#ifdef IWM_DEBUG
	const struct sysctlnode *cnode;
#endif /* IWM_DEBUG */
	int rc;

	if ((rc = sysctl_createv(clog, 0, NULL, &rnode,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "iwm",
	    SYSCTL_DESCR("iwm global controls"),
	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
		goto err;

	iwm_sysctl_root_num = rnode->sysctl_num;

#ifdef IWM_DEBUG
	/* control debugging printfs */
	if ((rc = sysctl_createv(clog, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT,
	    "debug", SYSCTL_DESCR("Enable debugging output"),
	    NULL, 0, &iwm_debug, 0, CTL_CREATE, CTL_EOL)) != 0)
		goto err;
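
	/*
	 * Example: on a kernel built with IWM_DEBUG, debug output can be
	 * enabled at run time with
	 *
	 *	sysctl -w hw.iwm.debug=1
	 */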
#endif /* IWM_DEBUG */

	return;

err:
	aprint_error("%s: sysctl_createv failed (rc = %d)\n", __func__, rc);
}