/*	$NetBSD: if_iwm.c,v 1.52 2017/01/09 08:05:14 nonaka Exp $	*/
/*	OpenBSD: if_iwm.c,v 1.147 2016/11/17 14:12:33 stsp Exp	*/
#define IEEE80211_NO_HT
/*
 * Copyright (c) 2014, 2016 genua gmbh <info@genua.de>
 *   Author: Stefan Sperling <stsp@openbsd.org>
 * Copyright (c) 2014 Fixup Software Ltd.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*-
 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
 * which were used as the reference documentation for this implementation.
 *
 ***********************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_iwm.c,v 1.52 2017/01/09 08:05:14 nonaka Exp $");

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <sys/cpu.h>
#include <sys/bus.h>
#include <sys/workqueue.h>
#include <machine/endian.h>
#include <machine/intr.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>
#include <dev/firmload.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <netinet/in.h>
#include <netinet/ip.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_amrr.h>
#include <net80211/ieee80211_radiotap.h>

#define DEVNAME(_s)	device_xname((_s)->sc_dev)
#define IC2IFP(_ic_)	((_ic_)->ic_ifp)

#define le16_to_cpup(_a_)	(le16toh(*(const uint16_t *)(_a_)))
#define le32_to_cpup(_a_)	(le32toh(*(const uint32_t *)(_a_)))

#ifdef IWM_DEBUG
#define DPRINTF(x)	do { if (iwm_debug > 0) printf x; } while (0)
#define DPRINTFN(n, x)	do { if (iwm_debug >= (n)) printf x; } while (0)
int iwm_debug = 0;
#else
#define DPRINTF(x)	do { ; } while (0)
#define DPRINTFN(n, x)	do { ; } while (0)
#endif

#include <dev/pci/if_iwmreg.h>
#include <dev/pci/if_iwmvar.h>

static const uint8_t iwm_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};

static const uint8_t iwm_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};

#define IWM_NUM_2GHZ_CHANNELS	14

static const struct iwm_rate {
	uint8_t rate;
	uint8_t plcp;
	uint8_t ht_plcp;
} iwm_rates[] = {
		/* Legacy */		/* HT */
	{   2,	IWM_RATE_1M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{   4,	IWM_RATE_2M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{  11,	IWM_RATE_5M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{  22,	IWM_RATE_11M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{  12,	IWM_RATE_6M_PLCP,	IWM_RATE_HT_SISO_MCS_0_PLCP },
	{  18,	IWM_RATE_9M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{  24,	IWM_RATE_12M_PLCP,	IWM_RATE_HT_SISO_MCS_1_PLCP },
	{  36,	IWM_RATE_18M_PLCP,	IWM_RATE_HT_SISO_MCS_2_PLCP },
	{  48,	IWM_RATE_24M_PLCP,	IWM_RATE_HT_SISO_MCS_3_PLCP },
	{  72,	IWM_RATE_36M_PLCP,	IWM_RATE_HT_SISO_MCS_4_PLCP },
	{  96,	IWM_RATE_48M_PLCP,	IWM_RATE_HT_SISO_MCS_5_PLCP },
	{ 108,	IWM_RATE_54M_PLCP,	IWM_RATE_HT_SISO_MCS_6_PLCP },
	{ 128,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_SISO_MCS_7_PLCP },
};
#define IWM_RIDX_CCK	0
#define IWM_RIDX_OFDM	4
#define IWM_RIDX_MAX	(__arraycount(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_)	((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_)	((_i_) >= IWM_RIDX_OFDM)

#ifndef IEEE80211_NO_HT
/* Convert an MCS index into an iwm_rates[] index. */
static const int iwm_mcs2ridx[] = {
	IWM_RATE_MCS_0_INDEX,
	IWM_RATE_MCS_1_INDEX,
	IWM_RATE_MCS_2_INDEX,
	IWM_RATE_MCS_3_INDEX,
	IWM_RATE_MCS_4_INDEX,
	IWM_RATE_MCS_5_INDEX,
	IWM_RATE_MCS_6_INDEX,
	IWM_RATE_MCS_7_INDEX,
};
#endif

struct iwm_nvm_section {
	uint16_t length;
	uint8_t *data;
};

struct iwm_newstate_state {
	struct work ns_wk;
	enum ieee80211_state ns_nstate;
	int ns_arg;
	int ns_generation;
};

static int iwm_store_cscheme(struct iwm_softc *, uint8_t *, size_t);
static int iwm_firmware_store_section(struct iwm_softc *,
	    enum iwm_ucode_type, uint8_t *, size_t);
static int iwm_set_default_calib(struct iwm_softc *, const void *);
static int iwm_read_firmware(struct iwm_softc *);
static uint32_t iwm_read_prph(struct iwm_softc *, uint32_t);
static void iwm_write_prph(struct iwm_softc *, uint32_t, uint32_t);
#ifdef IWM_DEBUG
static int iwm_read_mem(struct iwm_softc *, uint32_t, void *, int);
#endif
static int iwm_write_mem(struct iwm_softc *, uint32_t, const void *, int);
static int iwm_write_mem32(struct iwm_softc *, uint32_t, uint32_t);
static int iwm_poll_bit(struct iwm_softc *, int, uint32_t, uint32_t, int);
static int iwm_nic_lock(struct iwm_softc *);
static void iwm_nic_unlock(struct iwm_softc *);
static void iwm_set_bits_mask_prph(struct iwm_softc *, uint32_t, uint32_t,
	    uint32_t);
static void iwm_set_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
static void iwm_clear_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
static int iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *,
	    bus_size_t, bus_size_t);
static void iwm_dma_contig_free(struct iwm_dma_info *);
static int iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static void iwm_disable_rx_dma(struct iwm_softc *);
static void iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static void iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static int iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
	    int);
static void iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
static void iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
static void iwm_enable_rfkill_int(struct iwm_softc *);
static int iwm_check_rfkill(struct iwm_softc *);
static void iwm_enable_interrupts(struct iwm_softc *);
static void iwm_restore_interrupts(struct iwm_softc *);
static void iwm_disable_interrupts(struct iwm_softc *);
static void iwm_ict_reset(struct iwm_softc *);
static int iwm_set_hw_ready(struct iwm_softc *);
static int iwm_prepare_card_hw(struct iwm_softc *);
static void iwm_apm_config(struct iwm_softc *);
static int iwm_apm_init(struct iwm_softc *);
static void iwm_apm_stop(struct iwm_softc *);
static int iwm_allow_mcast(struct iwm_softc *);
static int iwm_start_hw(struct iwm_softc *);
static void iwm_stop_device(struct iwm_softc *);
static void iwm_nic_config(struct iwm_softc *);
static int iwm_nic_rx_init(struct iwm_softc *);
static int iwm_nic_tx_init(struct iwm_softc *);
static int iwm_nic_init(struct iwm_softc *);
static int iwm_enable_txq(struct iwm_softc *, int, int, int);
static int iwm_post_alive(struct iwm_softc *);
static struct iwm_phy_db_entry *
	    iwm_phy_db_get_section(struct iwm_softc *,
	    enum iwm_phy_db_section_type, uint16_t);
static int iwm_phy_db_set_section(struct iwm_softc *,
	    struct iwm_calib_res_notif_phy_db *, uint16_t);
static int iwm_is_valid_channel(uint16_t);
static uint8_t iwm_ch_id_to_ch_index(uint16_t);
static uint16_t iwm_channel_id_to_papd(uint16_t);
static uint16_t iwm_channel_id_to_txp(struct iwm_softc *, uint16_t);
static int iwm_phy_db_get_section_data(struct iwm_softc *, uint32_t,
	    uint8_t **, uint16_t *, uint16_t);
static int iwm_send_phy_db_cmd(struct iwm_softc *, uint16_t, uint16_t,
	    void *);
static int iwm_phy_db_send_all_channel_groups(struct iwm_softc *,
	    enum iwm_phy_db_section_type, uint8_t);
static int iwm_send_phy_db_data(struct iwm_softc *);
static void iwm_te_v2_to_v1(const struct iwm_time_event_cmd_v2 *,
	    struct iwm_time_event_cmd_v1 *);
static int iwm_send_time_event_cmd(struct iwm_softc *,
	    const struct iwm_time_event_cmd_v2 *);
static void iwm_protect_session(struct iwm_softc *, struct iwm_node *,
	    uint32_t, uint32_t);
static int iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
	    uint16_t, uint8_t *, uint16_t *);
static int iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
	    uint16_t *, size_t);
static void iwm_init_channel_map(struct iwm_softc *, const uint16_t * const,
	    const uint8_t *, size_t);
#ifndef IEEE80211_NO_HT
static void iwm_setup_ht_rates(struct iwm_softc *);
static void iwm_htprot_task(void *);
static void iwm_update_htprot(struct ieee80211com *,
	    struct ieee80211_node *);
static int iwm_ampdu_rx_start(struct ieee80211com *,
	    struct ieee80211_node *, uint8_t);
static void iwm_ampdu_rx_stop(struct ieee80211com *,
	    struct ieee80211_node *, uint8_t);
static void iwm_sta_rx_agg(struct iwm_softc *, struct ieee80211_node *,
	    uint8_t, uint16_t, int);
#ifdef notyet
static int iwm_ampdu_tx_start(struct ieee80211com *,
	    struct ieee80211_node *, uint8_t);
static void iwm_ampdu_tx_stop(struct ieee80211com *,
	    struct ieee80211_node *, uint8_t);
#endif
static void iwm_ba_task(void *);
#endif

static int iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
	    const uint16_t *, const uint16_t *, const uint16_t *,
	    const uint16_t *, const uint16_t *);
static void iwm_set_hw_address_8000(struct iwm_softc *,
	    struct iwm_nvm_data *, const uint16_t *, const uint16_t *);
static int iwm_parse_nvm_sections(struct iwm_softc *,
	    struct iwm_nvm_section *);
static int iwm_nvm_init(struct iwm_softc *);
static int iwm_firmware_load_sect(struct iwm_softc *, uint32_t,
	    const uint8_t *, uint32_t);
static int iwm_firmware_load_chunk(struct iwm_softc *, uint32_t,
	    const uint8_t *, uint32_t);
static int iwm_load_firmware_7000(struct iwm_softc *, enum iwm_ucode_type);
static int iwm_load_cpu_sections_8000(struct iwm_softc *,
	    struct iwm_fw_sects *, int, int *);
static int iwm_load_firmware_8000(struct iwm_softc *, enum iwm_ucode_type);
static int iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
static int iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
static int iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
static int iwm_send_phy_cfg_cmd(struct iwm_softc *);
static int iwm_load_ucode_wait_alive(struct iwm_softc *,
	    enum iwm_ucode_type);
static int iwm_run_init_mvm_ucode(struct iwm_softc *, int);
static int iwm_rx_addbuf(struct iwm_softc *, int, int);
static int iwm_calc_rssi(struct iwm_softc *, struct iwm_rx_phy_info *);
static int iwm_get_signal_strength(struct iwm_softc *,
	    struct iwm_rx_phy_info *);
static void iwm_rx_rx_phy_cmd(struct iwm_softc *,
	    struct iwm_rx_packet *, struct iwm_rx_data *);
static int iwm_get_noise(const struct iwm_statistics_rx_non_phy *);
static void iwm_rx_rx_mpdu(struct iwm_softc *, struct iwm_rx_packet *,
	    struct iwm_rx_data *);
static void iwm_rx_tx_cmd_single(struct iwm_softc *,
	    struct iwm_rx_packet *, struct iwm_node *);
static void iwm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
	    struct iwm_rx_data *);
static int iwm_binding_cmd(struct iwm_softc *, struct iwm_node *,
	    uint32_t);
#if 0
static int iwm_binding_update(struct iwm_softc *, struct iwm_node *, int);
static int iwm_binding_add_vif(struct iwm_softc *, struct iwm_node *);
#endif
static void iwm_phy_ctxt_cmd_hdr(struct iwm_softc *, struct iwm_phy_ctxt *,
	    struct iwm_phy_context_cmd *, uint32_t, uint32_t);
static void iwm_phy_ctxt_cmd_data(struct iwm_softc *,
	    struct iwm_phy_context_cmd *, struct ieee80211_channel *,
	    uint8_t, uint8_t);
static int iwm_phy_ctxt_cmd(struct iwm_softc *, struct iwm_phy_ctxt *,
	    uint8_t, uint8_t, uint32_t, uint32_t);
static int iwm_send_cmd(struct iwm_softc *, struct iwm_host_cmd *);
static int iwm_send_cmd_pdu(struct iwm_softc *, uint32_t, uint32_t,
	    uint16_t, const void *);
static int iwm_send_cmd_status(struct iwm_softc *, struct iwm_host_cmd *,
	    uint32_t *);
static int iwm_send_cmd_pdu_status(struct iwm_softc *, uint32_t, uint16_t,
	    const void *, uint32_t *);
static void iwm_free_resp(struct iwm_softc *, struct iwm_host_cmd *);
static void iwm_cmd_done(struct iwm_softc *, int qid, int idx);
#if 0
static void iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
	    uint16_t);
#endif
static const struct iwm_rate *
	    iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
	    struct ieee80211_frame *, struct iwm_tx_cmd *);
static int iwm_tx(struct iwm_softc *, struct mbuf *,
	    struct ieee80211_node *, int);
static void iwm_led_enable(struct iwm_softc *);
static void iwm_led_disable(struct iwm_softc *);
static int iwm_led_is_enabled(struct iwm_softc *);
static void iwm_led_blink_timeout(void *);
static void iwm_led_blink_start(struct iwm_softc *);
static void iwm_led_blink_stop(struct iwm_softc *);
static int iwm_beacon_filter_send_cmd(struct iwm_softc *,
	    struct iwm_beacon_filter_cmd *);
static void iwm_beacon_filter_set_cqm_params(struct iwm_softc *,
	    struct iwm_node *, struct iwm_beacon_filter_cmd *);
static int iwm_update_beacon_abort(struct iwm_softc *, struct iwm_node *,
	    int);
static void iwm_power_build_cmd(struct iwm_softc *, struct iwm_node *,
	    struct iwm_mac_power_cmd *);
static int iwm_power_mac_update_mode(struct iwm_softc *,
	    struct iwm_node *);
static int iwm_power_update_device(struct iwm_softc *);
#ifdef notyet
static int iwm_enable_beacon_filter(struct iwm_softc *, struct iwm_node *);
#endif
static int iwm_disable_beacon_filter(struct iwm_softc *);
static int iwm_add_sta_cmd(struct iwm_softc *, struct iwm_node *, int);
static int iwm_add_aux_sta(struct iwm_softc *);
static uint16_t iwm_scan_rx_chain(struct iwm_softc *);
static uint32_t iwm_scan_rate_n_flags(struct iwm_softc *, int, int);
#ifdef notyet
static uint16_t iwm_get_active_dwell(struct iwm_softc *, int, int);
static uint16_t iwm_get_passive_dwell(struct iwm_softc *, int);
#endif
static uint8_t iwm_lmac_scan_fill_channels(struct iwm_softc *,
	    struct iwm_scan_channel_cfg_lmac *, int);
static int iwm_fill_probe_req(struct iwm_softc *,
	    struct iwm_scan_probe_req *);
static int iwm_lmac_scan(struct iwm_softc *);
static int iwm_config_umac_scan(struct iwm_softc *);
static int iwm_umac_scan(struct iwm_softc *);
static uint8_t iwm_ridx2rate(struct ieee80211_rateset *, int);
static void iwm_ack_rates(struct iwm_softc *, struct iwm_node *, int *,
	    int *);
static void iwm_mac_ctxt_cmd_common(struct iwm_softc *, struct iwm_node *,
	    struct iwm_mac_ctx_cmd *, uint32_t, int);
static void iwm_mac_ctxt_cmd_fill_sta(struct iwm_softc *, struct iwm_node *,
	    struct iwm_mac_data_sta *, int);
static int iwm_mac_ctxt_cmd(struct iwm_softc *, struct iwm_node *,
	    uint32_t, int);
static int iwm_update_quotas(struct iwm_softc *, struct iwm_node *);
static int iwm_auth(struct iwm_softc *);
static int iwm_assoc(struct iwm_softc *);
static void iwm_calib_timeout(void *);
#ifndef IEEE80211_NO_HT
static void iwm_setrates_task(void *);
static int iwm_setrates(struct iwm_node *);
#endif
static int iwm_media_change(struct ifnet *);
static void iwm_newstate_cb(struct work *, void *);
static int iwm_newstate(struct ieee80211com *, enum ieee80211_state, int);
static void iwm_endscan(struct iwm_softc *);
static void iwm_fill_sf_command(struct iwm_softc *, struct iwm_sf_cfg_cmd *,
	    struct ieee80211_node *);
static int iwm_sf_config(struct iwm_softc *, int);
static int iwm_send_bt_init_conf(struct iwm_softc *);
static int iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
static void iwm_tt_tx_backoff(struct iwm_softc *, uint32_t);
static int iwm_init_hw(struct iwm_softc *);
static int iwm_init(struct ifnet *);
static void iwm_start(struct ifnet *);
static void iwm_stop(struct ifnet *, int);
static void iwm_watchdog(struct ifnet *);
static int iwm_ioctl(struct ifnet *, u_long, void *);
#ifdef IWM_DEBUG
static const char *iwm_desc_lookup(uint32_t);
static void iwm_nic_error(struct iwm_softc *);
static void iwm_nic_umac_error(struct iwm_softc *);
#endif
static void iwm_notif_intr(struct iwm_softc *);
static void iwm_softintr(void *);
static int iwm_intr(void *);
static int iwm_preinit(struct iwm_softc *);
static void iwm_attach_hook(device_t);
static void iwm_attach(device_t, device_t, void *);
#if 0
static void iwm_init_task(void *);
static int iwm_activate(device_t, enum devact);
static void iwm_wakeup(struct iwm_softc *);
#endif
static void iwm_radiotap_attach(struct iwm_softc *);
static int iwm_sysctl_fw_loaded_handler(SYSCTLFN_PROTO);

static int iwm_sysctl_root_num;

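/*
 * Load the raw firmware image via firmload(9) and keep it in
 * fw->fw_rawdata; iwm_read_firmware() parses the TLV sections out of
 * that buffer later.  On error, any partially loaded image is left
 * for the caller to release.
 */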
static int
iwm_firmload(struct iwm_softc *sc)
{
	struct iwm_fw_info *fw = &sc->sc_fw;
	firmware_handle_t fwh;
	int err;

	if (ISSET(sc->sc_flags, IWM_FLAG_FW_LOADED))
		return 0;

	/* Open firmware image. */
	err = firmware_open("if_iwm", sc->sc_fwname, &fwh);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not get firmware handle %s\n", sc->sc_fwname);
		return err;
	}

	if (fw->fw_rawdata != NULL && fw->fw_rawsize > 0) {
		kmem_free(fw->fw_rawdata, fw->fw_rawsize);
		fw->fw_rawdata = NULL;
	}

	fw->fw_rawsize = firmware_get_size(fwh);
	/*
	 * Well, this is how the Linux driver checks it ....
	 */
	if (fw->fw_rawsize < sizeof(uint32_t)) {
		aprint_error_dev(sc->sc_dev,
		    "firmware too short: %zd bytes\n", fw->fw_rawsize);
		err = EINVAL;
		goto out;
	}

	/* some sanity */
	if (fw->fw_rawsize > IWM_FWMAXSIZE) {
		aprint_error_dev(sc->sc_dev,
		    "firmware size is ridiculous: %zd bytes\n", fw->fw_rawsize);
		err = EINVAL;
		goto out;
	}

	/* Read the firmware. */
	fw->fw_rawdata = kmem_alloc(fw->fw_rawsize, KM_SLEEP);
	if (fw->fw_rawdata == NULL) {
		aprint_error_dev(sc->sc_dev,
		    "not enough memory to store firmware %s\n", sc->sc_fwname);
		err = ENOMEM;
		goto out;
	}
	err = firmware_read(fwh, 0, fw->fw_rawdata, fw->fw_rawsize);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not read firmware %s\n", sc->sc_fwname);
		goto out;
	}

	SET(sc->sc_flags, IWM_FLAG_FW_LOADED);
 out:
	/* caller will release memory, if necessary */

	firmware_close(fwh);
	return err;
}

/*
 * Update ic_curchan from the channel reported in the last RX PHY info
 * when a beacon or probe response arrives; just maintaining the
 * status quo.
 */
static void
iwm_fix_channel(struct iwm_softc *sc, struct mbuf *m)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_frame *wh;
	uint8_t subtype;

	wh = mtod(m, struct ieee80211_frame *);

	if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_MGT)
		return;

	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

	if (subtype != IEEE80211_FC0_SUBTYPE_BEACON &&
	    subtype != IEEE80211_FC0_SUBTYPE_PROBE_RESP)
		return;

	int chan = le32toh(sc->sc_last_phy_info.channel);
	if (chan < __arraycount(ic->ic_channels))
		ic->ic_curchan = &ic->ic_channels[chan];
}

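/*
 * Check the coding scheme TLV from the firmware image.  Only the
 * length is validated; nothing is stored, since software crypto is
 * always used.
 */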
static int
iwm_store_cscheme(struct iwm_softc *sc, uint8_t *data, size_t dlen)
{
	struct iwm_fw_cscheme_list *l = (struct iwm_fw_cscheme_list *)data;

	if (dlen < sizeof(*l) ||
	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
		return EINVAL;

	/* we don't actually store anything for now, always use s/w crypto */

	return 0;
}

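/*
 * Store one firmware section for the given ucode image type in
 * sc_fw.  Each section starts with a 32-bit device load offset,
 * followed by the section payload.
 */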
static int
iwm_firmware_store_section(struct iwm_softc *sc, enum iwm_ucode_type type,
    uint8_t *data, size_t dlen)
{
	struct iwm_fw_sects *fws;
	struct iwm_fw_onesect *fwone;

	if (type >= IWM_UCODE_TYPE_MAX)
		return EINVAL;
	if (dlen < sizeof(uint32_t))
		return EINVAL;

	fws = &sc->sc_fw.fw_sects[type];
	if (fws->fw_count >= IWM_UCODE_SECT_MAX)
		return EINVAL;

	fwone = &fws->fw_sect[fws->fw_count];

	/* The first 32 bits are the device load offset. */
	memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));

	/* rest is data */
	fwone->fws_data = data + sizeof(uint32_t);
	fwone->fws_len = dlen - sizeof(uint32_t);

	/* for freeing the buffer during driver unload */
	fwone->fws_alloc = data;
	fwone->fws_allocsize = dlen;

	fws->fw_count++;
	fws->fw_totlen += fwone->fws_len;

	return 0;
}

struct iwm_tlv_calib_data {
	uint32_t ucode_type;
	struct iwm_tlv_calib_ctrl calib;
} __packed;

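/*
 * Remember the default calibration triggers the firmware announces
 * for a given ucode image type; they are passed to the device later
 * in the PHY configuration command.
 */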
static int
iwm_set_default_calib(struct iwm_softc *sc, const void *data)
{
	const struct iwm_tlv_calib_data *def_calib = data;
	uint32_t ucode_type = le32toh(def_calib->ucode_type);

	if (ucode_type >= IWM_UCODE_TYPE_MAX) {
		DPRINTF(("%s: Wrong ucode_type %u for default calibration.\n",
		    DEVNAME(sc), ucode_type));
		return EINVAL;
	}

	sc->sc_default_calib[ucode_type].flow_trigger =
	    def_calib->calib.flow_trigger;
	sc->sc_default_calib[ucode_type].event_trigger =
	    def_calib->calib.event_trigger;

	return 0;
}

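/*
 * Parse the TLV-formatted firmware image: check the header magic,
 * then walk the TLV list and record ucode sections, capabilities and
 * API flags in the softc.  Concurrent callers are serialized via
 * fw->fw_status.
 */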
static int
iwm_read_firmware(struct iwm_softc *sc)
{
	struct iwm_fw_info *fw = &sc->sc_fw;
	struct iwm_tlv_ucode_header *uhdr;
	struct iwm_ucode_tlv tlv;
	enum iwm_ucode_tlv_type tlv_type;
	uint8_t *data;
	int err, status;
	size_t len;

	if (fw->fw_status == IWM_FW_STATUS_NONE) {
		fw->fw_status = IWM_FW_STATUS_INPROGRESS;
	} else {
		while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
			tsleep(&sc->sc_fw, 0, "iwmfwp", 0);
	}
	status = fw->fw_status;

	if (status == IWM_FW_STATUS_DONE)
		return 0;

	err = iwm_firmload(sc);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not read firmware %s (error %d)\n",
		    sc->sc_fwname, err);
		goto out;
	}

	sc->sc_capaflags = 0;
	sc->sc_capa_n_scan_channels = IWM_MAX_NUM_SCAN_CHANNELS;
	memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
	memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));

	uhdr = (void *)fw->fw_rawdata;
	if (*(uint32_t *)fw->fw_rawdata != 0
	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
		aprint_error_dev(sc->sc_dev, "invalid firmware %s\n",
		    sc->sc_fwname);
		err = EINVAL;
		goto out;
	}

	snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%d.%d (API ver %d)",
	    IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
	    IWM_UCODE_MINOR(le32toh(uhdr->ver)),
	    IWM_UCODE_API(le32toh(uhdr->ver)));
	data = uhdr->data;
	len = fw->fw_rawsize - sizeof(*uhdr);

	while (len >= sizeof(tlv)) {
		size_t tlv_len;
		void *tlv_data;

		memcpy(&tlv, data, sizeof(tlv));
		tlv_len = le32toh(tlv.length);
		tlv_type = le32toh(tlv.type);

		len -= sizeof(tlv);
		data += sizeof(tlv);
		tlv_data = data;

		if (len < tlv_len) {
			aprint_error_dev(sc->sc_dev,
			    "firmware too short: %zu bytes\n", len);
			err = EINVAL;
			goto parse_out;
		}

		switch (tlv_type) {
		case IWM_UCODE_TLV_PROBE_MAX_LEN:
			if (tlv_len < sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capa_max_probe_len
			    = le32toh(*(uint32_t *)tlv_data);
			/* limit it to something sensible */
			if (sc->sc_capa_max_probe_len >
			    IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
				err = EINVAL;
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_PAN:
			if (tlv_len) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
			break;
		case IWM_UCODE_TLV_FLAGS:
			if (tlv_len < sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			/*
			 * Apparently there can be many flags, but the
			 * Linux driver parses only the first one, and
			 * so do we.
			 *
			 * XXX: why does this override IWM_UCODE_TLV_PAN?
			 * Intentional or a bug?  Observations from the
			 * current firmware file:
			 *  1) TLV_PAN is parsed first
			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
			 * ==> this resets TLV_PAN to itself... hnnnk
			 */
			sc->sc_capaflags = le32toh(*(uint32_t *)tlv_data);
			break;
		case IWM_UCODE_TLV_CSCHEME:
			err = iwm_store_cscheme(sc, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_NUM_OF_CPU: {
			uint32_t num_cpu;
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			num_cpu = le32toh(*(uint32_t *)tlv_data);
			if (num_cpu < 1 || num_cpu > 2) {
				err = EINVAL;
				goto parse_out;
			}
			break;
		}
		case IWM_UCODE_TLV_SEC_RT:
			err = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_SEC_INIT:
			err = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_INIT, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_SEC_WOWLAN:
			err = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_WOW, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_DEF_CALIB:
			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
				err = EINVAL;
				goto parse_out;
			}
			err = iwm_set_default_calib(sc, tlv_data);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_PHY_SKU:
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_fw_phy_config = le32toh(*(uint32_t *)tlv_data);
			break;

		case IWM_UCODE_TLV_API_CHANGES_SET: {
			struct iwm_ucode_api *api;
			if (tlv_len != sizeof(*api)) {
				err = EINVAL;
				goto parse_out;
			}
			api = (struct iwm_ucode_api *)tlv_data;
			/* Flags may exceed 32 bits in future firmware. */
			if (le32toh(api->api_index) > 0) {
				goto parse_out;
			}
			sc->sc_ucode_api = le32toh(api->api_flags);
			break;
		}

		case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
			struct iwm_ucode_capa *capa;
			int idx, i;
			if (tlv_len != sizeof(*capa)) {
				err = EINVAL;
				goto parse_out;
			}
			capa = (struct iwm_ucode_capa *)tlv_data;
			idx = le32toh(capa->api_index);
			if (idx >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
				goto parse_out;
			}
			for (i = 0; i < 32; i++) {
				if (!ISSET(le32toh(capa->api_capa), __BIT(i)))
					continue;
				setbit(sc->sc_enabled_capa, i + (32 * idx));
			}
			break;
		}

		case IWM_UCODE_TLV_FW_UNDOCUMENTED1:
		case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
		case IWM_UCODE_TLV_FW_GSCAN_CAPA:
			/* ignore, not used by current driver */
			break;

		case IWM_UCODE_TLV_SEC_RT_USNIFFER:
			err = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
			    tlv_len);
			if (err)
				goto parse_out;
			break;

		case IWM_UCODE_TLV_N_SCAN_CHANNELS:
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capa_n_scan_channels =
			    le32toh(*(uint32_t *)tlv_data);
			break;

		case IWM_UCODE_TLV_FW_VERSION:
			if (tlv_len != sizeof(uint32_t) * 3) {
				err = EINVAL;
				goto parse_out;
			}
			snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
			    "%d.%d.%d",
			    le32toh(((uint32_t *)tlv_data)[0]),
			    le32toh(((uint32_t *)tlv_data)[1]),
			    le32toh(((uint32_t *)tlv_data)[2]));
			break;

		default:
			DPRINTF(("%s: unknown firmware section %d, abort\n",
			    DEVNAME(sc), tlv_type));
			err = EINVAL;
			goto parse_out;
		}

		len -= roundup(tlv_len, 4);
		data += roundup(tlv_len, 4);
	}

	KASSERT(err == 0);

 parse_out:
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "firmware parse error, section type %d\n", tlv_type);
	}

	if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
		aprint_error_dev(sc->sc_dev,
		    "device uses unsupported power ops\n");
		err = ENOTSUP;
	}

 out:
	if (err)
		fw->fw_status = IWM_FW_STATUS_NONE;
	else
		fw->fw_status = IWM_FW_STATUS_DONE;
	wakeup(&sc->sc_fw);

	if (err && fw->fw_rawdata != NULL) {
		kmem_free(fw->fw_rawdata, fw->fw_rawsize);
		fw->fw_rawdata = NULL;
		CLR(sc->sc_flags, IWM_FLAG_FW_LOADED);
		/* don't touch fw->fw_status */
		memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
	}
	return err;
}

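/*
 * Indirect access to the device's peripheral (PRPH) register space
 * through the HBUS_TARG_PRPH window registers.  These accesses
 * require the MAC clock to be running; most callers bracket them
 * with iwm_nic_lock()/iwm_nic_unlock().
 */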
static uint32_t
iwm_read_prph(struct iwm_softc *sc, uint32_t addr)
{
	IWM_WRITE(sc,
	    IWM_HBUS_TARG_PRPH_RADDR, ((addr & 0x000fffff) | (3 << 24)));
	IWM_BARRIER_READ_WRITE(sc);
	return IWM_READ(sc, IWM_HBUS_TARG_PRPH_RDAT);
}

static void
iwm_write_prph(struct iwm_softc *sc, uint32_t addr, uint32_t val)
{
	IWM_WRITE(sc,
	    IWM_HBUS_TARG_PRPH_WADDR, ((addr & 0x000fffff) | (3 << 24)));
	IWM_BARRIER_WRITE(sc);
	IWM_WRITE(sc, IWM_HBUS_TARG_PRPH_WDAT, val);
}

#ifdef IWM_DEBUG
static int
iwm_read_mem(struct iwm_softc *sc, uint32_t addr, void *buf, int dwords)
{
	int offs, err = 0;
	uint32_t *vals = buf;

	if (iwm_nic_lock(sc)) {
		IWM_WRITE(sc, IWM_HBUS_TARG_MEM_RADDR, addr);
		for (offs = 0; offs < dwords; offs++)
			vals[offs] = IWM_READ(sc, IWM_HBUS_TARG_MEM_RDAT);
		iwm_nic_unlock(sc);
	} else {
		err = EBUSY;
	}
	return err;
}
#endif

static int
iwm_write_mem(struct iwm_softc *sc, uint32_t addr, const void *buf, int dwords)
{
	int offs;
	const uint32_t *vals = buf;

	if (iwm_nic_lock(sc)) {
		IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WADDR, addr);
		/* WADDR auto-increments */
		for (offs = 0; offs < dwords; offs++) {
			uint32_t val = vals ? vals[offs] : 0;
			IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WDAT, val);
		}
		iwm_nic_unlock(sc);
	} else {
		return EBUSY;
	}
	return 0;
}

static int
iwm_write_mem32(struct iwm_softc *sc, uint32_t addr, uint32_t val)
{
	return iwm_write_mem(sc, addr, &val, 1);
}

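/*
 * Poll a CSR until (reg & mask) == (bits & mask) or the timeout
 * (in microseconds) expires.  Returns 1 on success, 0 on timeout.
 * Typical use, mirroring the clock-stabilization wait below:
 *
 *	if (!iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
 *	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
 *	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000))
 *		err = ETIMEDOUT;
 */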
static int
iwm_poll_bit(struct iwm_softc *sc, int reg, uint32_t bits, uint32_t mask,
    int timo)
{
	for (;;) {
		if ((IWM_READ(sc, reg) & mask) == (bits & mask)) {
			return 1;
		}
		if (timo < 10) {
			return 0;
		}
		timo -= 10;
		DELAY(10);
	}
}

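/*
 * Request access to the MAC and wait for its clocks to become ready.
 * Returns 1 if the device may be accessed; on timeout it returns 0
 * and forces an NMI to get the device back into a known state.
 */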
static int
iwm_nic_lock(struct iwm_softc *sc)
{
	int rv = 0;

	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
		DELAY(2);

	if (iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
	     | IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP, 15000)) {
		rv = 1;
	} else {
		aprint_error_dev(sc->sc_dev, "device timeout\n");
		IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_FORCE_NMI);
	}

	return rv;
}

static void
iwm_nic_unlock(struct iwm_softc *sc)
{
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}

static void
iwm_set_bits_mask_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits,
    uint32_t mask)
{
	uint32_t val;

	/* XXX: no error path? */
	if (iwm_nic_lock(sc)) {
		val = iwm_read_prph(sc, reg) & mask;
		val |= bits;
		iwm_write_prph(sc, reg, val);
		iwm_nic_unlock(sc);
	}
}

static void
iwm_set_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
{
	iwm_set_bits_mask_prph(sc, reg, bits, ~0);
}

static void
iwm_clear_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
{
	iwm_set_bits_mask_prph(sc, reg, 0, ~bits);
}

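/*
 * Allocate a zeroed, physically contiguous DMA area with the given
 * size and alignment and map it into kernel virtual address space.
 * On success, dma->vaddr and dma->paddr are valid.
 */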
static int
iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
    bus_size_t size, bus_size_t alignment)
{
	int nsegs, err;
	void *va;

	dma->tag = tag;
	dma->size = size;

	err = bus_dmamap_create(tag, size, 1, size, 0, BUS_DMA_NOWAIT,
	    &dma->map);
	if (err)
		goto fail;

	err = bus_dmamem_alloc(tag, size, alignment, 0, &dma->seg, 1, &nsegs,
	    BUS_DMA_NOWAIT);
	if (err)
		goto fail;

	err = bus_dmamem_map(tag, &dma->seg, 1, size, &va, BUS_DMA_NOWAIT);
	if (err)
		goto fail;
	dma->vaddr = va;

	err = bus_dmamap_load(tag, dma->map, dma->vaddr, size, NULL,
	    BUS_DMA_NOWAIT);
	if (err)
		goto fail;

	memset(dma->vaddr, 0, size);
	bus_dmamap_sync(tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE);
	dma->paddr = dma->map->dm_segs[0].ds_addr;

	return 0;

 fail:	iwm_dma_contig_free(dma);
	return err;
}

static void
iwm_dma_contig_free(struct iwm_dma_info *dma)
{
	if (dma->map != NULL) {
		if (dma->vaddr != NULL) {
			bus_dmamap_sync(dma->tag, dma->map, 0, dma->size,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(dma->tag, dma->map);
			bus_dmamem_unmap(dma->tag, dma->vaddr, dma->size);
			bus_dmamem_free(dma->tag, &dma->seg, 1);
			dma->vaddr = NULL;
		}
		bus_dmamap_destroy(dma->tag, dma->map);
		dma->map = NULL;
	}
}

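/*
 * Allocate the RX ring: the 256-byte aligned descriptor array, the
 * 16-byte aligned status area, and one DMA map plus receive buffer
 * per ring slot.
 */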
static int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	bus_size_t size;
	int i, err;

	ring->cur = 0;

	/* Allocate RX descriptors (256-byte aligned). */
	size = IWM_RX_RING_COUNT * sizeof(uint32_t);
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate RX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/* Allocate RX status area (16-byte aligned). */
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
	    sizeof(*ring->stat), 16);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate RX status DMA memory\n");
		goto fail;
	}
	ring->stat = ring->stat_dma.vaddr;

	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];

		memset(data, 0, sizeof(*data));
		err = bus_dmamap_create(sc->sc_dmat, IWM_RBUF_SIZE, 1,
		    IWM_RBUF_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &data->map);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could not create RX buf DMA map\n");
			goto fail;
		}

		err = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i);
		if (err)
			goto fail;
	}
	return 0;

 fail:	iwm_free_rx_ring(sc, ring);
	return err;
}

static void
iwm_disable_rx_dma(struct iwm_softc *sc)
{
	int ntries;

	if (iwm_nic_lock(sc)) {
		IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
		for (ntries = 0; ntries < 1000; ntries++) {
			if (IWM_READ(sc, IWM_FH_MEM_RSSR_RX_STATUS_REG) &
			    IWM_FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE)
				break;
			DELAY(10);
		}
		iwm_nic_unlock(sc);
	}
}

static void
iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	ring->cur = 0;
	memset(ring->stat, 0, sizeof(*ring->stat));
	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
	    ring->stat_dma.size, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

static void
iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	int i;

	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->stat_dma);

	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(sc->sc_dmat, data->map);
			data->map = NULL;
		}
	}
}

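/*
 * Allocate a TX ring: 256-byte aligned descriptors, per-slot DMA
 * maps and, for rings up to and including the command queue, the DMA
 * memory backing device commands.
 */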
static int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	int i, err;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate TX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/*
	 * We only use rings 0 through 9 (4 EDCA + cmd), so there is no
	 * need to allocate command space for the other rings.
	 */
	if (qid > IWM_CMD_QUEUE)
		return 0;

	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate TX cmd DMA memory\n");
		goto fail;
	}
	ring->cmd = ring->cmd_dma.vaddr;

	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];
		size_t mapsize;

		data->cmd_paddr = paddr;
		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
		    + offsetof(struct iwm_tx_cmd, scratch);
		paddr += sizeof(struct iwm_device_cmd);

		/* FW commands may require more mapped space than packets. */
		if (qid == IWM_CMD_QUEUE)
			mapsize = (sizeof(struct iwm_cmd_header) +
			    IWM_MAX_CMD_PAYLOAD_SIZE);
		else
			mapsize = MCLBYTES;
		err = bus_dmamap_create(sc->sc_dmat, mapsize,
		    IWM_NUM_OF_TBS - 2, mapsize, 0, BUS_DMA_NOWAIT,
		    &data->map);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could not create TX buf DMA map\n");
			goto fail;
		}
	}
	KASSERT(paddr == ring->cmd_dma.paddr + size);
	return 0;

 fail:	iwm_free_tx_ring(sc, ring);
	return err;
}

static void
iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	/* Clear TX descriptors. */
	memset(ring->desc, 0, ring->desc_dma.size);
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0,
	    ring->desc_dma.size, BUS_DMASYNC_PREWRITE);
	sc->qfullmsk &= ~(1 << ring->qid);
	ring->queued = 0;
	ring->cur = 0;
}

static void
iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->cmd_dma);

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(sc->sc_dmat, data->map);
			data->map = NULL;
		}
	}
}

static void
iwm_enable_rfkill_int(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INT_BIT_RF_KILL;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}

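/*
 * Read the state of the hardware RF kill switch from
 * IWM_CSR_GP_CNTRL and update IWM_FLAG_RFKILL to match.
 * Returns nonzero if the radio is currently disabled.
 */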
static int
iwm_check_rfkill(struct iwm_softc *sc)
{
	uint32_t v;
	int s;
	int rv;

	s = splnet();

	/*
	 * "documentation" is not really helpful here:
	 *  27:	HW_RF_KILL_SW
	 *	Indicates state of (platform's) hardware RF-Kill switch
	 *
	 * But apparently when it's off, it's on ...
	 */
	v = IWM_READ(sc, IWM_CSR_GP_CNTRL);
	rv = (v & IWM_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) == 0;
	if (rv) {
		sc->sc_flags |= IWM_FLAG_RFKILL;
	} else {
		sc->sc_flags &= ~IWM_FLAG_RFKILL;
	}

	splx(s);
	return rv;
}

static void
iwm_enable_interrupts(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}

static void
iwm_restore_interrupts(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}

static void
iwm_disable_interrupts(struct iwm_softc *sc)
{
	int s = splnet();

	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	/* acknowledge all interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);

	splx(s);
}

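/*
 * Reset the interrupt cause table (ICT).  In ICT mode the device
 * writes interrupt causes into this DMA ring, so the interrupt
 * handler can read them from host memory instead of doing CSR reads.
 */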
static void
iwm_ict_reset(struct iwm_softc *sc)
{
	iwm_disable_interrupts(sc);

	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
	bus_dmamap_sync(sc->sc_dmat, sc->ict_dma.map, 0, IWM_ICT_SIZE,
	    BUS_DMASYNC_PREWRITE);
	sc->ict_cur = 0;

	/* Set physical address of ICT (4KB aligned). */
	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
	    IWM_CSR_DRAM_INT_TBL_ENABLE
	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWM_FLAG_USE_ICT;

	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);
}

#define IWM_HW_READY_TIMEOUT 50
static int
iwm_set_hw_ready(struct iwm_softc *sc)
{
	int ready;

	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	ready = iwm_poll_bit(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
	    IWM_HW_READY_TIMEOUT);
	if (ready)
		IWM_SETBITS(sc, IWM_CSR_MBOX_SET_REG,
		    IWM_CSR_MBOX_SET_REG_OS_ALIVE);

	return ready;
}
#undef IWM_HW_READY_TIMEOUT

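/*
 * Wait for the hardware to become ready, retrying with the PREPARE
 * bit set if the first attempt times out.  Returns 0 once the device
 * is ready, ETIMEDOUT otherwise.
 */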
static int
iwm_prepare_card_hw(struct iwm_softc *sc)
{
	int t = 0;

	if (iwm_set_hw_ready(sc))
		return 0;

	DELAY(100);

	/* If HW is not ready, prepare the conditions to check again */
	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_PREPARE);

	do {
		if (iwm_set_hw_ready(sc))
			return 0;
		DELAY(200);
		t += 200;
	} while (t < 150000);

	return ETIMEDOUT;
}

static void
iwm_apm_config(struct iwm_softc *sc)
{
	pcireg_t reg;

	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
	    sc->sc_cap_off + PCIE_LCSR);
	if (reg & PCIE_LCSR_ASPM_L1) {
		/* Oddly, the Linux driver prints "Disabling L0S" here ... */
		IWM_SETBITS(sc, IWM_CSR_GIO_REG,
		    IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
	} else {
		/* ... and "Enabling L0S" here. */
		IWM_CLRBITS(sc, IWM_CSR_GIO_REG,
		    IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
	}
}

/*
 * Start up NIC's basic functionality after it has been reset
 * e.g. after platform boot or shutdown.
 * NOTE: This does not load uCode nor start the embedded processor
 */
static int
iwm_apm_init(struct iwm_softc *sc)
{
	int err = 0;

	/* Disable L0S exit timer (platform NMI workaround) */
	if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
		IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
		    IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/*
	 * Disable L0s without affecting L1;
	 * don't wait for ICH L0s (ICH bug W/A)
	 */
	IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
	    IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	IWM_SETBITS(sc, IWM_CSR_DBG_HPET_MEM_REG, IWM_CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 */
	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	iwm_apm_config(sc);

#if 0 /* not for 7k/8k */
	/* Configure analog phase-lock-loop before activating to D0A */
	if (trans->cfg->base_params->pll_cfg_val)
		IWM_SETBITS(trans, IWM_CSR_ANA_PLL_CFG,
		    trans->cfg->base_params->pll_cfg_val);
#endif

	/*
	 * Set "initialization complete" bit to move adapter from
	 * D0U* --> D0A* (powered-up active) state.
	 */
	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL, IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * Wait for clock stabilization; once stabilized, access to
	 * device-internal resources is supported, e.g. iwm_write_prph()
	 * and accesses to uCode SRAM.
	 */
	if (!iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
		aprint_error_dev(sc->sc_dev,
		    "timeout waiting for clock stabilization\n");
		err = ETIMEDOUT;
		goto out;
	}

	if (sc->host_interrupt_operation_mode) {
		/*
		 * This is a bit of an abuse - the workaround is needed
		 * for 7260 / 3160 only, so host_interrupt_operation_mode
		 * is checked to identify those chips even though the
		 * workaround itself is not related to it.
		 *
		 * Enable the oscillator to count wake up time for L1 exit.
		 * This consumes slightly more power (100uA) - but allows
		 * to be sure that we wake up from L1 on time.
		 *
		 * This looks weird: read twice the same register, discard
		 * the value, set a bit, and yet again, read that same
		 * register just to discard the value. But that's the way
		 * the hardware seems to like it.
		 */
		iwm_read_prph(sc, IWM_OSC_CLK);
		iwm_read_prph(sc, IWM_OSC_CLK);
		iwm_set_bits_prph(sc, IWM_OSC_CLK, IWM_OSC_CLK_FORCE_CONTROL);
		iwm_read_prph(sc, IWM_OSC_CLK);
		iwm_read_prph(sc, IWM_OSC_CLK);
	}

	/*
	 * Enable DMA clock and wait for it to stabilize.
	 *
	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
	 * do not disable clocks.  This preserves any hardware bits already
	 * set by default in "CLK_CTRL_REG" after reset.
	 */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
		iwm_write_prph(sc, IWM_APMG_CLK_EN_REG,
		    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
		DELAY(20);

		/* Disable L1-Active */
		iwm_set_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

		/* Clear the interrupt in APMG if the NIC is in RFKILL */
		iwm_write_prph(sc, IWM_APMG_RTC_INT_STT_REG,
		    IWM_APMG_RTC_INT_STT_RFKILL);
	}
 out:
	if (err)
		aprint_error_dev(sc->sc_dev, "apm init error %d\n", err);
	return err;
}

static void
iwm_apm_stop(struct iwm_softc *sc)
{
	/* stop device's busmaster DMA activity */
	IWM_SETBITS(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_STOP_MASTER);

	if (!iwm_poll_bit(sc, IWM_CSR_RESET,
	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED,
	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100))
		aprint_error_dev(sc->sc_dev, "timeout waiting for master\n");
	DPRINTF(("iwm apm stop\n"));
}

static int
iwm_start_hw(struct iwm_softc *sc)
{
	int err;

	err = iwm_prepare_card_hw(sc);
	if (err)
		return err;

	/* Reset the entire device */
	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
	DELAY(10);

	err = iwm_apm_init(sc);
	if (err)
		return err;

	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);

	return 0;
}

static void
iwm_stop_device(struct iwm_softc *sc)
{
	int chnl, ntries;
	int qid;

	iwm_disable_interrupts(sc);
	sc->sc_flags &= ~IWM_FLAG_USE_ICT;

	/* Deactivate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Stop all DMA channels. */
	if (iwm_nic_lock(sc)) {
		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
			IWM_WRITE(sc,
			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
			for (ntries = 0; ntries < 200; ntries++) {
				uint32_t r;

				r = IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG);
				if (r & IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(
				    chnl))
					break;
				DELAY(20);
			}
		}
		iwm_nic_unlock(sc);
	}
	iwm_disable_rx_dma(sc);

	iwm_reset_rx_ring(sc, &sc->rxq);

	for (qid = 0; qid < __arraycount(sc->txq); qid++)
		iwm_reset_tx_ring(sc, &sc->txq[qid]);

	/*
	 * Power-down device's busmaster DMA clocks
	 */
	iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG, IWM_APMG_CLK_VAL_DMA_CLK_RQT);
	DELAY(5);

	/* Make sure (redundant) we've released our request to stay awake */
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwm_apm_stop(sc);

	/*
	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * Clean again the interrupt here
	 */
	iwm_disable_interrupts(sc);

	/* Reset the on-board processor. */
	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);

	/* Even though we stop the HW we still want the RF kill interrupt. */
	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);
}

static void
iwm_nic_config(struct iwm_softc *sc)
{
	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
	uint32_t reg_val = 0;

	radio_cfg_type = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
	radio_cfg_step = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
	radio_cfg_dash = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
	    IWM_FW_PHY_CFG_RADIO_DASH_POS;

	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

	/* radio configuration */
	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);

	DPRINTF(("Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
	    radio_cfg_step, radio_cfg_dash));

	/*
	 * W/A : NIC is stuck in a reset state after Early PCIe power off
	 * (PCIe power is lost before PERST# is asserted), causing ME FW
	 * to lose ownership and not being able to obtain it back.
	 */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
		    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
		    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
}

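/*
 * Program the RX descriptor ring and status area addresses into the
 * flow handler (FH) and enable the RX DMA channel.
 */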
1741 static int
1742 iwm_nic_rx_init(struct iwm_softc *sc)
1743 {
1744 if (!iwm_nic_lock(sc))
1745 return EBUSY;
1746
1747 memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1748 bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
1749 0, sc->rxq.stat_dma.size,
1750 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1751
1752 iwm_disable_rx_dma(sc);
1753 IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
1754 IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
1755 IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
1756 IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
1757
1758 /* Set physical address of RX ring (256-byte aligned). */
1759 IWM_WRITE(sc,
1760 IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);
1761
1762 /* Set physical address of RX status (16-byte aligned). */
1763 IWM_WRITE(sc,
1764 IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);
1765
1766 /* Enable RX. */
1767 IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
1768 IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
1769 IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY | /* HW bug */
1770 IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
1771 IWM_FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
1772 (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
1773 IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
1774 IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);
1775
1776 IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
1777
1778 /* W/A for interrupt coalescing bug in 7260 and 3160 */
1779 if (sc->host_interrupt_operation_mode)
1780 IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);
1781
1782 /*
1783 * This value should initially be 0 (before preparing any RBs),
1784 * and should be 8 after preparing the first 8 RBs (for example).
1785 */
1786 IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);
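	/*
	 * The write pointer is apparently required to be a multiple of 8
	 * (a constraint inherited from iwlwifi), hence the initial value
	 * of 8 rather than the full ring size.
	 */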
1787
1788 iwm_nic_unlock(sc);
1789
1790 return 0;
1791 }
1792
1793 static int
1794 iwm_nic_tx_init(struct iwm_softc *sc)
1795 {
1796 int qid;
1797
1798 if (!iwm_nic_lock(sc))
1799 return EBUSY;
1800
1801 /* Deactivate TX scheduler. */
1802 iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1803
1804 /* Set physical address of "keep warm" page (16-byte aligned). */
1805 IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);
1806
1807 for (qid = 0; qid < __arraycount(sc->txq); qid++) {
1808 struct iwm_tx_ring *txq = &sc->txq[qid];
1809
1810 /* Set physical address of TX ring (256-byte aligned). */
1811 IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
1812 txq->desc_dma.paddr >> 8);
1813 DPRINTF(("loading ring %d descriptors (%p) at %"PRIxMAX"\n",
1814 qid, txq->desc, (uintmax_t)(txq->desc_dma.paddr >> 8)));
1815 }
1816
1817 iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);
1818
1819 iwm_nic_unlock(sc);
1820
1821 return 0;
1822 }
1823
1824 static int
1825 iwm_nic_init(struct iwm_softc *sc)
1826 {
1827 int err;
1828
1829 iwm_apm_init(sc);
1830 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
1831 iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
1832 IWM_APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
1833 ~IWM_APMG_PS_CTRL_MSK_PWR_SRC);
1834
1835 iwm_nic_config(sc);
1836
1837 err = iwm_nic_rx_init(sc);
1838 if (err)
1839 return err;
1840
1841 err = iwm_nic_tx_init(sc);
1842 if (err)
1843 return err;
1844
1845 DPRINTF(("shadow registers enabled\n"));
1846 IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
1847
1848 return 0;
1849 }
1850
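/*
 * Map EDCA access categories to firmware TX FIFO numbers, indexed
 * in the driver's TX queue order (VO, VI, BE, BK).
 */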
1851 static const uint8_t iwm_ac_to_tx_fifo[] = {
1852 IWM_TX_FIFO_VO,
1853 IWM_TX_FIFO_VI,
1854 IWM_TX_FIFO_BE,
1855 IWM_TX_FIFO_BK,
1856 };
1857
1858 static int
1859 iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
1860 {
1861 if (!iwm_nic_lock(sc)) {
1862 DPRINTF(("%s: cannot enable txq %d\n", DEVNAME(sc), qid));
1863 return EBUSY;
1864 }
1865
1866 IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
1867
1868 if (qid == IWM_CMD_QUEUE) {
1869 iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1870 (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
1871 | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
1872
1873 iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));
1874
1875 iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
1876
1877 iwm_write_mem32(sc,
1878 sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
1879
1880 /* Set scheduler window size and frame limit. */
1881 iwm_write_mem32(sc,
1882 sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
1883 sizeof(uint32_t),
1884 ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
1885 IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
1886 ((IWM_FRAME_LIMIT
1887 << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
1888 IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
1889
1890 iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1891 (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
1892 (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
1893 (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
1894 IWM_SCD_QUEUE_STTS_REG_MSK);
1895 } else {
1896 struct iwm_scd_txq_cfg_cmd cmd;
1897 int err;
1898
1899 iwm_nic_unlock(sc);
1900
1901 memset(&cmd, 0, sizeof(cmd));
1902 cmd.scd_queue = qid;
1903 cmd.enable = 1;
1904 cmd.sta_id = sta_id;
1905 cmd.tx_fifo = fifo;
1906 cmd.aggregate = 0;
1907 cmd.window = IWM_FRAME_LIMIT;
1908
1909 err = iwm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, 0, sizeof(cmd),
1910 &cmd);
1911 if (err)
1912 return err;
1913
1914 if (!iwm_nic_lock(sc))
1915 return EBUSY;
1916 }
1917
1918 iwm_write_prph(sc, IWM_SCD_EN_CTRL,
1919 iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);
1920
1921 iwm_nic_unlock(sc);
1922
1923 DPRINTF(("enabled txq %d FIFO %d\n", qid, fifo));
1924
1925 return 0;
1926 }
1927
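/*
 * Bring-up work done after the firmware's "alive" notification:
 * verify the scheduler's SRAM base address, clear scheduler context
 * state in SRAM, point the scheduler at our DRAM rings, enable the
 * command queue, and turn on the FH TX DMA channels.
 */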
1928 static int
1929 iwm_post_alive(struct iwm_softc *sc)
1930 {
1931 int nwords;
1932 int err, chnl;
1933 uint32_t base;
1934
1935 if (!iwm_nic_lock(sc))
1936 return EBUSY;
1937
1938 base = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
1939 if (sc->sched_base != base) {
1940 DPRINTF(("%s: sched addr mismatch: 0x%08x != 0x%08x\n",
1941 DEVNAME(sc), sc->sched_base, base));
1942 err = EINVAL;
1943 goto out;
1944 }
1945
1946 iwm_ict_reset(sc);
1947
1948 /* Clear TX scheduler state in SRAM. */
1949 nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
1950 IWM_SCD_CONTEXT_MEM_LOWER_BOUND)
1951 / sizeof(uint32_t);
1952 err = iwm_write_mem(sc,
1953 sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
1954 NULL, nwords);
1955 if (err)
1956 goto out;
1957
1958 /* Set physical address of TX scheduler rings (1KB aligned). */
1959 iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);
1960
1961 iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);
1962
1963 iwm_nic_unlock(sc);
1964
1965 /* enable command channel */
1966 err = iwm_enable_txq(sc, 0 /* unused */, IWM_CMD_QUEUE, 7);
1967 if (err)
1968 return err;
1969
1970 if (!iwm_nic_lock(sc))
1971 return EBUSY;
1972
1973 /* Activate TX scheduler. */
1974 iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
1975
1976 /* Enable DMA channels. */
1977 for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1978 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
1979 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
1980 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
1981 }
1982
1983 IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
1984 IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
1985
1986 /* Enable L1-Active */
1987 if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
1988 iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1989 IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1990
1991 out:
1992 iwm_nic_unlock(sc);
1993 return err;
1994 }
1995
1996 static struct iwm_phy_db_entry *
1997 iwm_phy_db_get_section(struct iwm_softc *sc, enum iwm_phy_db_section_type type,
1998 uint16_t chg_id)
1999 {
2000 struct iwm_phy_db *phy_db = &sc->sc_phy_db;
2001
2002 if (type >= IWM_PHY_DB_MAX)
2003 return NULL;
2004
2005 switch (type) {
2006 case IWM_PHY_DB_CFG:
2007 return &phy_db->cfg;
2008 case IWM_PHY_DB_CALIB_NCH:
2009 return &phy_db->calib_nch;
2010 case IWM_PHY_DB_CALIB_CHG_PAPD:
2011 if (chg_id >= IWM_NUM_PAPD_CH_GROUPS)
2012 return NULL;
2013 return &phy_db->calib_ch_group_papd[chg_id];
2014 case IWM_PHY_DB_CALIB_CHG_TXP:
2015 if (chg_id >= IWM_NUM_TXP_CH_GROUPS)
2016 return NULL;
2017 return &phy_db->calib_ch_group_txp[chg_id];
2018 default:
2019 return NULL;
2020 }
2021 return NULL;
2022 }
2023
2024 static int
2025 iwm_phy_db_set_section(struct iwm_softc *sc,
2026 struct iwm_calib_res_notif_phy_db *phy_db_notif, uint16_t size)
2027 {
2028 struct iwm_phy_db_entry *entry;
2029 enum iwm_phy_db_section_type type = le16toh(phy_db_notif->type);
2030 uint16_t chg_id = 0;
2031
2032 if (type == IWM_PHY_DB_CALIB_CHG_PAPD ||
2033 type == IWM_PHY_DB_CALIB_CHG_TXP)
2034 chg_id = le16toh(*(uint16_t *)phy_db_notif->data);
2035
2036 entry = iwm_phy_db_get_section(sc, type, chg_id);
2037 if (!entry)
2038 return EINVAL;
2039
2040 if (entry->data)
2041 kmem_intr_free(entry->data, entry->size);
2042 entry->data = kmem_intr_alloc(size, KM_NOSLEEP);
2043 if (!entry->data) {
2044 entry->size = 0;
2045 return ENOMEM;
2046 }
2047 memcpy(entry->data, phy_db_notif->data, size);
2048 entry->size = size;
2049
2050 	DPRINTFN(10, ("%s(%d): [PHYDB] SET: Type %d, Size: %d, data: %p\n",
2051 __func__, __LINE__, type, size, entry->data));
2052
2053 return 0;
2054 }
2055
2056 static int
2057 iwm_is_valid_channel(uint16_t ch_id)
2058 {
2059 if (ch_id <= 14 ||
2060 (36 <= ch_id && ch_id <= 64 && ch_id % 4 == 0) ||
2061 (100 <= ch_id && ch_id <= 140 && ch_id % 4 == 0) ||
2062 (145 <= ch_id && ch_id <= 165 && ch_id % 4 == 1))
2063 return 1;
2064 return 0;
2065 }
2066
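/*
 * Map a channel number to a contiguous index: 2 GHz channels 1-14
 * become indices 0-13, and the valid 5 GHz channels follow, e.g.
 * channel 36 -> (36 + 20) / 4 = 14, channel 140 -> (140 - 12) / 4 = 32,
 * and channel 165 -> (165 - 13) / 4 = 38.
 */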
2067 static uint8_t
2068 iwm_ch_id_to_ch_index(uint16_t ch_id)
2069 {
2070 if (!iwm_is_valid_channel(ch_id))
2071 return 0xff;
2072
2073 if (ch_id <= 14)
2074 return ch_id - 1;
2075 if (ch_id <= 64)
2076 return (ch_id + 20) / 4;
2077 if (ch_id <= 140)
2078 return (ch_id - 12) / 4;
2079 return (ch_id - 13) / 4;
2080 }
2081
2082
2083 static uint16_t
2084 iwm_channel_id_to_papd(uint16_t ch_id)
2085 {
2086 if (!iwm_is_valid_channel(ch_id))
2087 return 0xff;
2088
2089 if (1 <= ch_id && ch_id <= 14)
2090 return 0;
2091 if (36 <= ch_id && ch_id <= 64)
2092 return 1;
2093 if (100 <= ch_id && ch_id <= 140)
2094 return 2;
2095 return 3;
2096 }
2097
2098 static uint16_t
2099 iwm_channel_id_to_txp(struct iwm_softc *sc, uint16_t ch_id)
2100 {
2101 struct iwm_phy_db *phy_db = &sc->sc_phy_db;
2102 struct iwm_phy_db_chg_txp *txp_chg;
2103 int i;
2104 uint8_t ch_index = iwm_ch_id_to_ch_index(ch_id);
2105
2106 if (ch_index == 0xff)
2107 return 0xff;
2108
2109 for (i = 0; i < IWM_NUM_TXP_CH_GROUPS; i++) {
2110 txp_chg = (void *)phy_db->calib_ch_group_txp[i].data;
2111 if (!txp_chg)
2112 return 0xff;
2113 		/*
2114 		 * Find the first channel group whose maximum channel index
2115 		 * is not lower than the requested channel's index.
2116 		 */
2117 if (le16toh(txp_chg->max_channel_idx) >= ch_index)
2118 return i;
2119 }
2120 return 0xff;
2121 }
2122
2123 static int
2124 iwm_phy_db_get_section_data(struct iwm_softc *sc, uint32_t type, uint8_t **data,
2125 uint16_t *size, uint16_t ch_id)
2126 {
2127 struct iwm_phy_db_entry *entry;
2128 uint16_t ch_group_id = 0;
2129
2130 if (type == IWM_PHY_DB_CALIB_CHG_PAPD)
2131 ch_group_id = iwm_channel_id_to_papd(ch_id);
2132 else if (type == IWM_PHY_DB_CALIB_CHG_TXP)
2133 ch_group_id = iwm_channel_id_to_txp(sc, ch_id);
2134
2135 entry = iwm_phy_db_get_section(sc, type, ch_group_id);
2136 if (!entry)
2137 return EINVAL;
2138
2139 *data = entry->data;
2140 *size = entry->size;
2141
2142 	DPRINTFN(10, ("%s(%d): [PHYDB] GET: Type %d, Size: %d\n",
2143 __func__, __LINE__, type, *size));
2144
2145 return 0;
2146 }
2147
2148 static int
2149 iwm_send_phy_db_cmd(struct iwm_softc *sc, uint16_t type, uint16_t length,
2150 void *data)
2151 {
2152 struct iwm_phy_db_cmd phy_db_cmd;
2153 struct iwm_host_cmd cmd = {
2154 .id = IWM_PHY_DB_CMD,
2155 .flags = IWM_CMD_ASYNC,
2156 };
2157
2158 DPRINTFN(10, ("Sending PHY-DB hcmd of type %d, of length %d\n",
2159 type, length));
2160
2161 	phy_db_cmd.type = htole16(type);
2162 	phy_db_cmd.length = htole16(length);
2163
2164 cmd.data[0] = &phy_db_cmd;
2165 cmd.len[0] = sizeof(struct iwm_phy_db_cmd);
2166 cmd.data[1] = data;
2167 cmd.len[1] = length;
2168
2169 return iwm_send_cmd(sc, &cmd);
2170 }
2171
2172 static int
2173 iwm_phy_db_send_all_channel_groups(struct iwm_softc *sc,
2174 enum iwm_phy_db_section_type type, uint8_t max_ch_groups)
2175 {
2176 uint16_t i;
2177 int err;
2178 struct iwm_phy_db_entry *entry;
2179
2180 /* Send all the channel-specific groups to operational fw */
2181 for (i = 0; i < max_ch_groups; i++) {
2182 entry = iwm_phy_db_get_section(sc, type, i);
2183 if (!entry)
2184 return EINVAL;
2185
2186 if (!entry->size)
2187 continue;
2188
2189 err = iwm_send_phy_db_cmd(sc, type, entry->size, entry->data);
2190 if (err) {
2191 DPRINTF(("%s: Can't SEND phy_db section %d (%d), "
2192 "err %d\n", DEVNAME(sc), type, i, err));
2193 return err;
2194 }
2195
2196 DPRINTFN(10, ("%s: Sent PHY_DB HCMD, type = %d num = %d\n",
2197 DEVNAME(sc), type, i));
2198
2199 DELAY(1000);
2200 }
2201
2202 return 0;
2203 }
2204
2205 static int
2206 iwm_send_phy_db_data(struct iwm_softc *sc)
2207 {
2208 uint8_t *data = NULL;
2209 uint16_t size = 0;
2210 int err;
2211
2212 err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CFG, &data, &size, 0);
2213 if (err)
2214 return err;
2215
2216 err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CFG, size, data);
2217 if (err)
2218 return err;
2219
2220 err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CALIB_NCH,
2221 &data, &size, 0);
2222 if (err)
2223 return err;
2224
2225 err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CALIB_NCH, size, data);
2226 if (err)
2227 return err;
2228
2229 err = iwm_phy_db_send_all_channel_groups(sc,
2230 IWM_PHY_DB_CALIB_CHG_PAPD, IWM_NUM_PAPD_CH_GROUPS);
2231 if (err)
2232 return err;
2233
2234 err = iwm_phy_db_send_all_channel_groups(sc,
2235 IWM_PHY_DB_CALIB_CHG_TXP, IWM_NUM_TXP_CH_GROUPS);
2236 if (err)
2237 return err;
2238
2239 return 0;
2240 }
2241
2242 /*
2243 * For the high priority TE use a time event type that has similar priority to
2244 * the FW's action scan priority.
2245 */
2246 #define IWM_ROC_TE_TYPE_NORMAL IWM_TE_P2P_DEVICE_DISCOVERABLE
2247 #define IWM_ROC_TE_TYPE_MGMT_TX IWM_TE_P2P_CLIENT_ASSOC
2248
2249 /* used to convert from time event API v2 to v1 */
2250 #define IWM_TE_V2_DEP_POLICY_MSK (IWM_TE_V2_DEP_OTHER | IWM_TE_V2_DEP_TSF |\
2251 IWM_TE_V2_EVENT_SOCIOPATHIC)
2252 static inline uint16_t
2253 iwm_te_v2_get_notify(uint16_t policy)
2254 {
2255 return le16toh(policy) & IWM_TE_V2_NOTIF_MSK;
2256 }
2257
2258 static inline uint16_t
2259 iwm_te_v2_get_dep_policy(uint16_t policy)
2260 {
2261 return (le16toh(policy) & IWM_TE_V2_DEP_POLICY_MSK) >>
2262 IWM_TE_V2_PLACEMENT_POS;
2263 }
2264
2265 static inline uint16_t
2266 iwm_te_v2_get_absence(uint16_t policy)
2267 {
2268 return (le16toh(policy) & IWM_TE_V2_ABSENCE) >> IWM_TE_V2_ABSENCE_POS;
2269 }
2270
2271 static void
2272 iwm_te_v2_to_v1(const struct iwm_time_event_cmd_v2 *cmd_v2,
2273 struct iwm_time_event_cmd_v1 *cmd_v1)
2274 {
2275 cmd_v1->id_and_color = cmd_v2->id_and_color;
2276 cmd_v1->action = cmd_v2->action;
2277 cmd_v1->id = cmd_v2->id;
2278 cmd_v1->apply_time = cmd_v2->apply_time;
2279 cmd_v1->max_delay = cmd_v2->max_delay;
2280 cmd_v1->depends_on = cmd_v2->depends_on;
2281 cmd_v1->interval = cmd_v2->interval;
2282 cmd_v1->duration = cmd_v2->duration;
2283 if (cmd_v2->repeat == IWM_TE_V2_REPEAT_ENDLESS)
2284 cmd_v1->repeat = htole32(IWM_TE_V1_REPEAT_ENDLESS);
2285 else
2286 cmd_v1->repeat = htole32(cmd_v2->repeat);
2287 cmd_v1->max_frags = htole32(cmd_v2->max_frags);
2288 cmd_v1->interval_reciprocal = 0; /* unused */
2289
2290 cmd_v1->dep_policy = htole32(iwm_te_v2_get_dep_policy(cmd_v2->policy));
2291 cmd_v1->is_present = htole32(!iwm_te_v2_get_absence(cmd_v2->policy));
2292 cmd_v1->notify = htole32(iwm_te_v2_get_notify(cmd_v2->policy));
2293 }
2294
2295 static int
2296 iwm_send_time_event_cmd(struct iwm_softc *sc,
2297 const struct iwm_time_event_cmd_v2 *cmd)
2298 {
2299 struct iwm_time_event_cmd_v1 cmd_v1;
2300
2301 if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_TIME_EVENT_API_V2)
2302 return iwm_send_cmd_pdu(sc, IWM_TIME_EVENT_CMD, 0, sizeof(*cmd),
2303 cmd);
2304
2305 iwm_te_v2_to_v1(cmd, &cmd_v1);
2306 return iwm_send_cmd_pdu(sc, IWM_TIME_EVENT_CMD, 0, sizeof(cmd_v1),
2307 &cmd_v1);
2308 }
2309
2310 static void
2311 iwm_protect_session(struct iwm_softc *sc, struct iwm_node *in,
2312 uint32_t duration, uint32_t max_delay)
2313 {
2314 struct iwm_time_event_cmd_v2 time_cmd;
2315
2316 memset(&time_cmd, 0, sizeof(time_cmd));
2317
2318 time_cmd.action = htole32(IWM_FW_CTXT_ACTION_ADD);
2319 time_cmd.id_and_color =
2320 htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
2321 time_cmd.id = htole32(IWM_TE_BSS_STA_AGGRESSIVE_ASSOC);
2322
2323 time_cmd.apply_time = htole32(0);
2324
2325 time_cmd.max_frags = IWM_TE_V2_FRAG_NONE;
2326 time_cmd.max_delay = htole32(max_delay);
2327 	/* TODO: why do we need to set interval = bi (beacon interval) if the event is not periodic? */
2328 time_cmd.interval = htole32(1);
2329 time_cmd.duration = htole32(duration);
2330 time_cmd.repeat = 1;
2331 time_cmd.policy
2332 = htole16(IWM_TE_V2_NOTIF_HOST_EVENT_START |
2333 IWM_TE_V2_NOTIF_HOST_EVENT_END |
2334 IWM_T2_V2_START_IMMEDIATELY);
2335
2336 iwm_send_time_event_cmd(sc, &time_cmd);
2337 }
2338
2339 /*
2340 * NVM read access and content parsing. We do not support
2341 * external NVM or writing NVM.
2342 */
2343
2344 /* list of NVM sections we are allowed/need to read */
2345 static const int iwm_nvm_to_read[] = {
2346 IWM_NVM_SECTION_TYPE_HW,
2347 IWM_NVM_SECTION_TYPE_SW,
2348 IWM_NVM_SECTION_TYPE_REGULATORY,
2349 IWM_NVM_SECTION_TYPE_CALIBRATION,
2350 IWM_NVM_SECTION_TYPE_PRODUCTION,
2351 IWM_NVM_SECTION_TYPE_HW_8000,
2352 IWM_NVM_SECTION_TYPE_MAC_OVERRIDE,
2353 IWM_NVM_SECTION_TYPE_PHY_SKU,
2354 };
2355
2356 /* Default NVM size to read */
2357 #define IWM_NVM_DEFAULT_CHUNK_SIZE (2*1024)
2358 #define IWM_MAX_NVM_SECTION_SIZE 8192
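/*
 * With a 2 KB chunk size and an 8 KB section limit, reading one
 * section takes at most four IWM_NVM_ACCESS_CMD round trips.
 */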
2359
2360 #define IWM_NVM_WRITE_OPCODE 1
2361 #define IWM_NVM_READ_OPCODE 0
2362
2363 static int
2364 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section, uint16_t offset,
2365 uint16_t length, uint8_t *data, uint16_t *len)
2366 {
2368 struct iwm_nvm_access_cmd nvm_access_cmd = {
2369 .offset = htole16(offset),
2370 .length = htole16(length),
2371 .type = htole16(section),
2372 .op_code = IWM_NVM_READ_OPCODE,
2373 };
2374 struct iwm_nvm_access_resp *nvm_resp;
2375 struct iwm_rx_packet *pkt;
2376 struct iwm_host_cmd cmd = {
2377 .id = IWM_NVM_ACCESS_CMD,
2378 .flags = (IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL),
2379 .data = { &nvm_access_cmd, },
2380 };
2381 int err, offset_read;
2382 size_t bytes_read;
2383 uint8_t *resp_data;
2384
2385 cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
2386
2387 err = iwm_send_cmd(sc, &cmd);
2388 if (err) {
2389 DPRINTF(("%s: Could not send NVM_ACCESS command (error=%d)\n",
2390 DEVNAME(sc), err));
2391 return err;
2392 }
2393
2394 pkt = cmd.resp_pkt;
2395 if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
2396 err = EIO;
2397 goto exit;
2398 }
2399
2400 /* Extract NVM response */
2401 nvm_resp = (void *)pkt->data;
2402
2403 err = le16toh(nvm_resp->status);
2404 bytes_read = le16toh(nvm_resp->length);
2405 offset_read = le16toh(nvm_resp->offset);
2406 resp_data = nvm_resp->data;
2407 if (err) {
2408 err = EINVAL;
2409 goto exit;
2410 }
2411
2412 if (offset_read != offset) {
2413 err = EINVAL;
2414 goto exit;
2415 }
2416 if (bytes_read > length) {
2417 err = EINVAL;
2418 goto exit;
2419 }
2420
2421 memcpy(data + offset, resp_data, bytes_read);
2422 *len = bytes_read;
2423
2424 exit:
2425 iwm_free_resp(sc, &cmd);
2426 return err;
2427 }
2428
2429 /*
2430 * Reads an NVM section completely.
2431  * NICs prior to the 7000 family don't have a real NVM, but just read
2432  * section 0, which is the EEPROM. Because EEPROM reads are not bounded
2433  * by the uCode, we must check manually in this case that we don't
2434  * overflow and read more than the EEPROM size.
2435 */
2436 static int
2437 iwm_nvm_read_section(struct iwm_softc *sc, uint16_t section, uint8_t *data,
2438 uint16_t *len, size_t max_len)
2439 {
2440 uint16_t chunklen, seglen;
2441 int err;
2442
2443 chunklen = seglen = IWM_NVM_DEFAULT_CHUNK_SIZE;
2444 *len = 0;
2445
2446 /* Read NVM chunks until exhausted (reading less than requested) */
2447 while (seglen == chunklen && *len < max_len) {
2448 err = iwm_nvm_read_chunk(sc, section, *len, chunklen, data,
2449 &seglen);
2450 if (err) {
2451 DPRINTF(("%s:Cannot read NVM from section %d "
2452 "offset %d, length %d\n",
2453 DEVNAME(sc), section, *len, chunklen));
2454 return err;
2455 }
2456 *len += seglen;
2457 }
2458
2459 DPRINTFN(4, ("NVM section %d read completed\n", section));
2460 return 0;
2461 }
2462
2463 static uint8_t
2464 iwm_fw_valid_tx_ant(struct iwm_softc *sc)
2465 {
2466 uint8_t tx_ant;
2467
2468 tx_ant = ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_TX_CHAIN)
2469 >> IWM_FW_PHY_CFG_TX_CHAIN_POS);
2470
2471 if (sc->sc_nvm.valid_tx_ant)
2472 tx_ant &= sc->sc_nvm.valid_tx_ant;
2473
2474 return tx_ant;
2475 }
2476
2477 static uint8_t
2478 iwm_fw_valid_rx_ant(struct iwm_softc *sc)
2479 {
2480 uint8_t rx_ant;
2481
2482 rx_ant = ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RX_CHAIN)
2483 >> IWM_FW_PHY_CFG_RX_CHAIN_POS);
2484
2485 if (sc->sc_nvm.valid_rx_ant)
2486 rx_ant &= sc->sc_nvm.valid_rx_ant;
2487
2488 return rx_ant;
2489 }
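/*
 * Both helpers above intersect the antenna bitmask advertised by the
 * firmware image (presumably via its PHY config TLV) with the mask the
 * NVM declares valid; each set bit corresponds to one antenna chain.
 */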
2490
2491 static void
2492 iwm_init_channel_map(struct iwm_softc *sc, const uint16_t * const nvm_ch_flags,
2493 const uint8_t *nvm_channels, size_t nchan)
2494 {
2495 struct ieee80211com *ic = &sc->sc_ic;
2496 struct iwm_nvm_data *data = &sc->sc_nvm;
2497 int ch_idx;
2498 struct ieee80211_channel *channel;
2499 uint16_t ch_flags;
2500 int is_5ghz;
2501 int flags, hw_value;
2502
2503 for (ch_idx = 0; ch_idx < nchan; ch_idx++) {
2504 ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
2505
2506 if (ch_idx >= IWM_NUM_2GHZ_CHANNELS &&
2507 !data->sku_cap_band_52GHz_enable)
2508 ch_flags &= ~IWM_NVM_CHANNEL_VALID;
2509
2510 if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
2511 DPRINTF(("Ch. %d Flags %x [%sGHz] - No traffic\n",
2512 			    nvm_channels[ch_idx],
2513 ch_flags,
2514 (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
2515 "5.2" : "2.4"));
2516 continue;
2517 }
2518
2519 hw_value = nvm_channels[ch_idx];
2520 channel = &ic->ic_channels[hw_value];
2521
2522 is_5ghz = ch_idx >= IWM_NUM_2GHZ_CHANNELS;
2523 if (!is_5ghz) {
2524 flags = IEEE80211_CHAN_2GHZ;
2525 channel->ic_flags
2526 = IEEE80211_CHAN_CCK
2527 | IEEE80211_CHAN_OFDM
2528 | IEEE80211_CHAN_DYN
2529 | IEEE80211_CHAN_2GHZ;
2530 } else {
2531 flags = IEEE80211_CHAN_5GHZ;
2532 channel->ic_flags =
2533 IEEE80211_CHAN_A;
2534 }
2535 channel->ic_freq = ieee80211_ieee2mhz(hw_value, flags);
2536
2537 if (!(ch_flags & IWM_NVM_CHANNEL_ACTIVE))
2538 channel->ic_flags |= IEEE80211_CHAN_PASSIVE;
2539
2540 #ifndef IEEE80211_NO_HT
2541 if (data->sku_cap_11n_enable)
2542 channel->ic_flags |= IEEE80211_CHAN_HT;
2543 #endif
2544 }
2545 }
2546
2547 #ifndef IEEE80211_NO_HT
2548 static void
2549 iwm_setup_ht_rates(struct iwm_softc *sc)
2550 {
2551 struct ieee80211com *ic = &sc->sc_ic;
2552
2553 /* TX is supported with the same MCS as RX. */
2554 ic->ic_tx_mcs_set = IEEE80211_TX_MCS_SET_DEFINED;
2555
2556 ic->ic_sup_mcs[0] = 0xff; /* MCS 0-7 */
2557
2558 #ifdef notyet
2559 if (sc->sc_nvm.sku_cap_mimo_disable)
2560 return;
2561
2562 if (iwm_fw_valid_rx_ant(sc) > 1)
2563 ic->ic_sup_mcs[1] = 0xff; /* MCS 8-15 */
2564 if (iwm_fw_valid_rx_ant(sc) > 2)
2565 ic->ic_sup_mcs[2] = 0xff; /* MCS 16-23 */
2566 #endif
2567 }
2568
2569 #define IWM_MAX_RX_BA_SESSIONS 16
2570
2571 static void
2572 iwm_sta_rx_agg(struct iwm_softc *sc, struct ieee80211_node *ni, uint8_t tid,
2573 uint16_t ssn, int start)
2574 {
2575 struct ieee80211com *ic = &sc->sc_ic;
2576 struct iwm_add_sta_cmd_v7 cmd;
2577 struct iwm_node *in = (struct iwm_node *)ni;
2578 int err, s;
2579 uint32_t status;
2580
2581 if (start && sc->sc_rx_ba_sessions >= IWM_MAX_RX_BA_SESSIONS) {
2582 ieee80211_addba_req_refuse(ic, ni, tid);
2583 return;
2584 }
2585
2586 memset(&cmd, 0, sizeof(cmd));
2587
2588 cmd.sta_id = IWM_STATION_ID;
2589 cmd.mac_id_n_color
2590 = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
2591 cmd.add_modify = IWM_STA_MODE_MODIFY;
2592
2593 if (start) {
2594 cmd.add_immediate_ba_tid = (uint8_t)tid;
2595 cmd.add_immediate_ba_ssn = ssn;
2596 } else {
2597 cmd.remove_immediate_ba_tid = (uint8_t)tid;
2598 }
2599 cmd.modify_mask = start ? IWM_STA_MODIFY_ADD_BA_TID :
2600 IWM_STA_MODIFY_REMOVE_BA_TID;
2601
2602 status = IWM_ADD_STA_SUCCESS;
2603 err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(cmd), &cmd,
2604 &status);
2605
2606 s = splnet();
2607 if (err == 0 && status == IWM_ADD_STA_SUCCESS) {
2608 if (start) {
2609 sc->sc_rx_ba_sessions++;
2610 ieee80211_addba_req_accept(ic, ni, tid);
2611 } else if (sc->sc_rx_ba_sessions > 0)
2612 sc->sc_rx_ba_sessions--;
2613 } else if (start)
2614 ieee80211_addba_req_refuse(ic, ni, tid);
2615
2616 splx(s);
2617 }
2618
2619 static void
2620 iwm_htprot_task(void *arg)
2621 {
2622 struct iwm_softc *sc = arg;
2623 struct ieee80211com *ic = &sc->sc_ic;
2624 struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
2625 int err;
2626
2627 /* This call updates HT protection based on in->in_ni.ni_htop1. */
2628 err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 1);
2629 if (err)
2630 aprint_error_dev(sc->sc_dev,
2631 "could not change HT protection: error %d\n", err);
2632 }
2633
2634 /*
2635 * This function is called by upper layer when HT protection settings in
2636 * beacons have changed.
2637 */
2638 static void
2639 iwm_update_htprot(struct ieee80211com *ic, struct ieee80211_node *ni)
2640 {
2641 struct iwm_softc *sc = ic->ic_softc;
2642
2643 /* assumes that ni == ic->ic_bss */
2644 task_add(systq, &sc->htprot_task);
2645 }
2646
2647 static void
2648 iwm_ba_task(void *arg)
2649 {
2650 struct iwm_softc *sc = arg;
2651 struct ieee80211com *ic = &sc->sc_ic;
2652 struct ieee80211_node *ni = ic->ic_bss;
2653
2654 if (sc->ba_start)
2655 iwm_sta_rx_agg(sc, ni, sc->ba_tid, sc->ba_ssn, 1);
2656 else
2657 iwm_sta_rx_agg(sc, ni, sc->ba_tid, 0, 0);
2658 }
2659
2660 /*
2661 * This function is called by upper layer when an ADDBA request is received
2662 * from another STA and before the ADDBA response is sent.
2663 */
2664 static int
2665 iwm_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
2666 uint8_t tid)
2667 {
2668 struct ieee80211_rx_ba *ba = &ni->ni_rx_ba[tid];
2669 struct iwm_softc *sc = IC2IFP(ic)->if_softc;
2670
2671 if (sc->sc_rx_ba_sessions >= IWM_MAX_RX_BA_SESSIONS)
2672 return ENOSPC;
2673
2674 sc->ba_start = 1;
2675 sc->ba_tid = tid;
2676 sc->ba_ssn = htole16(ba->ba_winstart);
2677 task_add(systq, &sc->ba_task);
2678
2679 return EBUSY;
2680 }
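/*
 * Returning EBUSY above apparently defers the ADDBA response: the
 * deferred ba_task updates firmware state and then answers the request
 * through ieee80211_addba_req_accept() or _refuse(). (This code is
 * currently compiled out on NetBSD via IEEE80211_NO_HT.)
 */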
2681
2682 /*
2683 * This function is called by upper layer on teardown of an HT-immediate
2684 * Block Ack agreement (eg. upon receipt of a DELBA frame).
2685 */
2686 static void
2687 iwm_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
2688 uint8_t tid)
2689 {
2690 struct iwm_softc *sc = IC2IFP(ic)->if_softc;
2691
2692 sc->ba_start = 0;
2693 sc->ba_tid = tid;
2694 task_add(systq, &sc->ba_task);
2695 }
2696 #endif
2697
2698 static void
2699 iwm_set_hw_address_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
2700 const uint16_t *mac_override, const uint16_t *nvm_hw)
2701 {
2702 static const uint8_t reserved_mac[ETHER_ADDR_LEN] = {
2703 0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
2704 };
2705 static const u_int8_t etheranyaddr[ETHER_ADDR_LEN] = {
2706 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
2707 };
2708 const uint8_t *hw_addr;
2709
2710 if (mac_override) {
2711 hw_addr = (const uint8_t *)(mac_override +
2712 IWM_MAC_ADDRESS_OVERRIDE_8000);
2713
2714 /*
2715 		 * Store the MAC address from the MAO section.
2716 		 * No byte swapping is required in the MAO section.
2717 */
2718 memcpy(data->hw_addr, hw_addr, ETHER_ADDR_LEN);
2719
2720 /*
2721 * Force the use of the OTP MAC address in case of reserved MAC
2722 * address in the NVM, or if address is given but invalid.
2723 */
2724 if (memcmp(reserved_mac, hw_addr, ETHER_ADDR_LEN) != 0 &&
2725 (memcmp(etherbroadcastaddr, data->hw_addr,
2726 sizeof(etherbroadcastaddr)) != 0) &&
2727 (memcmp(etheranyaddr, data->hw_addr,
2728 sizeof(etheranyaddr)) != 0) &&
2729 !ETHER_IS_MULTICAST(data->hw_addr))
2730 return;
2731 }
2732
2733 if (nvm_hw) {
2734 /* Read the mac address from WFMP registers. */
2735 uint32_t mac_addr0 =
2736 htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
2737 uint32_t mac_addr1 =
2738 htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));
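		/*
		 * Unpack the two registers byte-reversed: mac_addr0 yields
		 * octets 0-3 of the address and mac_addr1 octets 4-5.
		 */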
2739
2740 hw_addr = (const uint8_t *)&mac_addr0;
2741 data->hw_addr[0] = hw_addr[3];
2742 data->hw_addr[1] = hw_addr[2];
2743 data->hw_addr[2] = hw_addr[1];
2744 data->hw_addr[3] = hw_addr[0];
2745
2746 hw_addr = (const uint8_t *)&mac_addr1;
2747 data->hw_addr[4] = hw_addr[1];
2748 data->hw_addr[5] = hw_addr[0];
2749
2750 return;
2751 }
2752
2753 aprint_error_dev(sc->sc_dev, "mac address not found\n");
2754 memset(data->hw_addr, 0, sizeof(data->hw_addr));
2755 }
2756
2757 static int
2758 iwm_parse_nvm_data(struct iwm_softc *sc, const uint16_t *nvm_hw,
2759 const uint16_t *nvm_sw, const uint16_t *nvm_calib,
2760 const uint16_t *mac_override, const uint16_t *phy_sku,
2761 const uint16_t *regulatory)
2762 {
2763 struct iwm_nvm_data *data = &sc->sc_nvm;
2764 uint8_t hw_addr[ETHER_ADDR_LEN];
2765 uint32_t sku;
2766
2767 data->nvm_version = le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
2768
2769 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
2770 uint16_t radio_cfg = le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
2771 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
2772 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
2773 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
2774 data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
2775
2776 sku = le16_to_cpup(nvm_sw + IWM_SKU);
2777 } else {
2778 uint32_t radio_cfg = le32_to_cpup(
2779 (const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
2780 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
2781 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
2782 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
2783 data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK_8000(radio_cfg);
2784 data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
2785 data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
2786
2787 sku = le32_to_cpup(
2788 (const uint32_t *)(phy_sku + IWM_SKU_8000));
2789 }
2790
2791 data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
2792 data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
2793 data->sku_cap_11n_enable = sku & IWM_NVM_SKU_CAP_11N_ENABLE;
2794 data->sku_cap_mimo_disable = sku & IWM_NVM_SKU_CAP_MIMO_DISABLE;
2795
2796 data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2797
2798 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
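		/*
		 * The HW section apparently stores the MAC address as three
		 * 16-bit little-endian words, hence the pairwise byte swap.
		 */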
2799 memcpy(hw_addr, nvm_hw + IWM_HW_ADDR, ETHER_ADDR_LEN);
2800 data->hw_addr[0] = hw_addr[1];
2801 data->hw_addr[1] = hw_addr[0];
2802 data->hw_addr[2] = hw_addr[3];
2803 data->hw_addr[3] = hw_addr[2];
2804 data->hw_addr[4] = hw_addr[5];
2805 data->hw_addr[5] = hw_addr[4];
2806 } else
2807 iwm_set_hw_address_8000(sc, data, mac_override, nvm_hw);
2808
2809 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
2810 iwm_init_channel_map(sc, &nvm_sw[IWM_NVM_CHANNELS],
2811 iwm_nvm_channels, __arraycount(iwm_nvm_channels));
2812 else
2813 		iwm_init_channel_map(sc, &regulatory[IWM_NVM_CHANNELS_8000],
2814 iwm_nvm_channels_8000, __arraycount(iwm_nvm_channels_8000));
2815
2816 	data->calib_version = 255;   /* TODO:
2817 					this value prevents some checks from
2818 					failing; we need to check whether this
2819 					field is still needed, and if so,
2820 					where it lives in the NVM */
2821
2822 return 0;
2823 }
2824
2825 static int
2826 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
2827 {
2828 const uint16_t *hw, *sw, *calib, *mac_override = NULL, *phy_sku = NULL;
2829 const uint16_t *regulatory = NULL;
2830
2831 /* Checking for required sections */
2832 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
2833 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2834 !sections[IWM_NVM_SECTION_TYPE_HW].data) {
2835 return ENOENT;
2836 }
2837
2838 hw = (const uint16_t *) sections[IWM_NVM_SECTION_TYPE_HW].data;
2839 } else if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
2840 /* SW and REGULATORY sections are mandatory */
2841 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2842 !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
2843 return ENOENT;
2844 }
2845 /* MAC_OVERRIDE or at least HW section must exist */
2846 if (!sections[IWM_NVM_SECTION_TYPE_HW_8000].data &&
2847 !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
2848 return ENOENT;
2849 }
2850
2851 /* PHY_SKU section is mandatory in B0 */
2852 if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
2853 return ENOENT;
2854 }
2855
2856 regulatory = (const uint16_t *)
2857 sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
2858 hw = (const uint16_t *)
2859 sections[IWM_NVM_SECTION_TYPE_HW_8000].data;
2860 mac_override =
2861 (const uint16_t *)
2862 sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
2863 phy_sku = (const uint16_t *)
2864 sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
2865 } else {
2866 panic("unknown device family %d\n", sc->sc_device_family);
2867 }
2868
2869 sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
2870 calib = (const uint16_t *)
2871 sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
2872
2873 return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
2874 phy_sku, regulatory);
2875 }
2876
2877 static int
2878 iwm_nvm_init(struct iwm_softc *sc)
2879 {
2880 struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
2881 int i, section, err;
2882 uint16_t len;
2883 uint8_t *buf;
2884 const size_t bufsz = IWM_MAX_NVM_SECTION_SIZE;
2885
2886 /* Read From FW NVM */
2887 DPRINTF(("Read NVM\n"));
2888
2889 memset(nvm_sections, 0, sizeof(nvm_sections));
2890
2891 buf = kmem_alloc(bufsz, KM_SLEEP);
2892 if (buf == NULL)
2893 return ENOMEM;
2894
2895 for (i = 0; i < __arraycount(iwm_nvm_to_read); i++) {
2896 section = iwm_nvm_to_read[i];
2897 		KASSERT(section < IWM_NVM_NUM_OF_SECTIONS);
2898
2899 err = iwm_nvm_read_section(sc, section, buf, &len, bufsz);
2900 if (err) {
2901 err = 0;
2902 continue;
2903 }
2904 nvm_sections[section].data = kmem_alloc(len, KM_SLEEP);
2905 if (nvm_sections[section].data == NULL) {
2906 err = ENOMEM;
2907 break;
2908 }
2909 memcpy(nvm_sections[section].data, buf, len);
2910 nvm_sections[section].length = len;
2911 }
2912 kmem_free(buf, bufsz);
2913 if (err == 0)
2914 err = iwm_parse_nvm_sections(sc, nvm_sections);
2915
2916 for (i = 0; i < IWM_NVM_NUM_OF_SECTIONS; i++) {
2917 if (nvm_sections[i].data != NULL)
2918 kmem_free(nvm_sections[i].data, nvm_sections[i].length);
2919 }
2920
2921 return err;
2922 }
2923
2924 static int
2925 iwm_firmware_load_sect(struct iwm_softc *sc, uint32_t dst_addr,
2926 const uint8_t *section, uint32_t byte_cnt)
2927 {
2928 int err = EINVAL;
2929 uint32_t chunk_sz, offset;
2930
2931 chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, byte_cnt);
2932
2933 for (offset = 0; offset < byte_cnt; offset += chunk_sz) {
2934 uint32_t addr, len;
2935 const uint8_t *data;
2936
2937 addr = dst_addr + offset;
2938 len = MIN(chunk_sz, byte_cnt - offset);
2939 data = section + offset;
2940
2941 err = iwm_firmware_load_chunk(sc, addr, data, len);
2942 if (err)
2943 break;
2944 }
2945
2946 return err;
2947 }
2948
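/*
 * DMA one chunk of a firmware section into device SRAM via the FH
 * service channel: stage the chunk in the pre-allocated fw_dma buffer,
 * program the channel's source, destination, and byte-count registers,
 * then sleep until the "chunk done" interrupt fires (or 5 seconds pass).
 */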
2949 static int
2950 iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
2951 const uint8_t *section, uint32_t byte_cnt)
2952 {
2953 struct iwm_dma_info *dma = &sc->fw_dma;
2954 bool is_extended = false;
2955 int err;
2956
2957 /* Copy firmware chunk into pre-allocated DMA-safe memory. */
2958 memcpy(dma->vaddr, section, byte_cnt);
2959 bus_dmamap_sync(sc->sc_dmat, dma->map, 0, byte_cnt,
2960 BUS_DMASYNC_PREWRITE);
2961
2962 if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
2963 dst_addr <= IWM_FW_MEM_EXTENDED_END)
2964 is_extended = true;
2965
2966 if (is_extended) {
2967 iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
2968 IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2969 }
2970
2971 sc->sc_fw_chunk_done = 0;
2972
2973 if (!iwm_nic_lock(sc)) {
2974 if (is_extended)
2975 iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
2976 IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2977 return EBUSY;
2978 }
2979
2980 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2981 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
2982 IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
2983 dst_addr);
2984 IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
2985 dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
2986 IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
2987 (iwm_get_dma_hi_addr(dma->paddr)
2988 << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
2989 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
2990 1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
2991 1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
2992 IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
2993 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2994 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
2995 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
2996 IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
2997
2998 iwm_nic_unlock(sc);
2999
3000 /* Wait for this segment to load. */
3001 err = 0;
3002 while (!sc->sc_fw_chunk_done) {
3003 err = tsleep(&sc->sc_fw, 0, "iwmfw", mstohz(5000));
3004 if (err)
3005 break;
3006 }
3007 if (!sc->sc_fw_chunk_done) {
3008 aprint_error_dev(sc->sc_dev,
3009 "fw chunk addr 0x%x len %d failed to load\n",
3010 dst_addr, byte_cnt);
3011 }
3012
3013 if (is_extended) {
3014 int rv = iwm_nic_lock(sc);
3015 iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
3016 IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
3017 if (rv == 0)
3018 iwm_nic_unlock(sc);
3019 }
3020
3021 return err;
3022 }
3023
3024 static int
3025 iwm_load_firmware_7000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3026 {
3027 struct iwm_fw_sects *fws;
3028 int err, i;
3029 void *data;
3030 uint32_t dlen;
3031 uint32_t offset;
3032
3033 fws = &sc->sc_fw.fw_sects[ucode_type];
3034 for (i = 0; i < fws->fw_count; i++) {
3035 data = fws->fw_sect[i].fws_data;
3036 dlen = fws->fw_sect[i].fws_len;
3037 offset = fws->fw_sect[i].fws_devoff;
3038 if (dlen > sc->sc_fwdmasegsz) {
3039 err = EFBIG;
3040 } else
3041 err = iwm_firmware_load_sect(sc, offset, data, dlen);
3042 if (err) {
3043 aprint_error_dev(sc->sc_dev,
3044 "could not load firmware chunk %u of %u\n",
3045 i, fws->fw_count);
3046 return err;
3047 }
3048 }
3049
3050 IWM_WRITE(sc, IWM_CSR_RESET, 0);
3051
3052 return 0;
3053 }
3054
3055 static int
3056 iwm_load_cpu_sections_8000(struct iwm_softc *sc, struct iwm_fw_sects *fws,
3057 int cpu, int *first_ucode_section)
3058 {
3059 int shift_param;
3060 int i, err = 0, sec_num = 0x1;
3061 uint32_t val, last_read_idx = 0;
3062 void *data;
3063 uint32_t dlen;
3064 uint32_t offset;
3065
3066 if (cpu == 1) {
3067 shift_param = 0;
3068 *first_ucode_section = 0;
3069 } else {
3070 shift_param = 16;
3071 (*first_ucode_section)++;
3072 }
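	/*
	 * Loaded sections are acknowledged to the ucode through
	 * IWM_FH_UCODE_LOAD_STATUS as a growing bitmask; CPU1's sections
	 * occupy the low 16 bits (shift 0), CPU2's the high 16 bits.
	 */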
3073
3074 for (i = *first_ucode_section; i < IWM_UCODE_SECT_MAX; i++) {
3075 last_read_idx = i;
3076 data = fws->fw_sect[i].fws_data;
3077 dlen = fws->fw_sect[i].fws_len;
3078 offset = fws->fw_sect[i].fws_devoff;
3079
3080 		/*
3081 		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates the
3082 		 * CPU1 sections from the CPU2 sections.
3083 		 * PAGING_SEPARATOR_SECTION delimiter - separates the CPU2
3084 		 * non-paged sections from the CPU2 paging section.
3085 		 */
3086 if (!data || offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
3087 offset == IWM_PAGING_SEPARATOR_SECTION)
3088 break;
3089
3090 if (dlen > sc->sc_fwdmasegsz) {
3091 err = EFBIG;
3092 } else
3093 err = iwm_firmware_load_sect(sc, offset, data, dlen);
3094 if (err) {
3095 aprint_error_dev(sc->sc_dev,
3096 "could not load firmware chunk %d (error %d)\n",
3097 i, err);
3098 return err;
3099 }
3100
3101 /* Notify the ucode of the loaded section number and status */
3102 if (iwm_nic_lock(sc)) {
3103 val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
3104 val = val | (sec_num << shift_param);
3105 IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
3106 sec_num = (sec_num << 1) | 0x1;
3107 iwm_nic_unlock(sc);
3108
3109 /*
3110 * The firmware won't load correctly without this delay.
3111 */
3112 DELAY(8000);
3113 }
3114 }
3115
3116 *first_ucode_section = last_read_idx;
3117
3118 if (iwm_nic_lock(sc)) {
3119 if (cpu == 1)
3120 IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
3121 else
3122 IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
3123 iwm_nic_unlock(sc);
3124 }
3125
3126 return 0;
3127 }
3128
3129 static int
3130 iwm_load_firmware_8000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3131 {
3132 struct iwm_fw_sects *fws;
3133 int err = 0;
3134 int first_ucode_section;
3135
3136 fws = &sc->sc_fw.fw_sects[ucode_type];
3137
3138 /* configure the ucode to be ready to get the secured image */
3139 /* release CPU reset */
3140 iwm_write_prph(sc, IWM_RELEASE_CPU_RESET, IWM_RELEASE_CPU_RESET_BIT);
3141
3142 /* load to FW the binary Secured sections of CPU1 */
3143 err = iwm_load_cpu_sections_8000(sc, fws, 1, &first_ucode_section);
3144 if (err)
3145 return err;
3146
3147 /* load to FW the binary sections of CPU2 */
3148 return iwm_load_cpu_sections_8000(sc, fws, 2, &first_ucode_section);
3149 }
3150
3151 static int
3152 iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3153 {
3154 int err, w;
3155
3156 sc->sc_uc.uc_intr = 0;
3157
3158 if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
3159 err = iwm_load_firmware_8000(sc, ucode_type);
3160 else
3161 err = iwm_load_firmware_7000(sc, ucode_type);
3162
3163 if (err)
3164 return err;
3165
3166 /* wait for the firmware to load */
3167 for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++)
3168 err = tsleep(&sc->sc_uc, 0, "iwmuc", mstohz(100));
3169 if (err || !sc->sc_uc.uc_ok)
3170 aprint_error_dev(sc->sc_dev, "could not load firmware\n");
3171
3172 return err;
3173 }
3174
3175 static int
3176 iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3177 {
3178 int err;
3179
3180 IWM_WRITE(sc, IWM_CSR_INT, ~0);
3181
3182 err = iwm_nic_init(sc);
3183 if (err) {
3184 aprint_error_dev(sc->sc_dev, "Unable to init nic\n");
3185 return err;
3186 }
3187
3188 /* make sure rfkill handshake bits are cleared */
3189 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
3190 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
3191 IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
3192
3193 /* clear (again), then enable host interrupts */
3194 IWM_WRITE(sc, IWM_CSR_INT, ~0);
3195 iwm_enable_interrupts(sc);
3196
3197 /* really make sure rfkill handshake bits are cleared */
3198 /* maybe we should write a few times more? just to make sure */
3199 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
3200 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
3201
3202 return iwm_load_firmware(sc, ucode_type);
3203 }
3204
3205 static int
3206 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
3207 {
3208 struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
3209 .valid = htole32(valid_tx_ant),
3210 };
3211
3212 return iwm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD, 0,
3213 sizeof(tx_ant_cmd), &tx_ant_cmd);
3214 }
3215
3216 static int
3217 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
3218 {
3219 struct iwm_phy_cfg_cmd phy_cfg_cmd;
3220 enum iwm_ucode_type ucode_type = sc->sc_uc_current;
3221
3222 phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
3223 phy_cfg_cmd.calib_control.event_trigger =
3224 sc->sc_default_calib[ucode_type].event_trigger;
3225 phy_cfg_cmd.calib_control.flow_trigger =
3226 sc->sc_default_calib[ucode_type].flow_trigger;
3227
3228 DPRINTFN(10, ("Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg));
3229 return iwm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, 0,
3230 sizeof(phy_cfg_cmd), &phy_cfg_cmd);
3231 }
3232
3233 static int
3234 iwm_load_ucode_wait_alive(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3235 {
3236 enum iwm_ucode_type old_type = sc->sc_uc_current;
3237 int err;
3238
3239 err = iwm_read_firmware(sc);
3240 if (err)
3241 return err;
3242
3243 sc->sc_uc_current = ucode_type;
3244 err = iwm_start_fw(sc, ucode_type);
3245 if (err) {
3246 sc->sc_uc_current = old_type;
3247 return err;
3248 }
3249
3250 return iwm_post_alive(sc);
3251 }
3252
3253 static int
3254 iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
3255 {
3256 int err;
3257
3258 if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
3259 aprint_error_dev(sc->sc_dev,
3260 "radio is disabled by hardware switch\n");
3261 return EPERM;
3262 }
3263
3264 sc->sc_init_complete = 0;
3265 err = iwm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_INIT);
3266 if (err) {
3267 aprint_error_dev(sc->sc_dev, "failed to load init firmware\n");
3268 return err;
3269 }
3270
3271 if (justnvm) {
3272 err = iwm_nvm_init(sc);
3273 if (err) {
3274 aprint_error_dev(sc->sc_dev, "failed to read nvm\n");
3275 return err;
3276 }
3277
3278 memcpy(&sc->sc_ic.ic_myaddr, &sc->sc_nvm.hw_addr,
3279 ETHER_ADDR_LEN);
3280 return 0;
3281 }
3282
3283 err = iwm_send_bt_init_conf(sc);
3284 if (err)
3285 return err;
3286
3287 err = iwm_sf_config(sc, IWM_SF_INIT_OFF);
3288 if (err)
3289 return err;
3290
3291 err = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc));
3292 if (err)
3293 return err;
3294
3295 /*
3296 * Send phy configurations command to init uCode
3297 * to start the 16.0 uCode init image internal calibrations.
3298 */
3299 err = iwm_send_phy_cfg_cmd(sc);
3300 if (err)
3301 return err;
3302
3303 /*
3304 * Nothing to do but wait for the init complete notification
3305 * from the firmware
3306 */
3307 while (!sc->sc_init_complete) {
3308 err = tsleep(&sc->sc_init_complete, 0, "iwminit", mstohz(2000));
3309 if (err)
3310 break;
3311 }
3312
3313 return err;
3314 }
3315
3316 static int
3317 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
3318 {
3319 struct iwm_rx_ring *ring = &sc->rxq;
3320 struct iwm_rx_data *data = &ring->data[idx];
3321 struct mbuf *m;
3322 int err;
3323 int fatal = 0;
3324
3325 m = m_gethdr(M_DONTWAIT, MT_DATA);
3326 if (m == NULL)
3327 return ENOBUFS;
3328
3329 if (size <= MCLBYTES) {
3330 MCLGET(m, M_DONTWAIT);
3331 } else {
3332 MEXTMALLOC(m, IWM_RBUF_SIZE, M_DONTWAIT);
3333 }
3334 if ((m->m_flags & M_EXT) == 0) {
3335 m_freem(m);
3336 return ENOBUFS;
3337 }
3338
3339 if (data->m != NULL) {
3340 bus_dmamap_unload(sc->sc_dmat, data->map);
3341 fatal = 1;
3342 }
3343
3344 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3345 err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
3346 BUS_DMA_READ|BUS_DMA_NOWAIT);
3347 if (err) {
3348 /* XXX */
3349 if (fatal)
3350 panic("iwm: could not load RX mbuf");
3351 m_freem(m);
3352 return err;
3353 }
3354 data->m = m;
3355 bus_dmamap_sync(sc->sc_dmat, data->map, 0, size, BUS_DMASYNC_PREREAD);
3356
3357 	/* Update RX descriptor with the buffer's 256-byte-aligned DMA address (stored >> 8). */
3358 ring->desc[idx] = htole32(data->map->dm_segs[0].ds_addr >> 8);
3359 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
3360 idx * sizeof(uint32_t), sizeof(uint32_t), BUS_DMASYNC_PREWRITE);
3361
3362 return 0;
3363 }
3364
3365 #define IWM_RSSI_OFFSET 50
3366 static int
3367 iwm_calc_rssi(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3368 {
3369 int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
3370 uint32_t agc_a, agc_b;
3371 uint32_t val;
3372
3373 val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_AGC_IDX]);
3374 agc_a = (val & IWM_OFDM_AGC_A_MSK) >> IWM_OFDM_AGC_A_POS;
3375 agc_b = (val & IWM_OFDM_AGC_B_MSK) >> IWM_OFDM_AGC_B_POS;
3376
3377 val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_RSSI_AB_IDX]);
3378 rssi_a = (val & IWM_OFDM_RSSI_INBAND_A_MSK) >> IWM_OFDM_RSSI_A_POS;
3379 rssi_b = (val & IWM_OFDM_RSSI_INBAND_B_MSK) >> IWM_OFDM_RSSI_B_POS;
3380
3381 /*
3382 * dBm = rssi dB - agc dB - constant.
3383 * Higher AGC (higher radio gain) means lower signal.
3384 */
3385 rssi_a_dbm = rssi_a - IWM_RSSI_OFFSET - agc_a;
3386 rssi_b_dbm = rssi_b - IWM_RSSI_OFFSET - agc_b;
3387 max_rssi_dbm = MAX(rssi_a_dbm, rssi_b_dbm);
3388
3389 DPRINTF(("Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
3390 rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b));
3391
3392 return max_rssi_dbm;
3393 }
3394
3395 /*
3396  * RSSI values are reported by the FW as positive values - we need to
3397  * negate them to obtain their dBm. Account for missing antennas by
3398  * replacing 0 values with -256 dBm: practically zero power and an infeasible 8-bit value.
3399 */
3400 static int
3401 iwm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3402 {
3403 int energy_a, energy_b, energy_c, max_energy;
3404 uint32_t val;
3405
3406 val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
3407 energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3408 IWM_RX_INFO_ENERGY_ANT_A_POS;
3409 energy_a = energy_a ? -energy_a : -256;
3410 energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3411 IWM_RX_INFO_ENERGY_ANT_B_POS;
3412 energy_b = energy_b ? -energy_b : -256;
3413 energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3414 IWM_RX_INFO_ENERGY_ANT_C_POS;
3415 energy_c = energy_c ? -energy_c : -256;
3416 max_energy = MAX(energy_a, energy_b);
3417 max_energy = MAX(max_energy, energy_c);
3418
3419 DPRINTFN(12, ("energy In A %d B %d C %d, and max %d\n",
3420 energy_a, energy_b, energy_c, max_energy));
3421
3422 return max_energy;
3423 }
3424
3425 static void
3426 iwm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3427 struct iwm_rx_data *data)
3428 {
3429 struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
3430
3431 DPRINTFN(20, ("received PHY stats\n"));
3432 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
3433 sizeof(*phy_info), BUS_DMASYNC_POSTREAD);
3434
3435 memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3436 }
3437
3438 /*
3439 * Retrieve the average noise (in dBm) among receivers.
3440 */
3441 static int
3442 iwm_get_noise(const struct iwm_statistics_rx_non_phy *stats)
3443 {
3444 int i, total, nbant, noise;
3445
3446 total = nbant = noise = 0;
3447 for (i = 0; i < 3; i++) {
3448 noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
3449 if (noise) {
3450 total += noise;
3451 nbant++;
3452 }
3453 }
3454
3455 /* There should be at least one antenna but check anyway. */
3456 return (nbant == 0) ? -127 : (total / nbant) - 107;
3457 }
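/*
 * In iwm_get_noise() above, the -107 offset presumably converts the
 * firmware's unsigned per-antenna silence readings to dBm, and -127
 * serves as a floor when no antenna reported a measurement.
 */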
3458
3459 static void
3460 iwm_rx_rx_mpdu(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3461 struct iwm_rx_data *data)
3462 {
3463 struct ieee80211com *ic = &sc->sc_ic;
3464 struct ieee80211_frame *wh;
3465 struct ieee80211_node *ni;
3466 struct ieee80211_channel *c = NULL;
3467 struct mbuf *m;
3468 struct iwm_rx_phy_info *phy_info;
3469 struct iwm_rx_mpdu_res_start *rx_res;
3470 int device_timestamp;
3471 uint32_t len;
3472 uint32_t rx_pkt_status;
3473 int rssi;
3474 int s;
3475
3476 bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
3477 BUS_DMASYNC_POSTREAD);
3478
3479 phy_info = &sc->sc_last_phy_info;
3480 rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
3481 wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
3482 len = le16toh(rx_res->byte_count);
3483 rx_pkt_status = le32toh(*(uint32_t *)(pkt->data +
3484 sizeof(*rx_res) + len));
3485
3486 m = data->m;
3487 m->m_data = pkt->data + sizeof(*rx_res);
3488 m->m_pkthdr.len = m->m_len = len;
3489
3490 if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
3491 DPRINTF(("dsp size out of range [0,20]: %d\n",
3492 phy_info->cfg_phy_cnt));
3493 return;
3494 }
3495
3496 if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
3497 !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
3498 DPRINTF(("Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status));
3499 return; /* drop */
3500 }
3501
3502 device_timestamp = le32toh(phy_info->system_timestamp);
3503
3504 if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_RX_ENERGY_API) {
3505 rssi = iwm_get_signal_strength(sc, phy_info);
3506 } else {
3507 rssi = iwm_calc_rssi(sc, phy_info);
3508 }
3509 rssi = -rssi;
3510
3511 if (ic->ic_state == IEEE80211_S_SCAN)
3512 iwm_fix_channel(sc, m);
3513
3514 if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0)
3515 return;
3516
3517 m_set_rcvif(m, IC2IFP(ic));
3518
3519 if (le32toh(phy_info->channel) < __arraycount(ic->ic_channels))
3520 c = &ic->ic_channels[le32toh(phy_info->channel)];
3521
3522 s = splnet();
3523
3524 ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3525 if (c)
3526 ni->ni_chan = c;
3527
3528 if (__predict_false(sc->sc_drvbpf != NULL)) {
3529 struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3530
3531 tap->wr_flags = 0;
3532 if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3533 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3534 tap->wr_chan_freq =
3535 htole16(ic->ic_channels[phy_info->channel].ic_freq);
3536 tap->wr_chan_flags =
3537 htole16(ic->ic_channels[phy_info->channel].ic_flags);
3538 tap->wr_dbm_antsignal = (int8_t)rssi;
3539 tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3540 tap->wr_tsft = phy_info->system_timestamp;
3541 if (phy_info->phy_flags &
3542 htole16(IWM_RX_RES_PHY_FLAGS_OFDM_HT)) {
3543 uint8_t mcs = (phy_info->rate_n_flags &
3544 htole32(IWM_RATE_HT_MCS_RATE_CODE_MSK));
3545 tap->wr_rate = (0x80 | mcs);
3546 } else {
3547 uint8_t rate = (phy_info->rate_n_flags &
3548 htole32(IWM_RATE_LEGACY_RATE_MSK));
3549 switch (rate) {
3550 /* CCK rates. */
3551 case 10: tap->wr_rate = 2; break;
3552 case 20: tap->wr_rate = 4; break;
3553 case 55: tap->wr_rate = 11; break;
3554 case 110: tap->wr_rate = 22; break;
3555 /* OFDM rates. */
3556 case 0xd: tap->wr_rate = 12; break;
3557 case 0xf: tap->wr_rate = 18; break;
3558 case 0x5: tap->wr_rate = 24; break;
3559 case 0x7: tap->wr_rate = 36; break;
3560 case 0x9: tap->wr_rate = 48; break;
3561 case 0xb: tap->wr_rate = 72; break;
3562 case 0x1: tap->wr_rate = 96; break;
3563 case 0x3: tap->wr_rate = 108; break;
3564 /* Unknown rate: should not happen. */
3565 default: tap->wr_rate = 0;
3566 }
3567 }
3568
3569 bpf_mtap2(sc->sc_drvbpf, tap, sc->sc_rxtap_len, m);
3570 }
3571 ieee80211_input(ic, m, ni, rssi, device_timestamp);
3572 ieee80211_free_node(ni);
3573
3574 splx(s);
3575 }
3576
3577 static void
3578 iwm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3579 struct iwm_node *in)
3580 {
3581 struct ieee80211com *ic = &sc->sc_ic;
3582 struct ifnet *ifp = IC2IFP(ic);
3583 struct iwm_tx_resp *tx_resp = (void *)pkt->data;
3584 int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3585 int failack = tx_resp->failure_frame;
3586
3587 KASSERT(tx_resp->frame_count == 1);
3588
3589 /* Update rate control statistics. */
3590 in->in_amn.amn_txcnt++;
3591 if (failack > 0) {
3592 in->in_amn.amn_retrycnt++;
3593 }
3594
3595 if (status != IWM_TX_STATUS_SUCCESS &&
3596 status != IWM_TX_STATUS_DIRECT_DONE)
3597 ifp->if_oerrors++;
3598 else
3599 ifp->if_opackets++;
3600 }
3601
3602 static void
3603 iwm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3604 struct iwm_rx_data *data)
3605 {
3606 struct ieee80211com *ic = &sc->sc_ic;
3607 struct ifnet *ifp = IC2IFP(ic);
3608 struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
3609 int idx = cmd_hdr->idx;
3610 int qid = cmd_hdr->qid;
3611 struct iwm_tx_ring *ring = &sc->txq[qid];
3612 struct iwm_tx_data *txd = &ring->data[idx];
3613 struct iwm_node *in = txd->in;
3614
3615 if (txd->done) {
3616 DPRINTF(("%s: got tx interrupt that's already been handled!\n",
3617 DEVNAME(sc)));
3618 return;
3619 }
3620
3621 bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
3622 BUS_DMASYNC_POSTREAD);
3623
3624 sc->sc_tx_timer = 0;
3625
3626 iwm_rx_tx_cmd_single(sc, pkt, in);
3627
3628 bus_dmamap_sync(sc->sc_dmat, txd->map, 0, txd->map->dm_mapsize,
3629 BUS_DMASYNC_POSTWRITE);
3630 bus_dmamap_unload(sc->sc_dmat, txd->map);
3631 m_freem(txd->m);
3632
3633 DPRINTFN(8, ("free txd %p, in %p\n", txd, txd->in));
3634 KASSERT(txd->done == 0);
3635 txd->done = 1;
3636 KASSERT(txd->in);
3637
3638 txd->m = NULL;
3639 txd->in = NULL;
3640 ieee80211_free_node(&in->in_ni);
3641
3642 if (--ring->queued < IWM_TX_RING_LOMARK) {
3643 sc->qfullmsk &= ~(1 << ring->qid);
3644 if (sc->qfullmsk == 0 && (ifp->if_flags & IFF_OACTIVE)) {
3645 ifp->if_flags &= ~IFF_OACTIVE;
3646 if_start_lock(ifp);
3647 }
3648 }
3649 }
3650
3651 static int
3652 iwm_binding_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action)
3653 {
3654 struct iwm_binding_cmd cmd;
3655 struct iwm_phy_ctxt *phyctxt = in->in_phyctxt;
3656 int i, err;
3657 uint32_t status;
3658
3659 memset(&cmd, 0, sizeof(cmd));
3660
3661 cmd.id_and_color
3662 = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
3663 cmd.action = htole32(action);
3664 cmd.phy = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
3665
3666 cmd.macs[0] = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
3667 for (i = 1; i < IWM_MAX_MACS_IN_BINDING; i++)
3668 cmd.macs[i] = htole32(IWM_FW_CTXT_INVALID);
3669
3670 status = 0;
3671 err = iwm_send_cmd_pdu_status(sc, IWM_BINDING_CONTEXT_CMD,
3672 sizeof(cmd), &cmd, &status);
3673 if (err == 0 && status != 0)
3674 err = EIO;
3675
3676 return err;
3677 }
3678
3679 static void
3680 iwm_phy_ctxt_cmd_hdr(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
3681 struct iwm_phy_context_cmd *cmd, uint32_t action, uint32_t apply_time)
3682 {
3683 memset(cmd, 0, sizeof(struct iwm_phy_context_cmd));
3684
3685 cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(ctxt->id,
3686 ctxt->color));
3687 cmd->action = htole32(action);
3688 cmd->apply_time = htole32(apply_time);
3689 }
3690
3691 static void
3692 iwm_phy_ctxt_cmd_data(struct iwm_softc *sc, struct iwm_phy_context_cmd *cmd,
3693 struct ieee80211_channel *chan, uint8_t chains_static,
3694 uint8_t chains_dynamic)
3695 {
3696 struct ieee80211com *ic = &sc->sc_ic;
3697 uint8_t active_cnt, idle_cnt;
3698
3699 cmd->ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
3700 IWM_PHY_BAND_24 : IWM_PHY_BAND_5;
3701
3702 cmd->ci.channel = ieee80211_chan2ieee(ic, chan);
3703 cmd->ci.width = IWM_PHY_VHT_CHANNEL_MODE20;
3704 cmd->ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
3705
	/* Set the Rx chains. */
3707 idle_cnt = chains_static;
3708 active_cnt = chains_dynamic;
3709
3710 cmd->rxchain_info = htole32(iwm_fw_valid_rx_ant(sc) <<
3711 IWM_PHY_RX_CHAIN_VALID_POS);
3712 cmd->rxchain_info |= htole32(idle_cnt << IWM_PHY_RX_CHAIN_CNT_POS);
3713 cmd->rxchain_info |= htole32(active_cnt <<
3714 IWM_PHY_RX_CHAIN_MIMO_CNT_POS);
3715
3716 cmd->txchain_info = htole32(iwm_fw_valid_tx_ant(sc));
3717 }
3718
3719 static int
3720 iwm_phy_ctxt_cmd(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
3721 uint8_t chains_static, uint8_t chains_dynamic, uint32_t action,
3722 uint32_t apply_time)
3723 {
3724 struct iwm_phy_context_cmd cmd;
3725
3726 iwm_phy_ctxt_cmd_hdr(sc, ctxt, &cmd, action, apply_time);
3727
3728 iwm_phy_ctxt_cmd_data(sc, &cmd, ctxt->channel,
3729 chains_static, chains_dynamic);
3730
3731 return iwm_send_cmd_pdu(sc, IWM_PHY_CONTEXT_CMD, 0,
3732 sizeof(struct iwm_phy_context_cmd), &cmd);
3733 }
3734
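/*
 * Send a host command to the firmware via the command queue.
 * Payloads too large for the pre-allocated command slot are
 * carried in a temporary mbuf instead. Synchronous commands
 * sleep until the firmware responds or a one second timeout
 * expires; with IWM_CMD_WANT_SKB the response packet is made
 * available in hcmd->resp_pkt.
 */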
3735 static int
3736 iwm_send_cmd(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
3737 {
3738 struct iwm_tx_ring *ring = &sc->txq[IWM_CMD_QUEUE];
3739 struct iwm_tfd *desc;
3740 struct iwm_tx_data *txdata;
3741 struct iwm_device_cmd *cmd;
3742 struct mbuf *m;
3743 bus_addr_t paddr;
3744 uint32_t addr_lo;
3745 int err = 0, i, paylen, off, s;
3746 int code;
3747 int async, wantresp;
3748 int group_id;
3749 size_t hdrlen, datasz;
3750 uint8_t *data;
3751
3752 code = hcmd->id;
3753 async = hcmd->flags & IWM_CMD_ASYNC;
3754 wantresp = hcmd->flags & IWM_CMD_WANT_SKB;
3755
3756 for (i = 0, paylen = 0; i < __arraycount(hcmd->len); i++) {
3757 paylen += hcmd->len[i];
3758 }
3759
	/* If the command wants an answer, mark sc_cmd_resp busy. */
3761 if (wantresp) {
3762 KASSERT(!async);
3763 while (sc->sc_wantresp != IWM_CMD_RESP_IDLE)
3764 tsleep(&sc->sc_wantresp, 0, "iwmcmdsl", 0);
3765 sc->sc_wantresp = ring->qid << 16 | ring->cur;
3766 }
3767
	/*
	 * Is the hardware still available (e.g. after the wait above)?
	 */
3771 s = splnet();
3772 if (sc->sc_flags & IWM_FLAG_STOPPED) {
3773 err = ENXIO;
3774 goto out;
3775 }
3776
3777 desc = &ring->desc[ring->cur];
3778 txdata = &ring->data[ring->cur];
3779
3780 group_id = iwm_cmd_groupid(code);
3781 if (group_id != 0) {
3782 hdrlen = sizeof(cmd->hdr_wide);
3783 datasz = sizeof(cmd->data_wide);
3784 } else {
3785 hdrlen = sizeof(cmd->hdr);
3786 datasz = sizeof(cmd->data);
3787 }
3788
3789 if (paylen > datasz) {
3790 /* Command is too large to fit in pre-allocated space. */
3791 size_t totlen = hdrlen + paylen;
3792 if (paylen > IWM_MAX_CMD_PAYLOAD_SIZE) {
3793 aprint_error_dev(sc->sc_dev,
3794 "firmware command too long (%zd bytes)\n", totlen);
3795 err = EINVAL;
3796 goto out;
3797 }
3798 m = m_gethdr(M_DONTWAIT, MT_DATA);
3799 if (m == NULL) {
3800 err = ENOMEM;
3801 goto out;
3802 }
3803 MEXTMALLOC(m, IWM_RBUF_SIZE, M_DONTWAIT);
3804 if (!(m->m_flags & M_EXT)) {
3805 aprint_error_dev(sc->sc_dev,
3806 "could not get fw cmd mbuf (%zd bytes)\n", totlen);
3807 m_freem(m);
3808 err = ENOMEM;
3809 goto out;
3810 }
3811 cmd = mtod(m, struct iwm_device_cmd *);
3812 err = bus_dmamap_load(sc->sc_dmat, txdata->map, cmd,
3813 totlen, NULL, BUS_DMA_NOWAIT | BUS_DMA_WRITE);
3814 if (err) {
3815 aprint_error_dev(sc->sc_dev,
3816 "could not load fw cmd mbuf (%zd bytes)\n", totlen);
3817 m_freem(m);
3818 goto out;
3819 }
3820 txdata->m = m;
3821 paddr = txdata->map->dm_segs[0].ds_addr;
3822 } else {
3823 cmd = &ring->cmd[ring->cur];
3824 paddr = txdata->cmd_paddr;
3825 }
3826
3827 if (group_id != 0) {
3828 cmd->hdr_wide.opcode = iwm_cmd_opcode(code);
3829 cmd->hdr_wide.group_id = group_id;
3830 cmd->hdr_wide.qid = ring->qid;
3831 cmd->hdr_wide.idx = ring->cur;
3832 cmd->hdr_wide.length = htole16(paylen);
3833 cmd->hdr_wide.version = iwm_cmd_version(code);
3834 data = cmd->data_wide;
3835 } else {
3836 cmd->hdr.code = code;
3837 cmd->hdr.flags = 0;
3838 cmd->hdr.qid = ring->qid;
3839 cmd->hdr.idx = ring->cur;
3840 data = cmd->data;
3841 }
3842
3843 for (i = 0, off = 0; i < __arraycount(hcmd->data); i++) {
3844 if (hcmd->len[i] == 0)
3845 continue;
3846 memcpy(data + off, hcmd->data[i], hcmd->len[i]);
3847 off += hcmd->len[i];
3848 }
3849 KASSERT(off == paylen);
3850
3851 /* lo field is not aligned */
3852 addr_lo = htole32((uint32_t)paddr);
3853 memcpy(&desc->tbs[0].lo, &addr_lo, sizeof(uint32_t));
3854 desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(paddr)
3855 | ((hdrlen + paylen) << 4));
3856 desc->num_tbs = 1;
3857
3858 DPRINTFN(8, ("iwm_send_cmd 0x%x size=%zu %s\n",
3859 code, hdrlen + paylen, async ? " (async)" : ""));
3860
3861 if (paylen > datasz) {
3862 bus_dmamap_sync(sc->sc_dmat, txdata->map, 0,
3863 hdrlen + paylen, BUS_DMASYNC_PREWRITE);
3864 } else {
3865 bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
3866 (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
3867 hdrlen + paylen, BUS_DMASYNC_PREWRITE);
3868 }
3869 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
3870 (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
3871 sizeof(*desc), BUS_DMASYNC_PREWRITE);
3872
3873 IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
3874 IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
3875 if (!iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
3876 IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
3877 (IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
3878 IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000)) {
3879 aprint_error_dev(sc->sc_dev, "acquiring device failed\n");
3880 err = EBUSY;
3881 goto out;
3882 }
3883
3884 #if 0
3885 iwm_update_sched(sc, ring->qid, ring->cur, 0, 0);
3886 #endif
3887 DPRINTF(("sending command 0x%x qid %d, idx %d\n",
3888 code, ring->qid, ring->cur));
3889
3890 /* Kick command ring. */
3891 ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
3892 IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3893
3894 if (!async) {
3895 int generation = sc->sc_generation;
3896 err = tsleep(desc, PCATCH, "iwmcmd", mstohz(1000));
3897 if (err == 0) {
3898 /* if hardware is no longer up, return error */
3899 if (generation != sc->sc_generation) {
3900 err = ENXIO;
3901 } else {
3902 hcmd->resp_pkt = (void *)sc->sc_cmd_resp;
3903 }
3904 }
3905 }
3906 out:
3907 if (wantresp && err) {
3908 iwm_free_resp(sc, hcmd);
3909 }
3910 splx(s);
3911
3912 return err;
3913 }
3914
3915 static int
3916 iwm_send_cmd_pdu(struct iwm_softc *sc, uint32_t id, uint32_t flags,
3917 uint16_t len, const void *data)
3918 {
3919 struct iwm_host_cmd cmd = {
3920 .id = id,
3921 .len = { len, },
3922 .data = { data, },
3923 .flags = flags,
3924 };
3925
3926 return iwm_send_cmd(sc, &cmd);
3927 }
3928
3929 static int
3930 iwm_send_cmd_status(struct iwm_softc *sc, struct iwm_host_cmd *cmd,
3931 uint32_t *status)
3932 {
3933 struct iwm_rx_packet *pkt;
3934 struct iwm_cmd_response *resp;
3935 int err, resp_len;
3936
3937 KASSERT((cmd->flags & IWM_CMD_WANT_SKB) == 0);
3938 cmd->flags |= IWM_CMD_WANT_SKB;
3939
3940 err = iwm_send_cmd(sc, cmd);
3941 if (err)
3942 return err;
3943 pkt = cmd->resp_pkt;
3944
3945 /* Can happen if RFKILL is asserted */
3946 if (!pkt) {
3947 err = 0;
3948 goto out_free_resp;
3949 }
3950
3951 if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
3952 err = EIO;
3953 goto out_free_resp;
3954 }
3955
3956 resp_len = iwm_rx_packet_payload_len(pkt);
3957 if (resp_len != sizeof(*resp)) {
3958 err = EIO;
3959 goto out_free_resp;
3960 }
3961
3962 resp = (void *)pkt->data;
3963 *status = le32toh(resp->status);
3964 out_free_resp:
3965 iwm_free_resp(sc, cmd);
3966 return err;
3967 }
3968
3969 static int
3970 iwm_send_cmd_pdu_status(struct iwm_softc *sc, uint32_t id, uint16_t len,
3971 const void *data, uint32_t *status)
3972 {
3973 struct iwm_host_cmd cmd = {
3974 .id = id,
3975 .len = { len, },
3976 .data = { data, },
3977 };
3978
3979 return iwm_send_cmd_status(sc, &cmd, status);
3980 }
3981
3982 static void
3983 iwm_free_resp(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
3984 {
3985 KASSERT(sc->sc_wantresp != IWM_CMD_RESP_IDLE);
3986 KASSERT((hcmd->flags & IWM_CMD_WANT_SKB) == IWM_CMD_WANT_SKB);
3987 sc->sc_wantresp = IWM_CMD_RESP_IDLE;
3988 wakeup(&sc->sc_wantresp);
3989 }
3990
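/*
 * The firmware has acknowledged a host command. Free any mbuf
 * that carried an oversized payload and wake up the thread
 * sleeping in iwm_send_cmd().
 */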
3991 static void
3992 iwm_cmd_done(struct iwm_softc *sc, int qid, int idx)
3993 {
3994 struct iwm_tx_ring *ring = &sc->txq[IWM_CMD_QUEUE];
3995 struct iwm_tx_data *data;
3996
3997 if (qid != IWM_CMD_QUEUE) {
3998 return; /* Not a command ack. */
3999 }
4000
4001 data = &ring->data[idx];
4002
4003 if (data->m != NULL) {
4004 bus_dmamap_sync(sc->sc_dmat, data->map, 0,
4005 data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
4006 bus_dmamap_unload(sc->sc_dmat, data->map);
4007 m_freem(data->m);
4008 data->m = NULL;
4009 }
4010 wakeup(&ring->desc[idx]);
4011 }
4012
4013 #if 0
4014 /*
4015 * necessary only for block ack mode
4016 */
4017 void
4018 iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
4019 uint16_t len)
4020 {
4021 struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
4022 uint16_t w_val;
4023
4024 scd_bc_tbl = sc->sched_dma.vaddr;
4025
4026 len += 8; /* magic numbers came naturally from paris */
4027 if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
4028 len = roundup(len, 4) / 4;
4029
4030 w_val = htole16(sta_id << 12 | len);
4031
4032 /* Update TX scheduler. */
4033 scd_bc_tbl[qid].tfd_offset[idx] = w_val;
	bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
	    (char *)(void *)&scd_bc_tbl[qid].tfd_offset[idx] -
	    (char *)(void *)sc->sched_dma.vaddr,
	    sizeof(uint16_t), BUS_DMASYNC_PREWRITE);
4037
	/*
	 * The firmware keeps a duplicate of the first
	 * IWM_TFD_QUEUE_SIZE_BC_DUP entries past IWM_TFD_QUEUE_SIZE_MAX,
	 * presumably so its scheduler can read ahead of the ring's write
	 * pointer without wrapping; mirror the update there as well.
	 */
	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] =
		    w_val;
		bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
		    (char *)(void *)&scd_bc_tbl[qid].tfd_offset[
			IWM_TFD_QUEUE_SIZE_MAX + idx] -
		    (char *)(void *)sc->sched_dma.vaddr,
		    sizeof(uint16_t), BUS_DMASYNC_PREWRITE);
	}
4046 }
4047 #endif
4048
4049 /*
 * Fill in various bits for management frames, and leave them
4051 * unfilled for data frames (firmware takes care of that).
4052 * Return the selected TX rate.
4053 */
4054 static const struct iwm_rate *
4055 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
4056 struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
4057 {
4058 struct ieee80211com *ic = &sc->sc_ic;
4059 struct ieee80211_node *ni = &in->in_ni;
4060 const struct iwm_rate *rinfo;
4061 int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4062 int ridx, rate_flags, i;
4063 int nrates = ni->ni_rates.rs_nrates;
4064
4065 tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
4066 tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
4067
4068 if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
4069 type != IEEE80211_FC0_TYPE_DATA) {
4070 /* for non-data, use the lowest supported rate */
4071 ridx = (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) ?
4072 IWM_RIDX_OFDM : IWM_RIDX_CCK;
4073 tx->data_retry_limit = IWM_MGMT_DFAULT_RETRY_LIMIT;
4074 #ifndef IEEE80211_NO_HT
4075 } else if (ic->ic_fixed_mcs != -1) {
4076 ridx = sc->sc_fixed_ridx;
4077 #endif
4078 } else if (ic->ic_fixed_rate != -1) {
4079 ridx = sc->sc_fixed_ridx;
4080 } else {
4081 /* for data frames, use RS table */
4082 tx->initial_rate_index = 0;
4083 tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
4084 DPRINTFN(12, ("start with txrate %d\n",
4085 tx->initial_rate_index));
4086 #ifndef IEEE80211_NO_HT
4087 if (ni->ni_flags & IEEE80211_NODE_HT) {
4088 ridx = iwm_mcs2ridx[ni->ni_txmcs];
4089 return &iwm_rates[ridx];
4090 }
4091 #endif
4092 ridx = (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) ?
4093 IWM_RIDX_OFDM : IWM_RIDX_CCK;
4094 for (i = 0; i < nrates; i++) {
4095 if (iwm_rates[i].rate == (ni->ni_txrate &
4096 IEEE80211_RATE_VAL)) {
4097 ridx = i;
4098 break;
4099 }
4100 }
4101 return &iwm_rates[ridx];
4102 }
4103
4104 rinfo = &iwm_rates[ridx];
4105 rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
4106 if (IWM_RIDX_IS_CCK(ridx))
4107 rate_flags |= IWM_RATE_MCS_CCK_MSK;
4108 #ifndef IEEE80211_NO_HT
4109 if ((ni->ni_flags & IEEE80211_NODE_HT) &&
4110 rinfo->ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP) {
4111 rate_flags |= IWM_RATE_MCS_HT_MSK;
4112 tx->rate_n_flags = htole32(rate_flags | rinfo->ht_plcp);
4113 } else
4114 #endif
4115 tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
4116
4117 return rinfo;
4118 }
4119
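/*
 * Queue a frame for transmission: build the Tx command, map the
 * payload for DMA, fill in the TFD, and kick the Tx ring. The
 * first TFD entry covers the initial TB0_SIZE bytes of the Tx
 * command; the second covers the rest of the command plus the
 * (possibly padded) 802.11 header.
 */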
4120 #define TB0_SIZE 16
4121 static int
4122 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
4123 {
4124 struct ieee80211com *ic = &sc->sc_ic;
4125 struct iwm_node *in = (struct iwm_node *)ni;
4126 struct iwm_tx_ring *ring;
4127 struct iwm_tx_data *data;
4128 struct iwm_tfd *desc;
4129 struct iwm_device_cmd *cmd;
4130 struct iwm_tx_cmd *tx;
4131 struct ieee80211_frame *wh;
4132 struct ieee80211_key *k = NULL;
4133 struct mbuf *m1;
4134 const struct iwm_rate *rinfo;
4135 uint32_t flags;
4136 u_int hdrlen;
4137 bus_dma_segment_t *seg;
4138 uint8_t tid, type;
4139 int i, totlen, err, pad;
4140
4141 wh = mtod(m, struct ieee80211_frame *);
4142 hdrlen = ieee80211_anyhdrsize(wh);
4143 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4144
4145 tid = 0;
4146
4147 ring = &sc->txq[ac];
4148 desc = &ring->desc[ring->cur];
4149 memset(desc, 0, sizeof(*desc));
4150 data = &ring->data[ring->cur];
4151
4152 cmd = &ring->cmd[ring->cur];
4153 cmd->hdr.code = IWM_TX_CMD;
4154 cmd->hdr.flags = 0;
4155 cmd->hdr.qid = ring->qid;
4156 cmd->hdr.idx = ring->cur;
4157
4158 tx = (void *)cmd->data;
4159 memset(tx, 0, sizeof(*tx));
4160
4161 rinfo = iwm_tx_fill_cmd(sc, in, wh, tx);
4162
4163 if (__predict_false(sc->sc_drvbpf != NULL)) {
4164 struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
4165
4166 tap->wt_flags = 0;
4167 tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
4168 tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
4169 #ifndef IEEE80211_NO_HT
4170 if ((ni->ni_flags & IEEE80211_NODE_HT) &&
4171 !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
4172 type == IEEE80211_FC0_TYPE_DATA &&
4173 rinfo->plcp == IWM_RATE_INVM_PLCP) {
4174 tap->wt_rate = (0x80 | rinfo->ht_plcp);
4175 } else
4176 #endif
4177 tap->wt_rate = rinfo->rate;
4178 tap->wt_hwqueue = ac;
4179 if (wh->i_fc[1] & IEEE80211_FC1_WEP)
4180 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
4181
4182 bpf_mtap2(sc->sc_drvbpf, tap, sc->sc_txtap_len, m);
4183 }
4184
4185 /* Encrypt the frame if need be. */
4186 if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
4187 k = ieee80211_crypto_encap(ic, ni, m);
4188 if (k == NULL) {
4189 m_freem(m);
4190 return ENOBUFS;
4191 }
4192 /* Packet header may have moved, reset our local pointer. */
4193 wh = mtod(m, struct ieee80211_frame *);
4194 }
4195 totlen = m->m_pkthdr.len;
4196
4197 flags = 0;
4198 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
4199 flags |= IWM_TX_CMD_FLG_ACK;
4200 }
4201
4202 if (type == IEEE80211_FC0_TYPE_DATA &&
4203 !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
4204 (totlen + IEEE80211_CRC_LEN > ic->ic_rtsthreshold ||
4205 (ic->ic_flags & IEEE80211_F_USEPROT)))
4206 flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
4207
4208 if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
4209 type != IEEE80211_FC0_TYPE_DATA)
4210 tx->sta_id = IWM_AUX_STA_ID;
4211 else
4212 tx->sta_id = IWM_STATION_ID;
4213
4214 if (type == IEEE80211_FC0_TYPE_MGT) {
4215 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4216
4217 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
4218 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
4219 tx->pm_frame_timeout = htole16(3);
4220 else
4221 tx->pm_frame_timeout = htole16(2);
4222 } else {
4223 tx->pm_frame_timeout = htole16(0);
4224 }
4225
4226 if (hdrlen & 3) {
4227 /* First segment length must be a multiple of 4. */
4228 flags |= IWM_TX_CMD_FLG_MH_PAD;
4229 pad = 4 - (hdrlen & 3);
4230 } else
4231 pad = 0;
4232
4233 tx->driver_txop = 0;
4234 tx->next_frame_len = 0;
4235
4236 tx->len = htole16(totlen);
4237 tx->tid_tspec = tid;
4238 tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
4239
4240 /* Set physical address of "scratch area". */
4241 tx->dram_lsb_ptr = htole32(data->scratch_paddr);
4242 tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
4243
4244 /* Copy 802.11 header in TX command. */
4245 memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);
4246
4247 flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;
4248
4249 tx->sec_ctl = 0;
4250 tx->tx_flags |= htole32(flags);
4251
4252 /* Trim 802.11 header. */
4253 m_adj(m, hdrlen);
4254
4255 err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
4256 BUS_DMA_NOWAIT | BUS_DMA_WRITE);
4257 if (err) {
4258 if (err != EFBIG) {
4259 aprint_error_dev(sc->sc_dev,
4260 "can't map mbuf (error %d)\n", err);
4261 m_freem(m);
4262 return err;
4263 }
4264 /* Too many DMA segments, linearize mbuf. */
4265 MGETHDR(m1, M_DONTWAIT, MT_DATA);
4266 if (m1 == NULL) {
4267 m_freem(m);
4268 return ENOBUFS;
4269 }
4270 if (m->m_pkthdr.len > MHLEN) {
4271 MCLGET(m1, M_DONTWAIT);
4272 if (!(m1->m_flags & M_EXT)) {
4273 m_freem(m);
4274 m_freem(m1);
4275 return ENOBUFS;
4276 }
4277 }
4278 m_copydata(m, 0, m->m_pkthdr.len, mtod(m1, void *));
4279 m1->m_pkthdr.len = m1->m_len = m->m_pkthdr.len;
4280 m_freem(m);
4281 m = m1;
4282
4283 err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
4284 BUS_DMA_NOWAIT | BUS_DMA_WRITE);
4285 if (err) {
4286 aprint_error_dev(sc->sc_dev,
4287 "can't map mbuf (error %d)\n", err);
4288 m_freem(m);
4289 return err;
4290 }
4291 }
4292 data->m = m;
4293 data->in = in;
4294 data->done = 0;
4295
4296 DPRINTFN(8, ("sending txd %p, in %p\n", data, data->in));
4297 KASSERT(data->in != NULL);
4298
4299 DPRINTFN(8, ("sending data: qid=%d idx=%d len=%d nsegs=%d\n",
4300 ring->qid, ring->cur, totlen, data->map->dm_nsegs));
4301
4302 /* Fill TX descriptor. */
4303 desc->num_tbs = 2 + data->map->dm_nsegs;
4304
4305 desc->tbs[0].lo = htole32(data->cmd_paddr);
4306 desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
4307 (TB0_SIZE << 4);
4308 desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
4309 desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
4310 ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
4311 + hdrlen + pad - TB0_SIZE) << 4);
4312
4313 /* Other DMA segments are for data payload. */
4314 seg = data->map->dm_segs;
4315 for (i = 0; i < data->map->dm_nsegs; i++, seg++) {
4316 desc->tbs[i+2].lo = htole32(seg->ds_addr);
4317 desc->tbs[i+2].hi_n_len =
4318 htole16(iwm_get_dma_hi_addr(seg->ds_addr))
4319 | ((seg->ds_len) << 4);
4320 }
4321
4322 bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize,
4323 BUS_DMASYNC_PREWRITE);
4324 bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
4325 (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
4326 sizeof (*cmd), BUS_DMASYNC_PREWRITE);
4327 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
4328 (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
4329 sizeof (*desc), BUS_DMASYNC_PREWRITE);
4330
4331 #if 0
4332 iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id,
4333 le16toh(tx->len));
4334 #endif
4335
4336 /* Kick TX ring. */
4337 ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
4338 IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
4339
4340 /* Mark TX ring as full if we reach a certain threshold. */
4341 if (++ring->queued > IWM_TX_RING_HIMARK) {
4342 sc->qfullmsk |= 1 << ring->qid;
4343 }
4344
4345 return 0;
4346 }
4347
4348 #if 0
4349 /* not necessary? */
4350 static int
4351 iwm_flush_tx_path(struct iwm_softc *sc, int tfd_msk, int sync)
4352 {
4353 struct iwm_tx_path_flush_cmd flush_cmd = {
4354 .queues_ctl = htole32(tfd_msk),
4355 .flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
4356 };
4357 int err;
4358
4359 err = iwm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, sync ? 0 : IWM_CMD_ASYNC,
4360 sizeof(flush_cmd), &flush_cmd);
4361 if (err)
4362 aprint_error_dev(sc->sc_dev, "Flushing tx queue failed: %d\n",
4363 err);
4364 return err;
4365 }
4366 #endif
4367
4368 static void
4369 iwm_led_enable(struct iwm_softc *sc)
4370 {
4371 IWM_WRITE(sc, IWM_CSR_LED_REG, IWM_CSR_LED_REG_TURN_ON);
4372 }
4373
4374 static void
4375 iwm_led_disable(struct iwm_softc *sc)
4376 {
4377 IWM_WRITE(sc, IWM_CSR_LED_REG, IWM_CSR_LED_REG_TURN_OFF);
4378 }
4379
4380 static int
4381 iwm_led_is_enabled(struct iwm_softc *sc)
4382 {
4383 return (IWM_READ(sc, IWM_CSR_LED_REG) == IWM_CSR_LED_REG_TURN_ON);
4384 }
4385
4386 static void
4387 iwm_led_blink_timeout(void *arg)
4388 {
4389 struct iwm_softc *sc = arg;
4390
4391 if (iwm_led_is_enabled(sc))
4392 iwm_led_disable(sc);
4393 else
4394 iwm_led_enable(sc);
4395
4396 callout_schedule(&sc->sc_led_blink_to, mstohz(200));
4397 }
4398
4399 static void
4400 iwm_led_blink_start(struct iwm_softc *sc)
4401 {
4402 callout_schedule(&sc->sc_led_blink_to, mstohz(200));
4403 }
4404
4405 static void
4406 iwm_led_blink_stop(struct iwm_softc *sc)
4407 {
4408 callout_stop(&sc->sc_led_blink_to);
4409 iwm_led_disable(sc);
4410 }
4411
4412 #define IWM_POWER_KEEP_ALIVE_PERIOD_SEC 25
4413
4414 static int
4415 iwm_beacon_filter_send_cmd(struct iwm_softc *sc,
4416 struct iwm_beacon_filter_cmd *cmd)
4417 {
4418 return iwm_send_cmd_pdu(sc, IWM_REPLY_BEACON_FILTERING_CMD,
4419 0, sizeof(struct iwm_beacon_filter_cmd), cmd);
4420 }
4421
4422 static void
4423 iwm_beacon_filter_set_cqm_params(struct iwm_softc *sc, struct iwm_node *in,
4424 struct iwm_beacon_filter_cmd *cmd)
4425 {
4426 cmd->ba_enable_beacon_abort = htole32(sc->sc_bf.ba_enabled);
4427 }
4428
4429 static int
4430 iwm_update_beacon_abort(struct iwm_softc *sc, struct iwm_node *in, int enable)
4431 {
4432 struct iwm_beacon_filter_cmd cmd = {
4433 IWM_BF_CMD_CONFIG_DEFAULTS,
4434 .bf_enable_beacon_filter = htole32(1),
4435 .ba_enable_beacon_abort = htole32(enable),
4436 };
4437
4438 if (!sc->sc_bf.bf_enabled)
4439 return 0;
4440
4441 sc->sc_bf.ba_enabled = enable;
4442 iwm_beacon_filter_set_cqm_params(sc, in, &cmd);
4443 return iwm_beacon_filter_send_cmd(sc, &cmd);
4444 }
4445
4446 static void
4447 iwm_power_build_cmd(struct iwm_softc *sc, struct iwm_node *in,
4448 struct iwm_mac_power_cmd *cmd)
4449 {
4450 struct ieee80211_node *ni = &in->in_ni;
4451 int dtim_period, dtim_msec, keep_alive;
4452
4453 cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
4454 in->in_color));
4455 if (ni->ni_dtim_period)
4456 dtim_period = ni->ni_dtim_period;
4457 else
4458 dtim_period = 1;
4459
	/*
	 * Regardless of power management state the driver must set the
	 * keep alive period. The FW will use it for sending keep alive
	 * NDPs immediately after association. Ensure the keep alive
	 * period is at least 3 * DTIM.
	 */
4466 dtim_msec = dtim_period * ni->ni_intval;
4467 keep_alive = MAX(3 * dtim_msec, 1000 * IWM_POWER_KEEP_ALIVE_PERIOD_SEC);
4468 keep_alive = roundup(keep_alive, 1000) / 1000;
4469 cmd->keep_alive_seconds = htole16(keep_alive);
4470
4471 #ifdef notyet
4472 cmd->flags = htole16(IWM_POWER_FLAGS_POWER_SAVE_ENA_MSK);
4473 cmd->rx_data_timeout = IWM_DEFAULT_PS_RX_DATA_TIMEOUT;
4474 cmd->tx_data_timeout = IWM_DEFAULT_PS_TX_DATA_TIMEOUT;
4475 #endif
4476 }
4477
4478 static int
4479 iwm_power_mac_update_mode(struct iwm_softc *sc, struct iwm_node *in)
4480 {
4481 int err;
4482 int ba_enable;
4483 struct iwm_mac_power_cmd cmd;
4484
4485 memset(&cmd, 0, sizeof(cmd));
4486
4487 iwm_power_build_cmd(sc, in, &cmd);
4488
4489 err = iwm_send_cmd_pdu(sc, IWM_MAC_PM_POWER_TABLE, 0,
4490 sizeof(cmd), &cmd);
4491 if (err)
4492 return err;
4493
4494 ba_enable = !!(cmd.flags &
4495 htole16(IWM_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
4496 return iwm_update_beacon_abort(sc, in, ba_enable);
4497 }
4498
4499 static int
4500 iwm_power_update_device(struct iwm_softc *sc)
4501 {
4502 struct iwm_device_power_cmd cmd = {
4503 #ifdef notyet
4504 .flags = htole16(IWM_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK),
4505 #endif
4506 };
4507
4508 if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
4509 return 0;
4510
4511 cmd.flags |= htole16(IWM_DEVICE_POWER_FLAGS_CAM_MSK);
4512 DPRINTF(("Sending device power command with flags = 0x%X\n",
4513 cmd.flags));
4514
4515 return iwm_send_cmd_pdu(sc, IWM_POWER_TABLE_CMD, 0, sizeof(cmd), &cmd);
4516 }
4517
4518 #ifdef notyet
4519 static int
4520 iwm_enable_beacon_filter(struct iwm_softc *sc, struct iwm_node *in)
4521 {
4522 struct iwm_beacon_filter_cmd cmd = {
4523 IWM_BF_CMD_CONFIG_DEFAULTS,
4524 .bf_enable_beacon_filter = htole32(1),
4525 };
4526 int err;
4527
4528 iwm_beacon_filter_set_cqm_params(sc, in, &cmd);
4529 err = iwm_beacon_filter_send_cmd(sc, &cmd);
4530
4531 if (err == 0)
4532 sc->sc_bf.bf_enabled = 1;
4533
4534 return err;
4535 }
4536 #endif
4537
4538 static int
4539 iwm_disable_beacon_filter(struct iwm_softc *sc)
4540 {
4541 struct iwm_beacon_filter_cmd cmd;
4542 int err;
4543
4544 memset(&cmd, 0, sizeof(cmd));
4545 if ((sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_BF_UPDATED) == 0)
4546 return 0;
4547
4548 err = iwm_beacon_filter_send_cmd(sc, &cmd);
4549 if (err == 0)
4550 sc->sc_bf.bf_enabled = 0;
4551
4552 return err;
4553 }
4554
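/*
 * Add the BSS station to the firmware's station table, or update
 * an existing entry if 'update' is set.
 */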
4555 static int
4556 iwm_add_sta_cmd(struct iwm_softc *sc, struct iwm_node *in, int update)
4557 {
4558 struct iwm_add_sta_cmd_v7 add_sta_cmd;
4559 int err;
4560 uint32_t status;
4561
4562 memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
4563
4564 add_sta_cmd.sta_id = IWM_STATION_ID;
4565 add_sta_cmd.mac_id_n_color
4566 = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
4567 if (!update) {
4568 int ac;
4569 for (ac = 0; ac < WME_NUM_AC; ac++) {
4570 add_sta_cmd.tfd_queue_msk |=
4571 htole32(__BIT(iwm_ac_to_tx_fifo[ac]));
4572 }
4573 IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
4574 }
4575 add_sta_cmd.add_modify = update ? 1 : 0;
4576 add_sta_cmd.station_flags_msk
4577 |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
4578 add_sta_cmd.tid_disable_tx = htole16(0xffff);
4579 if (update)
4580 add_sta_cmd.modify_mask |= (IWM_STA_MODIFY_TID_DISABLE_TX);
4581
#ifndef IEEE80211_NO_HT
	if (in->in_ni.ni_flags & IEEE80211_NODE_HT) {
		struct ieee80211com *ic = &sc->sc_ic;

4584 add_sta_cmd.station_flags_msk
4585 |= htole32(IWM_STA_FLG_MAX_AGG_SIZE_MSK |
4586 IWM_STA_FLG_AGG_MPDU_DENS_MSK);
4587
4588 add_sta_cmd.station_flags
4589 |= htole32(IWM_STA_FLG_MAX_AGG_SIZE_64K);
4590 switch (ic->ic_ampdu_params & IEEE80211_AMPDU_PARAM_SS) {
4591 case IEEE80211_AMPDU_PARAM_SS_2:
4592 add_sta_cmd.station_flags
4593 |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_2US);
4594 break;
4595 case IEEE80211_AMPDU_PARAM_SS_4:
4596 add_sta_cmd.station_flags
4597 |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_4US);
4598 break;
4599 case IEEE80211_AMPDU_PARAM_SS_8:
4600 add_sta_cmd.station_flags
4601 |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_8US);
4602 break;
4603 case IEEE80211_AMPDU_PARAM_SS_16:
4604 add_sta_cmd.station_flags
4605 |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_16US);
4606 break;
4607 default:
4608 break;
4609 }
4610 }
4611 #endif
4612
4613 status = IWM_ADD_STA_SUCCESS;
4614 err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(add_sta_cmd),
4615 &add_sta_cmd, &status);
4616 if (err == 0 && status != IWM_ADD_STA_SUCCESS)
4617 err = EIO;
4618
4619 return err;
4620 }
4621
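/*
 * Enable the auxiliary Tx queue and add the auxiliary station,
 * which is used for Tx during scans (see the IWM_AUX_STA_ID
 * references in the scan commands below).
 */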
4622 static int
4623 iwm_add_aux_sta(struct iwm_softc *sc)
4624 {
4625 struct iwm_add_sta_cmd_v7 cmd;
4626 int err;
4627 uint32_t status;
4628
4629 err = iwm_enable_txq(sc, 0, IWM_AUX_QUEUE, IWM_TX_FIFO_MCAST);
4630 if (err)
4631 return err;
4632
4633 memset(&cmd, 0, sizeof(cmd));
4634 cmd.sta_id = IWM_AUX_STA_ID;
4635 cmd.mac_id_n_color =
4636 htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_MAC_INDEX_AUX, 0));
4637 cmd.tfd_queue_msk = htole32(1 << IWM_AUX_QUEUE);
4638 cmd.tid_disable_tx = htole16(0xffff);
4639
4640 status = IWM_ADD_STA_SUCCESS;
4641 err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(cmd), &cmd,
4642 &status);
4643 if (err == 0 && status != IWM_ADD_STA_SUCCESS)
4644 err = EIO;
4645
4646 return err;
4647 }
4648
4649 #define IWM_PLCP_QUIET_THRESH 1
4650 #define IWM_ACTIVE_QUIET_TIME 10
4651 #define LONG_OUT_TIME_PERIOD 600
4652 #define SHORT_OUT_TIME_PERIOD 200
4653 #define SUSPEND_TIME_PERIOD 100
4654
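/*
 * Build the Rx chain selection field for scan commands, forcing
 * all valid Rx antennas on.
 */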
4655 static uint16_t
4656 iwm_scan_rx_chain(struct iwm_softc *sc)
4657 {
4658 uint16_t rx_chain;
4659 uint8_t rx_ant;
4660
4661 rx_ant = iwm_fw_valid_rx_ant(sc);
4662 rx_chain = rx_ant << IWM_PHY_RX_CHAIN_VALID_POS;
4663 rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
4664 rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_SEL_POS;
4665 rx_chain |= 0x1 << IWM_PHY_RX_CHAIN_DRIVER_FORCE_POS;
4666 return htole16(rx_chain);
4667 }
4668
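/*
 * Select the Tx rate and antenna for scan probe requests: rotate
 * through the valid Tx antennas, and use 1 Mbps CCK on the 2GHz
 * band (unless CCK is disallowed) or 6 Mbps OFDM otherwise.
 */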
4669 static uint32_t
4670 iwm_scan_rate_n_flags(struct iwm_softc *sc, int flags, int no_cck)
4671 {
4672 uint32_t tx_ant;
4673 int i, ind;
4674
4675 for (i = 0, ind = sc->sc_scan_last_antenna;
4676 i < IWM_RATE_MCS_ANT_NUM; i++) {
4677 ind = (ind + 1) % IWM_RATE_MCS_ANT_NUM;
4678 if (iwm_fw_valid_tx_ant(sc) & (1 << ind)) {
4679 sc->sc_scan_last_antenna = ind;
4680 break;
4681 }
4682 }
4683 tx_ant = (1 << sc->sc_scan_last_antenna) << IWM_RATE_MCS_ANT_POS;
4684
4685 if ((flags & IEEE80211_CHAN_2GHZ) && !no_cck)
4686 return htole32(IWM_RATE_1M_PLCP | IWM_RATE_MCS_CCK_MSK |
4687 tx_ant);
4688 else
4689 return htole32(IWM_RATE_6M_PLCP | tx_ant);
4690 }
4691
4692 #ifdef notyet
4693 /*
4694 * If req->n_ssids > 0, it means we should do an active scan.
4695 * In case of active scan w/o directed scan, we receive a zero-length SSID
4696 * just to notify that this scan is active and not passive.
4697 * In order to notify the FW of the number of SSIDs we wish to scan (including
4698 * the zero-length one), we need to set the corresponding bits in chan->type,
 * one for each SSID, and set the active bit (first). The first SSID is
4700 * already included in the probe template, so we need to set only
4701 * req->n_ssids - 1 bits in addition to the first bit.
4702 */
4703 static uint16_t
4704 iwm_get_active_dwell(struct iwm_softc *sc, int flags, int n_ssids)
4705 {
4706 if (flags & IEEE80211_CHAN_2GHZ)
4707 return 30 + 3 * (n_ssids + 1);
4708 return 20 + 2 * (n_ssids + 1);
4709 }
4710
4711 static uint16_t
4712 iwm_get_passive_dwell(struct iwm_softc *sc, int flags)
4713 {
4714 return (flags & IEEE80211_CHAN_2GHZ) ? 100 + 20 : 100 + 10;
4715 }
4716 #endif
4717
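/*
 * Fill the channel list of an LMAC scan request with every valid
 * channel from net80211's channel table, up to the firmware's
 * scan channel limit.
 */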
4718 static uint8_t
4719 iwm_lmac_scan_fill_channels(struct iwm_softc *sc,
4720 struct iwm_scan_channel_cfg_lmac *chan, int n_ssids)
4721 {
4722 struct ieee80211com *ic = &sc->sc_ic;
4723 struct ieee80211_channel *c;
4724 uint8_t nchan;
4725
4726 for (nchan = 0, c = &ic->ic_channels[1];
4727 c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
4728 nchan < sc->sc_capa_n_scan_channels;
4729 c++) {
4730 if (c->ic_flags == 0)
4731 continue;
4732
4733 chan->channel_num = htole16(ieee80211_mhz2ieee(c->ic_freq, 0));
4734 chan->iter_count = htole16(1);
4735 chan->iter_interval = 0;
4736 chan->flags = htole32(IWM_UNIFIED_SCAN_CHANNEL_PARTIAL);
4737 #if 0 /* makes scanning while associated less useful */
4738 if (n_ssids != 0)
4739 chan->flags |= htole32(1 << 1); /* select SSID 0 */
4740 #endif
4741 chan++;
4742 nchan++;
4743 }
4744
4745 return nchan;
4746 }
4747
4748 static uint8_t
4749 iwm_umac_scan_fill_channels(struct iwm_softc *sc,
4750 struct iwm_scan_channel_cfg_umac *chan, int n_ssids)
4751 {
4752 struct ieee80211com *ic = &sc->sc_ic;
4753 struct ieee80211_channel *c;
4754 uint8_t nchan;
4755
4756 for (nchan = 0, c = &ic->ic_channels[1];
4757 c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
4758 nchan < sc->sc_capa_n_scan_channels;
4759 c++) {
4760 if (c->ic_flags == 0)
4761 continue;
4762 chan->channel_num = ieee80211_mhz2ieee(c->ic_freq, 0);
4763 chan->iter_count = 1;
4764 chan->iter_interval = htole16(0);
4765 #if 0 /* makes scanning while associated less useful */
4766 if (n_ssids != 0)
4767 chan->flags = htole32(1 << 0); /* select SSID 0 */
4768 #endif
4769 chan++;
4770 nchan++;
4771 }
4772
4773 return nchan;
4774 }
4775
4776 static int
4777 iwm_fill_probe_req(struct iwm_softc *sc, struct iwm_scan_probe_req *preq)
4778 {
4779 struct ieee80211com *ic = &sc->sc_ic;
4780 struct ieee80211_frame *wh = (struct ieee80211_frame *)preq->buf;
4781 struct ieee80211_rateset *rs;
4782 size_t remain = sizeof(preq->buf);
4783 uint8_t *frm, *pos;
4784
4785 memset(preq, 0, sizeof(*preq));
4786
4787 if (remain < sizeof(*wh) + 2 + ic->ic_des_esslen)
4788 return ENOBUFS;
4789
4790 /*
4791 * Build a probe request frame. Most of the following code is a
4792 * copy & paste of what is done in net80211.
4793 */
4794 wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
4795 IEEE80211_FC0_SUBTYPE_PROBE_REQ;
4796 wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
4797 IEEE80211_ADDR_COPY(wh->i_addr1, etherbroadcastaddr);
4798 IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_myaddr);
4799 IEEE80211_ADDR_COPY(wh->i_addr3, etherbroadcastaddr);
4800 *(uint16_t *)&wh->i_dur[0] = 0; /* filled by HW */
4801 *(uint16_t *)&wh->i_seq[0] = 0; /* filled by HW */
4802
4803 frm = (uint8_t *)(wh + 1);
4804 frm = ieee80211_add_ssid(frm, ic->ic_des_essid, ic->ic_des_esslen);
4805
4806 /* Tell the firmware where the MAC header is. */
4807 preq->mac_header.offset = 0;
4808 preq->mac_header.len = htole16(frm - (uint8_t *)wh);
4809 remain -= frm - (uint8_t *)wh;
4810
4811 /* Fill in 2GHz IEs and tell firmware where they are. */
4812 rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
4813 if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
4814 if (remain < 4 + rs->rs_nrates)
4815 return ENOBUFS;
4816 } else if (remain < 2 + rs->rs_nrates)
4817 return ENOBUFS;
4818 preq->band_data[0].offset = htole16(frm - (uint8_t *)wh);
4819 pos = frm;
4820 frm = ieee80211_add_rates(frm, rs);
4821 if (rs->rs_nrates > IEEE80211_RATE_SIZE)
4822 frm = ieee80211_add_xrates(frm, rs);
4823 preq->band_data[0].len = htole16(frm - pos);
4824 remain -= frm - pos;
4825
4826 if (isset(sc->sc_enabled_capa,
4827 IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)) {
4828 if (remain < 3)
4829 return ENOBUFS;
4830 *frm++ = IEEE80211_ELEMID_DSPARMS;
4831 *frm++ = 1;
4832 *frm++ = 0;
4833 remain -= 3;
4834 }
4835
4836 if (sc->sc_nvm.sku_cap_band_52GHz_enable) {
4837 /* Fill in 5GHz IEs. */
4838 rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
4839 if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
4840 if (remain < 4 + rs->rs_nrates)
4841 return ENOBUFS;
4842 } else if (remain < 2 + rs->rs_nrates)
4843 return ENOBUFS;
4844 preq->band_data[1].offset = htole16(frm - (uint8_t *)wh);
4845 pos = frm;
4846 frm = ieee80211_add_rates(frm, rs);
4847 if (rs->rs_nrates > IEEE80211_RATE_SIZE)
4848 frm = ieee80211_add_xrates(frm, rs);
4849 preq->band_data[1].len = htole16(frm - pos);
4850 remain -= frm - pos;
4851 }
4852
4853 #ifndef IEEE80211_NO_HT
4854 /* Send 11n IEs on both 2GHz and 5GHz bands. */
4855 preq->common_data.offset = htole16(frm - (uint8_t *)wh);
4856 pos = frm;
4857 if (ic->ic_flags & IEEE80211_F_HTON) {
4858 if (remain < 28)
4859 return ENOBUFS;
4860 frm = ieee80211_add_htcaps(frm, ic);
4861 /* XXX add WME info? */
4862 }
4863 #endif
4864
4865 preq->common_data.len = htole16(frm - pos);
4866
4867 return 0;
4868 }
4869
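/*
 * Issue a scan request using the older LMAC scan API.
 */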
4870 static int
4871 iwm_lmac_scan(struct iwm_softc *sc)
4872 {
4873 struct ieee80211com *ic = &sc->sc_ic;
4874 struct iwm_host_cmd hcmd = {
4875 .id = IWM_SCAN_OFFLOAD_REQUEST_CMD,
4876 .len = { 0, },
4877 .data = { NULL, },
4878 .flags = 0,
4879 };
4880 struct iwm_scan_req_lmac *req;
4881 size_t req_len;
4882 int err;
4883
4884 DPRINTF(("%s: %s\n", DEVNAME(sc), __func__));
4885
4886 req_len = sizeof(struct iwm_scan_req_lmac) +
4887 (sizeof(struct iwm_scan_channel_cfg_lmac) *
4888 sc->sc_capa_n_scan_channels) + sizeof(struct iwm_scan_probe_req);
4889 if (req_len > IWM_MAX_CMD_PAYLOAD_SIZE)
4890 return ENOMEM;
4891 req = kmem_zalloc(req_len, KM_SLEEP);
4892 if (req == NULL)
4893 return ENOMEM;
4894
4895 hcmd.len[0] = (uint16_t)req_len;
4896 hcmd.data[0] = (void *)req;
4897
4898 /* These timings correspond to iwlwifi's UNASSOC scan. */
4899 req->active_dwell = 10;
4900 req->passive_dwell = 110;
4901 req->fragmented_dwell = 44;
4902 req->extended_dwell = 90;
4903 req->max_out_time = 0;
4904 req->suspend_time = 0;
4905
4906 req->scan_prio = htole32(IWM_SCAN_PRIORITY_HIGH);
4907 req->rx_chain_select = iwm_scan_rx_chain(sc);
4908 req->iter_num = htole32(1);
4909 req->delay = 0;
4910
4911 req->scan_flags = htole32(IWM_LMAC_SCAN_FLAG_PASS_ALL |
4912 IWM_LMAC_SCAN_FLAG_ITER_COMPLETE |
4913 IWM_LMAC_SCAN_FLAG_EXTENDED_DWELL);
4914 if (ic->ic_des_esslen == 0)
4915 req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAG_PASSIVE);
4916 else
4917 req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAG_PRE_CONNECTION);
4918 if (isset(sc->sc_enabled_capa,
4919 IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
4920 req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAGS_RRM_ENABLED);
4921
4922 req->flags = htole32(IWM_PHY_BAND_24);
4923 if (sc->sc_nvm.sku_cap_band_52GHz_enable)
4924 req->flags |= htole32(IWM_PHY_BAND_5);
4925 req->filter_flags =
4926 htole32(IWM_MAC_FILTER_ACCEPT_GRP | IWM_MAC_FILTER_IN_BEACON);
4927
4928 /* Tx flags 2 GHz. */
4929 req->tx_cmd[0].tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
4930 IWM_TX_CMD_FLG_BT_DIS);
4931 req->tx_cmd[0].rate_n_flags =
4932 iwm_scan_rate_n_flags(sc, IEEE80211_CHAN_2GHZ, 1/*XXX*/);
4933 req->tx_cmd[0].sta_id = IWM_AUX_STA_ID;
4934
4935 /* Tx flags 5 GHz. */
4936 req->tx_cmd[1].tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
4937 IWM_TX_CMD_FLG_BT_DIS);
4938 req->tx_cmd[1].rate_n_flags =
4939 iwm_scan_rate_n_flags(sc, IEEE80211_CHAN_5GHZ, 1/*XXX*/);
4940 req->tx_cmd[1].sta_id = IWM_AUX_STA_ID;
4941
4942 /* Check if we're doing an active directed scan. */
4943 if (ic->ic_des_esslen != 0) {
4944 req->direct_scan[0].id = IEEE80211_ELEMID_SSID;
4945 req->direct_scan[0].len = ic->ic_des_esslen;
4946 memcpy(req->direct_scan[0].ssid, ic->ic_des_essid,
4947 ic->ic_des_esslen);
4948 }
4949
4950 req->n_channels = iwm_lmac_scan_fill_channels(sc,
4951 (struct iwm_scan_channel_cfg_lmac *)req->data,
4952 ic->ic_des_esslen != 0);
4953
4954 err = iwm_fill_probe_req(sc,
4955 (struct iwm_scan_probe_req *)(req->data +
4956 (sizeof(struct iwm_scan_channel_cfg_lmac) *
4957 sc->sc_capa_n_scan_channels)));
4958 if (err) {
4959 kmem_free(req, req_len);
4960 return err;
4961 }
4962
4963 /* Specify the scan plan: We'll do one iteration. */
4964 req->schedule[0].iterations = 1;
4965 req->schedule[0].full_scan_mul = 1;
4966
4967 /* Disable EBS. */
4968 req->channel_opt[0].non_ebs_ratio = 1;
4969 req->channel_opt[1].non_ebs_ratio = 1;
4970
4971 err = iwm_send_cmd(sc, &hcmd);
4972 kmem_free(req, req_len);
4973 return err;
4974 }
4975
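/*
 * Send the global scan configuration which must precede UMAC scan
 * requests: antenna chains, dwell times, legacy rates, our MAC
 * address, and the full channel list.
 */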
4976 static int
4977 iwm_config_umac_scan(struct iwm_softc *sc)
4978 {
4979 struct ieee80211com *ic = &sc->sc_ic;
4980 struct iwm_scan_config *scan_config;
4981 int err, nchan;
4982 size_t cmd_size;
4983 struct ieee80211_channel *c;
4984 struct iwm_host_cmd hcmd = {
4985 .id = iwm_cmd_id(IWM_SCAN_CFG_CMD, IWM_ALWAYS_LONG_GROUP, 0),
4986 .flags = 0,
4987 };
4988 static const uint32_t rates = (IWM_SCAN_CONFIG_RATE_1M |
4989 IWM_SCAN_CONFIG_RATE_2M | IWM_SCAN_CONFIG_RATE_5M |
4990 IWM_SCAN_CONFIG_RATE_11M | IWM_SCAN_CONFIG_RATE_6M |
4991 IWM_SCAN_CONFIG_RATE_9M | IWM_SCAN_CONFIG_RATE_12M |
4992 IWM_SCAN_CONFIG_RATE_18M | IWM_SCAN_CONFIG_RATE_24M |
4993 IWM_SCAN_CONFIG_RATE_36M | IWM_SCAN_CONFIG_RATE_48M |
4994 IWM_SCAN_CONFIG_RATE_54M);
4995
4996 cmd_size = sizeof(*scan_config) + sc->sc_capa_n_scan_channels;
4997
4998 scan_config = kmem_zalloc(cmd_size, KM_SLEEP);
4999 if (scan_config == NULL)
5000 return ENOMEM;
5001
5002 scan_config->tx_chains = htole32(iwm_fw_valid_tx_ant(sc));
5003 scan_config->rx_chains = htole32(iwm_fw_valid_rx_ant(sc));
5004 scan_config->legacy_rates = htole32(rates |
5005 IWM_SCAN_CONFIG_SUPPORTED_RATE(rates));
5006
5007 /* These timings correspond to iwlwifi's UNASSOC scan. */
5008 scan_config->dwell_active = 10;
5009 scan_config->dwell_passive = 110;
5010 scan_config->dwell_fragmented = 44;
5011 scan_config->dwell_extended = 90;
5012 scan_config->out_of_channel_time = htole32(0);
5013 scan_config->suspend_time = htole32(0);
5014
5015 IEEE80211_ADDR_COPY(scan_config->mac_addr, sc->sc_ic.ic_myaddr);
5016
5017 scan_config->bcast_sta_id = IWM_AUX_STA_ID;
5018 scan_config->channel_flags = IWM_CHANNEL_FLAG_EBS |
5019 IWM_CHANNEL_FLAG_ACCURATE_EBS | IWM_CHANNEL_FLAG_EBS_ADD |
5020 IWM_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE;
5021
5022 for (c = &ic->ic_channels[1], nchan = 0;
5023 c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
5024 nchan < sc->sc_capa_n_scan_channels; c++) {
5025 if (c->ic_flags == 0)
5026 continue;
5027 scan_config->channel_array[nchan++] =
5028 ieee80211_mhz2ieee(c->ic_freq, 0);
5029 }
5030
5031 scan_config->flags = htole32(IWM_SCAN_CONFIG_FLAG_ACTIVATE |
5032 IWM_SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
5033 IWM_SCAN_CONFIG_FLAG_SET_TX_CHAINS |
5034 IWM_SCAN_CONFIG_FLAG_SET_RX_CHAINS |
5035 IWM_SCAN_CONFIG_FLAG_SET_AUX_STA_ID |
5036 IWM_SCAN_CONFIG_FLAG_SET_ALL_TIMES |
5037 IWM_SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
5038 IWM_SCAN_CONFIG_FLAG_SET_MAC_ADDR |
	    IWM_SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS |
5040 IWM_SCAN_CONFIG_N_CHANNELS(nchan) |
5041 IWM_SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED);
5042
5043 hcmd.data[0] = scan_config;
5044 hcmd.len[0] = cmd_size;
5045
5046 err = iwm_send_cmd(sc, &hcmd);
5047 kmem_free(scan_config, cmd_size);
5048 return err;
5049 }
5050
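/*
 * Issue a scan request using the newer UMAC scan API.
 */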
5051 static int
5052 iwm_umac_scan(struct iwm_softc *sc)
5053 {
5054 struct ieee80211com *ic = &sc->sc_ic;
5055 struct iwm_host_cmd hcmd = {
5056 .id = iwm_cmd_id(IWM_SCAN_REQ_UMAC, IWM_ALWAYS_LONG_GROUP, 0),
5057 .len = { 0, },
5058 .data = { NULL, },
5059 .flags = 0,
5060 };
5061 struct iwm_scan_req_umac *req;
5062 struct iwm_scan_req_umac_tail *tail;
5063 size_t req_len;
5064 int err;
5065
5066 DPRINTF(("%s: %s\n", DEVNAME(sc), __func__));
5067
5068 req_len = sizeof(struct iwm_scan_req_umac) +
5069 (sizeof(struct iwm_scan_channel_cfg_umac) *
5070 sc->sc_capa_n_scan_channels) +
5071 sizeof(struct iwm_scan_req_umac_tail);
5072 if (req_len > IWM_MAX_CMD_PAYLOAD_SIZE)
5073 return ENOMEM;
5074 req = kmem_zalloc(req_len, KM_SLEEP);
5075 if (req == NULL)
5076 return ENOMEM;
5077
5078 hcmd.len[0] = (uint16_t)req_len;
5079 hcmd.data[0] = (void *)req;
5080
5081 /* These timings correspond to iwlwifi's UNASSOC scan. */
5082 req->active_dwell = 10;
5083 req->passive_dwell = 110;
5084 req->fragmented_dwell = 44;
5085 req->extended_dwell = 90;
5086 req->max_out_time = 0;
5087 req->suspend_time = 0;
5088
5089 req->scan_priority = htole32(IWM_SCAN_PRIORITY_HIGH);
5090 req->ooc_priority = htole32(IWM_SCAN_PRIORITY_HIGH);
5091
5092 req->n_channels = iwm_umac_scan_fill_channels(sc,
5093 (struct iwm_scan_channel_cfg_umac *)req->data,
5094 ic->ic_des_esslen != 0);
5095
5096 req->general_flags = htole32(IWM_UMAC_SCAN_GEN_FLAGS_PASS_ALL |
5097 IWM_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE |
5098 IWM_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL);
5099
5100 tail = (struct iwm_scan_req_umac_tail *)(req->data +
5101 sizeof(struct iwm_scan_channel_cfg_umac) *
5102 sc->sc_capa_n_scan_channels);
5103
5104 /* Check if we're doing an active directed scan. */
5105 if (ic->ic_des_esslen != 0) {
5106 tail->direct_scan[0].id = IEEE80211_ELEMID_SSID;
5107 tail->direct_scan[0].len = ic->ic_des_esslen;
5108 memcpy(tail->direct_scan[0].ssid, ic->ic_des_essid,
5109 ic->ic_des_esslen);
5110 req->general_flags |=
5111 htole32(IWM_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT);
5112 } else
5113 req->general_flags |= htole32(IWM_UMAC_SCAN_GEN_FLAGS_PASSIVE);
5114
5115 if (isset(sc->sc_enabled_capa,
5116 IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
5117 req->general_flags |=
5118 htole32(IWM_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED);
5119
5120 err = iwm_fill_probe_req(sc, &tail->preq);
5121 if (err) {
5122 kmem_free(req, req_len);
5123 return err;
5124 }
5125
5126 /* Specify the scan plan: We'll do one iteration. */
5127 tail->schedule[0].interval = 0;
5128 tail->schedule[0].iter_count = 1;
5129
5130 err = iwm_send_cmd(sc, &hcmd);
5131 kmem_free(req, req_len);
5132 return err;
5133 }
5134
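/*
 * Return the net80211 rate from the given rate set that matches
 * firmware rate index 'ridx' (preserving the BASIC flag), or 0 if
 * the rate set does not contain that rate.
 */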
5135 static uint8_t
5136 iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
5137 {
5138 int i;
5139 uint8_t rval;
5140
5141 for (i = 0; i < rs->rs_nrates; i++) {
5142 rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
5143 if (rval == iwm_rates[ridx].rate)
5144 return rs->rs_rates[i];
5145 }
5146 return 0;
5147 }
5148
5149 static void
5150 iwm_ack_rates(struct iwm_softc *sc, struct iwm_node *in, int *cck_rates,
5151 int *ofdm_rates)
5152 {
5153 struct ieee80211_node *ni = &in->in_ni;
5154 struct ieee80211_rateset *rs = &ni->ni_rates;
5155 int lowest_present_ofdm = 100;
5156 int lowest_present_cck = 100;
5157 uint8_t cck = 0;
5158 uint8_t ofdm = 0;
5159 int i;
5160
5161 if (ni->ni_chan == IEEE80211_CHAN_ANYC ||
5162 IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) {
5163 for (i = IWM_FIRST_CCK_RATE; i < IWM_FIRST_OFDM_RATE; i++) {
5164 if ((iwm_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
5165 continue;
5166 cck |= (1 << i);
5167 if (lowest_present_cck > i)
5168 lowest_present_cck = i;
5169 }
5170 }
5171 for (i = IWM_FIRST_OFDM_RATE; i <= IWM_LAST_NON_HT_RATE; i++) {
5172 if ((iwm_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
5173 continue;
5174 ofdm |= (1 << (i - IWM_FIRST_OFDM_RATE));
5175 if (lowest_present_ofdm > i)
5176 lowest_present_ofdm = i;
5177 }
5178
5179 /*
5180 * Now we've got the basic rates as bitmaps in the ofdm and cck
5181 * variables. This isn't sufficient though, as there might not
5182 * be all the right rates in the bitmap. E.g. if the only basic
5183 * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
5184 * and 6 Mbps because the 802.11-2007 standard says in 9.6:
5185 *
5186 * [...] a STA responding to a received frame shall transmit
5187 * its Control Response frame [...] at the highest rate in the
5188 * BSSBasicRateSet parameter that is less than or equal to the
5189 * rate of the immediately previous frame in the frame exchange
5190 * sequence ([...]) and that is of the same modulation class
5191 * ([...]) as the received frame. If no rate contained in the
5192 * BSSBasicRateSet parameter meets these conditions, then the
5193 * control frame sent in response to a received frame shall be
5194 * transmitted at the highest mandatory rate of the PHY that is
5195 * less than or equal to the rate of the received frame, and
5196 * that is of the same modulation class as the received frame.
5197 *
5198 * As a consequence, we need to add all mandatory rates that are
5199 * lower than all of the basic rates to these bitmaps.
5200 */
5201
5202 if (IWM_RATE_24M_INDEX < lowest_present_ofdm)
5203 ofdm |= IWM_RATE_BIT_MSK(24) >> IWM_FIRST_OFDM_RATE;
5204 if (IWM_RATE_12M_INDEX < lowest_present_ofdm)
5205 ofdm |= IWM_RATE_BIT_MSK(12) >> IWM_FIRST_OFDM_RATE;
5206 /* 6M already there or needed so always add */
5207 ofdm |= IWM_RATE_BIT_MSK(6) >> IWM_FIRST_OFDM_RATE;
5208
5209 /*
5210 * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
5211 * Note, however:
5212 * - if no CCK rates are basic, it must be ERP since there must
5213 * be some basic rates at all, so they're OFDM => ERP PHY
5214 * (or we're in 5 GHz, and the cck bitmap will never be used)
5215 * - if 11M is a basic rate, it must be ERP as well, so add 5.5M
5216 * - if 5.5M is basic, 1M and 2M are mandatory
5217 * - if 2M is basic, 1M is mandatory
5218 * - if 1M is basic, that's the only valid ACK rate.
5219 * As a consequence, it's not as complicated as it sounds, just add
5220 * any lower rates to the ACK rate bitmap.
5221 */
5222 if (IWM_RATE_11M_INDEX < lowest_present_cck)
5223 cck |= IWM_RATE_BIT_MSK(11) >> IWM_FIRST_CCK_RATE;
5224 if (IWM_RATE_5M_INDEX < lowest_present_cck)
5225 cck |= IWM_RATE_BIT_MSK(5) >> IWM_FIRST_CCK_RATE;
5226 if (IWM_RATE_2M_INDEX < lowest_present_cck)
5227 cck |= IWM_RATE_BIT_MSK(2) >> IWM_FIRST_CCK_RATE;
5228 /* 1M already there or needed so always add */
5229 cck |= IWM_RATE_BIT_MSK(1) >> IWM_FIRST_CCK_RATE;
5230
5231 *cck_rates = cck;
5232 *ofdm_rates = ofdm;
5233 }
5234
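/*
 * Fill in the fields of a MAC context command which are common to
 * add and modify operations: addresses, ACK rate bitmaps, preamble
 * and slot-time flags, and EDCA parameters.
 */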
5235 static void
5236 iwm_mac_ctxt_cmd_common(struct iwm_softc *sc, struct iwm_node *in,
5237 struct iwm_mac_ctx_cmd *cmd, uint32_t action, int assoc)
5238 {
5239 #define IWM_EXP2(x) ((1 << (x)) - 1) /* CWmin = 2^ECWmin - 1 */
5240 struct ieee80211com *ic = &sc->sc_ic;
5241 struct ieee80211_node *ni = ic->ic_bss;
5242 int cck_ack_rates, ofdm_ack_rates;
5243 int i;
5244
5245 cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
5246 in->in_color));
5247 cmd->action = htole32(action);
5248
5249 cmd->mac_type = htole32(IWM_FW_MAC_TYPE_BSS_STA);
5250 cmd->tsf_id = htole32(IWM_TSF_ID_A);
5251
5252 IEEE80211_ADDR_COPY(cmd->node_addr, ic->ic_myaddr);
5253 IEEE80211_ADDR_COPY(cmd->bssid_addr, ni->ni_bssid);
5254
5255 iwm_ack_rates(sc, in, &cck_ack_rates, &ofdm_ack_rates);
5256 cmd->cck_rates = htole32(cck_ack_rates);
5257 cmd->ofdm_rates = htole32(ofdm_ack_rates);
5258
5259 cmd->cck_short_preamble
5260 = htole32((ic->ic_flags & IEEE80211_F_SHPREAMBLE)
5261 ? IWM_MAC_FLG_SHORT_PREAMBLE : 0);
5262 cmd->short_slot
5263 = htole32((ic->ic_flags & IEEE80211_F_SHSLOT)
5264 ? IWM_MAC_FLG_SHORT_SLOT : 0);
5265
5266 for (i = 0; i < WME_NUM_AC; i++) {
5267 struct wmeParams *wmep = &ic->ic_wme.wme_params[i];
5268 int txf = iwm_ac_to_tx_fifo[i];
5269
5270 cmd->ac[txf].cw_min = htole16(IWM_EXP2(wmep->wmep_logcwmin));
5271 cmd->ac[txf].cw_max = htole16(IWM_EXP2(wmep->wmep_logcwmax));
5272 cmd->ac[txf].aifsn = wmep->wmep_aifsn;
5273 cmd->ac[txf].fifos_mask = (1 << txf);
5274 cmd->ac[txf].edca_txop = htole16(wmep->wmep_txopLimit * 32);
5275 }
5276 if (ni->ni_flags & IEEE80211_NODE_QOS)
5277 cmd->qos_flags |= htole32(IWM_MAC_QOS_FLG_UPDATE_EDCA);
5278
5279 #ifndef IEEE80211_NO_HT
5280 if (ni->ni_flags & IEEE80211_NODE_HT) {
5281 enum ieee80211_htprot htprot =
5282 (ni->ni_htop1 & IEEE80211_HTOP1_PROT_MASK);
5283 switch (htprot) {
5284 case IEEE80211_HTPROT_NONE:
5285 break;
5286 case IEEE80211_HTPROT_NONMEMBER:
5287 case IEEE80211_HTPROT_NONHT_MIXED:
5288 cmd->protection_flags |=
			    htole32(IWM_MAC_PROT_FLG_HT_PROT);
			/* FALLTHROUGH */
		case IEEE80211_HTPROT_20MHZ:
5291 cmd->protection_flags |=
5292 htole32(IWM_MAC_PROT_FLG_HT_PROT |
5293 IWM_MAC_PROT_FLG_FAT_PROT);
5294 break;
5295 default:
5296 break;
5297 }
5298
5299 cmd->qos_flags |= htole32(IWM_MAC_QOS_FLG_TGN);
5300 }
5301 #endif
5302
5303 if (ic->ic_flags & IEEE80211_F_USEPROT)
5304 cmd->protection_flags |= htole32(IWM_MAC_PROT_FLG_TGG_PROTECT);
5305
5306 cmd->filter_flags = htole32(IWM_MAC_FILTER_ACCEPT_GRP);
5307 #undef IWM_EXP2
5308 }
5309
5310 static void
5311 iwm_mac_ctxt_cmd_fill_sta(struct iwm_softc *sc, struct iwm_node *in,
5312 struct iwm_mac_data_sta *sta, int assoc)
5313 {
5314 struct ieee80211_node *ni = &in->in_ni;
5315 uint32_t dtim_off;
5316 uint64_t tsf;
5317
5318 dtim_off = ni->ni_dtim_count * ni->ni_intval * IEEE80211_DUR_TU;
5319 tsf = le64toh(ni->ni_tstamp.tsf);
5320
5321 sta->is_assoc = htole32(assoc);
5322 sta->dtim_time = htole32(ni->ni_rstamp + dtim_off);
5323 sta->dtim_tsf = htole64(tsf + dtim_off);
5324 sta->bi = htole32(ni->ni_intval);
5325 sta->bi_reciprocal = htole32(iwm_reciprocal(ni->ni_intval));
5326 sta->dtim_interval = htole32(ni->ni_intval * ni->ni_dtim_period);
5327 sta->dtim_reciprocal = htole32(iwm_reciprocal(sta->dtim_interval));
5328 sta->listen_interval = htole32(10);
5329 sta->assoc_id = htole32(ni->ni_associd);
5330 sta->assoc_beacon_arrive_time = htole32(ni->ni_rstamp);
5331 }
5332
5333 static int
5334 iwm_mac_ctxt_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action,
5335 int assoc)
5336 {
5337 struct ieee80211_node *ni = &in->in_ni;
5338 struct iwm_mac_ctx_cmd cmd;
5339
5340 memset(&cmd, 0, sizeof(cmd));
5341
5342 iwm_mac_ctxt_cmd_common(sc, in, &cmd, action, assoc);
5343
	/*
	 * Allow beacons to pass through as long as we are not associated
	 * or we do not have DTIM period information.
	 */
5346 if (!assoc || !ni->ni_associd || !ni->ni_dtim_period)
5347 cmd.filter_flags |= htole32(IWM_MAC_FILTER_IN_BEACON);
5348 else
5349 iwm_mac_ctxt_cmd_fill_sta(sc, in, &cmd.sta, assoc);
5350
5351 return iwm_send_cmd_pdu(sc, IWM_MAC_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
5352 }
5353
5354 #define IWM_MISSED_BEACONS_THRESHOLD 8
5355
5356 static void
5357 iwm_rx_missed_beacons_notif(struct iwm_softc *sc,
5358 struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
5359 {
5360 struct iwm_missed_beacons_notif *mb = (void *)pkt->data;
5361
5362 DPRINTF(("missed bcn mac_id=%u, consecutive=%u (%u, %u, %u)\n",
5363 le32toh(mb->mac_id),
5364 le32toh(mb->consec_missed_beacons),
5365 le32toh(mb->consec_missed_beacons_since_last_rx),
5366 le32toh(mb->num_recvd_beacons),
5367 le32toh(mb->num_expected_beacons)));
5368
5369 /*
5370 * TODO: the threshold should be adjusted based on latency conditions,
5371 * and/or in case of a CS flow on one of the other AP vifs.
5372 */
5373 if (le32toh(mb->consec_missed_beacons_since_last_rx) >
5374 IWM_MISSED_BEACONS_THRESHOLD)
5375 ieee80211_beacon_miss(&sc->sc_ic);
5376 }
5377
5378 static int
5379 iwm_update_quotas(struct iwm_softc *sc, struct iwm_node *in)
5380 {
5381 struct iwm_time_quota_cmd cmd;
5382 int i, idx, num_active_macs, quota, quota_rem;
5383 int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
5384 int n_ifs[IWM_MAX_BINDINGS] = {0, };
5385 uint16_t id;
5386
5387 memset(&cmd, 0, sizeof(cmd));
5388
5389 /* currently, PHY ID == binding ID */
5390 if (in) {
5391 id = in->in_phyctxt->id;
5392 KASSERT(id < IWM_MAX_BINDINGS);
5393 colors[id] = in->in_phyctxt->color;
5394
5395 if (1)
5396 n_ifs[id] = 1;
5397 }
5398
5399 /*
5400 * The FW's scheduling session consists of
5401 * IWM_MAX_QUOTA fragments. Divide these fragments
5402 * equally between all the bindings that require quota
5403 */
5404 num_active_macs = 0;
5405 for (i = 0; i < IWM_MAX_BINDINGS; i++) {
5406 cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
5407 num_active_macs += n_ifs[i];
5408 }
5409
5410 quota = 0;
5411 quota_rem = 0;
5412 if (num_active_macs) {
5413 quota = IWM_MAX_QUOTA / num_active_macs;
5414 quota_rem = IWM_MAX_QUOTA % num_active_macs;
5415 }
5416
5417 for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
5418 if (colors[i] < 0)
5419 continue;
5420
5421 cmd.quotas[idx].id_and_color =
5422 htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
5423
5424 if (n_ifs[i] <= 0) {
5425 cmd.quotas[idx].quota = htole32(0);
5426 cmd.quotas[idx].max_duration = htole32(0);
5427 } else {
5428 cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
5429 cmd.quotas[idx].max_duration = htole32(0);
5430 }
5431 idx++;
5432 }
5433
5434 /* Give the remainder of the session to the first binding */
5435 cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
5436
5437 return iwm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, 0, sizeof(cmd), &cmd);
5438 }
5439
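/*
 * Set up firmware state for the authentication step: configure
 * the PHY and MAC contexts, bind them, add the BSS station, and
 * protect the session with a time event to keep the firmware on
 * channel during association.
 */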
5440 static int
5441 iwm_auth(struct iwm_softc *sc)
5442 {
5443 struct ieee80211com *ic = &sc->sc_ic;
5444 struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
5445 uint32_t duration;
5446 int err;
5447
5448 err = iwm_sf_config(sc, IWM_SF_FULL_ON);
5449 if (err)
5450 return err;
5451
5452 err = iwm_allow_mcast(sc);
5453 if (err)
5454 return err;
5455
5456 sc->sc_phyctxt[0].channel = in->in_ni.ni_chan;
5457 err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[0], 1, 1,
5458 IWM_FW_CTXT_ACTION_MODIFY, 0);
5459 if (err)
5460 return err;
5461 in->in_phyctxt = &sc->sc_phyctxt[0];
5462
5463 err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD, 0);
5464 if (err) {
5465 aprint_error_dev(sc->sc_dev,
5466 "could not add MAC context (error %d)\n", err);
5467 return err;
5468 }
5469
5470 err = iwm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD);
5471 if (err)
5472 return err;
5473
5474 err = iwm_add_sta_cmd(sc, in, 0);
5475 if (err)
5476 return err;
5477
5478 err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 0);
5479 if (err) {
5480 aprint_error_dev(sc->sc_dev, "failed to update MAC\n");
5481 return err;
5482 }
5483
5484 /*
5485 * Prevent the FW from wandering off channel during association
5486 * by "protecting" the session with a time event.
5487 */
5488 if (in->in_ni.ni_intval)
5489 duration = in->in_ni.ni_intval * 2;
5490 else
5491 duration = IEEE80211_DUR_TU;
5492 iwm_protect_session(sc, in, duration, in->in_ni.ni_intval / 2);
5493 DELAY(100);
5494
5495 return 0;
5496 }
5497
5498 static int
5499 iwm_assoc(struct iwm_softc *sc)
5500 {
5501 struct ieee80211com *ic = &sc->sc_ic;
5502 struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
5503 int err;
5504
5505 err = iwm_add_sta_cmd(sc, in, 1);
5506 if (err)
5507 return err;
5508
5509 return 0;
5510 }
5511
5512 static struct ieee80211_node *
5513 iwm_node_alloc(struct ieee80211_node_table *nt)
5514 {
5515 return malloc(sizeof(struct iwm_node), M_80211_NODE, M_NOWAIT | M_ZERO);
5516 }
5517
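/*
 * Periodic (500ms) rate control callout: let AMRR choose a new
 * Tx rate based on recent Tx statistics.
 */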
5518 static void
5519 iwm_calib_timeout(void *arg)
5520 {
5521 struct iwm_softc *sc = arg;
5522 struct ieee80211com *ic = &sc->sc_ic;
5523 struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
5524 #ifndef IEEE80211_NO_HT
5525 struct ieee80211_node *ni = &in->in_ni;
5526 int otxrate;
5527 #endif
5528 int s;
5529
5530 s = splnet();
5531 if ((ic->ic_fixed_rate == -1
5532 #ifndef IEEE80211_NO_HT
5533 || ic->ic_fixed_mcs == -1
5534 #endif
5535 ) &&
5536 ic->ic_opmode == IEEE80211_M_STA && ic->ic_bss) {
5537 #ifndef IEEE80211_NO_HT
5538 if (ni->ni_flags & IEEE80211_NODE_HT)
5539 otxrate = ni->ni_txmcs;
5540 else
5541 otxrate = ni->ni_txrate;
5542 #endif
5543 ieee80211_amrr_choose(&sc->sc_amrr, &in->in_ni, &in->in_amn);
5544
5545 #ifndef IEEE80211_NO_HT
5546 /*
5547 * If AMRR has chosen a new TX rate we must update
5548 	 * the firmware's LQ rate table from process context.
5549 */
5550 if ((ni->ni_flags & IEEE80211_NODE_HT) &&
5551 otxrate != ni->ni_txmcs)
5552 softint_schedule(sc->setrates_task);
5553 else if (otxrate != ni->ni_txrate)
5554 softint_schedule(sc->setrates_task);
5555 #endif
5556 }
5557 splx(s);
5558
5559 callout_schedule(&sc->sc_calib_to, mstohz(500));
5560 }
5561
5562 #ifndef IEEE80211_NO_HT
5563 static void
5564 iwm_setrates_task(void *arg)
5565 {
5566 struct iwm_softc *sc = arg;
5567 struct ieee80211com *ic = &sc->sc_ic;
5568 struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
5569
5570 /* Update rates table based on new TX rate determined by AMRR. */
5571 iwm_setrates(in);
5572 }
5573
5574 static int
5575 iwm_setrates(struct iwm_node *in)
5576 {
5577 struct ieee80211_node *ni = &in->in_ni;
5578 struct ieee80211com *ic = ni->ni_ic;
5579 struct iwm_softc *sc = IC2IFP(ic)->if_softc;
5580 struct iwm_lq_cmd *lq = &in->in_lq;
5581 struct ieee80211_rateset *rs = &ni->ni_rates;
5582 int i, j, ridx, ridx_min, tab = 0;
5583 #ifndef IEEE80211_NO_HT
5584 int sgi_ok;
5585 #endif
5586 struct iwm_host_cmd cmd = {
5587 .id = IWM_LQ_CMD,
5588 .len = { sizeof(in->in_lq), },
5589 };
5590
5591 memset(lq, 0, sizeof(*lq));
5592 lq->sta_id = IWM_STATION_ID;
5593
5594 if (ic->ic_flags & IEEE80211_F_USEPROT)
5595 lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;
5596
5597 #ifndef IEEE80211_NO_HT
5598 sgi_ok = ((ni->ni_flags & IEEE80211_NODE_HT) &&
5599 (ni->ni_htcaps & IEEE80211_HTCAP_SGI20));
5600 #endif
5601
5603 /*
5604 * Fill the LQ rate selection table with legacy and/or HT rates
5605 * in descending order, i.e. with the node's current TX rate first.
5606 * In cases where throughput of an HT rate corresponds to a legacy
5607 * rate it makes no sense to add both. We rely on the fact that
5608 * iwm_rates is laid out such that equivalent HT/legacy rates share
5609 * the same IWM_RATE_*_INDEX value. Also, rates not applicable to
5610 * legacy/HT are assumed to be marked with an 'invalid' PLCP value.
5611 */
5612 j = 0;
5613 ridx_min = (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) ?
5614 IWM_RIDX_OFDM : IWM_RIDX_CCK;
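	/* CCK rates only exist on 2GHz, so start above them on 5GHz. */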
5615 for (ridx = IWM_RIDX_MAX; ridx >= ridx_min; ridx--) {
5616 if (j >= __arraycount(lq->rs_table))
5617 break;
5618 tab = 0;
5619 #ifndef IEEE80211_NO_HT
5620 if ((ni->ni_flags & IEEE80211_NODE_HT) &&
5621 iwm_rates[ridx].ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP) {
5622 for (i = ni->ni_txmcs; i >= 0; i--) {
5623 if (isclr(ni->ni_rxmcs, i))
5624 continue;
5625 if (ridx == iwm_mcs2ridx[i]) {
5626 tab = iwm_rates[ridx].ht_plcp;
5627 tab |= IWM_RATE_MCS_HT_MSK;
5628 if (sgi_ok)
5629 tab |= IWM_RATE_MCS_SGI_MSK;
5630 break;
5631 }
5632 }
5633 }
5634 #endif
5635 if (tab == 0 && iwm_rates[ridx].plcp != IWM_RATE_INVM_PLCP) {
5636 for (i = ni->ni_txrate; i >= 0; i--) {
5637 if (iwm_rates[ridx].rate == (rs->rs_rates[i] &
5638 IEEE80211_RATE_VAL)) {
5639 tab = iwm_rates[ridx].plcp;
5640 break;
5641 }
5642 }
5643 }
5644
5645 if (tab == 0)
5646 continue;
5647
5648 tab |= 1 << IWM_RATE_MCS_ANT_POS;
5649 if (IWM_RIDX_IS_CCK(ridx))
5650 tab |= IWM_RATE_MCS_CCK_MSK;
5651 DPRINTFN(2, ("station rate %d %x\n", i, tab));
5652 lq->rs_table[j++] = htole32(tab);
5653 }
5654
5655 /* Fill the rest with the lowest possible rate */
5656 i = j > 0 ? j - 1 : 0;
5657 while (j < __arraycount(lq->rs_table))
5658 lq->rs_table[j++] = lq->rs_table[i];
5659
5660 lq->single_stream_ant_msk = IWM_ANT_A;
5661 lq->dual_stream_ant_msk = IWM_ANT_AB;
5662
5663 lq->agg_time_limit = htole16(4000); /* 4ms */
5664 lq->agg_disable_start_th = 3;
5665 #ifdef notyet
5666 lq->agg_frame_cnt_limit = 0x3f;
5667 #else
5668 lq->agg_frame_cnt_limit = 1; /* tx agg disabled */
5669 #endif
5670
5671 cmd.data[0] = &in->in_lq;
5672 return iwm_send_cmd(sc, &cmd);
5673 }
5674 #endif
5675
5676 static int
5677 iwm_media_change(struct ifnet *ifp)
5678 {
5679 struct iwm_softc *sc = ifp->if_softc;
5680 struct ieee80211com *ic = &sc->sc_ic;
5681 uint8_t rate, ridx;
5682 int err;
5683
5684 err = ieee80211_media_change(ifp);
5685 if (err != ENETRESET)
5686 return err;
5687
5688 #ifndef IEEE80211_NO_HT
5689 if (ic->ic_fixed_mcs != -1)
5690 sc->sc_fixed_ridx = iwm_mcs2ridx[ic->ic_fixed_mcs];
5691 else
5692 #endif
5693 if (ic->ic_fixed_rate != -1) {
5694 rate = ic->ic_sup_rates[ic->ic_curmode].
5695 rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL;
5696 /* Map 802.11 rate to HW rate index. */
5697 for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
5698 if (iwm_rates[ridx].rate == rate)
5699 break;
5700 sc->sc_fixed_ridx = ridx;
5701 }
5702
5703 if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
5704 (IFF_UP | IFF_RUNNING)) {
5705 iwm_stop(ifp, 0);
5706 err = iwm_init(ifp);
5707 }
5708 return err;
5709 }
5710
5711 static void
5712 iwm_newstate_cb(struct work *wk, void *v)
5713 {
5714 struct iwm_softc *sc = v;
5715 struct ieee80211com *ic = &sc->sc_ic;
5716 struct iwm_newstate_state *iwmns = (struct iwm_newstate_state *)wk;
5717 enum ieee80211_state nstate = iwmns->ns_nstate;
5718 enum ieee80211_state ostate = ic->ic_state;
5719 int generation = iwmns->ns_generation;
5720 struct iwm_node *in;
5721 int arg = iwmns->ns_arg;
5722 int err;
5723
5724 kmem_free(iwmns, sizeof(*iwmns));
5725
5726 DPRINTF(("Prepare to switch state %d->%d\n", ostate, nstate));
5727 if (sc->sc_generation != generation) {
5728 DPRINTF(("newstate_cb: someone pulled the plug meanwhile\n"));
5729 if (nstate == IEEE80211_S_INIT) {
5730 DPRINTF(("newstate_cb: nstate == IEEE80211_S_INIT: calling sc_newstate()\n"));
5731 sc->sc_newstate(ic, nstate, arg);
5732 }
5733 return;
5734 }
5735
5736 DPRINTF(("switching state %s->%s\n", ieee80211_state_name[ostate],
5737 ieee80211_state_name[nstate]));
5738
5739 if (ostate == IEEE80211_S_SCAN && nstate != ostate)
5740 iwm_led_blink_stop(sc);
5741
5742 if (ostate == IEEE80211_S_RUN && nstate != ostate)
5743 iwm_disable_beacon_filter(sc);
5744
5745 /* Reset the device if moving out of AUTH, ASSOC, or RUN. */
5746 /* XXX Is there a way to switch states without a full reset? */
5747 if (ostate > IEEE80211_S_SCAN && nstate < ostate) {
5748 iwm_stop_device(sc);
5749 iwm_init_hw(sc);
5750
5751 /*
5752 		 * Upon receiving a deauth frame from the AP, the net80211
5753 		 * stack puts the driver into AUTH state. This transition
5754 		 * fails with this driver, so bring the FSM from RUN to SCAN.
5755 */
5756 if (nstate == IEEE80211_S_SCAN ||
5757 nstate == IEEE80211_S_AUTH ||
5758 nstate == IEEE80211_S_ASSOC) {
5759 DPRINTF(("Force transition to INIT; MGT=%d\n", arg));
5760 /* Always pass arg as -1 since we can't Tx right now. */
5761 sc->sc_newstate(ic, IEEE80211_S_INIT, -1);
5762 DPRINTF(("Going INIT->SCAN\n"));
5763 nstate = IEEE80211_S_SCAN;
5764 }
5765 }
5766
5767 switch (nstate) {
5768 case IEEE80211_S_INIT:
5769 break;
5770
5771 case IEEE80211_S_SCAN:
5772 if (ostate == nstate &&
5773 ISSET(sc->sc_flags, IWM_FLAG_SCANNING))
5774 return;
5775 if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
5776 err = iwm_umac_scan(sc);
5777 else
5778 err = iwm_lmac_scan(sc);
5779 if (err) {
5780 DPRINTF(("%s: could not initiate scan\n", DEVNAME(sc)));
5781 return;
5782 }
5783 SET(sc->sc_flags, IWM_FLAG_SCANNING);
5784 ic->ic_state = nstate;
5785 iwm_led_blink_start(sc);
5786 return;
5787
5788 case IEEE80211_S_AUTH:
5789 err = iwm_auth(sc);
5790 if (err) {
5791 DPRINTF(("%s: could not move to auth state: %d\n",
5792 DEVNAME(sc), err));
5793 return;
5794 }
5795 break;
5796
5797 case IEEE80211_S_ASSOC:
5798 err = iwm_assoc(sc);
5799 if (err) {
5800 DPRINTF(("%s: failed to associate: %d\n", DEVNAME(sc),
5801 err));
5802 return;
5803 }
5804 break;
5805
5806 case IEEE80211_S_RUN:
5807 in = (struct iwm_node *)ic->ic_bss;
5808
5809 /* We have now been assigned an associd by the AP. */
5810 err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 1);
5811 if (err) {
5812 aprint_error_dev(sc->sc_dev, "failed to update MAC\n");
5813 return;
5814 }
5815
5816 err = iwm_power_update_device(sc);
5817 if (err) {
5818 aprint_error_dev(sc->sc_dev,
5819 "could send power command (error %d)\n", err);
5820 return;
5821 }
5822 #ifdef notyet
5823 /*
5824 * Disabled for now. Default beacon filter settings
5825 * prevent net80211 from getting ERP and HT protection
5826 * updates from beacons.
5827 */
5828 err = iwm_enable_beacon_filter(sc, in);
5829 if (err) {
5830 aprint_error_dev(sc->sc_dev,
5831 "could not enable beacon filter\n");
5832 return;
5833 }
5834 #endif
5835 err = iwm_power_mac_update_mode(sc, in);
5836 if (err) {
5837 aprint_error_dev(sc->sc_dev,
5838 "could not update MAC power (error %d)\n", err);
5839 return;
5840 }
5841
5842 err = iwm_update_quotas(sc, in);
5843 if (err) {
5844 aprint_error_dev(sc->sc_dev,
5845 "could not update quotas (error %d)\n", err);
5846 return;
5847 }
5848
5849 ieee80211_amrr_node_init(&sc->sc_amrr, &in->in_amn);
5850
5851 /* Start at lowest available bit-rate, AMRR will raise. */
5852 in->in_ni.ni_txrate = 0;
5853 #ifndef IEEE80211_NO_HT
5854 in->in_ni.ni_txmcs = 0;
5855 iwm_setrates(in);
5856 #endif
5857
5858 callout_schedule(&sc->sc_calib_to, mstohz(500));
5859 iwm_led_enable(sc);
5860 break;
5861
5862 default:
5863 break;
5864 }
5865
5866 sc->sc_newstate(ic, nstate, arg);
5867 }
5868
5869 static int
5870 iwm_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
5871 {
5872 struct iwm_newstate_state *iwmns;
5873 struct ifnet *ifp = IC2IFP(ic);
5874 struct iwm_softc *sc = ifp->if_softc;
5875
5876 callout_stop(&sc->sc_calib_to);
5877
5878 iwmns = kmem_intr_alloc(sizeof(*iwmns), KM_NOSLEEP);
5879 if (!iwmns) {
5880 DPRINTF(("%s: allocating state cb mem failed\n", DEVNAME(sc)));
5881 return ENOMEM;
5882 }
5883
5884 iwmns->ns_nstate = nstate;
5885 iwmns->ns_arg = arg;
5886 iwmns->ns_generation = sc->sc_generation;
5887
5888 workqueue_enqueue(sc->sc_nswq, &iwmns->ns_wk, NULL);
5889
5890 return 0;
5891 }
5892
5893 static void
5894 iwm_endscan(struct iwm_softc *sc)
5895 {
5896 struct ieee80211com *ic = &sc->sc_ic;
5897
5898 DPRINTF(("scan ended\n"));
5899
5900 CLR(sc->sc_flags, IWM_FLAG_SCANNING);
5901 ieee80211_end_scan(ic);
5902 }
5903
5904 /*
5905 * Aging and idle timeouts for the different possible scenarios
5906  * in the default configuration.
5907 */
5908 static const uint32_t
5909 iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
5910 {
5911 htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
5912 htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
5913 },
5914 {
5915 htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF),
5916 htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)
5917 },
5918 {
5919 htole32(IWM_SF_MCAST_AGING_TIMER_DEF),
5920 htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)
5921 },
5922 {
5923 htole32(IWM_SF_BA_AGING_TIMER_DEF),
5924 htole32(IWM_SF_BA_IDLE_TIMER_DEF)
5925 },
5926 {
5927 htole32(IWM_SF_TX_RE_AGING_TIMER_DEF),
5928 htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)
5929 },
5930 };
5931
5932 /*
5933 * Aging and idle timeouts for the different possible scenarios
5934 * in single BSS MAC configuration.
5935 */
5936 static const uint32_t
5937 iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
5938 {
5939 htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER),
5940 htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)
5941 },
5942 {
5943 htole32(IWM_SF_AGG_UNICAST_AGING_TIMER),
5944 htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)
5945 },
5946 {
5947 htole32(IWM_SF_MCAST_AGING_TIMER),
5948 htole32(IWM_SF_MCAST_IDLE_TIMER)
5949 },
5950 {
5951 htole32(IWM_SF_BA_AGING_TIMER),
5952 htole32(IWM_SF_BA_IDLE_TIMER)
5953 },
5954 {
5955 htole32(IWM_SF_TX_RE_AGING_TIMER),
5956 htole32(IWM_SF_TX_RE_IDLE_TIMER)
5957 },
5958 };
5959
5960 static void
5961 iwm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
5962 struct ieee80211_node *ni)
5963 {
5964 int i, j, watermark;
5965
5966 sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN);
5967
5968 /*
5969 * If we are in association flow - check antenna configuration
5970 * capabilities of the AP station, and choose the watermark accordingly.
5971 */
5972 if (ni) {
5973 #ifndef IEEE80211_NO_HT
5974 if (ni->ni_flags & IEEE80211_NODE_HT) {
5975 #ifdef notyet
5976 if (ni->ni_rxmcs[2] != 0)
5977 watermark = IWM_SF_W_MARK_MIMO3;
5978 else if (ni->ni_rxmcs[1] != 0)
5979 watermark = IWM_SF_W_MARK_MIMO2;
5980 else
5981 #endif
5982 watermark = IWM_SF_W_MARK_SISO;
5983 } else
5984 #endif
5985 watermark = IWM_SF_W_MARK_LEGACY;
5986 /* default watermark value for unassociated mode. */
5987 } else {
5988 watermark = IWM_SF_W_MARK_MIMO2;
5989 }
5990 sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark);
5991
5992 for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) {
5993 for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) {
5994 sf_cmd->long_delay_timeouts[i][j] =
5995 htole32(IWM_SF_LONG_DELAY_AGING_TIMER);
5996 }
5997 }
5998
5999 if (ni) {
6000 memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
6001 sizeof(iwm_sf_full_timeout));
6002 } else {
6003 memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
6004 sizeof(iwm_sf_full_timeout_def));
6005 }
6006 }
6007
6008 static int
6009 iwm_sf_config(struct iwm_softc *sc, int new_state)
6010 {
6011 struct ieee80211com *ic = &sc->sc_ic;
6012 struct iwm_sf_cfg_cmd sf_cmd = {
6013 .state = htole32(IWM_SF_FULL_ON),
6014 };
6015
6016 if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
6017 sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF);
6018
6019 switch (new_state) {
6020 case IWM_SF_UNINIT:
6021 case IWM_SF_INIT_OFF:
6022 iwm_fill_sf_command(sc, &sf_cmd, NULL);
6023 break;
6024 case IWM_SF_FULL_ON:
6025 iwm_fill_sf_command(sc, &sf_cmd, ic->ic_bss);
6026 break;
6027 default:
6028 return EINVAL;
6029 }
6030
6031 return iwm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
6032 sizeof(sf_cmd), &sf_cmd);
6033 }
6034
6035 static int
6036 iwm_send_bt_init_conf(struct iwm_softc *sc)
6037 {
6038 struct iwm_bt_coex_cmd bt_cmd;
6039
6040 bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
6041 bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
6042
6043 return iwm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd), &bt_cmd);
6044 }
6045
6046 static int
6047 iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
6048 {
6049 struct iwm_mcc_update_cmd mcc_cmd;
6050 struct iwm_host_cmd hcmd = {
6051 .id = IWM_MCC_UPDATE_CMD,
6052 .flags = IWM_CMD_WANT_SKB,
6053 .data = { &mcc_cmd },
6054 };
6055 int resp_v2 = isset(sc->sc_enabled_capa,
6056 IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
6057 int err;
6058
6059 memset(&mcc_cmd, 0, sizeof(mcc_cmd));
6060 mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
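	/* e.g. the worldwide country code "ZZ" encodes as 0x5a5a */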
6061 if ((sc->sc_ucode_api & IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
6062 isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC))
6063 mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
6064 else
6065 mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
6066
6067 if (resp_v2)
6068 hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
6069 else
6070 hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
6071
6072 err = iwm_send_cmd(sc, &hcmd);
6073 if (err)
6074 return err;
6075
6076 iwm_free_resp(sc, &hcmd);
6077
6078 return 0;
6079 }
6080
6081 static void
6082 iwm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
6083 {
6084 struct iwm_host_cmd cmd = {
6085 .id = IWM_REPLY_THERMAL_MNG_BACKOFF,
6086 .len = { sizeof(uint32_t), },
6087 .data = { &backoff, },
6088 };
6089
6090 iwm_send_cmd(sc, &cmd);
6091 }
6092
6093 static int
6094 iwm_init_hw(struct iwm_softc *sc)
6095 {
6096 struct ieee80211com *ic = &sc->sc_ic;
6097 int err, i, ac;
6098
6099 err = iwm_preinit(sc);
6100 if (err)
6101 return err;
6102
6103 err = iwm_start_hw(sc);
6104 if (err) {
6105 aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
6106 return err;
6107 }
6108
6109 err = iwm_run_init_mvm_ucode(sc, 0);
6110 if (err)
6111 return err;
6112
6113 /* Should stop and start HW since INIT image just loaded. */
6114 iwm_stop_device(sc);
6115 err = iwm_start_hw(sc);
6116 if (err) {
6117 aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
6118 return err;
6119 }
6120
6121 /* Restart, this time with the regular firmware */
6122 err = iwm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_REGULAR);
6123 if (err) {
6124 aprint_error_dev(sc->sc_dev, "could not load firmware\n");
6125 goto err;
6126 }
6127
6128 err = iwm_send_bt_init_conf(sc);
6129 if (err) {
6130 aprint_error_dev(sc->sc_dev,
6131 "could not init bt coex (error %d)\n", err);
6132 goto err;
6133 }
6134
6135 err = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc));
6136 if (err) {
6137 aprint_error_dev(sc->sc_dev,
6138 "could not init tx ant config (error %d)\n", err);
6139 goto err;
6140 }
6141
6142 	/* Send phy db control command and then phy db calibration data. */
6143 err = iwm_send_phy_db_data(sc);
6144 if (err) {
6145 aprint_error_dev(sc->sc_dev,
6146 "could not init phy db (error %d)\n", err);
6147 goto err;
6148 }
6149
6150 err = iwm_send_phy_cfg_cmd(sc);
6151 if (err) {
6152 aprint_error_dev(sc->sc_dev,
6153 "could not send phy config (error %d)\n", err);
6154 goto err;
6155 }
6156
6157 /* Add auxiliary station for scanning */
6158 err = iwm_add_aux_sta(sc);
6159 if (err) {
6160 aprint_error_dev(sc->sc_dev,
6161 "could not add aux station (error %d)\n", err);
6162 goto err;
6163 }
6164
6165 for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
6166 /*
6167 * The channel used here isn't relevant as it's
6168 * going to be overwritten in the other flows.
6169 * For now use the first channel we have.
6170 */
6171 sc->sc_phyctxt[i].channel = &ic->ic_channels[1];
6172 err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[i], 1, 1,
6173 IWM_FW_CTXT_ACTION_ADD, 0);
6174 if (err) {
6175 aprint_error_dev(sc->sc_dev,
6176 "could not add phy context %d (error %d)\n",
6177 i, err);
6178 goto err;
6179 }
6180 }
6181
6182 /* Initialize tx backoffs to the minimum. */
6183 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
6184 iwm_tt_tx_backoff(sc, 0);
6185
6186 err = iwm_power_update_device(sc);
6187 if (err) {
6188 aprint_error_dev(sc->sc_dev,
6189 "could send power command (error %d)\n", err);
6190 goto err;
6191 }
6192
6193 if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_SUPPORT)) {
6194 err = iwm_send_update_mcc_cmd(sc, "ZZ");
6195 if (err) {
6196 aprint_error_dev(sc->sc_dev,
6197 "could not init LAR (error %d)\n", err);
6198 goto err;
6199 }
6200 }
6201
6202 if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
6203 err = iwm_config_umac_scan(sc);
6204 if (err) {
6205 aprint_error_dev(sc->sc_dev,
6206 "could not configure scan (error %d)\n", err);
6207 goto err;
6208 }
6209 }
6210
6211 for (ac = 0; ac < WME_NUM_AC; ac++) {
6212 err = iwm_enable_txq(sc, IWM_STATION_ID, ac,
6213 iwm_ac_to_tx_fifo[ac]);
6214 if (err) {
6215 aprint_error_dev(sc->sc_dev,
6216 "could not enable Tx queue %d (error %d)\n",
6217 			    ac, err);
6218 goto err;
6219 }
6220 }
6221
6222 err = iwm_disable_beacon_filter(sc);
6223 if (err) {
6224 aprint_error_dev(sc->sc_dev,
6225 "could not disable beacon filter (error %d)\n", err);
6226 goto err;
6227 }
6228
6229 return 0;
6230
6231 err:
6232 iwm_stop_device(sc);
6233 return err;
6234 }
6235
6236 /* Allow multicast from our BSSID. */
6237 static int
6238 iwm_allow_mcast(struct iwm_softc *sc)
6239 {
6240 struct ieee80211com *ic = &sc->sc_ic;
6241 struct ieee80211_node *ni = ic->ic_bss;
6242 struct iwm_mcast_filter_cmd *cmd;
6243 size_t size;
6244 int err;
6245
6246 size = roundup(sizeof(*cmd), 4);
6247 cmd = kmem_intr_zalloc(size, KM_NOSLEEP);
6248 if (cmd == NULL)
6249 return ENOMEM;
6250 cmd->filter_own = 1;
6251 cmd->port_id = 0;
6252 cmd->count = 0;
6253 cmd->pass_all = 1;
6254 IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
6255
6256 err = iwm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD, 0, size, cmd);
6257 kmem_intr_free(cmd, size);
6258 return err;
6259 }
6260
6261 static int
6262 iwm_init(struct ifnet *ifp)
6263 {
6264 struct iwm_softc *sc = ifp->if_softc;
6265 int err;
6266
6267 if (ISSET(sc->sc_flags, IWM_FLAG_HW_INITED))
6268 return 0;
6269
6270 sc->sc_generation++;
6271 sc->sc_flags &= ~IWM_FLAG_STOPPED;
6272
6273 err = iwm_init_hw(sc);
6274 if (err) {
6275 iwm_stop(ifp, 1);
6276 return err;
6277 }
6278
6279 ifp->if_flags &= ~IFF_OACTIVE;
6280 ifp->if_flags |= IFF_RUNNING;
6281
6282 ieee80211_begin_scan(&sc->sc_ic, 0);
6283 SET(sc->sc_flags, IWM_FLAG_HW_INITED);
6284
6285 return 0;
6286 }
6287
6288 static void
6289 iwm_start(struct ifnet *ifp)
6290 {
6291 struct iwm_softc *sc = ifp->if_softc;
6292 struct ieee80211com *ic = &sc->sc_ic;
6293 struct ieee80211_node *ni;
6294 struct ether_header *eh;
6295 struct mbuf *m;
6296 int ac;
6297
6298 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
6299 return;
6300
6301 for (;;) {
6302 /* why isn't this done per-queue? */
6303 if (sc->qfullmsk != 0) {
6304 ifp->if_flags |= IFF_OACTIVE;
6305 break;
6306 }
6307
6308 /* need to send management frames even if we're not RUNning */
6309 IF_DEQUEUE(&ic->ic_mgtq, m);
6310 if (m) {
6311 ni = M_GETCTX(m, struct ieee80211_node *);
6312 M_CLEARCTX(m);
6313 ac = WME_AC_BE;
6314 goto sendit;
6315 }
6316 if (ic->ic_state != IEEE80211_S_RUN) {
6317 break;
6318 }
6319
6320 IFQ_DEQUEUE(&ifp->if_snd, m);
6321 if (m == NULL)
6322 break;
6323
6324 if (m->m_len < sizeof (*eh) &&
6325 (m = m_pullup(m, sizeof (*eh))) == NULL) {
6326 ifp->if_oerrors++;
6327 continue;
6328 }
6329
6330 eh = mtod(m, struct ether_header *);
6331 ni = ieee80211_find_txnode(ic, eh->ether_dhost);
6332 if (ni == NULL) {
6333 m_freem(m);
6334 ifp->if_oerrors++;
6335 continue;
6336 }
6337
6338 /* classify mbuf so we can find which tx ring to use */
6339 if (ieee80211_classify(ic, m, ni) != 0) {
6340 m_freem(m);
6341 ieee80211_free_node(ni);
6342 ifp->if_oerrors++;
6343 continue;
6344 }
6345
6346 /* No QoS encapsulation for EAPOL frames. */
6347 ac = (eh->ether_type != htons(ETHERTYPE_PAE)) ?
6348 M_WME_GETAC(m) : WME_AC_BE;
6349
6350 bpf_mtap(ifp, m);
6351
6352 if ((m = ieee80211_encap(ic, m, ni)) == NULL) {
6353 ieee80211_free_node(ni);
6354 ifp->if_oerrors++;
6355 continue;
6356 }
6357
6358 sendit:
6359 bpf_mtap3(ic->ic_rawbpf, m);
6360
6361 if (iwm_tx(sc, m, ni, ac) != 0) {
6362 ieee80211_free_node(ni);
6363 ifp->if_oerrors++;
6364 continue;
6365 }
6366
6367 if (ifp->if_flags & IFF_UP) {
6368 sc->sc_tx_timer = 15;
6369 ifp->if_timer = 1;
6370 }
6371 }
6372 }
6373
6374 static void
6375 iwm_stop(struct ifnet *ifp, int disable)
6376 {
6377 struct iwm_softc *sc = ifp->if_softc;
6378 struct ieee80211com *ic = &sc->sc_ic;
6379 struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
6380
6381 sc->sc_flags &= ~IWM_FLAG_HW_INITED;
6382 sc->sc_flags |= IWM_FLAG_STOPPED;
6383 sc->sc_generation++;
6384 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
6385
6386 if (in)
6387 in->in_phyctxt = NULL;
6388
6389 if (ic->ic_state != IEEE80211_S_INIT)
6390 ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
6391
6392 callout_stop(&sc->sc_calib_to);
6393 iwm_led_blink_stop(sc);
6394 ifp->if_timer = sc->sc_tx_timer = 0;
6395 iwm_stop_device(sc);
6396 }
6397
6398 static void
6399 iwm_watchdog(struct ifnet *ifp)
6400 {
6401 struct iwm_softc *sc = ifp->if_softc;
6402
6403 ifp->if_timer = 0;
6404 if (sc->sc_tx_timer > 0) {
6405 if (--sc->sc_tx_timer == 0) {
6406 aprint_error_dev(sc->sc_dev, "device timeout\n");
6407 #ifdef IWM_DEBUG
6408 iwm_nic_error(sc);
6409 #endif
6410 ifp->if_flags &= ~IFF_UP;
6411 iwm_stop(ifp, 1);
6412 ifp->if_oerrors++;
6413 return;
6414 }
6415 ifp->if_timer = 1;
6416 }
6417
6418 ieee80211_watchdog(&sc->sc_ic);
6419 }
6420
6421 static int
6422 iwm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
6423 {
6424 struct iwm_softc *sc = ifp->if_softc;
6425 struct ieee80211com *ic = &sc->sc_ic;
6426 const struct sockaddr *sa;
6427 int s, err = 0;
6428
6429 s = splnet();
6430
6431 switch (cmd) {
6432 case SIOCSIFADDR:
6433 ifp->if_flags |= IFF_UP;
6434 /* FALLTHROUGH */
6435 case SIOCSIFFLAGS:
6436 err = ifioctl_common(ifp, cmd, data);
6437 if (err)
6438 break;
6439 if (ifp->if_flags & IFF_UP) {
6440 if (!(ifp->if_flags & IFF_RUNNING)) {
6441 err = iwm_init(ifp);
6442 if (err)
6443 ifp->if_flags &= ~IFF_UP;
6444 }
6445 } else {
6446 if (ifp->if_flags & IFF_RUNNING)
6447 iwm_stop(ifp, 1);
6448 }
6449 break;
6450
6451 case SIOCADDMULTI:
6452 case SIOCDELMULTI:
6453 if (!ISSET(sc->sc_flags, IWM_FLAG_ATTACHED)) {
6454 err = ENXIO;
6455 break;
6456 }
6457 sa = ifreq_getaddr(SIOCADDMULTI, (struct ifreq *)data);
6458 err = (cmd == SIOCADDMULTI) ?
6459 ether_addmulti(sa, &sc->sc_ec) :
6460 ether_delmulti(sa, &sc->sc_ec);
6461 if (err == ENETRESET)
6462 err = 0;
6463 break;
6464
6465 default:
6466 if (!ISSET(sc->sc_flags, IWM_FLAG_ATTACHED)) {
6467 err = ether_ioctl(ifp, cmd, data);
6468 break;
6469 }
6470 err = ieee80211_ioctl(ic, cmd, data);
6471 break;
6472 }
6473
6474 if (err == ENETRESET) {
6475 err = 0;
6476 if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
6477 (IFF_UP | IFF_RUNNING)) {
6478 iwm_stop(ifp, 0);
6479 err = iwm_init(ifp);
6480 }
6481 }
6482
6483 splx(s);
6484 return err;
6485 }
6486
6487 /*
6488 * Note: This structure is read from the device with IO accesses,
6489 * and the reading already does the endian conversion. As it is
6490 * read with uint32_t-sized accesses, any members with a different size
6491 * need to be ordered correctly though!
6492 */
6493 struct iwm_error_event_table {
6494 uint32_t valid; /* (nonzero) valid, (0) log is empty */
6495 uint32_t error_id; /* type of error */
6496 uint32_t trm_hw_status0; /* TRM HW status */
6497 uint32_t trm_hw_status1; /* TRM HW status */
6498 uint32_t blink2; /* branch link */
6499 uint32_t ilink1; /* interrupt link */
6500 uint32_t ilink2; /* interrupt link */
6501 uint32_t data1; /* error-specific data */
6502 uint32_t data2; /* error-specific data */
6503 uint32_t data3; /* error-specific data */
6504 uint32_t bcon_time; /* beacon timer */
6505 uint32_t tsf_low; /* network timestamp function timer */
6506 uint32_t tsf_hi; /* network timestamp function timer */
6507 uint32_t gp1; /* GP1 timer register */
6508 uint32_t gp2; /* GP2 timer register */
6509 uint32_t fw_rev_type; /* firmware revision type */
6510 uint32_t major; /* uCode version major */
6511 uint32_t minor; /* uCode version minor */
6512 uint32_t hw_ver; /* HW Silicon version */
6513 uint32_t brd_ver; /* HW board version */
6514 uint32_t log_pc; /* log program counter */
6515 uint32_t frame_ptr; /* frame pointer */
6516 uint32_t stack_ptr; /* stack pointer */
6517 uint32_t hcmd; /* last host command header */
6518 uint32_t isr0; /* isr status register LMPM_NIC_ISR0:
6519 * rxtx_flag */
6520 uint32_t isr1; /* isr status register LMPM_NIC_ISR1:
6521 * host_flag */
6522 uint32_t isr2; /* isr status register LMPM_NIC_ISR2:
6523 * enc_flag */
6524 uint32_t isr3; /* isr status register LMPM_NIC_ISR3:
6525 * time_flag */
6526 uint32_t isr4; /* isr status register LMPM_NIC_ISR4:
6527 * wico interrupt */
6528 uint32_t last_cmd_id; /* last HCMD id handled by the firmware */
6529 uint32_t wait_event; /* wait event() caller address */
6530 uint32_t l2p_control; /* L2pControlField */
6531 uint32_t l2p_duration; /* L2pDurationField */
6532 uint32_t l2p_mhvalid; /* L2pMhValidBits */
6533 uint32_t l2p_addr_match; /* L2pAddrMatchStat */
6534 uint32_t lmpm_pmg_sel; /* indicate which clocks are turned on
6535 * (LMPM_PMG_SEL) */
6536 	uint32_t u_timestamp;	/* date and time of the firmware
6537 				 * compilation */
6538 uint32_t flow_handler; /* FH read/write pointers, RX credit */
6539 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
6540
6541 /*
6542 * UMAC error struct - relevant starting from family 8000 chip.
6543 * Note: This structure is read from the device with IO accesses,
6544 * and the reading already does the endian conversion. As it is
6545 * read with u32-sized accesses, any members with a different size
6546 * need to be ordered correctly though!
6547 */
6548 struct iwm_umac_error_event_table {
6549 uint32_t valid; /* (nonzero) valid, (0) log is empty */
6550 uint32_t error_id; /* type of error */
6551 uint32_t blink1; /* branch link */
6552 uint32_t blink2; /* branch link */
6553 uint32_t ilink1; /* interrupt link */
6554 uint32_t ilink2; /* interrupt link */
6555 uint32_t data1; /* error-specific data */
6556 uint32_t data2; /* error-specific data */
6557 uint32_t data3; /* error-specific data */
6558 uint32_t umac_major;
6559 uint32_t umac_minor;
6560 uint32_t frame_pointer; /* core register 27 */
6561 uint32_t stack_pointer; /* core register 28 */
6562 uint32_t cmd_header; /* latest host cmd sent to UMAC */
6563 uint32_t nic_isr_pref; /* ISR status register */
6564 } __packed;
6565
6566 #define ERROR_START_OFFSET (1 * sizeof(uint32_t))
6567 #define ERROR_ELEM_SIZE (7 * sizeof(uint32_t))
6568
6569 #ifdef IWM_DEBUG
6570 static const struct {
6571 const char *name;
6572 uint8_t num;
6573 } advanced_lookup[] = {
6574 { "NMI_INTERRUPT_WDG", 0x34 },
6575 { "SYSASSERT", 0x35 },
6576 { "UCODE_VERSION_MISMATCH", 0x37 },
6577 { "BAD_COMMAND", 0x38 },
6578 { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
6579 { "FATAL_ERROR", 0x3D },
6580 { "NMI_TRM_HW_ERR", 0x46 },
6581 { "NMI_INTERRUPT_TRM", 0x4C },
6582 { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
6583 { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
6584 { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
6585 { "NMI_INTERRUPT_HOST", 0x66 },
6586 { "NMI_INTERRUPT_ACTION_PT", 0x7C },
6587 { "NMI_INTERRUPT_UNKNOWN", 0x84 },
6588 { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
6589 { "ADVANCED_SYSASSERT", 0 },
6590 };
6591
6592 static const char *
6593 iwm_desc_lookup(uint32_t num)
6594 {
6595 int i;
6596
6597 for (i = 0; i < __arraycount(advanced_lookup) - 1; i++)
6598 if (advanced_lookup[i].num == num)
6599 return advanced_lookup[i].name;
6600
6601 /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
6602 return advanced_lookup[i].name;
6603 }
6604
6605 /*
6606 * Support for dumping the error log seemed like a good idea ...
6607 * but it's mostly hex junk and the only sensible thing is the
6608 * hw/ucode revision (which we know anyway). Since it's here,
6609 * I'll just leave it in, just in case e.g. the Intel guys want to
6610 * help us decipher some "ADVANCED_SYSASSERT" later.
6611 */
6612 static void
6613 iwm_nic_error(struct iwm_softc *sc)
6614 {
6615 struct iwm_error_event_table t;
6616 uint32_t base;
6617
6618 aprint_error_dev(sc->sc_dev, "dumping device error log\n");
6619 base = sc->sc_uc.uc_error_event_table;
6620 if (base < 0x800000) {
6621 aprint_error_dev(sc->sc_dev,
6622 "Invalid error log pointer 0x%08x\n", base);
6623 return;
6624 }
6625
6626 if (iwm_read_mem(sc, base, &t, sizeof(t)/sizeof(uint32_t))) {
6627 aprint_error_dev(sc->sc_dev, "reading errlog failed\n");
6628 return;
6629 }
6630
6631 if (!t.valid) {
6632 aprint_error_dev(sc->sc_dev, "errlog not found, skipping\n");
6633 return;
6634 }
6635
6636 if (ERROR_START_OFFSET <= t.valid * ERROR_ELEM_SIZE) {
6637 aprint_error_dev(sc->sc_dev, "Start Error Log Dump:\n");
6638 aprint_error_dev(sc->sc_dev, "Status: 0x%x, count: %d\n",
6639 sc->sc_flags, t.valid);
6640 }
6641
6642 aprint_error_dev(sc->sc_dev, "%08X | %-28s\n", t.error_id,
6643 iwm_desc_lookup(t.error_id));
6644 aprint_error_dev(sc->sc_dev, "%08X | trm_hw_status0\n",
6645 t.trm_hw_status0);
6646 aprint_error_dev(sc->sc_dev, "%08X | trm_hw_status1\n",
6647 t.trm_hw_status1);
6648 aprint_error_dev(sc->sc_dev, "%08X | branchlink2\n", t.blink2);
6649 aprint_error_dev(sc->sc_dev, "%08X | interruptlink1\n", t.ilink1);
6650 aprint_error_dev(sc->sc_dev, "%08X | interruptlink2\n", t.ilink2);
6651 aprint_error_dev(sc->sc_dev, "%08X | data1\n", t.data1);
6652 aprint_error_dev(sc->sc_dev, "%08X | data2\n", t.data2);
6653 aprint_error_dev(sc->sc_dev, "%08X | data3\n", t.data3);
6654 aprint_error_dev(sc->sc_dev, "%08X | beacon time\n", t.bcon_time);
6655 aprint_error_dev(sc->sc_dev, "%08X | tsf low\n", t.tsf_low);
6656 aprint_error_dev(sc->sc_dev, "%08X | tsf hi\n", t.tsf_hi);
6657 aprint_error_dev(sc->sc_dev, "%08X | time gp1\n", t.gp1);
6658 aprint_error_dev(sc->sc_dev, "%08X | time gp2\n", t.gp2);
6659 aprint_error_dev(sc->sc_dev, "%08X | uCode revision type\n",
6660 t.fw_rev_type);
6661 aprint_error_dev(sc->sc_dev, "%08X | uCode version major\n",
6662 t.major);
6663 aprint_error_dev(sc->sc_dev, "%08X | uCode version minor\n",
6664 t.minor);
6665 aprint_error_dev(sc->sc_dev, "%08X | hw version\n", t.hw_ver);
6666 aprint_error_dev(sc->sc_dev, "%08X | board version\n", t.brd_ver);
6667 aprint_error_dev(sc->sc_dev, "%08X | hcmd\n", t.hcmd);
6668 aprint_error_dev(sc->sc_dev, "%08X | isr0\n", t.isr0);
6669 aprint_error_dev(sc->sc_dev, "%08X | isr1\n", t.isr1);
6670 aprint_error_dev(sc->sc_dev, "%08X | isr2\n", t.isr2);
6671 aprint_error_dev(sc->sc_dev, "%08X | isr3\n", t.isr3);
6672 aprint_error_dev(sc->sc_dev, "%08X | isr4\n", t.isr4);
6673 aprint_error_dev(sc->sc_dev, "%08X | last cmd Id\n", t.last_cmd_id);
6674 aprint_error_dev(sc->sc_dev, "%08X | wait_event\n", t.wait_event);
6675 aprint_error_dev(sc->sc_dev, "%08X | l2p_control\n", t.l2p_control);
6676 aprint_error_dev(sc->sc_dev, "%08X | l2p_duration\n", t.l2p_duration);
6677 aprint_error_dev(sc->sc_dev, "%08X | l2p_mhvalid\n", t.l2p_mhvalid);
6678 aprint_error_dev(sc->sc_dev, "%08X | l2p_addr_match\n",
6679 t.l2p_addr_match);
6680 aprint_error_dev(sc->sc_dev, "%08X | lmpm_pmg_sel\n", t.lmpm_pmg_sel);
6681 aprint_error_dev(sc->sc_dev, "%08X | timestamp\n", t.u_timestamp);
6682 aprint_error_dev(sc->sc_dev, "%08X | flow_handler\n", t.flow_handler);
6683
6684 if (sc->sc_uc.uc_umac_error_event_table)
6685 iwm_nic_umac_error(sc);
6686 }
6687
6688 static void
6689 iwm_nic_umac_error(struct iwm_softc *sc)
6690 {
6691 struct iwm_umac_error_event_table t;
6692 uint32_t base;
6693
6694 base = sc->sc_uc.uc_umac_error_event_table;
6695
6696 if (base < 0x800000) {
6697 aprint_error_dev(sc->sc_dev,
6698 "Invalid error log pointer 0x%08x\n", base);
6699 return;
6700 }
6701
6702 if (iwm_read_mem(sc, base, &t, sizeof(t)/sizeof(uint32_t))) {
6703 aprint_error_dev(sc->sc_dev, "reading errlog failed\n");
6704 return;
6705 }
6706
6707 if (ERROR_START_OFFSET <= t.valid * ERROR_ELEM_SIZE) {
6708 aprint_error_dev(sc->sc_dev, "Start UMAC Error Log Dump:\n");
6709 aprint_error_dev(sc->sc_dev, "Status: 0x%x, count: %d\n",
6710 sc->sc_flags, t.valid);
6711 }
6712
6713 aprint_error_dev(sc->sc_dev, "0x%08X | %s\n", t.error_id,
6714 iwm_desc_lookup(t.error_id));
6715 aprint_error_dev(sc->sc_dev, "0x%08X | umac branchlink1\n", t.blink1);
6716 aprint_error_dev(sc->sc_dev, "0x%08X | umac branchlink2\n", t.blink2);
6717 aprint_error_dev(sc->sc_dev, "0x%08X | umac interruptlink1\n",
6718 t.ilink1);
6719 aprint_error_dev(sc->sc_dev, "0x%08X | umac interruptlink2\n",
6720 t.ilink2);
6721 aprint_error_dev(sc->sc_dev, "0x%08X | umac data1\n", t.data1);
6722 aprint_error_dev(sc->sc_dev, "0x%08X | umac data2\n", t.data2);
6723 aprint_error_dev(sc->sc_dev, "0x%08X | umac data3\n", t.data3);
6724 aprint_error_dev(sc->sc_dev, "0x%08X | umac major\n", t.umac_major);
6725 aprint_error_dev(sc->sc_dev, "0x%08X | umac minor\n", t.umac_minor);
6726 aprint_error_dev(sc->sc_dev, "0x%08X | frame pointer\n",
6727 t.frame_pointer);
6728 aprint_error_dev(sc->sc_dev, "0x%08X | stack pointer\n",
6729 t.stack_pointer);
6730 aprint_error_dev(sc->sc_dev, "0x%08X | last host cmd\n", t.cmd_header);
6731 aprint_error_dev(sc->sc_dev, "0x%08X | isr status reg\n",
6732 t.nic_isr_pref);
6733 }
6734 #endif
6735
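/*
 * Helpers for reading notification payloads out of an RX buffer:
 * sync the payload that follows the packet header for CPU access,
 * then point at the first byte past that header.
 */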
6736 #define SYNC_RESP_STRUCT(_var_, _pkt_) \
6737 do { \
6738 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)), \
6739 sizeof(*(_var_)), BUS_DMASYNC_POSTREAD); \
6740 _var_ = (void *)((_pkt_)+1); \
6741 } while (/*CONSTCOND*/0)
6742
6743 #define SYNC_RESP_PTR(_ptr_, _len_, _pkt_) \
6744 do { \
6745 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)), \
6746 	    (_len_), BUS_DMASYNC_POSTREAD);				\
6747 _ptr_ = (void *)((_pkt_)+1); \
6748 } while (/*CONSTCOND*/0)
6749
6750 #define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % IWM_RX_RING_COUNT);
6751
6752 static void
6753 iwm_notif_intr(struct iwm_softc *sc)
6754 {
6755 uint16_t hw;
6756
6757 bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
6758 0, sc->rxq.stat_dma.size, BUS_DMASYNC_POSTREAD);
6759
6760 hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
6761 while (sc->rxq.cur != hw) {
6762 struct iwm_rx_data *data = &sc->rxq.data[sc->rxq.cur];
6763 struct iwm_rx_packet *pkt;
6764 struct iwm_cmd_response *cresp;
6765 int orig_qid, qid, idx, code;
6766
6767 bus_dmamap_sync(sc->sc_dmat, data->map, 0, sizeof(*pkt),
6768 BUS_DMASYNC_POSTREAD);
6769 pkt = mtod(data->m, struct iwm_rx_packet *);
6770
6771 orig_qid = pkt->hdr.qid;
6772 qid = orig_qid & ~0x80;
6773 idx = pkt->hdr.idx;
6774
6775 code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
6776
6777 /*
6778 * randomly get these from the firmware, no idea why.
6779 * they at least seem harmless, so just ignore them for now
6780 */
6781 if (__predict_false((pkt->hdr.code == 0 && qid == 0 && idx == 0)
6782 || pkt->len_n_flags == htole32(0x55550000))) {
6783 ADVANCE_RXQ(sc);
6784 continue;
6785 }
6786
6787 switch (code) {
6788 case IWM_REPLY_RX_PHY_CMD:
6789 iwm_rx_rx_phy_cmd(sc, pkt, data);
6790 break;
6791
6792 case IWM_REPLY_RX_MPDU_CMD:
6793 iwm_rx_rx_mpdu(sc, pkt, data);
6794 break;
6795
6796 case IWM_TX_CMD:
6797 iwm_rx_tx_cmd(sc, pkt, data);
6798 break;
6799
6800 case IWM_MISSED_BEACONS_NOTIFICATION:
6801 iwm_rx_missed_beacons_notif(sc, pkt, data);
6802 break;
6803
6804 case IWM_MFUART_LOAD_NOTIFICATION:
6805 break;
6806
6807 case IWM_ALIVE: {
6808 struct iwm_alive_resp_v1 *resp1;
6809 struct iwm_alive_resp_v2 *resp2;
6810 struct iwm_alive_resp_v3 *resp3;
6811
6812 if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp1)) {
6813 SYNC_RESP_STRUCT(resp1, pkt);
6814 sc->sc_uc.uc_error_event_table
6815 = le32toh(resp1->error_event_table_ptr);
6816 sc->sc_uc.uc_log_event_table
6817 = le32toh(resp1->log_event_table_ptr);
6818 sc->sched_base = le32toh(resp1->scd_base_ptr);
6819 if (resp1->status == IWM_ALIVE_STATUS_OK)
6820 sc->sc_uc.uc_ok = 1;
6821 else
6822 sc->sc_uc.uc_ok = 0;
6823 }
6824 if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp2)) {
6825 SYNC_RESP_STRUCT(resp2, pkt);
6826 sc->sc_uc.uc_error_event_table
6827 = le32toh(resp2->error_event_table_ptr);
6828 sc->sc_uc.uc_log_event_table
6829 = le32toh(resp2->log_event_table_ptr);
6830 sc->sched_base = le32toh(resp2->scd_base_ptr);
6831 sc->sc_uc.uc_umac_error_event_table
6832 = le32toh(resp2->error_info_addr);
6833 if (resp2->status == IWM_ALIVE_STATUS_OK)
6834 sc->sc_uc.uc_ok = 1;
6835 else
6836 sc->sc_uc.uc_ok = 0;
6837 }
6838 if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp3)) {
6839 SYNC_RESP_STRUCT(resp3, pkt);
6840 sc->sc_uc.uc_error_event_table
6841 = le32toh(resp3->error_event_table_ptr);
6842 sc->sc_uc.uc_log_event_table
6843 = le32toh(resp3->log_event_table_ptr);
6844 sc->sched_base = le32toh(resp3->scd_base_ptr);
6845 sc->sc_uc.uc_umac_error_event_table
6846 = le32toh(resp3->error_info_addr);
6847 if (resp3->status == IWM_ALIVE_STATUS_OK)
6848 sc->sc_uc.uc_ok = 1;
6849 else
6850 sc->sc_uc.uc_ok = 0;
6851 }
6852
6853 sc->sc_uc.uc_intr = 1;
6854 wakeup(&sc->sc_uc);
6855 break;
6856 }
6857
6858 case IWM_CALIB_RES_NOTIF_PHY_DB: {
6859 struct iwm_calib_res_notif_phy_db *phy_db_notif;
6860 SYNC_RESP_STRUCT(phy_db_notif, pkt);
6861 uint16_t size = le16toh(phy_db_notif->length);
6862 bus_dmamap_sync(sc->sc_dmat, data->map,
6863 sizeof(*pkt) + sizeof(*phy_db_notif),
6864 size, BUS_DMASYNC_POSTREAD);
6865 iwm_phy_db_set_section(sc, phy_db_notif, size);
6866 break;
6867 }
6868
6869 case IWM_STATISTICS_NOTIFICATION: {
6870 struct iwm_notif_statistics *stats;
6871 SYNC_RESP_STRUCT(stats, pkt);
6872 memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
6873 sc->sc_noise = iwm_get_noise(&stats->rx.general);
6874 break;
6875 }
6876
6877 case IWM_NVM_ACCESS_CMD:
6878 case IWM_MCC_UPDATE_CMD:
6879 if (sc->sc_wantresp == ((qid << 16) | idx)) {
6880 bus_dmamap_sync(sc->sc_dmat, data->map, 0,
6881 sizeof(sc->sc_cmd_resp),
6882 BUS_DMASYNC_POSTREAD);
6883 memcpy(sc->sc_cmd_resp,
6884 pkt, sizeof(sc->sc_cmd_resp));
6885 }
6886 break;
6887
6888 case IWM_MCC_CHUB_UPDATE_CMD: {
6889 struct iwm_mcc_chub_notif *notif;
6890 SYNC_RESP_STRUCT(notif, pkt);
6891
6892 sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
6893 sc->sc_fw_mcc[1] = notif->mcc & 0xff;
6894 sc->sc_fw_mcc[2] = '\0';
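			/* e.g. an mcc value of 0x5553 decodes to "US" */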
6895 break;
6896 }
6897
6898 case IWM_DTS_MEASUREMENT_NOTIFICATION:
6899 break;
6900
6901 case IWM_PHY_CONFIGURATION_CMD:
6902 case IWM_TX_ANT_CONFIGURATION_CMD:
6903 case IWM_ADD_STA:
6904 case IWM_MAC_CONTEXT_CMD:
6905 case IWM_REPLY_SF_CFG_CMD:
6906 case IWM_POWER_TABLE_CMD:
6907 case IWM_PHY_CONTEXT_CMD:
6908 case IWM_BINDING_CONTEXT_CMD:
6909 case IWM_TIME_EVENT_CMD:
6910 case IWM_SCAN_REQUEST_CMD:
6911 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
6912 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
6913 case IWM_SCAN_OFFLOAD_REQUEST_CMD:
6914 case IWM_REPLY_BEACON_FILTERING_CMD:
6915 case IWM_MAC_PM_POWER_TABLE:
6916 case IWM_TIME_QUOTA_CMD:
6917 case IWM_REMOVE_STA:
6918 case IWM_TXPATH_FLUSH:
6919 case IWM_LQ_CMD:
6920 case IWM_BT_CONFIG:
6921 case IWM_REPLY_THERMAL_MNG_BACKOFF:
6922 SYNC_RESP_STRUCT(cresp, pkt);
6923 if (sc->sc_wantresp == ((qid << 16) | idx)) {
6924 memcpy(sc->sc_cmd_resp,
6925 pkt, sizeof(*pkt) + sizeof(*cresp));
6926 }
6927 break;
6928
6929 /* ignore */
6930 case 0x6c: /* IWM_PHY_DB_CMD */
6931 break;
6932
6933 case IWM_INIT_COMPLETE_NOTIF:
6934 sc->sc_init_complete = 1;
6935 wakeup(&sc->sc_init_complete);
6936 break;
6937
6938 case IWM_SCAN_OFFLOAD_COMPLETE: {
6939 struct iwm_periodic_scan_complete *notif;
6940 SYNC_RESP_STRUCT(notif, pkt);
6941 break;
6942 }
6943
6944 case IWM_SCAN_ITERATION_COMPLETE: {
6945 struct iwm_lmac_scan_complete_notif *notif;
6946 SYNC_RESP_STRUCT(notif, pkt);
6947 iwm_endscan(sc);
6948 break;
6949 }
6950
6951 case IWM_SCAN_COMPLETE_UMAC: {
6952 struct iwm_umac_scan_complete *notif;
6953 SYNC_RESP_STRUCT(notif, pkt);
6954 iwm_endscan(sc);
6955 break;
6956 }
6957
6958 case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
6959 struct iwm_umac_scan_iter_complete_notif *notif;
6960 SYNC_RESP_STRUCT(notif, pkt);
6961 iwm_endscan(sc);
6962 break;
6963 }
6964
6965 case IWM_REPLY_ERROR: {
6966 struct iwm_error_resp *resp;
6967 SYNC_RESP_STRUCT(resp, pkt);
6968 aprint_error_dev(sc->sc_dev,
6969 "firmware error 0x%x, cmd 0x%x\n",
6970 le32toh(resp->error_type), resp->cmd_id);
6971 break;
6972 }
6973
6974 case IWM_TIME_EVENT_NOTIFICATION: {
6975 struct iwm_time_event_notif *notif;
6976 SYNC_RESP_STRUCT(notif, pkt);
6977 break;
6978 }
6979
6980 case IWM_MCAST_FILTER_CMD:
6981 break;
6982
6983 case IWM_SCD_QUEUE_CFG: {
6984 struct iwm_scd_txq_cfg_rsp *rsp;
6985 SYNC_RESP_STRUCT(rsp, pkt);
6986 break;
6987 }
6988
6989 default:
6990 aprint_error_dev(sc->sc_dev,
6991 "unhandled firmware response 0x%x 0x%x/0x%x "
6992 "rx ring %d[%d]\n",
6993 code, pkt->hdr.code, pkt->len_n_flags, qid, idx);
6994 break;
6995 }
6996
6997 /*
6998 * uCode sets bit 0x80 when it originates the notification,
6999 * i.e. when the notification is not a direct response to a
7000 * command sent by the driver.
7001 * For example, uCode issues IWM_REPLY_RX when it sends a
7002 * received frame to the driver.
7003 */
7004 if (!(orig_qid & (1 << 7))) {
7005 iwm_cmd_done(sc, qid, idx);
7006 }
7007
7008 ADVANCE_RXQ(sc);
7009 }
7010
7011 IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
7012 IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
7013
7014 /*
7015 * Seems like the hardware gets upset unless we align the write by 8??
7016 */
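	/* e.g. a ring index of 213 is written back as 208 (213 & ~7). */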
7017 hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
7018 IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
7019 }
7020
7021 static void
7022 iwm_softintr(void *arg)
7023 {
7024 struct iwm_softc *sc = arg;
7025 struct ifnet *ifp = IC2IFP(&sc->sc_ic);
7026 uint32_t r1;
7027 int isperiodic = 0;
7028
7029 r1 = atomic_swap_32(&sc->sc_soft_flags, 0);
7030
7031 restart:
7032 if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
7033 #ifdef IWM_DEBUG
7034 int i;
7035
7036 iwm_nic_error(sc);
7037
7038 /* Dump driver status (TX and RX rings) while we're here. */
7039 DPRINTF(("driver status:\n"));
7040 for (i = 0; i < IWM_MAX_QUEUES; i++) {
7041 struct iwm_tx_ring *ring = &sc->txq[i];
7042 DPRINTF((" tx ring %2d: qid=%-2d cur=%-3d "
7043 "queued=%-3d\n",
7044 i, ring->qid, ring->cur, ring->queued));
7045 }
7046 DPRINTF((" rx ring: cur=%d\n", sc->rxq.cur));
7047 DPRINTF((" 802.11 state %s\n",
7048 ieee80211_state_name[sc->sc_ic.ic_state]));
7049 #endif
7050
7051 aprint_error_dev(sc->sc_dev, "fatal firmware error\n");
7052 fatal:
7053 ifp->if_flags &= ~IFF_UP;
7054 iwm_stop(ifp, 1);
7055 /* Don't restore interrupt mask */
7056 return;
7057
7058 }
7059
7060 if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
7061 aprint_error_dev(sc->sc_dev,
7062 "hardware error, stopping device\n");
7063 goto fatal;
7064 }
7065
7066 /* firmware chunk loaded */
7067 if (r1 & IWM_CSR_INT_BIT_FH_TX) {
7068 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
7069 sc->sc_fw_chunk_done = 1;
7070 wakeup(&sc->sc_fw);
7071 }
7072
7073 if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
7074 if (iwm_check_rfkill(sc) && (ifp->if_flags & IFF_UP)) {
7075 ifp->if_flags &= ~IFF_UP;
7076 iwm_stop(ifp, 1);
7077 }
7078 }
7079
7080 if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
7081 IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
7082 if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
7083 IWM_WRITE_1(sc,
7084 IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
7085 isperiodic = 1;
7086 }
7087
7088 if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) ||
7089 isperiodic) {
7090 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
7091
7092 iwm_notif_intr(sc);
7093
7094 /* enable periodic interrupt, see above */
7095 if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) &&
7096 !isperiodic)
7097 IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
7098 IWM_CSR_INT_PERIODIC_ENA);
7099 }
7100
7101 r1 = atomic_swap_32(&sc->sc_soft_flags, 0);
7102 if (r1 != 0)
7103 goto restart;
7104
7105 iwm_restore_interrupts(sc);
7106 }
7107
7108 static int
7109 iwm_intr(void *arg)
7110 {
7111 struct iwm_softc *sc = arg;
7112 int r1, r2;
7113
7114 IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
7115
7116 if (sc->sc_flags & IWM_FLAG_USE_ICT) {
7117 uint32_t *ict = sc->ict_dma.vaddr;
7118 int tmp;
7119
7120 bus_dmamap_sync(sc->sc_dmat, sc->ict_dma.map,
7121 0, sc->ict_dma.size, BUS_DMASYNC_POSTREAD);
7122 		tmp = le32toh(ict[sc->ict_cur]);
7123 if (!tmp)
7124 goto out_ena;
7125
7126 /*
7127 * ok, there was something. keep plowing until we have all.
7128 */
7129 r1 = r2 = 0;
7130 while (tmp) {
7131 r1 |= tmp;
7132 ict[sc->ict_cur] = 0; /* Acknowledge. */
7133 bus_dmamap_sync(sc->sc_dmat, sc->ict_dma.map,
7134 			    sc->ict_cur * sizeof(*ict), sizeof(*ict),
7135 BUS_DMASYNC_PREWRITE);
7136 sc->ict_cur = (sc->ict_cur + 1) % IWM_ICT_COUNT;
7137 			tmp = le32toh(ict[sc->ict_cur]);
7138 }
7139
7140 		/* An accumulated value of 0xffffffff is bogus; ignore it. */
7141 if (r1 == 0xffffffff)
7142 r1 = 0;
7143
7144 		/*
		 * HW-bug workaround, as in iwlwifi: bits 18-19 shadow the
		 * Rx bit (15), which interrupt coalescing may clear, so set
		 * it again; then expand the compressed ICT value into
		 * CSR_INT layout (bits 0-7 stay put, bits 8-15 move to
		 * bits 24-31).
		 */
7145 if (r1 & 0xc0000)
7146 r1 |= 0x8000;
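		/*
		 * e.g. a raw ICT value of 0x0880 (bits 7 and 11 set)
		 * expands to 0x08000080 (bits 7 and 27 set) below.
		 */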
7147 r1 = (0xff & r1) | ((0xff00 & r1) << 16);
7148 } else {
7149 r1 = IWM_READ(sc, IWM_CSR_INT);
7150 if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
7151 goto out;
7152 r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
7153 }
7154 if (r1 == 0 && r2 == 0) {
7155 goto out_ena;
7156 }
7157
7158 IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
7159
7160 atomic_or_32(&sc->sc_soft_flags, r1);
7161 softint_schedule(sc->sc_soft_ih);
7162 return 1;
7163
7164 out_ena:
7165 iwm_restore_interrupts(sc);
7166 out:
7167 return 0;
7168 }
7169
7170 /*
7171 * Autoconf glue-sniffing
7172 */
7173
7174 static const pci_product_id_t iwm_devices[] = {
7175 PCI_PRODUCT_INTEL_WIFI_LINK_7260_1,
7176 PCI_PRODUCT_INTEL_WIFI_LINK_7260_2,
7177 PCI_PRODUCT_INTEL_WIFI_LINK_3160_1,
7178 PCI_PRODUCT_INTEL_WIFI_LINK_3160_2,
7179 PCI_PRODUCT_INTEL_WIFI_LINK_7265_1,
7180 PCI_PRODUCT_INTEL_WIFI_LINK_7265_2,
7181 #if 0
7182 PCI_PRODUCT_INTEL_WIFI_LINK_3165_1,
7183 PCI_PRODUCT_INTEL_WIFI_LINK_3165_2,
7184 PCI_PRODUCT_INTEL_WIFI_LINK_8260_1,
7185 PCI_PRODUCT_INTEL_WIFI_LINK_8260_2,
7186 #endif
7187 };
7188
7189 static int
7190 iwm_match(device_t parent, cfdata_t match __unused, void *aux)
7191 {
7192 struct pci_attach_args *pa = aux;
7193
7194 if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_INTEL)
7195 return 0;
7196
7197 for (size_t i = 0; i < __arraycount(iwm_devices); i++)
7198 if (PCI_PRODUCT(pa->pa_id) == iwm_devices[i])
7199 return 1;
7200
7201 return 0;
7202 }
7203
7204 static int
7205 iwm_preinit(struct iwm_softc *sc)
7206 {
7207 struct ieee80211com *ic = &sc->sc_ic;
7208 int err;
7209
7210 if (ISSET(sc->sc_flags, IWM_FLAG_ATTACHED))
7211 return 0;
7212
7213 err = iwm_start_hw(sc);
7214 if (err) {
7215 aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
7216 return err;
7217 }
7218
7219 err = iwm_run_init_mvm_ucode(sc, 1);
7220 iwm_stop_device(sc);
7221 if (err)
7222 return err;
7223
7224 sc->sc_flags |= IWM_FLAG_ATTACHED;
7225
7226 aprint_normal_dev(sc->sc_dev, "hw rev 0x%x, fw ver %s, address %s\n",
7227 sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK, sc->sc_fwver,
7228 ether_sprintf(sc->sc_nvm.hw_addr));
7229
7230 #ifndef IEEE80211_NO_HT
7231 if (sc->sc_nvm.sku_cap_11n_enable)
7232 iwm_setup_ht_rates(sc);
7233 #endif
7234
7235 /* not all hardware can do 5GHz band */
7236 if (sc->sc_nvm.sku_cap_band_52GHz_enable)
7237 ic->ic_sup_rates[IEEE80211_MODE_11A] = ieee80211_std_rateset_11a;
7238
7239 ieee80211_ifattach(ic);
7240
7241 ic->ic_node_alloc = iwm_node_alloc;
7242
7243 /* Override 802.11 state transition machine. */
7244 sc->sc_newstate = ic->ic_newstate;
7245 ic->ic_newstate = iwm_newstate;
7246 ieee80211_media_init(ic, iwm_media_change, ieee80211_media_status);
7247 ieee80211_announce(ic);
7248
7249 iwm_radiotap_attach(sc);
7250
7251 return 0;
7252 }
7253
7254 static void
7255 iwm_attach_hook(device_t dev)
7256 {
7257 struct iwm_softc *sc = device_private(dev);
7258
7259 iwm_preinit(sc);
7260 }
7261
7262 static void
7263 iwm_attach(device_t parent, device_t self, void *aux)
7264 {
7265 struct iwm_softc *sc = device_private(self);
7266 struct pci_attach_args *pa = aux;
7267 struct ieee80211com *ic = &sc->sc_ic;
7268 struct ifnet *ifp = &sc->sc_ec.ec_if;
7269 pcireg_t reg, memtype;
7270 char intrbuf[PCI_INTRSTR_LEN];
7271 const char *intrstr;
7272 int err;
7273 int txq_i;
7274 const struct sysctlnode *node;
7275
7276 sc->sc_dev = self;
7277 sc->sc_pct = pa->pa_pc;
7278 sc->sc_pcitag = pa->pa_tag;
7279 sc->sc_dmat = pa->pa_dmat;
7280 sc->sc_pciid = pa->pa_id;
7281
7282 pci_aprint_devinfo(pa, NULL);
7283
7284 if (workqueue_create(&sc->sc_nswq, "iwmns",
7285 iwm_newstate_cb, sc, PRI_NONE, IPL_NET, 0))
7286 panic("%s: could not create workqueue: newstate",
7287 device_xname(self));
7288 sc->sc_soft_ih = softint_establish(SOFTINT_NET, iwm_softintr, sc);
7289 if (sc->sc_soft_ih == NULL)
7290 panic("%s: could not establish softint", device_xname(self));
7291
7292 /*
7293 * Get the offset of the PCI Express Capability Structure in PCI
7294 * Configuration Space.
7295 */
7296 err = pci_get_capability(sc->sc_pct, sc->sc_pcitag,
7297 PCI_CAP_PCIEXPRESS, &sc->sc_cap_off, NULL);
7298 if (err == 0) {
7299 aprint_error_dev(self,
7300 "PCIe capability structure not found!\n");
7301 return;
7302 }
7303
7304 /* Clear device-specific "PCI retry timeout" register (41h). */
7305 reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
7306 pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
7307
7308 /* Enable bus-mastering */
7309 reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
7310 reg |= PCI_COMMAND_MASTER_ENABLE;
7311 pci_conf_write(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, reg);
7312
7313 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START);
7314 err = pci_mapreg_map(pa, PCI_MAPREG_START, memtype, 0,
7315 &sc->sc_st, &sc->sc_sh, NULL, &sc->sc_sz);
7316 if (err) {
7317 aprint_error_dev(self, "can't map mem space\n");
7318 return;
7319 }
7320
7321 /* Install interrupt handler. */
7322 err = pci_intr_alloc(pa, &sc->sc_pihp, NULL, 0);
7323 if (err) {
7324 aprint_error_dev(self, "can't allocate interrupt\n");
7325 return;
7326 }
7327 if (pci_intr_type(sc->sc_pct, sc->sc_pihp[0]) == PCI_INTR_TYPE_INTX) {
7328 reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
7329 PCI_COMMAND_STATUS_REG);
7330 if (ISSET(reg, PCI_COMMAND_INTERRUPT_DISABLE)) {
7331 CLR(reg, PCI_COMMAND_INTERRUPT_DISABLE);
7332 pci_conf_write(sc->sc_pct, sc->sc_pcitag,
7333 PCI_COMMAND_STATUS_REG, reg);
7334 }
7335 }
7336 intrstr = pci_intr_string(sc->sc_pct, sc->sc_pihp[0], intrbuf,
7337 sizeof(intrbuf));
7338 sc->sc_ih = pci_intr_establish_xname(sc->sc_pct, sc->sc_pihp[0],
7339 IPL_NET, iwm_intr, sc, device_xname(self));
7340 if (sc->sc_ih == NULL) {
7341 aprint_error_dev(self, "can't establish interrupt");
7342 if (intrstr != NULL)
7343 aprint_error(" at %s", intrstr);
7344 aprint_error("\n");
7345 return;
7346 }
7347 aprint_normal_dev(self, "interrupting at %s\n", intrstr);
7348
7349 sc->sc_wantresp = IWM_CMD_RESP_IDLE;
7350
7351 sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
7352 switch (PCI_PRODUCT(sc->sc_pciid)) {
7353 case PCI_PRODUCT_INTEL_WIFI_LINK_3160_1:
7354 case PCI_PRODUCT_INTEL_WIFI_LINK_3160_2:
7355 sc->sc_fwname = "iwlwifi-3160-16.ucode";
7356 sc->host_interrupt_operation_mode = 1;
7357 sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
7358 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
7359 break;
7360 case PCI_PRODUCT_INTEL_WIFI_LINK_3165_1:
7361 case PCI_PRODUCT_INTEL_WIFI_LINK_3165_2:
7362 sc->sc_fwname = "iwlwifi-7265D-16.ucode";
7363 sc->host_interrupt_operation_mode = 0;
7364 sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
7365 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
7366 break;
7367 case PCI_PRODUCT_INTEL_WIFI_LINK_7260_1:
7368 case PCI_PRODUCT_INTEL_WIFI_LINK_7260_2:
7369 sc->sc_fwname = "iwlwifi-7260-16.ucode";
7370 sc->host_interrupt_operation_mode = 1;
7371 sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
7372 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
7373 break;
7374 case PCI_PRODUCT_INTEL_WIFI_LINK_7265_1:
7375 case PCI_PRODUCT_INTEL_WIFI_LINK_7265_2:
7376 sc->sc_fwname = (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) ==
7377 IWM_CSR_HW_REV_TYPE_7265D ?
7378 "iwlwifi-7265D-16.ucode": "iwlwifi-7265-16.ucode";
7379 sc->host_interrupt_operation_mode = 0;
7380 sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
7381 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
7382 break;
7383 case PCI_PRODUCT_INTEL_WIFI_LINK_8260_1:
7384 case PCI_PRODUCT_INTEL_WIFI_LINK_8260_2:
7385 sc->sc_fwname = "iwlwifi-8000C-16.ucode";
7386 sc->host_interrupt_operation_mode = 0;
7387 sc->sc_device_family = IWM_DEVICE_FAMILY_8000;
7388 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
7389 break;
7390 default:
7391 aprint_error_dev(self, "unknown product %#x",
7392 PCI_PRODUCT(sc->sc_pciid));
7393 return;
7394 }
7395 DPRINTF(("%s: firmware=%s\n", DEVNAME(sc), sc->sc_fwname));
7396
7397 /*
7398 	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV has
7399 	 * changed, and the revision step now also includes bits 0-1 (no more
7400 	 * "dash" value). To keep hw_rev backwards compatible, we store it
7401 	 * in the old format.
7402 */
7403
7404 if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
7405 sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
7406 (IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
7407
7408 if (iwm_prepare_card_hw(sc) != 0) {
7409 aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
7410 return;
7411 }
7412
7413 if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
7414 uint32_t hw_step;
7415
7416 /*
7417 		 * To recognize a C step, the driver reads the chip
7418 		 * version id located at the AUX bus MISC address.
7419 */
7420 IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
7421 IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
7422 DELAY(2);
7423
7424 err = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
7425 IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
7426 IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
7427 25000);
7428 if (!err) {
7429 aprint_error_dev(sc->sc_dev,
7430 "failed to wake up the nic\n");
7431 return;
7432 }
7433
7434 if (iwm_nic_lock(sc)) {
7435 hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
7436 hw_step |= IWM_ENABLE_WFPM;
7437 iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
7438 hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
7439 hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
7440 if (hw_step == 0x3)
7441 sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
7442 (IWM_SILICON_C_STEP << 2);
7443 iwm_nic_unlock(sc);
7444 } else {
7445 aprint_error_dev(sc->sc_dev,
7446 "failed to lock the nic\n");
7447 return;
7448 }
7449 }
7450
	/*
	 * Allocate DMA memory for firmware transfers.
	 * Must be aligned on a 16-byte boundary.
	 */
	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma, sc->sc_fwdmasegsz,
	    16);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate memory for firmware\n");
		return;
	}

	/* Allocate "Keep Warm" page, used internally by the card. */
	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate keep warm page\n");
		goto fail1;
	}

	/* Allocate interrupt cause table (ICT). */
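	/*
	 * The ICT base register only stores the physical address bits
	 * above IWM_ICT_PADDR_SHIFT (as in the iwlwifi reference code),
	 * hence the alignment requirement below.
	 */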
	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma, IWM_ICT_SIZE,
	    1 << IWM_ICT_PADDR_SHIFT);
	if (err) {
		aprint_error_dev(sc->sc_dev, "could not allocate ICT table\n");
		goto fail2;
	}

	/* TX scheduler rings must be aligned on a 1KB boundary. */
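	/*
	 * This covers one byte count table (struct iwm_agn_scd_bc_tbl)
	 * per TX queue; the TX scheduler reads per-frame byte counts
	 * from these tables.
	 */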
	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
	    __arraycount(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate TX scheduler rings\n");
		goto fail3;
	}

	for (txq_i = 0; txq_i < __arraycount(sc->txq); txq_i++) {
		err = iwm_alloc_tx_ring(sc, &sc->txq[txq_i], txq_i);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate TX ring %d\n", txq_i);
			goto fail4;
		}
	}

	err = iwm_alloc_rx_ring(sc, &sc->rxq);
	if (err) {
		aprint_error_dev(sc->sc_dev, "could not allocate RX ring\n");
		goto fail4;
	}

	/* Clear pending interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);

	if ((err = sysctl_createv(&sc->sc_clog, 0, NULL, &node,
	    0, CTLTYPE_NODE, device_xname(sc->sc_dev),
	    SYSCTL_DESCR("iwm per-controller controls"),
	    NULL, 0, NULL, 0,
	    CTL_HW, iwm_sysctl_root_num, CTL_CREATE,
	    CTL_EOL)) != 0) {
		aprint_normal_dev(sc->sc_dev,
		    "couldn't create iwm per-controller sysctl node\n");
	}
	if (err == 0) {
		int iwm_nodenum = node->sysctl_num;

		/* Reload firmware sysctl node */
		if ((err = sysctl_createv(&sc->sc_clog, 0, NULL, &node,
		    CTLFLAG_READWRITE, CTLTYPE_INT, "fw_loaded",
		    SYSCTL_DESCR("Reload firmware"),
		    iwm_sysctl_fw_loaded_handler, 0, (void *)sc, 0,
		    CTL_HW, iwm_sysctl_root_num, iwm_nodenum, CTL_CREATE,
		    CTL_EOL)) != 0) {
			aprint_normal_dev(sc->sc_dev,
			    "couldn't create fw_loaded sysctl node\n");
		}
	}
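	/*
	 * Usage sketch (the node name varies with the unit number):
	 *
	 *	# sysctl -w hw.iwm0.fw_loaded=0
	 *
	 * Writing 0 clears IWM_FLAG_FW_LOADED so the firmware is read
	 * from disk again on the next interface init; see
	 * iwm_sysctl_fw_loaded_handler() below.
	 */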

	/*
	 * Attach interface
	 */
	ic->ic_ifp = ifp;
	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
	ic->ic_state = IEEE80211_S_INIT;

	/* Set device capabilities. */
	ic->ic_caps =
	    IEEE80211_C_WEP |		/* WEP */
	    IEEE80211_C_WPA |		/* 802.11i */
#ifdef notyet
	    IEEE80211_C_SCANALL |	/* device scans all channels at once */
	    IEEE80211_C_SCANALLBAND |	/* device scans all bands at once */
#endif
	    IEEE80211_C_SHSLOT |	/* short slot time supported */
	    IEEE80211_C_SHPREAMBLE;	/* short preamble supported */

#ifndef IEEE80211_NO_HT
	ic->ic_htcaps = IEEE80211_HTCAP_SGI20;
	ic->ic_htxcaps = 0;
	ic->ic_txbfcaps = 0;
	ic->ic_aselcaps = 0;
	ic->ic_ampdu_params = (IEEE80211_AMPDU_PARAM_SS_4 | 0x3 /* 64k */);
#endif

	/* all hardware can do 2.4GHz band */
	ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b;
	ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g;

	for (int i = 0; i < __arraycount(sc->sc_phyctxt); i++) {
		sc->sc_phyctxt[i].id = i;
	}

	sc->sc_amrr.amrr_min_success_threshold = 1;
	sc->sc_amrr.amrr_max_success_threshold = 15;

	/* IBSS channel undefined for now. */
	ic->ic_ibss_chan = &ic->ic_channels[1];

#if 0
	ic->ic_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
#endif

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = iwm_init;
	ifp->if_stop = iwm_stop;
	ifp->if_ioctl = iwm_ioctl;
	ifp->if_start = iwm_start;
	ifp->if_watchdog = iwm_watchdog;
	IFQ_SET_READY(&ifp->if_snd);
	memcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);

	if_initialize(ifp);
#if 0
	ieee80211_ifattach(ic);
#else
	ether_ifattach(ifp, ic->ic_myaddr);	/* XXX */
#endif
	/* Use common softint-based if_input */
	ifp->if_percpuq = if_percpuq_create(ifp);
	if_deferred_start_init(ifp, NULL);
	if_register(ifp);

	callout_init(&sc->sc_calib_to, 0);
	callout_setfunc(&sc->sc_calib_to, iwm_calib_timeout, sc);
	callout_init(&sc->sc_led_blink_to, 0);
	callout_setfunc(&sc->sc_led_blink_to, iwm_led_blink_timeout, sc);
#ifndef IEEE80211_NO_HT
	if (workqueue_create(&sc->sc_setratewq, "iwmsr",
	    iwm_setrates_task, sc, PRI_NONE, IPL_NET, 0))
		panic("%s: could not create workqueue: setrates",
		    device_xname(self));
	if (workqueue_create(&sc->sc_bawq, "iwmba",
	    iwm_ba_task, sc, PRI_NONE, IPL_NET, 0))
		panic("%s: could not create workqueue: blockack",
		    device_xname(self));
	if (workqueue_create(&sc->sc_htprowq, "iwmhtpro",
	    iwm_htprot_task, sc, PRI_NONE, IPL_NET, 0))
		panic("%s: could not create workqueue: htprot",
		    device_xname(self));
#endif

	if (pmf_device_register(self, NULL, NULL))
		pmf_class_network_register(self, ifp);
	else
		aprint_error_dev(self, "couldn't establish power handler\n");

	/*
	 * We can't do normal attach before the file system is mounted
	 * because we cannot read the MAC address without loading the
	 * firmware from disk, so we postpone attach until mountroot is
	 * done. Note that if the firmware is missing when the hook runs,
	 * recovering requires a full driver unload/load cycle (or a
	 * reboot).
	 */
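	/*
	 * firmware(9) is expected to find the image by the name chosen
	 * above, typically under /libdata/firmware/if_iwm/.
	 */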
	config_mountroot(self, iwm_attach_hook);

	return;

fail4:	while (--txq_i >= 0)
		iwm_free_tx_ring(sc, &sc->txq[txq_i]);
	iwm_free_rx_ring(sc, &sc->rxq);
	iwm_dma_contig_free(&sc->sched_dma);
fail3:	if (sc->ict_dma.vaddr != NULL)
		iwm_dma_contig_free(&sc->ict_dma);
fail2:	iwm_dma_contig_free(&sc->kw_dma);
fail1:	iwm_dma_contig_free(&sc->fw_dma);
}

void
iwm_radiotap_attach(struct iwm_softc *sc)
{
	struct ifnet *ifp = IC2IFP(&sc->sc_ic);

	bpf_attach2(ifp, DLT_IEEE802_11_RADIO,
	    sizeof(struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN,
	    &sc->sc_drvbpf);

	sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
	sc->sc_rxtap.wr_ihdr.it_present = htole32(IWM_RX_RADIOTAP_PRESENT);

	sc->sc_txtap_len = sizeof sc->sc_txtapu;
	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
	sc->sc_txtap.wt_ihdr.it_present = htole32(IWM_TX_RADIOTAP_PRESENT);
}

#if 0
static void
iwm_init_task(void *arg)
{
	struct iwm_softc *sc = arg;
	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
	int s;

	rw_enter_write(&sc->ioctl_rwl);
	s = splnet();

	iwm_stop(ifp, 0);
	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP)
		iwm_init(ifp);

	splx(s);
	rw_exit(&sc->ioctl_rwl);
}

static void
iwm_wakeup(struct iwm_softc *sc)
{
	pcireg_t reg;

	/* Clear device-specific "PCI retry timeout" register (41h). */
	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);

	iwm_init_task(sc);
}

static int
iwm_activate(device_t self, enum devact act)
{
	struct iwm_softc *sc = device_private(self);
	struct ifnet *ifp = IC2IFP(&sc->sc_ic);

	switch (act) {
	case DVACT_DEACTIVATE:
		if (ifp->if_flags & IFF_RUNNING)
			iwm_stop(ifp, 0);
		return 0;
	default:
		return EOPNOTSUPP;
	}
}
#endif

CFATTACH_DECL_NEW(iwm, sizeof(struct iwm_softc), iwm_match, iwm_attach,
    NULL, NULL);

static int
iwm_sysctl_fw_loaded_handler(SYSCTLFN_ARGS)
{
	struct sysctlnode node;
	struct iwm_softc *sc;
	int err, t;

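	/*
	 * Standard NetBSD sysctl handler idiom: publish a copy of the
	 * current value, let sysctl_lookup() do the user copy-in/out,
	 * and only act if new data was written.
	 */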
	node = *rnode;
	sc = node.sysctl_data;
	t = ISSET(sc->sc_flags, IWM_FLAG_FW_LOADED) ? 1 : 0;
	node.sysctl_data = &t;
	err = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (err || newp == NULL)
		return err;

	if (t == 0)
		CLR(sc->sc_flags, IWM_FLAG_FW_LOADED);
	return 0;
}

SYSCTL_SETUP(sysctl_iwm, "sysctl iwm(4) subtree setup")
{
	const struct sysctlnode *rnode;
#ifdef IWM_DEBUG
	const struct sysctlnode *cnode;
#endif /* IWM_DEBUG */
	int rc;

	if ((rc = sysctl_createv(clog, 0, NULL, &rnode,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "iwm",
	    SYSCTL_DESCR("iwm global controls"),
	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
		goto err;

	iwm_sysctl_root_num = rnode->sysctl_num;

#ifdef IWM_DEBUG
	/* control debugging printfs */
	if ((rc = sysctl_createv(clog, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT,
	    "debug", SYSCTL_DESCR("Enable debugging output"),
	    NULL, 0, &iwm_debug, 0, CTL_CREATE, CTL_EOL)) != 0)
		goto err;
#endif /* IWM_DEBUG */

	return;

err:
	aprint_error("%s: sysctl_createv failed (rc = %d)\n", __func__, rc);
}