if_iwm.c revision 1.55 1 /* $NetBSD: if_iwm.c,v 1.55 2017/01/09 10:42:45 khorben Exp $ */
2 /* OpenBSD: if_iwm.c,v 1.147 2016/11/17 14:12:33 stsp Exp */
3 #define IEEE80211_NO_HT
4 /*
5 * Copyright (c) 2014, 2016 genua gmbh <info (at) genua.de>
6 * Author: Stefan Sperling <stsp (at) openbsd.org>
7 * Copyright (c) 2014 Fixup Software Ltd.
8 *
9 * Permission to use, copy, modify, and distribute this software for any
10 * purpose with or without fee is hereby granted, provided that the above
11 * copyright notice and this permission notice appear in all copies.
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
14 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
15 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
16 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
17 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 */
21
22 /*-
23 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
24 * which were used as the reference documentation for this implementation.
25 *
26 ***********************************************************************
27 *
28 * This file is provided under a dual BSD/GPLv2 license. When using or
29 * redistributing this file, you may do so under either license.
30 *
31 * GPL LICENSE SUMMARY
32 *
33 * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
34 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
35 * Copyright(c) 2016 Intel Deutschland GmbH
36 *
37 * This program is free software; you can redistribute it and/or modify
38 * it under the terms of version 2 of the GNU General Public License as
39 * published by the Free Software Foundation.
40 *
41 * This program is distributed in the hope that it will be useful, but
42 * WITHOUT ANY WARRANTY; without even the implied warranty of
43 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
44 * General Public License for more details.
45 *
46 * You should have received a copy of the GNU General Public License
47 * along with this program; if not, write to the Free Software
48 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
49 * USA
50 *
51 * The full GNU General Public License is included in this distribution
52 * in the file called COPYING.
53 *
54 * Contact Information:
55 * Intel Linux Wireless <ilw (at) linux.intel.com>
56 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
57 *
58 *
59 * BSD LICENSE
60 *
61 * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
62 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
63 * Copyright(c) 2016 Intel Deutschland GmbH
64 * All rights reserved.
65 *
66 * Redistribution and use in source and binary forms, with or without
67 * modification, are permitted provided that the following conditions
68 * are met:
69 *
70 * * Redistributions of source code must retain the above copyright
71 * notice, this list of conditions and the following disclaimer.
72 * * Redistributions in binary form must reproduce the above copyright
73 * notice, this list of conditions and the following disclaimer in
74 * the documentation and/or other materials provided with the
75 * distribution.
76 * * Neither the name Intel Corporation nor the names of its
77 * contributors may be used to endorse or promote products derived
78 * from this software without specific prior written permission.
79 *
80 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
81 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
82 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
83 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
84 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
85 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
86 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
87 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
88 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
89 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
90 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
91 */
92
93 /*-
94 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini (at) free.fr>
95 *
96 * Permission to use, copy, modify, and distribute this software for any
97 * purpose with or without fee is hereby granted, provided that the above
98 * copyright notice and this permission notice appear in all copies.
99 *
100 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
101 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
102 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
103 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
104 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
105 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
106 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
107 */
108
109 #include <sys/cdefs.h>
110 __KERNEL_RCSID(0, "$NetBSD: if_iwm.c,v 1.55 2017/01/09 10:42:45 khorben Exp $");
111
112 #include <sys/param.h>
113 #include <sys/conf.h>
114 #include <sys/kernel.h>
115 #include <sys/kmem.h>
116 #include <sys/mbuf.h>
117 #include <sys/mutex.h>
118 #include <sys/proc.h>
119 #include <sys/socket.h>
120 #include <sys/sockio.h>
121 #include <sys/sysctl.h>
122 #include <sys/systm.h>
123
124 #include <sys/cpu.h>
125 #include <sys/bus.h>
126 #include <sys/workqueue.h>
127 #include <machine/endian.h>
128 #include <machine/intr.h>
129
130 #include <dev/pci/pcireg.h>
131 #include <dev/pci/pcivar.h>
132 #include <dev/pci/pcidevs.h>
133 #include <dev/firmload.h>
134
135 #include <net/bpf.h>
136 #include <net/if.h>
137 #include <net/if_dl.h>
138 #include <net/if_media.h>
139 #include <net/if_ether.h>
140
141 #include <netinet/in.h>
142 #include <netinet/ip.h>
143
144 #include <net80211/ieee80211_var.h>
145 #include <net80211/ieee80211_amrr.h>
146 #include <net80211/ieee80211_radiotap.h>
147
148 #define DEVNAME(_s) device_xname((_s)->sc_dev)
149 #define IC2IFP(_ic_) ((_ic_)->ic_ifp)
150
151 #define le16_to_cpup(_a_) (le16toh(*(const uint16_t *)(_a_)))
152 #define le32_to_cpup(_a_) (le32toh(*(const uint32_t *)(_a_)))
153
154 #ifdef IWM_DEBUG
155 #define DPRINTF(x) do { if (iwm_debug > 0) printf x; } while (0)
156 #define DPRINTFN(n, x) do { if (iwm_debug >= (n)) printf x; } while (0)
157 int iwm_debug = 0;
158 #else
159 #define DPRINTF(x) do { ; } while (0)
160 #define DPRINTFN(n, x) do { ; } while (0)
161 #endif
162
163 #include <dev/pci/if_iwmreg.h>
164 #include <dev/pci/if_iwmvar.h>
165
/*
 * Channel numbers advertised in the NVM of 7000-series devices:
 * the 14 2.4 GHz channels followed by the supported 5 GHz channels.
 * Indexed via iwm_init_channel_map() when building the channel list.
 */
static const uint8_t iwm_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};
174
/*
 * Channel numbers advertised in the NVM of 8000-series devices.
 * Same layout as iwm_nvm_channels[] but with a wider 5 GHz range.
 */
static const uint8_t iwm_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};
183
/* Number of 2.4 GHz entries at the start of the NVM channel tables above. */
#define IWM_NUM_2GHZ_CHANNELS	14

/*
 * Rate table: for each rate index, the rate in units of 500 kbit/s,
 * the legacy PLCP signal value, and the HT SISO MCS PLCP value where
 * an HT equivalent exists (entries without one carry the INV marker).
 */
static const struct iwm_rate {
	uint8_t rate;	/* rate in 500 kbit/s units */
	uint8_t plcp;	/* legacy PLCP value */
	uint8_t ht_plcp;	/* HT SISO MCS PLCP value, or INV */
} iwm_rates[] = {
		/* Legacy */		/* HT */
	{   2,	IWM_RATE_1M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP  },
	{   4,	IWM_RATE_2M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{  11,	IWM_RATE_5M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP  },
	{  22,	IWM_RATE_11M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{  12,	IWM_RATE_6M_PLCP,	IWM_RATE_HT_SISO_MCS_0_PLCP },
	{  18,	IWM_RATE_9M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP  },
	{  24,	IWM_RATE_12M_PLCP,	IWM_RATE_HT_SISO_MCS_1_PLCP },
	{  36,	IWM_RATE_18M_PLCP,	IWM_RATE_HT_SISO_MCS_2_PLCP },
	{  48,	IWM_RATE_24M_PLCP,	IWM_RATE_HT_SISO_MCS_3_PLCP },
	{  72,	IWM_RATE_36M_PLCP,	IWM_RATE_HT_SISO_MCS_4_PLCP },
	{  96,	IWM_RATE_48M_PLCP,	IWM_RATE_HT_SISO_MCS_5_PLCP },
	{ 108,	IWM_RATE_54M_PLCP,	IWM_RATE_HT_SISO_MCS_6_PLCP },
	{ 128,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_SISO_MCS_7_PLCP },
};
/* Indices into iwm_rates[]: CCK rates come first, then OFDM. */
#define IWM_RIDX_CCK	0
#define IWM_RIDX_OFDM	4
#define IWM_RIDX_MAX	(__arraycount(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
211
#ifndef IEEE80211_NO_HT
/*
 * Convert an MCS index into an iwm_rates[] index.
 * NOTE: IEEE80211_NO_HT is defined at the top of this file, so this
 * table (and all other HT support) is currently compiled out.
 */
static const int iwm_mcs2ridx[] = {
	IWM_RATE_MCS_0_INDEX,
	IWM_RATE_MCS_1_INDEX,
	IWM_RATE_MCS_2_INDEX,
	IWM_RATE_MCS_3_INDEX,
	IWM_RATE_MCS_4_INDEX,
	IWM_RATE_MCS_5_INDEX,
	IWM_RATE_MCS_6_INDEX,
	IWM_RATE_MCS_7_INDEX,
};
#endif
225
/* One NVM section read from the device: its length and payload buffer. */
struct iwm_nvm_section {
	uint16_t length;
	uint8_t *data;
};

/*
 * Deferred 802.11 state-change request; presumably queued from
 * iwm_newstate() and executed by iwm_newstate_cb() on the workqueue
 * (see the prototypes above) -- confirm against those functions.
 */
struct iwm_newstate_state {
	struct work ns_wk;		/* workqueue linkage */
	enum ieee80211_state ns_nstate;	/* requested new state */
	int ns_arg;			/* argument for the transition */
	int ns_generation;		/* detects stale/superseded requests */
};
237
238 static int iwm_store_cscheme(struct iwm_softc *, uint8_t *, size_t);
239 static int iwm_firmware_store_section(struct iwm_softc *,
240 enum iwm_ucode_type, uint8_t *, size_t);
241 static int iwm_set_default_calib(struct iwm_softc *, const void *);
242 static int iwm_read_firmware(struct iwm_softc *);
243 static uint32_t iwm_read_prph(struct iwm_softc *, uint32_t);
244 static void iwm_write_prph(struct iwm_softc *, uint32_t, uint32_t);
245 #ifdef IWM_DEBUG
246 static int iwm_read_mem(struct iwm_softc *, uint32_t, void *, int);
247 #endif
248 static int iwm_write_mem(struct iwm_softc *, uint32_t, const void *, int);
249 static int iwm_write_mem32(struct iwm_softc *, uint32_t, uint32_t);
250 static int iwm_poll_bit(struct iwm_softc *, int, uint32_t, uint32_t, int);
251 static int iwm_nic_lock(struct iwm_softc *);
252 static void iwm_nic_unlock(struct iwm_softc *);
253 static void iwm_set_bits_mask_prph(struct iwm_softc *, uint32_t, uint32_t,
254 uint32_t);
255 static void iwm_set_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
256 static void iwm_clear_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
257 static int iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *,
258 bus_size_t, bus_size_t);
259 static void iwm_dma_contig_free(struct iwm_dma_info *);
260 static int iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
261 static void iwm_disable_rx_dma(struct iwm_softc *);
262 static void iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
263 static void iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
264 static int iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
265 int);
266 static void iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
267 static void iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
268 static void iwm_enable_rfkill_int(struct iwm_softc *);
269 static int iwm_check_rfkill(struct iwm_softc *);
270 static void iwm_enable_interrupts(struct iwm_softc *);
271 static void iwm_restore_interrupts(struct iwm_softc *);
272 static void iwm_disable_interrupts(struct iwm_softc *);
273 static void iwm_ict_reset(struct iwm_softc *);
274 static int iwm_set_hw_ready(struct iwm_softc *);
275 static int iwm_prepare_card_hw(struct iwm_softc *);
276 static void iwm_apm_config(struct iwm_softc *);
277 static int iwm_apm_init(struct iwm_softc *);
278 static void iwm_apm_stop(struct iwm_softc *);
279 static int iwm_allow_mcast(struct iwm_softc *);
280 static int iwm_start_hw(struct iwm_softc *);
281 static void iwm_stop_device(struct iwm_softc *);
282 static void iwm_nic_config(struct iwm_softc *);
283 static int iwm_nic_rx_init(struct iwm_softc *);
284 static int iwm_nic_tx_init(struct iwm_softc *);
285 static int iwm_nic_init(struct iwm_softc *);
286 static int iwm_enable_txq(struct iwm_softc *, int, int, int);
287 static int iwm_post_alive(struct iwm_softc *);
288 static struct iwm_phy_db_entry *
289 iwm_phy_db_get_section(struct iwm_softc *,
290 enum iwm_phy_db_section_type, uint16_t);
291 static int iwm_phy_db_set_section(struct iwm_softc *,
292 struct iwm_calib_res_notif_phy_db *, uint16_t);
293 static int iwm_is_valid_channel(uint16_t);
294 static uint8_t iwm_ch_id_to_ch_index(uint16_t);
295 static uint16_t iwm_channel_id_to_papd(uint16_t);
296 static uint16_t iwm_channel_id_to_txp(struct iwm_softc *, uint16_t);
297 static int iwm_phy_db_get_section_data(struct iwm_softc *, uint32_t,
298 uint8_t **, uint16_t *, uint16_t);
299 static int iwm_send_phy_db_cmd(struct iwm_softc *, uint16_t, uint16_t,
300 void *);
301 static int iwm_phy_db_send_all_channel_groups(struct iwm_softc *,
302 enum iwm_phy_db_section_type, uint8_t);
303 static int iwm_send_phy_db_data(struct iwm_softc *);
304 static void iwm_te_v2_to_v1(const struct iwm_time_event_cmd_v2 *,
305 struct iwm_time_event_cmd_v1 *);
306 static int iwm_send_time_event_cmd(struct iwm_softc *,
307 const struct iwm_time_event_cmd_v2 *);
308 static void iwm_protect_session(struct iwm_softc *, struct iwm_node *,
309 uint32_t, uint32_t);
310 static int iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
311 uint16_t, uint8_t *, uint16_t *);
312 static int iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
313 uint16_t *, size_t);
314 static void iwm_init_channel_map(struct iwm_softc *, const uint16_t * const,
315 const uint8_t *, size_t);
316 #ifndef IEEE80211_NO_HT
317 static void iwm_setup_ht_rates(struct iwm_softc *);
318 static void iwm_htprot_task(void *);
319 static void iwm_update_htprot(struct ieee80211com *,
320 struct ieee80211_node *);
321 static int iwm_ampdu_rx_start(struct ieee80211com *,
322 struct ieee80211_node *, uint8_t);
323 static void iwm_ampdu_rx_stop(struct ieee80211com *,
324 struct ieee80211_node *, uint8_t);
325 static void iwm_sta_rx_agg(struct iwm_softc *, struct ieee80211_node *,
326 uint8_t, uint16_t, int);
327 #ifdef notyet
328 static int iwm_ampdu_tx_start(struct ieee80211com *,
329 struct ieee80211_node *, uint8_t);
330 static void iwm_ampdu_tx_stop(struct ieee80211com *,
331 struct ieee80211_node *, uint8_t);
332 #endif
333 static void iwm_ba_task(void *);
334 #endif
335
336 static int iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
337 const uint16_t *, const uint16_t *, const uint16_t *,
338 const uint16_t *, const uint16_t *);
339 static void iwm_set_hw_address_8000(struct iwm_softc *,
340 struct iwm_nvm_data *, const uint16_t *, const uint16_t *);
341 static int iwm_parse_nvm_sections(struct iwm_softc *,
342 struct iwm_nvm_section *);
343 static int iwm_nvm_init(struct iwm_softc *);
344 static int iwm_firmware_load_sect(struct iwm_softc *, uint32_t,
345 const uint8_t *, uint32_t);
346 static int iwm_firmware_load_chunk(struct iwm_softc *, uint32_t,
347 const uint8_t *, uint32_t);
348 static int iwm_load_firmware_7000(struct iwm_softc *, enum iwm_ucode_type);
349 static int iwm_load_cpu_sections_8000(struct iwm_softc *,
350 struct iwm_fw_sects *, int , int *);
351 static int iwm_load_firmware_8000(struct iwm_softc *, enum iwm_ucode_type);
352 static int iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
353 static int iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
354 static int iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
355 static int iwm_send_phy_cfg_cmd(struct iwm_softc *);
356 static int iwm_load_ucode_wait_alive(struct iwm_softc *,
357 enum iwm_ucode_type);
358 static int iwm_run_init_mvm_ucode(struct iwm_softc *, int);
359 static int iwm_rx_addbuf(struct iwm_softc *, int, int);
360 static int iwm_calc_rssi(struct iwm_softc *, struct iwm_rx_phy_info *);
361 static int iwm_get_signal_strength(struct iwm_softc *,
362 struct iwm_rx_phy_info *);
363 static void iwm_rx_rx_phy_cmd(struct iwm_softc *,
364 struct iwm_rx_packet *, struct iwm_rx_data *);
365 static int iwm_get_noise(const struct iwm_statistics_rx_non_phy *);
366 static void iwm_rx_rx_mpdu(struct iwm_softc *, struct iwm_rx_packet *,
367 struct iwm_rx_data *);
368 static void iwm_rx_tx_cmd_single(struct iwm_softc *, struct iwm_rx_packet *, struct iwm_node *);
369 static void iwm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
370 struct iwm_rx_data *);
371 static int iwm_binding_cmd(struct iwm_softc *, struct iwm_node *,
372 uint32_t);
373 #if 0
374 static int iwm_binding_update(struct iwm_softc *, struct iwm_node *, int);
375 static int iwm_binding_add_vif(struct iwm_softc *, struct iwm_node *);
376 #endif
377 static void iwm_phy_ctxt_cmd_hdr(struct iwm_softc *, struct iwm_phy_ctxt *,
378 struct iwm_phy_context_cmd *, uint32_t, uint32_t);
379 static void iwm_phy_ctxt_cmd_data(struct iwm_softc *,
380 struct iwm_phy_context_cmd *, struct ieee80211_channel *,
381 uint8_t, uint8_t);
382 static int iwm_phy_ctxt_cmd(struct iwm_softc *, struct iwm_phy_ctxt *,
383 uint8_t, uint8_t, uint32_t, uint32_t);
384 static int iwm_send_cmd(struct iwm_softc *, struct iwm_host_cmd *);
385 static int iwm_send_cmd_pdu(struct iwm_softc *, uint32_t, uint32_t,
386 uint16_t, const void *);
387 static int iwm_send_cmd_status(struct iwm_softc *, struct iwm_host_cmd *,
388 uint32_t *);
389 static int iwm_send_cmd_pdu_status(struct iwm_softc *, uint32_t, uint16_t,
390 const void *, uint32_t *);
391 static void iwm_free_resp(struct iwm_softc *, struct iwm_host_cmd *);
392 static void iwm_cmd_done(struct iwm_softc *, int qid, int idx);
393 #if 0
394 static void iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
395 uint16_t);
396 #endif
397 static const struct iwm_rate *
398 iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
399 struct ieee80211_frame *, struct iwm_tx_cmd *);
400 static int iwm_tx(struct iwm_softc *, struct mbuf *,
401 struct ieee80211_node *, int);
402 static void iwm_led_enable(struct iwm_softc *);
403 static void iwm_led_disable(struct iwm_softc *);
404 static int iwm_led_is_enabled(struct iwm_softc *);
405 static void iwm_led_blink_timeout(void *);
406 static void iwm_led_blink_start(struct iwm_softc *);
407 static void iwm_led_blink_stop(struct iwm_softc *);
408 static int iwm_beacon_filter_send_cmd(struct iwm_softc *,
409 struct iwm_beacon_filter_cmd *);
410 static void iwm_beacon_filter_set_cqm_params(struct iwm_softc *,
411 struct iwm_node *, struct iwm_beacon_filter_cmd *);
412 static int iwm_update_beacon_abort(struct iwm_softc *, struct iwm_node *,
413 int);
414 static void iwm_power_build_cmd(struct iwm_softc *, struct iwm_node *,
415 struct iwm_mac_power_cmd *);
416 static int iwm_power_mac_update_mode(struct iwm_softc *,
417 struct iwm_node *);
418 static int iwm_power_update_device(struct iwm_softc *);
419 #ifdef notyet
420 static int iwm_enable_beacon_filter(struct iwm_softc *, struct iwm_node *);
421 #endif
422 static int iwm_disable_beacon_filter(struct iwm_softc *);
423 static int iwm_add_sta_cmd(struct iwm_softc *, struct iwm_node *, int);
424 static int iwm_add_aux_sta(struct iwm_softc *);
425 static uint16_t iwm_scan_rx_chain(struct iwm_softc *);
426 static uint32_t iwm_scan_rate_n_flags(struct iwm_softc *, int, int);
427 #ifdef notyet
428 static uint16_t iwm_get_active_dwell(struct iwm_softc *, int, int);
429 static uint16_t iwm_get_passive_dwell(struct iwm_softc *, int);
430 #endif
431 static uint8_t iwm_lmac_scan_fill_channels(struct iwm_softc *,
432 struct iwm_scan_channel_cfg_lmac *, int);
433 static int iwm_fill_probe_req(struct iwm_softc *,
434 struct iwm_scan_probe_req *);
435 static int iwm_lmac_scan(struct iwm_softc *);
436 static int iwm_config_umac_scan(struct iwm_softc *);
437 static int iwm_umac_scan(struct iwm_softc *);
438 static uint8_t iwm_ridx2rate(struct ieee80211_rateset *, int);
439 static void iwm_ack_rates(struct iwm_softc *, struct iwm_node *, int *,
440 int *);
441 static void iwm_mac_ctxt_cmd_common(struct iwm_softc *, struct iwm_node *,
442 struct iwm_mac_ctx_cmd *, uint32_t, int);
443 static void iwm_mac_ctxt_cmd_fill_sta(struct iwm_softc *, struct iwm_node *,
444 struct iwm_mac_data_sta *, int);
445 static int iwm_mac_ctxt_cmd(struct iwm_softc *, struct iwm_node *,
446 uint32_t, int);
447 static int iwm_update_quotas(struct iwm_softc *, struct iwm_node *);
448 static int iwm_auth(struct iwm_softc *);
449 static int iwm_assoc(struct iwm_softc *);
450 static void iwm_calib_timeout(void *);
451 #ifndef IEEE80211_NO_HT
452 static void iwm_setrates_task(void *);
453 static int iwm_setrates(struct iwm_node *);
454 #endif
455 static int iwm_media_change(struct ifnet *);
456 static void iwm_newstate_cb(struct work *, void *);
457 static int iwm_newstate(struct ieee80211com *, enum ieee80211_state, int);
458 static void iwm_endscan(struct iwm_softc *);
459 static void iwm_fill_sf_command(struct iwm_softc *, struct iwm_sf_cfg_cmd *,
460 struct ieee80211_node *);
461 static int iwm_sf_config(struct iwm_softc *, int);
462 static int iwm_send_bt_init_conf(struct iwm_softc *);
463 static int iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
464 static void iwm_tt_tx_backoff(struct iwm_softc *, uint32_t);
465 static int iwm_init_hw(struct iwm_softc *);
466 static int iwm_init(struct ifnet *);
467 static void iwm_start(struct ifnet *);
468 static void iwm_stop(struct ifnet *, int);
469 static void iwm_watchdog(struct ifnet *);
470 static int iwm_ioctl(struct ifnet *, u_long, void *);
471 #ifdef IWM_DEBUG
472 static const char *iwm_desc_lookup(uint32_t);
473 static void iwm_nic_error(struct iwm_softc *);
474 static void iwm_nic_umac_error(struct iwm_softc *);
475 #endif
476 static void iwm_notif_intr(struct iwm_softc *);
477 static void iwm_softintr(void *);
478 static int iwm_intr(void *);
479 static int iwm_preinit(struct iwm_softc *);
480 static void iwm_attach_hook(device_t);
481 static void iwm_attach(device_t, device_t, void *);
482 #if 0
483 static void iwm_init_task(void *);
484 static int iwm_activate(device_t, enum devact);
485 static void iwm_wakeup(struct iwm_softc *);
486 #endif
487 static void iwm_radiotap_attach(struct iwm_softc *);
488 static int iwm_sysctl_fw_loaded_handler(SYSCTLFN_PROTO);
489
490 static int iwm_sysctl_root_num;
491
492 static int
493 iwm_firmload(struct iwm_softc *sc)
494 {
495 struct iwm_fw_info *fw = &sc->sc_fw;
496 firmware_handle_t fwh;
497 int err;
498
499 if (ISSET(sc->sc_flags, IWM_FLAG_FW_LOADED))
500 return 0;
501
502 /* Open firmware image. */
503 err = firmware_open("if_iwm", sc->sc_fwname, &fwh);
504 if (err) {
505 aprint_error_dev(sc->sc_dev,
506 "could not get firmware handle %s\n", sc->sc_fwname);
507 return err;
508 }
509
510 if (fw->fw_rawdata != NULL && fw->fw_rawsize > 0) {
511 kmem_free(fw->fw_rawdata, fw->fw_rawsize);
512 fw->fw_rawdata = NULL;
513 }
514
515 fw->fw_rawsize = firmware_get_size(fwh);
516 /*
517 * Well, this is how the Linux driver checks it ....
518 */
519 if (fw->fw_rawsize < sizeof(uint32_t)) {
520 aprint_error_dev(sc->sc_dev,
521 "firmware too short: %zd bytes\n", fw->fw_rawsize);
522 err = EINVAL;
523 goto out;
524 }
525
526 /* some sanity */
527 if (fw->fw_rawsize > IWM_FWMAXSIZE) {
528 aprint_error_dev(sc->sc_dev,
529 "firmware size is ridiculous: %zd bytes\n", fw->fw_rawsize);
530 err = EINVAL;
531 goto out;
532 }
533
534 /* Read the firmware. */
535 fw->fw_rawdata = kmem_alloc(fw->fw_rawsize, KM_SLEEP);
536 if (fw->fw_rawdata == NULL) {
537 aprint_error_dev(sc->sc_dev,
538 "not enough memory to stock firmware %s\n", sc->sc_fwname);
539 err = ENOMEM;
540 goto out;
541 }
542 err = firmware_read(fwh, 0, fw->fw_rawdata, fw->fw_rawsize);
543 if (err) {
544 aprint_error_dev(sc->sc_dev,
545 "could not read firmware %s\n", sc->sc_fwname);
546 goto out;
547 }
548
549 SET(sc->sc_flags, IWM_FLAG_FW_LOADED);
550 out:
551 /* caller will release memory, if necessary */
552
553 firmware_close(fwh);
554 return err;
555 }
556
557 /*
558 * just maintaining status quo.
559 */
560 static void
561 iwm_fix_channel(struct iwm_softc *sc, struct mbuf *m)
562 {
563 struct ieee80211com *ic = &sc->sc_ic;
564 struct ieee80211_frame *wh;
565 uint8_t subtype;
566
567 wh = mtod(m, struct ieee80211_frame *);
568
569 if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_MGT)
570 return;
571
572 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
573
574 if (subtype != IEEE80211_FC0_SUBTYPE_BEACON &&
575 subtype != IEEE80211_FC0_SUBTYPE_PROBE_RESP)
576 return;
577
578 int chan = le32toh(sc->sc_last_phy_info.channel);
579 if (chan < __arraycount(ic->ic_channels))
580 ic->ic_curchan = &ic->ic_channels[chan];
581 }
582
583 static int
584 iwm_store_cscheme(struct iwm_softc *sc, uint8_t *data, size_t dlen)
585 {
586 struct iwm_fw_cscheme_list *l = (struct iwm_fw_cscheme_list *)data;
587
588 if (dlen < sizeof(*l) ||
589 dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
590 return EINVAL;
591
592 /* we don't actually store anything for now, always use s/w crypto */
593
594 return 0;
595 }
596
597 static int
598 iwm_firmware_store_section(struct iwm_softc *sc, enum iwm_ucode_type type,
599 uint8_t *data, size_t dlen)
600 {
601 struct iwm_fw_sects *fws;
602 struct iwm_fw_onesect *fwone;
603
604 if (type >= IWM_UCODE_TYPE_MAX)
605 return EINVAL;
606 if (dlen < sizeof(uint32_t))
607 return EINVAL;
608
609 fws = &sc->sc_fw.fw_sects[type];
610 if (fws->fw_count >= IWM_UCODE_SECT_MAX)
611 return EINVAL;
612
613 fwone = &fws->fw_sect[fws->fw_count];
614
615 /* first 32bit are device load offset */
616 memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));
617
618 /* rest is data */
619 fwone->fws_data = data + sizeof(uint32_t);
620 fwone->fws_len = dlen - sizeof(uint32_t);
621
622 /* for freeing the buffer during driver unload */
623 fwone->fws_alloc = data;
624 fwone->fws_allocsize = dlen;
625
626 fws->fw_count++;
627 fws->fw_totlen += fwone->fws_len;
628
629 return 0;
630 }
631
/*
 * On-wire layout of an IWM_UCODE_TLV_DEF_CALIB section: the ucode
 * type the default calibration applies to, followed by the
 * calibration control words (fields arrive little-endian).
 */
struct iwm_tlv_calib_data {
	uint32_t ucode_type;
	struct iwm_tlv_calib_ctrl calib;
} __packed;
636
637 static int
638 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
639 {
640 const struct iwm_tlv_calib_data *def_calib = data;
641 uint32_t ucode_type = le32toh(def_calib->ucode_type);
642
643 if (ucode_type >= IWM_UCODE_TYPE_MAX) {
644 DPRINTF(("%s: Wrong ucode_type %u for default calibration.\n",
645 DEVNAME(sc), ucode_type));
646 return EINVAL;
647 }
648
649 sc->sc_default_calib[ucode_type].flow_trigger =
650 def_calib->calib.flow_trigger;
651 sc->sc_default_calib[ucode_type].event_trigger =
652 def_calib->calib.event_trigger;
653
654 return 0;
655 }
656
657 static int
658 iwm_read_firmware(struct iwm_softc *sc)
659 {
660 struct iwm_fw_info *fw = &sc->sc_fw;
661 struct iwm_tlv_ucode_header *uhdr;
662 struct iwm_ucode_tlv tlv;
663 enum iwm_ucode_tlv_type tlv_type;
664 uint8_t *data;
665 int err, status;
666 size_t len;
667
668 if (fw->fw_status == IWM_FW_STATUS_NONE) {
669 fw->fw_status = IWM_FW_STATUS_INPROGRESS;
670 } else {
671 while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
672 tsleep(&sc->sc_fw, 0, "iwmfwp", 0);
673 }
674 status = fw->fw_status;
675
676 if (status == IWM_FW_STATUS_DONE)
677 return 0;
678
679 err = iwm_firmload(sc);
680 if (err) {
681 aprint_error_dev(sc->sc_dev,
682 "could not read firmware %s (error %d)\n",
683 sc->sc_fwname, err);
684 goto out;
685 }
686
687 sc->sc_capaflags = 0;
688 sc->sc_capa_n_scan_channels = IWM_MAX_NUM_SCAN_CHANNELS;
689 memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
690 memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));
691
692 uhdr = (void *)fw->fw_rawdata;
693 if (*(uint32_t *)fw->fw_rawdata != 0
694 || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
695 aprint_error_dev(sc->sc_dev, "invalid firmware %s\n",
696 sc->sc_fwname);
697 err = EINVAL;
698 goto out;
699 }
700
701 snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%d.%d (API ver %d)",
702 IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
703 IWM_UCODE_MINOR(le32toh(uhdr->ver)),
704 IWM_UCODE_API(le32toh(uhdr->ver)));
705 data = uhdr->data;
706 len = fw->fw_rawsize - sizeof(*uhdr);
707
708 while (len >= sizeof(tlv)) {
709 size_t tlv_len;
710 void *tlv_data;
711
712 memcpy(&tlv, data, sizeof(tlv));
713 tlv_len = le32toh(tlv.length);
714 tlv_type = le32toh(tlv.type);
715
716 len -= sizeof(tlv);
717 data += sizeof(tlv);
718 tlv_data = data;
719
720 if (len < tlv_len) {
721 aprint_error_dev(sc->sc_dev,
722 "firmware too short: %zu bytes\n", len);
723 err = EINVAL;
724 goto parse_out;
725 }
726
727 switch (tlv_type) {
728 case IWM_UCODE_TLV_PROBE_MAX_LEN:
729 if (tlv_len < sizeof(uint32_t)) {
730 err = EINVAL;
731 goto parse_out;
732 }
733 sc->sc_capa_max_probe_len
734 = le32toh(*(uint32_t *)tlv_data);
735 /* limit it to something sensible */
736 if (sc->sc_capa_max_probe_len >
737 IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
738 err = EINVAL;
739 goto parse_out;
740 }
741 break;
742 case IWM_UCODE_TLV_PAN:
743 if (tlv_len) {
744 err = EINVAL;
745 goto parse_out;
746 }
747 sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
748 break;
749 case IWM_UCODE_TLV_FLAGS:
750 if (tlv_len < sizeof(uint32_t)) {
751 err = EINVAL;
752 goto parse_out;
753 }
754 /*
755 * Apparently there can be many flags, but Linux driver
756 * parses only the first one, and so do we.
757 *
758 * XXX: why does this override IWM_UCODE_TLV_PAN?
759 * Intentional or a bug? Observations from
760 * current firmware file:
761 * 1) TLV_PAN is parsed first
762 * 2) TLV_FLAGS contains TLV_FLAGS_PAN
763 * ==> this resets TLV_PAN to itself... hnnnk
764 */
765 sc->sc_capaflags = le32toh(*(uint32_t *)tlv_data);
766 break;
767 case IWM_UCODE_TLV_CSCHEME:
768 err = iwm_store_cscheme(sc, tlv_data, tlv_len);
769 if (err)
770 goto parse_out;
771 break;
772 case IWM_UCODE_TLV_NUM_OF_CPU: {
773 uint32_t num_cpu;
774 if (tlv_len != sizeof(uint32_t)) {
775 err = EINVAL;
776 goto parse_out;
777 }
778 num_cpu = le32toh(*(uint32_t *)tlv_data);
779 if (num_cpu < 1 || num_cpu > 2) {
780 err = EINVAL;
781 goto parse_out;
782 }
783 break;
784 }
785 case IWM_UCODE_TLV_SEC_RT:
786 err = iwm_firmware_store_section(sc,
787 IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len);
788 if (err)
789 goto parse_out;
790 break;
791 case IWM_UCODE_TLV_SEC_INIT:
792 err = iwm_firmware_store_section(sc,
793 IWM_UCODE_TYPE_INIT, tlv_data, tlv_len);
794 if (err)
795 goto parse_out;
796 break;
797 case IWM_UCODE_TLV_SEC_WOWLAN:
798 err = iwm_firmware_store_section(sc,
799 IWM_UCODE_TYPE_WOW, tlv_data, tlv_len);
800 if (err)
801 goto parse_out;
802 break;
803 case IWM_UCODE_TLV_DEF_CALIB:
804 if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
805 err = EINVAL;
806 goto parse_out;
807 }
808 err = iwm_set_default_calib(sc, tlv_data);
809 if (err)
810 goto parse_out;
811 break;
812 case IWM_UCODE_TLV_PHY_SKU:
813 if (tlv_len != sizeof(uint32_t)) {
814 err = EINVAL;
815 goto parse_out;
816 }
817 sc->sc_fw_phy_config = le32toh(*(uint32_t *)tlv_data);
818 break;
819
820 case IWM_UCODE_TLV_API_CHANGES_SET: {
821 struct iwm_ucode_api *api;
822 if (tlv_len != sizeof(*api)) {
823 err = EINVAL;
824 goto parse_out;
825 }
826 api = (struct iwm_ucode_api *)tlv_data;
827 /* Flags may exceed 32 bits in future firmware. */
828 if (le32toh(api->api_index) > 0) {
829 goto parse_out;
830 }
831 sc->sc_ucode_api = le32toh(api->api_flags);
832 break;
833 }
834
835 case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
836 struct iwm_ucode_capa *capa;
837 int idx, i;
838 if (tlv_len != sizeof(*capa)) {
839 err = EINVAL;
840 goto parse_out;
841 }
842 capa = (struct iwm_ucode_capa *)tlv_data;
843 idx = le32toh(capa->api_index);
844 if (idx >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
845 goto parse_out;
846 }
847 for (i = 0; i < 32; i++) {
848 if (!ISSET(le32toh(capa->api_capa), __BIT(i)))
849 continue;
850 setbit(sc->sc_enabled_capa, i + (32 * idx));
851 }
852 break;
853 }
854
855 case IWM_UCODE_TLV_FW_UNDOCUMENTED1:
856 case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
857 case IWM_UCODE_TLV_FW_GSCAN_CAPA:
858 /* ignore, not used by current driver */
859 break;
860
861 case IWM_UCODE_TLV_SEC_RT_USNIFFER:
862 err = iwm_firmware_store_section(sc,
863 IWM_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
864 tlv_len);
865 if (err)
866 goto parse_out;
867 break;
868
869 case IWM_UCODE_TLV_N_SCAN_CHANNELS:
870 if (tlv_len != sizeof(uint32_t)) {
871 err = EINVAL;
872 goto parse_out;
873 }
874 sc->sc_capa_n_scan_channels =
875 le32toh(*(uint32_t *)tlv_data);
876 break;
877
878 case IWM_UCODE_TLV_FW_VERSION:
879 if (tlv_len != sizeof(uint32_t) * 3) {
880 err = EINVAL;
881 goto parse_out;
882 }
883 snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
884 "%d.%d.%d",
885 le32toh(((uint32_t *)tlv_data)[0]),
886 le32toh(((uint32_t *)tlv_data)[1]),
887 le32toh(((uint32_t *)tlv_data)[2]));
888 break;
889
890 default:
891 DPRINTF(("%s: unknown firmware section %d, abort\n",
892 DEVNAME(sc), tlv_type));
893 err = EINVAL;
894 goto parse_out;
895 }
896
897 len -= roundup(tlv_len, 4);
898 data += roundup(tlv_len, 4);
899 }
900
901 KASSERT(err == 0);
902
903 parse_out:
904 if (err) {
905 aprint_error_dev(sc->sc_dev,
906 "firmware parse error, section type %d\n", tlv_type);
907 }
908
909 if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
910 aprint_error_dev(sc->sc_dev,
911 "device uses unsupported power ops\n");
912 err = ENOTSUP;
913 }
914
915 out:
916 if (err)
917 fw->fw_status = IWM_FW_STATUS_NONE;
918 else
919 fw->fw_status = IWM_FW_STATUS_DONE;
920 wakeup(&sc->sc_fw);
921
922 if (err && fw->fw_rawdata != NULL) {
923 kmem_free(fw->fw_rawdata, fw->fw_rawsize);
924 fw->fw_rawdata = NULL;
925 CLR(sc->sc_flags, IWM_FLAG_FW_LOADED);
926 /* don't touch fw->fw_status */
927 memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
928 }
929 return err;
930 }
931
932 static uint32_t
933 iwm_read_prph(struct iwm_softc *sc, uint32_t addr)
934 {
935 IWM_WRITE(sc,
936 IWM_HBUS_TARG_PRPH_RADDR, ((addr & 0x000fffff) | (3 << 24)));
937 IWM_BARRIER_READ_WRITE(sc);
938 return IWM_READ(sc, IWM_HBUS_TARG_PRPH_RDAT);
939 }
940
941 static void
942 iwm_write_prph(struct iwm_softc *sc, uint32_t addr, uint32_t val)
943 {
944 IWM_WRITE(sc,
945 IWM_HBUS_TARG_PRPH_WADDR, ((addr & 0x000fffff) | (3 << 24)));
946 IWM_BARRIER_WRITE(sc);
947 IWM_WRITE(sc, IWM_HBUS_TARG_PRPH_WDAT, val);
948 }
949
950 #ifdef IWM_DEBUG
951 static int
952 iwm_read_mem(struct iwm_softc *sc, uint32_t addr, void *buf, int dwords)
953 {
954 int offs;
955 uint32_t *vals = buf;
956
957 if (iwm_nic_lock(sc)) {
958 IWM_WRITE(sc, IWM_HBUS_TARG_MEM_RADDR, addr);
959 for (offs = 0; offs < dwords; offs++)
960 vals[offs] = IWM_READ(sc, IWM_HBUS_TARG_MEM_RDAT);
961 iwm_nic_unlock(sc);
962 return 0;
963 }
964 return EBUSY;
965 }
966 #endif
967
968 static int
969 iwm_write_mem(struct iwm_softc *sc, uint32_t addr, const void *buf, int dwords)
970 {
971 int offs;
972 const uint32_t *vals = buf;
973
974 if (iwm_nic_lock(sc)) {
975 IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WADDR, addr);
976 /* WADDR auto-increments */
977 for (offs = 0; offs < dwords; offs++) {
978 uint32_t val = vals ? vals[offs] : 0;
979 IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WDAT, val);
980 }
981 iwm_nic_unlock(sc);
982 return 0;
983 }
984 return EBUSY;
985 }
986
/*
 * Write a single 32-bit word to device SRAM at `addr'.
 * Returns 0, or EBUSY if the NIC lock could not be taken.
 */
static int
iwm_write_mem32(struct iwm_softc *sc, uint32_t addr, uint32_t val)
{
	return iwm_write_mem(sc, addr, &val, 1);
}
992
/*
 * Poll CSR register `reg' until the bits selected by `mask' match
 * `bits', checking every 10us for at most `timo' microseconds.
 * Returns 1 if the condition was met, 0 on timeout.
 */
static int
iwm_poll_bit(struct iwm_softc *sc, int reg, uint32_t bits, uint32_t mask,
    int timo)
{
	const uint32_t want = bits & mask;

	for (;;) {
		if ((IWM_READ(sc, reg) & mask) == want)
			return 1;
		if (timo < 10)
			return 0;
		timo -= 10;
		DELAY(10);
	}
}
1008
/*
 * Request direct MAC access and wait for the device clocks to be
 * ready so that CSR/PRPH accesses are valid.  Returns 1 when access
 * was granted, 0 on timeout (in which case an NMI is forced into the
 * firmware via IWM_CSR_RESET).  Pair with iwm_nic_unlock().
 */
static int
iwm_nic_lock(struct iwm_softc *sc)
{
	int rv = 0;

	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Give family 8000 devices a brief moment before polling. */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
		DELAY(2);

	/* Wait for clock-ready; GOING_TO_SLEEP must be clear as well. */
	if (iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
	     | IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP, 15000)) {
		rv = 1;
	} else {
		aprint_error_dev(sc->sc_dev, "device timeout\n");
		IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_FORCE_NMI);
	}

	return rv;
}
1032
/*
 * Drop the direct MAC access request taken by iwm_nic_lock().
 */
static void
iwm_nic_unlock(struct iwm_softc *sc)
{
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}
1039
/*
 * Read-modify-write a PRPH register: keep only the bits selected by
 * `mask', then OR in `bits'.  Silently does nothing if the NIC lock
 * cannot be taken (XXX: no error path?).
 */
static void
iwm_set_bits_mask_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits,
    uint32_t mask)
{
	uint32_t newval;

	if (!iwm_nic_lock(sc))
		return;

	newval = (iwm_read_prph(sc, reg) & mask) | bits;
	iwm_write_prph(sc, reg, newval);
	iwm_nic_unlock(sc);
}
1054
/*
 * Set `bits' in PRPH register `reg', preserving all other bits.
 */
static void
iwm_set_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
{
	iwm_set_bits_mask_prph(sc, reg, bits, ~0);
}
1060
/*
 * Clear `bits' in PRPH register `reg', preserving all other bits.
 */
static void
iwm_clear_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
{
	iwm_set_bits_mask_prph(sc, reg, 0, ~bits);
}
1066
1067 static int
1068 iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
1069 bus_size_t size, bus_size_t alignment)
1070 {
1071 int nsegs, err;
1072 void *va;
1073
1074 dma->tag = tag;
1075 dma->size = size;
1076
1077 err = bus_dmamap_create(tag, size, 1, size, 0, BUS_DMA_NOWAIT,
1078 &dma->map);
1079 if (err)
1080 goto fail;
1081
1082 err = bus_dmamem_alloc(tag, size, alignment, 0, &dma->seg, 1, &nsegs,
1083 BUS_DMA_NOWAIT);
1084 if (err)
1085 goto fail;
1086
1087 err = bus_dmamem_map(tag, &dma->seg, 1, size, &va, BUS_DMA_NOWAIT);
1088 if (err)
1089 goto fail;
1090 dma->vaddr = va;
1091
1092 err = bus_dmamap_load(tag, dma->map, dma->vaddr, size, NULL,
1093 BUS_DMA_NOWAIT);
1094 if (err)
1095 goto fail;
1096
1097 memset(dma->vaddr, 0, size);
1098 bus_dmamap_sync(tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE);
1099 dma->paddr = dma->map->dm_segs[0].ds_addr;
1100
1101 return 0;
1102
1103 fail: iwm_dma_contig_free(dma);
1104 return err;
1105 }
1106
/*
 * Tear down a DMA area created by iwm_dma_contig_alloc().  Safe to
 * call on a partially initialized iwm_dma_info: each stage is guarded
 * by the pointer it set.  Teardown order matters: sync, unload,
 * unmap, free the segment, then destroy the map.
 */
static void
iwm_dma_contig_free(struct iwm_dma_info *dma)
{
	if (dma->map != NULL) {
		if (dma->vaddr != NULL) {
			bus_dmamap_sync(dma->tag, dma->map, 0, dma->size,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(dma->tag, dma->map);
			bus_dmamem_unmap(dma->tag, dma->vaddr, dma->size);
			bus_dmamem_free(dma->tag, &dma->seg, 1);
			dma->vaddr = NULL;
		}
		bus_dmamap_destroy(dma->tag, dma->map);
		dma->map = NULL;
	}
}
1123
/*
 * Allocate the RX ring: the descriptor array, the RX status area, and
 * one DMA map plus mbuf per ring slot.  On any failure the partially
 * built ring is torn down via iwm_free_rx_ring().  Returns 0 or an
 * errno.
 */
static int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	bus_size_t size;
	int i, err;

	ring->cur = 0;

	/* Allocate RX descriptors (256-byte aligned). */
	size = IWM_RX_RING_COUNT * sizeof(uint32_t);
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate RX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/* Allocate RX status area (16-byte aligned). */
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
	    sizeof(*ring->stat), 16);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate RX status DMA memory\n");
		goto fail;
	}
	ring->stat = ring->stat_dma.vaddr;

	/* One DMA map per slot, each loaded with a receive buffer. */
	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];

		memset(data, 0, sizeof(*data));
		err = bus_dmamap_create(sc->sc_dmat, IWM_RBUF_SIZE, 1,
		    IWM_RBUF_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &data->map);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could not create RX buf DMA map\n");
			goto fail;
		}

		err = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i);
		if (err)
			goto fail;
	}
	return 0;

fail:	iwm_free_rx_ring(sc, ring);
	return err;
}
1174
1175 static void
1176 iwm_disable_rx_dma(struct iwm_softc *sc)
1177 {
1178 int ntries;
1179
1180 if (iwm_nic_lock(sc)) {
1181 IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
1182 for (ntries = 0; ntries < 1000; ntries++) {
1183 if (IWM_READ(sc, IWM_FH_MEM_RSSR_RX_STATUS_REG) &
1184 IWM_FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE)
1185 break;
1186 DELAY(10);
1187 }
1188 iwm_nic_unlock(sc);
1189 }
1190 }
1191
/*
 * Reset the RX ring to its post-alloc state: rewind the read index
 * and clear the status area (synced so the device sees the zeroes).
 * The per-slot buffers are left in place.
 */
void
iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	ring->cur = 0;
	memset(ring->stat, 0, sizeof(*ring->stat));
	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
	    ring->stat_dma.size, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
1200
1201 static void
1202 iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1203 {
1204 int i;
1205
1206 iwm_dma_contig_free(&ring->desc_dma);
1207 iwm_dma_contig_free(&ring->stat_dma);
1208
1209 for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1210 struct iwm_rx_data *data = &ring->data[i];
1211
1212 if (data->m != NULL) {
1213 bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1214 data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1215 bus_dmamap_unload(sc->sc_dmat, data->map);
1216 m_freem(data->m);
1217 data->m = NULL;
1218 }
1219 if (data->map != NULL) {
1220 bus_dmamap_destroy(sc->sc_dmat, data->map);
1221 data->map = NULL;
1222 }
1223 }
1224 }
1225
/*
 * Allocate TX ring `qid': the TFD descriptor array for every ring,
 * and — for rings up to the command queue — the device-command array
 * plus one DMA map per slot.  Precomputes each slot's command and
 * scratch-area physical addresses.  Returns 0 or an errno; partial
 * allocations are released via iwm_free_tx_ring().
 */
static int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	int i, err;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate TX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/*
	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
	 * to allocate commands space for other rings.
	 */
	if (qid > IWM_CMD_QUEUE)
		return 0;

	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate TX cmd DMA memory\n");
		goto fail;
	}
	ring->cmd = ring->cmd_dma.vaddr;

	/* Walk the command array, recording per-slot physical addresses. */
	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];
		size_t mapsize;

		data->cmd_paddr = paddr;
		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
		    + offsetof(struct iwm_tx_cmd, scratch);
		paddr += sizeof(struct iwm_device_cmd);

		/* FW commands may require more mapped space than packets. */
		if (qid == IWM_CMD_QUEUE)
			mapsize = (sizeof(struct iwm_cmd_header) +
			    IWM_MAX_CMD_PAYLOAD_SIZE);
		else
			mapsize = MCLBYTES;
		err = bus_dmamap_create(sc->sc_dmat, mapsize,
		    IWM_NUM_OF_TBS - 2, mapsize, 0, BUS_DMA_NOWAIT, &data->map);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could not create TX buf DMA map\n");
			goto fail;
		}
	}
	/* Sanity: the walk must end exactly at the end of the array. */
	KASSERT(paddr == ring->cmd_dma.paddr + size);
	return 0;

fail:	iwm_free_tx_ring(sc, ring);
	return err;
}
1293
1294 static void
1295 iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1296 {
1297 int i;
1298
1299 for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1300 struct iwm_tx_data *data = &ring->data[i];
1301
1302 if (data->m != NULL) {
1303 bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1304 data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1305 bus_dmamap_unload(sc->sc_dmat, data->map);
1306 m_freem(data->m);
1307 data->m = NULL;
1308 }
1309 }
1310 /* Clear TX descriptors. */
1311 memset(ring->desc, 0, ring->desc_dma.size);
1312 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0,
1313 ring->desc_dma.size, BUS_DMASYNC_PREWRITE);
1314 sc->qfullmsk &= ~(1 << ring->qid);
1315 ring->queued = 0;
1316 ring->cur = 0;
1317 }
1318
1319 static void
1320 iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1321 {
1322 int i;
1323
1324 iwm_dma_contig_free(&ring->desc_dma);
1325 iwm_dma_contig_free(&ring->cmd_dma);
1326
1327 for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1328 struct iwm_tx_data *data = &ring->data[i];
1329
1330 if (data->m != NULL) {
1331 bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1332 data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1333 bus_dmamap_unload(sc->sc_dmat, data->map);
1334 m_freem(data->m);
1335 }
1336 if (data->map != NULL) {
1337 bus_dmamap_destroy(sc->sc_dmat, data->map);
1338 data->map = NULL;
1339 }
1340 }
1341 }
1342
/*
 * Mask all interrupts except RF-kill, so switch changes are still
 * seen while the device is otherwise quiesced.
 */
static void
iwm_enable_rfkill_int(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INT_BIT_RF_KILL;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1349
1350 static int
1351 iwm_check_rfkill(struct iwm_softc *sc)
1352 {
1353 uint32_t v;
1354 int s;
1355 int rv;
1356
1357 s = splnet();
1358
1359 /*
1360 * "documentation" is not really helpful here:
1361 * 27: HW_RF_KILL_SW
1362 * Indicates state of (platform's) hardware RF-Kill switch
1363 *
1364 * But apparently when it's off, it's on ...
1365 */
1366 v = IWM_READ(sc, IWM_CSR_GP_CNTRL);
1367 rv = (v & IWM_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) == 0;
1368 if (rv) {
1369 sc->sc_flags |= IWM_FLAG_RFKILL;
1370 } else {
1371 sc->sc_flags &= ~IWM_FLAG_RFKILL;
1372 }
1373
1374 splx(s);
1375 return rv;
1376 }
1377
/*
 * Enable the default interrupt set and remember it in sc_intmask for
 * later restore.
 */
static void
iwm_enable_interrupts(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1384
/*
 * Re-apply the last interrupt mask recorded in sc_intmask.
 */
static void
iwm_restore_interrupts(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1390
/*
 * Mask all device interrupts and acknowledge anything pending (both
 * the main and the flow-handler status registers).  Runs at splnet to
 * avoid racing the interrupt handler.
 */
static void
iwm_disable_interrupts(struct iwm_softc *sc)
{
	int s = splnet();

	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	/* acknowledge all interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);

	splx(s);
}
1404
/*
 * Reset the interrupt cause table (ICT): clear and resync the ICT
 * DMA area, point the device at it, switch the driver into ICT mode,
 * and re-enable interrupts.  Interrupts are disabled for the
 * duration of the reset.
 */
static void
iwm_ict_reset(struct iwm_softc *sc)
{
	iwm_disable_interrupts(sc);

	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
	bus_dmamap_sync(sc->sc_dmat, sc->ict_dma.map, 0, IWM_ICT_SIZE,
	    BUS_DMASYNC_PREWRITE);
	sc->ict_cur = 0;

	/* Set physical address of ICT (4KB aligned). */
	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
	    IWM_CSR_DRAM_INT_TBL_ENABLE
	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWM_FLAG_USE_ICT;

	/* Acknowledge stale causes before unmasking. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);
}
1428
1429 #define IWM_HW_READY_TIMEOUT 50
1430 static int
1431 iwm_set_hw_ready(struct iwm_softc *sc)
1432 {
1433 int ready;
1434
1435 IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1436 IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
1437
1438 ready = iwm_poll_bit(sc, IWM_CSR_HW_IF_CONFIG_REG,
1439 IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
1440 IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
1441 IWM_HW_READY_TIMEOUT);
1442 if (ready)
1443 IWM_SETBITS(sc, IWM_CSR_MBOX_SET_REG,
1444 IWM_CSR_MBOX_SET_REG_OS_ALIVE);
1445
1446 return ready;
1447 }
1448 #undef IWM_HW_READY_TIMEOUT
1449
1450 static int
1451 iwm_prepare_card_hw(struct iwm_softc *sc)
1452 {
1453 int t = 0;
1454
1455 if (iwm_set_hw_ready(sc))
1456 return 0;
1457
1458 DELAY(100);
1459
1460 /* If HW is not ready, prepare the conditions to check again */
1461 IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1462 IWM_CSR_HW_IF_CONFIG_REG_PREPARE);
1463
1464 do {
1465 if (iwm_set_hw_ready(sc))
1466 return 0;
1467 DELAY(200);
1468 t += 200;
1469 } while (t < 150000);
1470
1471 return ETIMEDOUT;
1472 }
1473
1474 static void
1475 iwm_apm_config(struct iwm_softc *sc)
1476 {
1477 pcireg_t reg;
1478
1479 reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
1480 sc->sc_cap_off + PCIE_LCSR);
1481 if (reg & PCIE_LCSR_ASPM_L1) {
1482 /* Um the Linux driver prints "Disabling L0S for this one ... */
1483 IWM_SETBITS(sc, IWM_CSR_GIO_REG,
1484 IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
1485 } else {
1486 /* ... and "Enabling" here */
1487 IWM_CLRBITS(sc, IWM_CSR_GIO_REG,
1488 IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
1489 }
1490 }
1491
1492 /*
1493 * Start up NIC's basic functionality after it has been reset
1494 * e.g. after platform boot or shutdown.
1495 * NOTE: This does not load uCode nor start the embedded processor
1496 */
/*
 * Power up the APM and wait for the device clocks to stabilize.
 * The individual register pokes below are an order-sensitive
 * hardware bring-up sequence taken from the reference driver.
 * Returns 0 on success or ETIMEDOUT.
 */
static int
iwm_apm_init(struct iwm_softc *sc)
{
	int err = 0;

	/* Disable L0S exit timer (platform NMI workaround) */
	if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
		IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
		    IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/*
	 * Disable L0s without affecting L1;
	 * don't wait for ICH L0s (ICH bug W/A)
	 */
	IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
	    IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	IWM_SETBITS(sc, IWM_CSR_DBG_HPET_MEM_REG, IWM_CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 */
	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	iwm_apm_config(sc);

#if 0 /* not for 7k/8k */
	/* Configure analog phase-lock-loop before activating to D0A */
	if (trans->cfg->base_params->pll_cfg_val)
		IWM_SETBITS(trans, IWM_CSR_ANA_PLL_CFG,
		    trans->cfg->base_params->pll_cfg_val);
#endif

	/*
	 * Set "initialization complete" bit to move adapter from
	 * D0U* --> D0A* (powered-up active) state.
	 */
	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL, IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * Wait for clock stabilization; once stabilized, access to
	 * device-internal resources is supported, e.g. iwm_write_prph()
	 * and accesses to uCode SRAM.
	 */
	if (!iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
		aprint_error_dev(sc->sc_dev,
		    "timeout waiting for clock stabilization\n");
		err = ETIMEDOUT;
		goto out;
	}

	if (sc->host_interrupt_operation_mode) {
		/*
		 * This is a bit of an abuse - This is needed for 7260 / 3160
		 * only check host_interrupt_operation_mode even if this is
		 * not related to host_interrupt_operation_mode.
		 *
		 * Enable the oscillator to count wake up time for L1 exit. This
		 * consumes slightly more power (100uA) - but allows to be sure
		 * that we wake up from L1 on time.
		 *
		 * This looks weird: read twice the same register, discard the
		 * value, set a bit, and yet again, read that same register
		 * just to discard the value. But that's the way the hardware
		 * seems to like it.
		 */
		iwm_read_prph(sc, IWM_OSC_CLK);
		iwm_read_prph(sc, IWM_OSC_CLK);
		iwm_set_bits_prph(sc, IWM_OSC_CLK, IWM_OSC_CLK_FORCE_CONTROL);
		iwm_read_prph(sc, IWM_OSC_CLK);
		iwm_read_prph(sc, IWM_OSC_CLK);
	}

	/*
	 * Enable DMA clock and wait for it to stabilize.
	 *
	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
	 * do not disable clocks. This preserves any hardware bits already
	 * set by default in "CLK_CTRL_REG" after reset.
	 */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
		iwm_write_prph(sc, IWM_APMG_CLK_EN_REG,
		    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
		DELAY(20);

		/* Disable L1-Active */
		iwm_set_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

		/* Clear the interrupt in APMG if the NIC is in RFKILL */
		iwm_write_prph(sc, IWM_APMG_RTC_INT_STT_REG,
		    IWM_APMG_RTC_INT_STT_RFKILL);
	}
 out:
	if (err)
		aprint_error_dev(sc->sc_dev, "apm init error %d\n", err);
	return err;
}
1600
/*
 * Stop the device's bus-master DMA activity and wait (up to 100us)
 * for the hardware to confirm the master is disabled.
 */
static void
iwm_apm_stop(struct iwm_softc *sc)
{
	/* stop device's busmaster DMA activity */
	IWM_SETBITS(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_STOP_MASTER);

	if (!iwm_poll_bit(sc, IWM_CSR_RESET,
	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED,
	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100))
		aprint_error_dev(sc->sc_dev, "timeout waiting for master\n");
	DPRINTF(("iwm apm stop\n"));
}
1613
/*
 * Basic hardware bring-up: make the card ready, soft-reset the whole
 * device, power up the APM, then arm the RF-kill interrupt and sample
 * the current switch state.  Returns 0 or an errno from a sub-step.
 */
static int
iwm_start_hw(struct iwm_softc *sc)
{
	int err;

	err = iwm_prepare_card_hw(sc);
	if (err)
		return err;

	/* Reset the entire device */
	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
	DELAY(10);

	err = iwm_apm_init(sc);
	if (err)
		return err;

	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);

	return 0;
}
1636
/*
 * Full device shutdown: mask interrupts, stop the TX scheduler and
 * all DMA channels, drain the rings, power down the DMA clocks, stop
 * the APM, and reset the on-board processor.  The RF-kill interrupt
 * is re-armed at the end so switch changes are still noticed.
 */
static void
iwm_stop_device(struct iwm_softc *sc)
{
	int chnl, ntries;
	int qid;

	iwm_disable_interrupts(sc);
	sc->sc_flags &= ~IWM_FLAG_USE_ICT;

	/* Deactivate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Stop all DMA channels. */
	if (iwm_nic_lock(sc)) {
		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
			IWM_WRITE(sc,
			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
			/* Wait up to ~4ms for the channel to go idle. */
			for (ntries = 0; ntries < 200; ntries++) {
				uint32_t r;

				r = IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG);
				if (r & IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(
				    chnl))
					break;
				DELAY(20);
			}
		}
		iwm_nic_unlock(sc);
	}
	iwm_disable_rx_dma(sc);

	iwm_reset_rx_ring(sc, &sc->rxq);

	for (qid = 0; qid < __arraycount(sc->txq); qid++)
		iwm_reset_tx_ring(sc, &sc->txq[qid]);

	/*
	 * Power-down device's busmaster DMA clocks
	 */
	iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG, IWM_APMG_CLK_VAL_DMA_CLK_RQT);
	DELAY(5);

	/* Make sure (redundant) we've released our request to stay awake */
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwm_apm_stop(sc);

	/*
	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * Clean again the interrupt here
	 */
	iwm_disable_interrupts(sc);

	/* Reset the on-board processor. */
	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);

	/* Even though we stop the HW we still want the RF kill interrupt. */
	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);
}
1699
/*
 * Program the hardware interface configuration register from the MAC
 * revision bits and the radio type/step/dash values extracted from
 * the firmware's PHY configuration (sc_fw_phy_config).
 */
static void
iwm_nic_config(struct iwm_softc *sc)
{
	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
	uint32_t reg_val = 0;

	/* Unpack the three radio configuration fields from the FW PHY cfg. */
	radio_cfg_type = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
	radio_cfg_step = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
	radio_cfg_dash = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
	    IWM_FW_PHY_CFG_RADIO_DASH_POS;

	/* MAC revision (step/dash) from the hardware revision register. */
	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

	/* radio configuration */
	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);

	DPRINTF(("Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
	    radio_cfg_step, radio_cfg_dash));

	/*
	 * W/A : NIC is stuck in a reset state after Early PCIe power off
	 * (PCIe power is lost before PERST# is asserted), causing ME FW
	 * to lose ownership and not being able to obtain it back.
	 */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
		    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
		    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
}
1738
/*
 * Program the RX DMA engine: clear the status area, point the device
 * at the descriptor ring and status area, and enable the channel with
 * 4KB buffers.  Returns 0, or EBUSY if the NIC lock is unavailable.
 */
static int
iwm_nic_rx_init(struct iwm_softc *sc)
{
	if (!iwm_nic_lock(sc))
		return EBUSY;

	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
	    0, sc->rxq.stat_dma.size,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Quiesce and zero the channel's pointers before re-enabling. */
	iwm_disable_rx_dma(sc);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Set physical address of RX ring (256-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);

	/* Set physical address of RX status (16-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);

	/* Enable RX. */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |	/* HW bug */
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
	    (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);

	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (sc->host_interrupt_operation_mode)
		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);

	/*
	 * This value should initially be 0 (before preparing any RBs),
	 * and should be 8 after preparing the first 8 RBs (for example).
	 */
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);

	iwm_nic_unlock(sc);

	return 0;
}
1790
/*
 * Program the TX side: deactivate the scheduler, set the "keep warm"
 * page address, point every hardware queue at its descriptor ring,
 * and enable auto-active mode in the scheduler.  Returns 0, or EBUSY
 * if the NIC lock is unavailable.
 */
static int
iwm_nic_tx_init(struct iwm_softc *sc)
{
	int qid;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Deactivate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Set physical address of "keep warm" page (16-byte aligned). */
	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);

	for (qid = 0; qid < __arraycount(sc->txq); qid++) {
		struct iwm_tx_ring *txq = &sc->txq[qid];

		/* Set physical address of TX ring (256-byte aligned). */
		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
		    txq->desc_dma.paddr >> 8);
		DPRINTF(("loading ring %d descriptors (%p) at %"PRIxMAX"\n",
		    qid, txq->desc, (uintmax_t)(txq->desc_dma.paddr >> 8)));
	}

	iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);

	iwm_nic_unlock(sc);

	return 0;
}
1821
1822 static int
1823 iwm_nic_init(struct iwm_softc *sc)
1824 {
1825 int err;
1826
1827 iwm_apm_init(sc);
1828 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
1829 iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
1830 IWM_APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
1831 ~IWM_APMG_PS_CTRL_MSK_PWR_SRC);
1832
1833 iwm_nic_config(sc);
1834
1835 err = iwm_nic_rx_init(sc);
1836 if (err)
1837 return err;
1838
1839 err = iwm_nic_tx_init(sc);
1840 if (err)
1841 return err;
1842
1843 DPRINTF(("shadow registers enabled\n"));
1844 IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
1845
1846 return 0;
1847 }
1848
/*
 * Access-category index -> hardware TX FIFO mapping.
 * NOTE(review): the ordering here (VO first) does not match
 * net80211's WME_AC_* numbering (where BE is 0) — confirm the index
 * domain against the call sites before relying on it.
 */
static const uint8_t iwm_ac_to_tx_fifo[] = {
	IWM_TX_FIFO_VO,
	IWM_TX_FIFO_VI,
	IWM_TX_FIFO_BE,
	IWM_TX_FIFO_BK,
};
1855
/*
 * Activate hardware TX queue `qid' and bind it to scheduler FIFO
 * `fifo'.  The command queue is configured directly via PRPH/SRAM
 * writes; other queues are configured by sending an
 * IWM_SCD_QUEUE_CFG command to the firmware.  Returns 0, EBUSY if
 * the NIC lock is unavailable, or the firmware command's error.
 */
static int
iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
{
	if (!iwm_nic_lock(sc)) {
		DPRINTF(("%s: cannot enable txq %d\n", DEVNAME(sc), qid));
		return EBUSY;
	}

	/* Reset the queue's write pointer. */
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);

	if (qid == IWM_CMD_QUEUE) {
		/* Deactivate the queue while it is being reconfigured. */
		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
		    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
		    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));

		iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));

		iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);

		iwm_write_mem32(sc,
		    sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);

		/* Set scheduler window size and frame limit. */
		iwm_write_mem32(sc,
		    sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
		    sizeof(uint32_t),
		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
		    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
		    ((IWM_FRAME_LIMIT
		        << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
		    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

		/* Reactivate the queue, bound to the requested FIFO. */
		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
		    IWM_SCD_QUEUE_STTS_REG_MSK);
	} else {
		struct iwm_scd_txq_cfg_cmd cmd;
		int err;

		/* The firmware command path must not hold the NIC lock. */
		iwm_nic_unlock(sc);

		memset(&cmd, 0, sizeof(cmd));
		cmd.scd_queue = qid;
		cmd.enable = 1;
		cmd.sta_id = sta_id;
		cmd.tx_fifo = fifo;
		cmd.aggregate = 0;
		cmd.window = IWM_FRAME_LIMIT;

		err = iwm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, 0, sizeof(cmd),
		    &cmd);
		if (err)
			return err;

		if (!iwm_nic_lock(sc))
			return EBUSY;
	}

	/*
	 * NOTE(review): this ORs the raw qid into IWM_SCD_EN_CTRL rather
	 * than (1 << qid); looks suspicious for a per-queue enable bitmap —
	 * confirm against the reference iwlwifi driver before changing.
	 */
	iwm_write_prph(sc, IWM_SCD_EN_CTRL,
	    iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);

	iwm_nic_unlock(sc);

	DPRINTF(("enabled txq %d FIFO %d\n", qid, fifo));

	return 0;
}
1925
/*
 * Finish bring-up after the firmware's "alive" notification: verify
 * the scheduler SRAM base, reset the ICT table, clear the scheduler
 * context in SRAM, enable the command queue and all TX DMA channels,
 * and re-enable L1-Active where applicable.  Returns 0 or an errno.
 */
static int
iwm_post_alive(struct iwm_softc *sc)
{
	int nwords;
	int err, chnl;
	uint32_t base;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Firmware's idea of the scheduler base must match ours. */
	base = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
	if (sc->sched_base != base) {
		DPRINTF(("%s: sched addr mismatch: 0x%08x != 0x%08x\n",
		    DEVNAME(sc), sc->sched_base, base));
		err = EINVAL;
		goto out;
	}

	iwm_ict_reset(sc);

	/* Clear TX scheduler state in SRAM. */
	nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND)
	    / sizeof(uint32_t);
	err = iwm_write_mem(sc,
	    sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
	    NULL, nwords);
	if (err)
		goto out;

	/* Set physical address of TX scheduler rings (1KB aligned). */
	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);

	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);

	/* iwm_enable_txq() manages the NIC lock itself. */
	iwm_nic_unlock(sc);

	/* enable command channel */
	err = iwm_enable_txq(sc, 0 /* unused */, IWM_CMD_QUEUE, 7);
	if (err)
		return err;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Activate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);

	/* Enable DMA channels. */
	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
	}

	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Enable L1-Active */
	if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
		iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

 out:
	iwm_nic_unlock(sc);
	return err;
}
1993
1994 static struct iwm_phy_db_entry *
1995 iwm_phy_db_get_section(struct iwm_softc *sc, enum iwm_phy_db_section_type type,
1996 uint16_t chg_id)
1997 {
1998 struct iwm_phy_db *phy_db = &sc->sc_phy_db;
1999
2000 if (type >= IWM_PHY_DB_MAX)
2001 return NULL;
2002
2003 switch (type) {
2004 case IWM_PHY_DB_CFG:
2005 return &phy_db->cfg;
2006 case IWM_PHY_DB_CALIB_NCH:
2007 return &phy_db->calib_nch;
2008 case IWM_PHY_DB_CALIB_CHG_PAPD:
2009 if (chg_id >= IWM_NUM_PAPD_CH_GROUPS)
2010 return NULL;
2011 return &phy_db->calib_ch_group_papd[chg_id];
2012 case IWM_PHY_DB_CALIB_CHG_TXP:
2013 if (chg_id >= IWM_NUM_TXP_CH_GROUPS)
2014 return NULL;
2015 return &phy_db->calib_ch_group_txp[chg_id];
2016 default:
2017 return NULL;
2018 }
2019 return NULL;
2020 }
2021
2022 static int
2023 iwm_phy_db_set_section(struct iwm_softc *sc,
2024 struct iwm_calib_res_notif_phy_db *phy_db_notif, uint16_t size)
2025 {
2026 struct iwm_phy_db_entry *entry;
2027 enum iwm_phy_db_section_type type = le16toh(phy_db_notif->type);
2028 uint16_t chg_id = 0;
2029
2030 if (type == IWM_PHY_DB_CALIB_CHG_PAPD ||
2031 type == IWM_PHY_DB_CALIB_CHG_TXP)
2032 chg_id = le16toh(*(uint16_t *)phy_db_notif->data);
2033
2034 entry = iwm_phy_db_get_section(sc, type, chg_id);
2035 if (!entry)
2036 return EINVAL;
2037
2038 if (entry->data)
2039 kmem_intr_free(entry->data, entry->size);
2040 entry->data = kmem_intr_alloc(size, KM_NOSLEEP);
2041 if (!entry->data) {
2042 entry->size = 0;
2043 return ENOMEM;
2044 }
2045 memcpy(entry->data, phy_db_notif->data, size);
2046 entry->size = size;
2047
2048 DPRINTFN(10, ("%s(%d): [PHYDB]SET: Type %d, Size: %d, data: %p\n",
2049 __func__, __LINE__, type, size, entry->data));
2050
2051 return 0;
2052 }
2053
/*
 * Return non-zero if ch_id is a channel number the phy DB knows about.
 * (Note: 0 also passes the first test, matching the original table.)
 */
static int
iwm_is_valid_channel(uint16_t ch_id)
{
	/* 2GHz band: channels 1-14. */
	if (ch_id <= 14)
		return 1;
	/* 5GHz: 36-64 and 100-140 in steps of 4, 145-165 at 4n+1. */
	if (ch_id >= 36 && ch_id <= 64)
		return ch_id % 4 == 0;
	if (ch_id >= 100 && ch_id <= 140)
		return ch_id % 4 == 0;
	if (ch_id >= 145 && ch_id <= 165)
		return ch_id % 4 == 1;
	return 0;
}

/*
 * Map a channel number onto its index in the phy DB channel table,
 * or 0xff for an invalid channel.
 */
static uint8_t
iwm_ch_id_to_ch_index(uint16_t ch_id)
{
	uint16_t idx;

	if (!iwm_is_valid_channel(ch_id))
		return 0xff;

	if (ch_id <= 14)
		idx = ch_id - 1;	/* 1..14   -> 0..13  */
	else if (ch_id <= 64)
		idx = (ch_id + 20) / 4;	/* 36..64  -> 14..21 */
	else if (ch_id <= 140)
		idx = (ch_id - 12) / 4;	/* 100..140 -> 22..32 */
	else
		idx = (ch_id - 13) / 4;	/* 145..165 -> 33..38 */
	return idx;
}


/*
 * Map a channel number onto its PAPD calibration channel group,
 * or 0xff for an invalid channel.
 */
static uint16_t
iwm_channel_id_to_papd(uint16_t ch_id)
{
	uint16_t group;

	if (!iwm_is_valid_channel(ch_id))
		return 0xff;

	if (ch_id >= 1 && ch_id <= 14)
		group = 0;
	else if (ch_id >= 36 && ch_id <= 64)
		group = 1;
	else if (ch_id >= 100 && ch_id <= 140)
		group = 2;
	else
		group = 3;
	return group;
}
2095
2096 static uint16_t
2097 iwm_channel_id_to_txp(struct iwm_softc *sc, uint16_t ch_id)
2098 {
2099 struct iwm_phy_db *phy_db = &sc->sc_phy_db;
2100 struct iwm_phy_db_chg_txp *txp_chg;
2101 int i;
2102 uint8_t ch_index = iwm_ch_id_to_ch_index(ch_id);
2103
2104 if (ch_index == 0xff)
2105 return 0xff;
2106
2107 for (i = 0; i < IWM_NUM_TXP_CH_GROUPS; i++) {
2108 txp_chg = (void *)phy_db->calib_ch_group_txp[i].data;
2109 if (!txp_chg)
2110 return 0xff;
2111 /*
2112 * Looking for the first channel group the max channel
2113 * of which is higher than the requested channel.
2114 */
2115 if (le16toh(txp_chg->max_channel_idx) >= ch_index)
2116 return i;
2117 }
2118 return 0xff;
2119 }
2120
2121 static int
2122 iwm_phy_db_get_section_data(struct iwm_softc *sc, uint32_t type, uint8_t **data,
2123 uint16_t *size, uint16_t ch_id)
2124 {
2125 struct iwm_phy_db_entry *entry;
2126 uint16_t ch_group_id = 0;
2127
2128 if (type == IWM_PHY_DB_CALIB_CHG_PAPD)
2129 ch_group_id = iwm_channel_id_to_papd(ch_id);
2130 else if (type == IWM_PHY_DB_CALIB_CHG_TXP)
2131 ch_group_id = iwm_channel_id_to_txp(sc, ch_id);
2132
2133 entry = iwm_phy_db_get_section(sc, type, ch_group_id);
2134 if (!entry)
2135 return EINVAL;
2136
2137 *data = entry->data;
2138 *size = entry->size;
2139
2140 DPRINTFN(10, ("%s(%d): [PHYDB] GET: Type %d , Size: %d\n",
2141 __func__, __LINE__, type, *size));
2142
2143 return 0;
2144 }
2145
2146 static int
2147 iwm_send_phy_db_cmd(struct iwm_softc *sc, uint16_t type, uint16_t length,
2148 void *data)
2149 {
2150 struct iwm_phy_db_cmd phy_db_cmd;
2151 struct iwm_host_cmd cmd = {
2152 .id = IWM_PHY_DB_CMD,
2153 .flags = IWM_CMD_ASYNC,
2154 };
2155
2156 DPRINTFN(10, ("Sending PHY-DB hcmd of type %d, of length %d\n",
2157 type, length));
2158
2159 phy_db_cmd.type = le16toh(type);
2160 phy_db_cmd.length = le16toh(length);
2161
2162 cmd.data[0] = &phy_db_cmd;
2163 cmd.len[0] = sizeof(struct iwm_phy_db_cmd);
2164 cmd.data[1] = data;
2165 cmd.len[1] = length;
2166
2167 return iwm_send_cmd(sc, &cmd);
2168 }
2169
2170 static int
2171 iwm_phy_db_send_all_channel_groups(struct iwm_softc *sc,
2172 enum iwm_phy_db_section_type type, uint8_t max_ch_groups)
2173 {
2174 uint16_t i;
2175 int err;
2176 struct iwm_phy_db_entry *entry;
2177
2178 /* Send all the channel-specific groups to operational fw */
2179 for (i = 0; i < max_ch_groups; i++) {
2180 entry = iwm_phy_db_get_section(sc, type, i);
2181 if (!entry)
2182 return EINVAL;
2183
2184 if (!entry->size)
2185 continue;
2186
2187 err = iwm_send_phy_db_cmd(sc, type, entry->size, entry->data);
2188 if (err) {
2189 DPRINTF(("%s: Can't SEND phy_db section %d (%d), "
2190 "err %d\n", DEVNAME(sc), type, i, err));
2191 return err;
2192 }
2193
2194 DPRINTFN(10, ("%s: Sent PHY_DB HCMD, type = %d num = %d\n",
2195 DEVNAME(sc), type, i));
2196
2197 DELAY(1000);
2198 }
2199
2200 return 0;
2201 }
2202
2203 static int
2204 iwm_send_phy_db_data(struct iwm_softc *sc)
2205 {
2206 uint8_t *data = NULL;
2207 uint16_t size = 0;
2208 int err;
2209
2210 err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CFG, &data, &size, 0);
2211 if (err)
2212 return err;
2213
2214 err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CFG, size, data);
2215 if (err)
2216 return err;
2217
2218 err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CALIB_NCH,
2219 &data, &size, 0);
2220 if (err)
2221 return err;
2222
2223 err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CALIB_NCH, size, data);
2224 if (err)
2225 return err;
2226
2227 err = iwm_phy_db_send_all_channel_groups(sc,
2228 IWM_PHY_DB_CALIB_CHG_PAPD, IWM_NUM_PAPD_CH_GROUPS);
2229 if (err)
2230 return err;
2231
2232 err = iwm_phy_db_send_all_channel_groups(sc,
2233 IWM_PHY_DB_CALIB_CHG_TXP, IWM_NUM_TXP_CH_GROUPS);
2234 if (err)
2235 return err;
2236
2237 return 0;
2238 }
2239
2240 /*
2241 * For the high priority TE use a time event type that has similar priority to
2242 * the FW's action scan priority.
2243 */
2244 #define IWM_ROC_TE_TYPE_NORMAL IWM_TE_P2P_DEVICE_DISCOVERABLE
2245 #define IWM_ROC_TE_TYPE_MGMT_TX IWM_TE_P2P_CLIENT_ASSOC
2246
2247 /* used to convert from time event API v2 to v1 */
2248 #define IWM_TE_V2_DEP_POLICY_MSK (IWM_TE_V2_DEP_OTHER | IWM_TE_V2_DEP_TSF |\
2249 IWM_TE_V2_EVENT_SOCIOPATHIC)
2250 static inline uint16_t
2251 iwm_te_v2_get_notify(uint16_t policy)
2252 {
2253 return le16toh(policy) & IWM_TE_V2_NOTIF_MSK;
2254 }
2255
2256 static inline uint16_t
2257 iwm_te_v2_get_dep_policy(uint16_t policy)
2258 {
2259 return (le16toh(policy) & IWM_TE_V2_DEP_POLICY_MSK) >>
2260 IWM_TE_V2_PLACEMENT_POS;
2261 }
2262
2263 static inline uint16_t
2264 iwm_te_v2_get_absence(uint16_t policy)
2265 {
2266 return (le16toh(policy) & IWM_TE_V2_ABSENCE) >> IWM_TE_V2_ABSENCE_POS;
2267 }
2268
/*
 * Convert a version-2 time event command to the version-1 layout for
 * firmware that only understands the v1 API.  Fixed-size fields map
 * one to one; the packed v2 policy word is unpacked into the separate
 * v1 dep_policy/is_present/notify fields.
 */
static void
iwm_te_v2_to_v1(const struct iwm_time_event_cmd_v2 *cmd_v2,
    struct iwm_time_event_cmd_v1 *cmd_v1)
{
	cmd_v1->id_and_color = cmd_v2->id_and_color;
	cmd_v1->action = cmd_v2->action;
	cmd_v1->id = cmd_v2->id;
	cmd_v1->apply_time = cmd_v2->apply_time;
	cmd_v1->max_delay = cmd_v2->max_delay;
	cmd_v1->depends_on = cmd_v2->depends_on;
	cmd_v1->interval = cmd_v2->interval;
	cmd_v1->duration = cmd_v2->duration;
	/* v1 uses a different magic value for "repeat forever". */
	if (cmd_v2->repeat == IWM_TE_V2_REPEAT_ENDLESS)
		cmd_v1->repeat = htole32(IWM_TE_V1_REPEAT_ENDLESS);
	else
		cmd_v1->repeat = htole32(cmd_v2->repeat);
	cmd_v1->max_frags = htole32(cmd_v2->max_frags);
	cmd_v1->interval_reciprocal = 0; /* unused */

	/* Unpack the v2 policy bits into the separate v1 fields. */
	cmd_v1->dep_policy = htole32(iwm_te_v2_get_dep_policy(cmd_v2->policy));
	cmd_v1->is_present = htole32(!iwm_te_v2_get_absence(cmd_v2->policy));
	cmd_v1->notify = htole32(iwm_te_v2_get_notify(cmd_v2->policy));
}
2292
2293 static int
2294 iwm_send_time_event_cmd(struct iwm_softc *sc,
2295 const struct iwm_time_event_cmd_v2 *cmd)
2296 {
2297 struct iwm_time_event_cmd_v1 cmd_v1;
2298
2299 if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_TIME_EVENT_API_V2)
2300 return iwm_send_cmd_pdu(sc, IWM_TIME_EVENT_CMD, 0, sizeof(*cmd),
2301 cmd);
2302
2303 iwm_te_v2_to_v1(cmd, &cmd_v1);
2304 return iwm_send_cmd_pdu(sc, IWM_TIME_EVENT_CMD, 0, sizeof(cmd_v1),
2305 &cmd_v1);
2306 }
2307
2308 static void
2309 iwm_protect_session(struct iwm_softc *sc, struct iwm_node *in,
2310 uint32_t duration, uint32_t max_delay)
2311 {
2312 struct iwm_time_event_cmd_v2 time_cmd;
2313
2314 memset(&time_cmd, 0, sizeof(time_cmd));
2315
2316 time_cmd.action = htole32(IWM_FW_CTXT_ACTION_ADD);
2317 time_cmd.id_and_color =
2318 htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
2319 time_cmd.id = htole32(IWM_TE_BSS_STA_AGGRESSIVE_ASSOC);
2320
2321 time_cmd.apply_time = htole32(0);
2322
2323 time_cmd.max_frags = IWM_TE_V2_FRAG_NONE;
2324 time_cmd.max_delay = htole32(max_delay);
2325 /* TODO: why do we need to interval = bi if it is not periodic? */
2326 time_cmd.interval = htole32(1);
2327 time_cmd.duration = htole32(duration);
2328 time_cmd.repeat = 1;
2329 time_cmd.policy
2330 = htole16(IWM_TE_V2_NOTIF_HOST_EVENT_START |
2331 IWM_TE_V2_NOTIF_HOST_EVENT_END |
2332 IWM_T2_V2_START_IMMEDIATELY);
2333
2334 iwm_send_time_event_cmd(sc, &time_cmd);
2335 }
2336
2337 /*
2338 * NVM read access and content parsing. We do not support
2339 * external NVM or writing NVM.
2340 */
2341
/* List of NVM sections we are allowed/need to read. */
static const int iwm_nvm_to_read[] = {
	IWM_NVM_SECTION_TYPE_HW,
	IWM_NVM_SECTION_TYPE_SW,
	IWM_NVM_SECTION_TYPE_REGULATORY,
	IWM_NVM_SECTION_TYPE_CALIBRATION,
	IWM_NVM_SECTION_TYPE_PRODUCTION,
	IWM_NVM_SECTION_TYPE_HW_8000,
	IWM_NVM_SECTION_TYPE_MAC_OVERRIDE,
	IWM_NVM_SECTION_TYPE_PHY_SKU,
};

/* Default NVM chunk size to request per read command. */
#define IWM_NVM_DEFAULT_CHUNK_SIZE (2*1024)
/* Upper bound on the size of a single NVM section. */
#define IWM_MAX_NVM_SECTION_SIZE 8192

/* op_code values for struct iwm_nvm_access_cmd. */
#define IWM_NVM_WRITE_OPCODE 1
#define IWM_NVM_READ_OPCODE 0
2360
2361 static int
2362 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section, uint16_t offset,
2363 uint16_t length, uint8_t *data, uint16_t *len)
2364 {
2365 offset = 0;
2366 struct iwm_nvm_access_cmd nvm_access_cmd = {
2367 .offset = htole16(offset),
2368 .length = htole16(length),
2369 .type = htole16(section),
2370 .op_code = IWM_NVM_READ_OPCODE,
2371 };
2372 struct iwm_nvm_access_resp *nvm_resp;
2373 struct iwm_rx_packet *pkt;
2374 struct iwm_host_cmd cmd = {
2375 .id = IWM_NVM_ACCESS_CMD,
2376 .flags = (IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL),
2377 .data = { &nvm_access_cmd, },
2378 };
2379 int err, offset_read;
2380 size_t bytes_read;
2381 uint8_t *resp_data;
2382
2383 cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
2384
2385 err = iwm_send_cmd(sc, &cmd);
2386 if (err) {
2387 DPRINTF(("%s: Could not send NVM_ACCESS command (error=%d)\n",
2388 DEVNAME(sc), err));
2389 return err;
2390 }
2391
2392 pkt = cmd.resp_pkt;
2393 if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
2394 err = EIO;
2395 goto exit;
2396 }
2397
2398 /* Extract NVM response */
2399 nvm_resp = (void *)pkt->data;
2400
2401 err = le16toh(nvm_resp->status);
2402 bytes_read = le16toh(nvm_resp->length);
2403 offset_read = le16toh(nvm_resp->offset);
2404 resp_data = nvm_resp->data;
2405 if (err) {
2406 err = EINVAL;
2407 goto exit;
2408 }
2409
2410 if (offset_read != offset) {
2411 err = EINVAL;
2412 goto exit;
2413 }
2414 if (bytes_read > length) {
2415 err = EINVAL;
2416 goto exit;
2417 }
2418
2419 memcpy(data + offset, resp_data, bytes_read);
2420 *len = bytes_read;
2421
2422 exit:
2423 iwm_free_resp(sc, &cmd);
2424 return err;
2425 }
2426
2427 /*
2428 * Reads an NVM section completely.
2429 * NICs prior to 7000 family doesn't have a real NVM, but just read
2430 * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
2431 * by uCode, we need to manually check in this case that we don't
2432 * overflow and try to read more than the EEPROM size.
2433 */
2434 static int
2435 iwm_nvm_read_section(struct iwm_softc *sc, uint16_t section, uint8_t *data,
2436 uint16_t *len, size_t max_len)
2437 {
2438 uint16_t chunklen, seglen;
2439 int err;
2440
2441 chunklen = seglen = IWM_NVM_DEFAULT_CHUNK_SIZE;
2442 *len = 0;
2443
2444 /* Read NVM chunks until exhausted (reading less than requested) */
2445 while (seglen == chunklen && *len < max_len) {
2446 err = iwm_nvm_read_chunk(sc, section, *len, chunklen, data,
2447 &seglen);
2448 if (err) {
2449 DPRINTF(("%s:Cannot read NVM from section %d "
2450 "offset %d, length %d\n",
2451 DEVNAME(sc), section, *len, chunklen));
2452 return err;
2453 }
2454 *len += seglen;
2455 }
2456
2457 DPRINTFN(4, ("NVM section %d read completed\n", section));
2458 return 0;
2459 }
2460
2461 static uint8_t
2462 iwm_fw_valid_tx_ant(struct iwm_softc *sc)
2463 {
2464 uint8_t tx_ant;
2465
2466 tx_ant = ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_TX_CHAIN)
2467 >> IWM_FW_PHY_CFG_TX_CHAIN_POS);
2468
2469 if (sc->sc_nvm.valid_tx_ant)
2470 tx_ant &= sc->sc_nvm.valid_tx_ant;
2471
2472 return tx_ant;
2473 }
2474
2475 static uint8_t
2476 iwm_fw_valid_rx_ant(struct iwm_softc *sc)
2477 {
2478 uint8_t rx_ant;
2479
2480 rx_ant = ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RX_CHAIN)
2481 >> IWM_FW_PHY_CFG_RX_CHAIN_POS);
2482
2483 if (sc->sc_nvm.valid_rx_ant)
2484 rx_ant &= sc->sc_nvm.valid_rx_ant;
2485
2486 return rx_ant;
2487 }
2488
/*
 * Populate ic->ic_channels from an NVM channel list.  nvm_ch_flags
 * holds the per-channel NVM flag words (little-endian), nvm_channels
 * the matching IEEE channel numbers, nchan the number of entries.
 * Channels that are marked invalid (or are 5GHz channels while the
 * SKU disables that band) are skipped.
 */
static void
iwm_init_channel_map(struct iwm_softc *sc, const uint16_t * const nvm_ch_flags,
    const uint8_t *nvm_channels, size_t nchan)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_nvm_data *data = &sc->sc_nvm;
	int ch_idx;
	struct ieee80211_channel *channel;
	uint16_t ch_flags;
	int is_5ghz;
	int flags, hw_value;

	for (ch_idx = 0; ch_idx < nchan; ch_idx++) {
		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);

		/* Entries past the 2GHz block are 5GHz channels. */
		if (ch_idx >= IWM_NUM_2GHZ_CHANNELS &&
		    !data->sku_cap_band_52GHz_enable)
			ch_flags &= ~IWM_NVM_CHANNEL_VALID;

		if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
			DPRINTF(("Ch. %d Flags %x [%sGHz] - No traffic\n",
			    iwm_nvm_channels[ch_idx],
			    ch_flags,
			    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
			    "5.2" : "2.4"));
			continue;
		}

		hw_value = nvm_channels[ch_idx];
		channel = &ic->ic_channels[hw_value];

		is_5ghz = ch_idx >= IWM_NUM_2GHZ_CHANNELS;
		if (!is_5ghz) {
			/* 2GHz: CCK, OFDM and dynamic CCK/OFDM modes. */
			flags = IEEE80211_CHAN_2GHZ;
			channel->ic_flags
			    = IEEE80211_CHAN_CCK
			    | IEEE80211_CHAN_OFDM
			    | IEEE80211_CHAN_DYN
			    | IEEE80211_CHAN_2GHZ;
		} else {
			/* 5GHz: 11a (OFDM) only. */
			flags = IEEE80211_CHAN_5GHZ;
			channel->ic_flags =
			    IEEE80211_CHAN_A;
		}
		channel->ic_freq = ieee80211_ieee2mhz(hw_value, flags);

		/* Channels not marked active may only be scanned passively. */
		if (!(ch_flags & IWM_NVM_CHANNEL_ACTIVE))
			channel->ic_flags |= IEEE80211_CHAN_PASSIVE;

#ifndef IEEE80211_NO_HT
		if (data->sku_cap_11n_enable)
			channel->ic_flags |= IEEE80211_CHAN_HT;
#endif
	}
}
2544
2545 #ifndef IEEE80211_NO_HT
/*
 * Advertise the supported HT (11n) MCS rates to net80211.
 * Currently only single-stream MCS 0-7 are enabled; multi-stream
 * support is stubbed out below.
 */
static void
iwm_setup_ht_rates(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;

	/* TX is supported with the same MCS as RX. */
	ic->ic_tx_mcs_set = IEEE80211_TX_MCS_SET_DEFINED;

	/* Single spatial stream: MCS 0-7. */
	ic->ic_sup_mcs[0] = 0xff; /* MCS 0-7 */

#ifdef notyet
	/* Additional streams depend on MIMO support and RX antennas. */
	if (sc->sc_nvm.sku_cap_mimo_disable)
		return;

	/* NOTE(review): valid_rx_ant is a bitmask; ">" comparisons here
	 * look like they treat it as a count — confirm before enabling. */
	if (iwm_fw_valid_rx_ant(sc) > 1)
		ic->ic_sup_mcs[1] = 0xff; /* MCS 8-15 */
	if (iwm_fw_valid_rx_ant(sc) > 2)
		ic->ic_sup_mcs[2] = 0xff; /* MCS 16-23 */
#endif
}
2566
/* Maximum number of concurrent RX block ack sessions the driver allows. */
#define IWM_MAX_RX_BA_SESSIONS 16

/*
 * Start (start != 0) or stop an RX block ack session for TID 'tid'
 * with station 'ni' by sending an IWM_ADD_STA modify command to the
 * firmware, then report the outcome to net80211.  'ssn' is the
 * starting sequence number of the BA window (used only when starting).
 */
static void
iwm_sta_rx_agg(struct iwm_softc *sc, struct ieee80211_node *ni, uint8_t tid,
    uint16_t ssn, int start)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_add_sta_cmd_v7 cmd;
	struct iwm_node *in = (struct iwm_node *)ni;
	int err, s;
	uint32_t status;

	/* Refuse new sessions beyond the session limit. */
	if (start && sc->sc_rx_ba_sessions >= IWM_MAX_RX_BA_SESSIONS) {
		ieee80211_addba_req_refuse(ic, ni, tid);
		return;
	}

	memset(&cmd, 0, sizeof(cmd));

	cmd.sta_id = IWM_STATION_ID;
	cmd.mac_id_n_color
	    = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
	cmd.add_modify = IWM_STA_MODE_MODIFY;

	/* Fill only the add or remove side of the command. */
	if (start) {
		cmd.add_immediate_ba_tid = (uint8_t)tid;
		cmd.add_immediate_ba_ssn = ssn;
	} else {
		cmd.remove_immediate_ba_tid = (uint8_t)tid;
	}
	cmd.modify_mask = start ? IWM_STA_MODIFY_ADD_BA_TID :
	    IWM_STA_MODIFY_REMOVE_BA_TID;

	status = IWM_ADD_STA_SUCCESS;
	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(cmd), &cmd,
	    &status);

	/* Update the session count and answer the pending ADDBA request. */
	s = splnet();
	if (err == 0 && status == IWM_ADD_STA_SUCCESS) {
		if (start) {
			sc->sc_rx_ba_sessions++;
			ieee80211_addba_req_accept(ic, ni, tid);
		} else if (sc->sc_rx_ba_sessions > 0)
			sc->sc_rx_ba_sessions--;
	} else if (start)
		ieee80211_addba_req_refuse(ic, ni, tid);

	splx(s);
}
2616
2617 static void
2618 iwm_htprot_task(void *arg)
2619 {
2620 struct iwm_softc *sc = arg;
2621 struct ieee80211com *ic = &sc->sc_ic;
2622 struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
2623 int err;
2624
2625 /* This call updates HT protection based on in->in_ni.ni_htop1. */
2626 err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 1);
2627 if (err)
2628 aprint_error_dev(sc->sc_dev,
2629 "could not change HT protection: error %d\n", err);
2630 }
2631
2632 /*
2633 * This function is called by upper layer when HT protection settings in
2634 * beacons have changed.
2635 */
static void
iwm_update_htprot(struct ieee80211com *ic, struct ieee80211_node *ni)
{
	struct iwm_softc *sc = ic->ic_softc;

	/* assumes that ni == ic->ic_bss */
	/* Defer the firmware update to iwm_htprot_task() (process ctx). */
	task_add(systq, &sc->htprot_task);
}
2644
2645 static void
2646 iwm_ba_task(void *arg)
2647 {
2648 struct iwm_softc *sc = arg;
2649 struct ieee80211com *ic = &sc->sc_ic;
2650 struct ieee80211_node *ni = ic->ic_bss;
2651
2652 if (sc->ba_start)
2653 iwm_sta_rx_agg(sc, ni, sc->ba_tid, sc->ba_ssn, 1);
2654 else
2655 iwm_sta_rx_agg(sc, ni, sc->ba_tid, 0, 0);
2656 }
2657
2658 /*
2659 * This function is called by upper layer when an ADDBA request is received
2660 * from another STA and before the ADDBA response is sent.
2661 */
2662 static int
2663 iwm_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
2664 uint8_t tid)
2665 {
2666 struct ieee80211_rx_ba *ba = &ni->ni_rx_ba[tid];
2667 struct iwm_softc *sc = IC2IFP(ic)->if_softc;
2668
2669 if (sc->sc_rx_ba_sessions >= IWM_MAX_RX_BA_SESSIONS)
2670 return ENOSPC;
2671
2672 sc->ba_start = 1;
2673 sc->ba_tid = tid;
2674 sc->ba_ssn = htole16(ba->ba_winstart);
2675 task_add(systq, &sc->ba_task);
2676
2677 return EBUSY;
2678 }
2679
2680 /*
2681 * This function is called by upper layer on teardown of an HT-immediate
2682 * Block Ack agreement (eg. upon receipt of a DELBA frame).
2683 */
static void
iwm_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
    uint8_t tid)
{
	struct iwm_softc *sc = IC2IFP(ic)->if_softc;

	/* Defer the firmware session teardown to iwm_ba_task(). */
	sc->ba_start = 0;
	sc->ba_tid = tid;
	task_add(systq, &sc->ba_task);
}
2694 #endif
2695
/*
 * Derive the MAC address for 8000-family devices.  Preference order:
 * 1) the MAC-override NVM section, unless it holds the reserved,
 *    broadcast, all-zero or a multicast address; 2) the OTP address
 *    read from the WFMP registers.  On total failure the address is
 *    zeroed and an error is logged.
 */
static void
iwm_set_hw_address_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
    const uint16_t *mac_override, const uint16_t *nvm_hw)
{
	/* Reserved address meaning "use the OTP address instead". */
	static const uint8_t reserved_mac[ETHER_ADDR_LEN] = {
		0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
	};
	static const u_int8_t etheranyaddr[ETHER_ADDR_LEN] = {
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00
	};
	const uint8_t *hw_addr;

	if (mac_override) {
		hw_addr = (const uint8_t *)(mac_override +
		    IWM_MAC_ADDRESS_OVERRIDE_8000);

		/*
		 * Store the MAC address from MAO section.
		 * No byte swapping is required in MAO section
		 */
		memcpy(data->hw_addr, hw_addr, ETHER_ADDR_LEN);

		/*
		 * Force the use of the OTP MAC address in case of reserved MAC
		 * address in the NVM, or if address is given but invalid.
		 */
		if (memcmp(reserved_mac, hw_addr, ETHER_ADDR_LEN) != 0 &&
		    (memcmp(etherbroadcastaddr, data->hw_addr,
		    sizeof(etherbroadcastaddr)) != 0) &&
		    (memcmp(etheranyaddr, data->hw_addr,
		    sizeof(etheranyaddr)) != 0) &&
		    !ETHER_IS_MULTICAST(data->hw_addr))
			return;
	}

	if (nvm_hw) {
		/* Read the mac address from WFMP registers. */
		uint32_t mac_addr0 =
		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
		uint32_t mac_addr1 =
		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));

		/* The register bytes are stored in reverse order. */
		hw_addr = (const uint8_t *)&mac_addr0;
		data->hw_addr[0] = hw_addr[3];
		data->hw_addr[1] = hw_addr[2];
		data->hw_addr[2] = hw_addr[1];
		data->hw_addr[3] = hw_addr[0];

		hw_addr = (const uint8_t *)&mac_addr1;
		data->hw_addr[4] = hw_addr[1];
		data->hw_addr[5] = hw_addr[0];

		return;
	}

	aprint_error_dev(sc->sc_dev, "mac address not found\n");
	memset(data->hw_addr, 0, sizeof(data->hw_addr));
}
2754
2755 static int
2756 iwm_parse_nvm_data(struct iwm_softc *sc, const uint16_t *nvm_hw,
2757 const uint16_t *nvm_sw, const uint16_t *nvm_calib,
2758 const uint16_t *mac_override, const uint16_t *phy_sku,
2759 const uint16_t *regulatory)
2760 {
2761 struct iwm_nvm_data *data = &sc->sc_nvm;
2762 uint8_t hw_addr[ETHER_ADDR_LEN];
2763 uint32_t sku;
2764
2765 data->nvm_version = le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
2766
2767 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
2768 uint16_t radio_cfg = le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
2769 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
2770 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
2771 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
2772 data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
2773
2774 sku = le16_to_cpup(nvm_sw + IWM_SKU);
2775 } else {
2776 uint32_t radio_cfg = le32_to_cpup(
2777 (const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
2778 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
2779 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
2780 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
2781 data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK_8000(radio_cfg);
2782 data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
2783 data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
2784
2785 sku = le32_to_cpup(
2786 (const uint32_t *)(phy_sku + IWM_SKU_8000));
2787 }
2788
2789 data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
2790 data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
2791 data->sku_cap_11n_enable = sku & IWM_NVM_SKU_CAP_11N_ENABLE;
2792 data->sku_cap_mimo_disable = sku & IWM_NVM_SKU_CAP_MIMO_DISABLE;
2793
2794 data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2795
2796 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
2797 memcpy(hw_addr, nvm_hw + IWM_HW_ADDR, ETHER_ADDR_LEN);
2798 data->hw_addr[0] = hw_addr[1];
2799 data->hw_addr[1] = hw_addr[0];
2800 data->hw_addr[2] = hw_addr[3];
2801 data->hw_addr[3] = hw_addr[2];
2802 data->hw_addr[4] = hw_addr[5];
2803 data->hw_addr[5] = hw_addr[4];
2804 } else
2805 iwm_set_hw_address_8000(sc, data, mac_override, nvm_hw);
2806
2807 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
2808 iwm_init_channel_map(sc, &nvm_sw[IWM_NVM_CHANNELS],
2809 iwm_nvm_channels, __arraycount(iwm_nvm_channels));
2810 else
2811 iwm_init_channel_map(sc, ®ulatory[IWM_NVM_CHANNELS_8000],
2812 iwm_nvm_channels_8000, __arraycount(iwm_nvm_channels_8000));
2813
2814 data->calib_version = 255; /* TODO:
2815 this value will prevent some checks from
2816 failing, we need to check if this
2817 field is still needed, and if it does,
2818 where is it in the NVM */
2819
2820 return 0;
2821 }
2822
/*
 * Check that the mandatory NVM sections for this device family were
 * read successfully and hand their contents to iwm_parse_nvm_data().
 * Returns ENOENT when a required section is missing.
 */
static int
iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
{
	const uint16_t *hw, *sw, *calib, *mac_override = NULL, *phy_sku = NULL;
	const uint16_t *regulatory = NULL;

	/* Checking for required sections */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
		/* 7000 family: SW and HW sections are mandatory. */
		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
		    !sections[IWM_NVM_SECTION_TYPE_HW].data) {
			return ENOENT;
		}

		hw = (const uint16_t *) sections[IWM_NVM_SECTION_TYPE_HW].data;
	} else if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
		/* SW and REGULATORY sections are mandatory */
		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
		    !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
			return ENOENT;
		}
		/* MAC_OVERRIDE or at least HW section must exist */
		if (!sections[IWM_NVM_SECTION_TYPE_HW_8000].data &&
		    !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
			return ENOENT;
		}

		/* PHY_SKU section is mandatory in B0 */
		if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
			return ENOENT;
		}

		regulatory = (const uint16_t *)
		    sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
		hw = (const uint16_t *)
		    sections[IWM_NVM_SECTION_TYPE_HW_8000].data;
		mac_override =
		    (const uint16_t *)
		    sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
		phy_sku = (const uint16_t *)
		    sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
	} else {
		panic("unknown device family %d\n", sc->sc_device_family);
	}

	/* Sections common to both families. */
	sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
	calib = (const uint16_t *)
	    sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;

	return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
	    phy_sku, regulatory);
}
2874
2875 static int
2876 iwm_nvm_init(struct iwm_softc *sc)
2877 {
2878 struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
2879 int i, section, err;
2880 uint16_t len;
2881 uint8_t *buf;
2882 const size_t bufsz = IWM_MAX_NVM_SECTION_SIZE;
2883
2884 /* Read From FW NVM */
2885 DPRINTF(("Read NVM\n"));
2886
2887 memset(nvm_sections, 0, sizeof(nvm_sections));
2888
2889 buf = kmem_alloc(bufsz, KM_SLEEP);
2890 if (buf == NULL)
2891 return ENOMEM;
2892
2893 for (i = 0; i < __arraycount(iwm_nvm_to_read); i++) {
2894 section = iwm_nvm_to_read[i];
2895 KASSERT(section <= IWM_NVM_NUM_OF_SECTIONS);
2896
2897 err = iwm_nvm_read_section(sc, section, buf, &len, bufsz);
2898 if (err) {
2899 err = 0;
2900 continue;
2901 }
2902 nvm_sections[section].data = kmem_alloc(len, KM_SLEEP);
2903 if (nvm_sections[section].data == NULL) {
2904 err = ENOMEM;
2905 break;
2906 }
2907 memcpy(nvm_sections[section].data, buf, len);
2908 nvm_sections[section].length = len;
2909 }
2910 kmem_free(buf, bufsz);
2911 if (err == 0)
2912 err = iwm_parse_nvm_sections(sc, nvm_sections);
2913
2914 for (i = 0; i < IWM_NVM_NUM_OF_SECTIONS; i++) {
2915 if (nvm_sections[i].data != NULL)
2916 kmem_free(nvm_sections[i].data, nvm_sections[i].length);
2917 }
2918
2919 return err;
2920 }
2921
2922 static int
2923 iwm_firmware_load_sect(struct iwm_softc *sc, uint32_t dst_addr,
2924 const uint8_t *section, uint32_t byte_cnt)
2925 {
2926 int err = EINVAL;
2927 uint32_t chunk_sz, offset;
2928
2929 chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, byte_cnt);
2930
2931 for (offset = 0; offset < byte_cnt; offset += chunk_sz) {
2932 uint32_t addr, len;
2933 const uint8_t *data;
2934
2935 addr = dst_addr + offset;
2936 len = MIN(chunk_sz, byte_cnt - offset);
2937 data = section + offset;
2938
2939 err = iwm_firmware_load_chunk(sc, addr, data, len);
2940 if (err)
2941 break;
2942 }
2943
2944 return err;
2945 }
2946
2947 static int
2948 iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
2949 const uint8_t *section, uint32_t byte_cnt)
2950 {
2951 struct iwm_dma_info *dma = &sc->fw_dma;
2952 bool is_extended = false;
2953 int err;
2954
2955 /* Copy firmware chunk into pre-allocated DMA-safe memory. */
2956 memcpy(dma->vaddr, section, byte_cnt);
2957 bus_dmamap_sync(sc->sc_dmat, dma->map, 0, byte_cnt,
2958 BUS_DMASYNC_PREWRITE);
2959
2960 if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
2961 dst_addr <= IWM_FW_MEM_EXTENDED_END)
2962 is_extended = true;
2963
2964 if (is_extended) {
2965 iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
2966 IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2967 }
2968
2969 sc->sc_fw_chunk_done = 0;
2970
2971 if (!iwm_nic_lock(sc)) {
2972 if (is_extended)
2973 iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
2974 IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2975 return EBUSY;
2976 }
2977
2978 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2979 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
2980 IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
2981 dst_addr);
2982 IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
2983 dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
2984 IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
2985 (iwm_get_dma_hi_addr(dma->paddr)
2986 << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
2987 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
2988 1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
2989 1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
2990 IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
2991 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2992 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
2993 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
2994 IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
2995
2996 iwm_nic_unlock(sc);
2997
2998 /* Wait for this segment to load. */
2999 err = 0;
3000 while (!sc->sc_fw_chunk_done) {
3001 err = tsleep(&sc->sc_fw, 0, "iwmfw", mstohz(5000));
3002 if (err)
3003 break;
3004 }
3005 if (!sc->sc_fw_chunk_done) {
3006 aprint_error_dev(sc->sc_dev,
3007 "fw chunk addr 0x%x len %d failed to load\n",
3008 dst_addr, byte_cnt);
3009 }
3010
3011 if (is_extended) {
3012 int rv = iwm_nic_lock(sc);
3013 iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
3014 IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
3015 if (rv == 0)
3016 iwm_nic_unlock(sc);
3017 }
3018
3019 return err;
3020 }
3021
3022 static int
3023 iwm_load_firmware_7000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3024 {
3025 struct iwm_fw_sects *fws;
3026 int err, i;
3027 void *data;
3028 uint32_t dlen;
3029 uint32_t offset;
3030
3031 fws = &sc->sc_fw.fw_sects[ucode_type];
3032 for (i = 0; i < fws->fw_count; i++) {
3033 data = fws->fw_sect[i].fws_data;
3034 dlen = fws->fw_sect[i].fws_len;
3035 offset = fws->fw_sect[i].fws_devoff;
3036 if (dlen > sc->sc_fwdmasegsz) {
3037 err = EFBIG;
3038 } else
3039 err = iwm_firmware_load_sect(sc, offset, data, dlen);
3040 if (err) {
3041 aprint_error_dev(sc->sc_dev,
3042 "could not load firmware chunk %u of %u\n",
3043 i, fws->fw_count);
3044 return err;
3045 }
3046 }
3047
3048 IWM_WRITE(sc, IWM_CSR_RESET, 0);
3049
3050 return 0;
3051 }
3052
/*
 * Upload the firmware sections belonging to one CPU of an 8000-family
 * device.  *first_ucode_section is the index to continue from (used on
 * input when cpu == 2) and is updated to the last index consumed so the
 * CPU2 pass can pick up where the CPU1 pass stopped.  After each
 * section, the ucode is notified via IWM_FH_UCODE_LOAD_STATUS.
 */
static int
iwm_load_cpu_sections_8000(struct iwm_softc *sc, struct iwm_fw_sects *fws,
    int cpu, int *first_ucode_section)
{
	int shift_param;
	int i, err = 0, sec_num = 0x1;
	uint32_t val, last_read_idx = 0;
	void *data;
	uint32_t dlen;
	uint32_t offset;

	/* CPU1 status bits live in the low half-word, CPU2 in the high. */
	if (cpu == 1) {
		shift_param = 0;
		*first_ucode_section = 0;
	} else {
		shift_param = 16;
		(*first_ucode_section)++;
	}

	for (i = *first_ucode_section; i < IWM_UCODE_SECT_MAX; i++) {
		last_read_idx = i;
		data = fws->fw_sect[i].fws_data;
		dlen = fws->fw_sect[i].fws_len;
		offset = fws->fw_sect[i].fws_devoff;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
		 * CPU1 to CPU2.
		 * PAGING_SEPARATOR_SECTION delimiter - separate between
		 * CPU2 non paged to CPU2 paging sec.
		 */
		if (!data || offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
		    offset == IWM_PAGING_SEPARATOR_SECTION)
			break;

		if (dlen > sc->sc_fwdmasegsz) {
			/* Section does not fit into the firmware DMA buffer. */
			err = EFBIG;
		} else
			err = iwm_firmware_load_sect(sc, offset, data, dlen);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could not load firmware chunk %d (error %d)\n",
			    i, err);
			return err;
		}

		/* Notify the ucode of the loaded section number and status */
		if (iwm_nic_lock(sc)) {
			val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
			val = val | (sec_num << shift_param);
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
			sec_num = (sec_num << 1) | 0x1;
			iwm_nic_unlock(sc);

			/*
			 * The firmware won't load correctly without this delay.
			 */
			DELAY(8000);
		}
	}

	*first_ucode_section = last_read_idx;

	/* Tell the ucode that all of this CPU's sections are in place. */
	if (iwm_nic_lock(sc)) {
		if (cpu == 1)
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
		else
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
		iwm_nic_unlock(sc);
	}

	return 0;
}
3126
3127 static int
3128 iwm_load_firmware_8000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3129 {
3130 struct iwm_fw_sects *fws;
3131 int err = 0;
3132 int first_ucode_section;
3133
3134 fws = &sc->sc_fw.fw_sects[ucode_type];
3135
3136 /* configure the ucode to be ready to get the secured image */
3137 /* release CPU reset */
3138 iwm_write_prph(sc, IWM_RELEASE_CPU_RESET, IWM_RELEASE_CPU_RESET_BIT);
3139
3140 /* load to FW the binary Secured sections of CPU1 */
3141 err = iwm_load_cpu_sections_8000(sc, fws, 1, &first_ucode_section);
3142 if (err)
3143 return err;
3144
3145 /* load to FW the binary sections of CPU2 */
3146 return iwm_load_cpu_sections_8000(sc, fws, 2, &first_ucode_section);
3147 }
3148
3149 static int
3150 iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3151 {
3152 int err, w;
3153
3154 sc->sc_uc.uc_intr = 0;
3155
3156 if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
3157 err = iwm_load_firmware_8000(sc, ucode_type);
3158 else
3159 err = iwm_load_firmware_7000(sc, ucode_type);
3160
3161 if (err)
3162 return err;
3163
3164 /* wait for the firmware to load */
3165 for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++)
3166 err = tsleep(&sc->sc_uc, 0, "iwmuc", mstohz(100));
3167 if (err || !sc->sc_uc.uc_ok)
3168 aprint_error_dev(sc->sc_dev, "could not load firmware\n");
3169
3170 return err;
3171 }
3172
/*
 * Bring the NIC up and upload the requested ucode image.
 * Returns 0 on success or an errno from NIC init / firmware load.
 */
static int
iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
{
	int err;

	/* Ack/clear any pending interrupts before (re)starting. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);

	err = iwm_nic_init(sc);
	if (err) {
		aprint_error_dev(sc->sc_dev, "Unable to init nic\n");
		return err;
	}

	/* make sure rfkill handshake bits are cleared */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);

	/* really make sure rfkill handshake bits are cleared */
	/* maybe we should write a few times more? just to make sure */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);

	/* Upload the image; waits for the alive notification. */
	return iwm_load_firmware(sc, ucode_type);
}
3202
3203 static int
3204 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
3205 {
3206 struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
3207 .valid = htole32(valid_tx_ant),
3208 };
3209
3210 return iwm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD, 0,
3211 sizeof(tx_ant_cmd), &tx_ant_cmd);
3212 }
3213
3214 static int
3215 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
3216 {
3217 struct iwm_phy_cfg_cmd phy_cfg_cmd;
3218 enum iwm_ucode_type ucode_type = sc->sc_uc_current;
3219
3220 phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
3221 phy_cfg_cmd.calib_control.event_trigger =
3222 sc->sc_default_calib[ucode_type].event_trigger;
3223 phy_cfg_cmd.calib_control.flow_trigger =
3224 sc->sc_default_calib[ucode_type].flow_trigger;
3225
3226 DPRINTFN(10, ("Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg));
3227 return iwm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, 0,
3228 sizeof(phy_cfg_cmd), &phy_cfg_cmd);
3229 }
3230
3231 static int
3232 iwm_load_ucode_wait_alive(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3233 {
3234 enum iwm_ucode_type old_type = sc->sc_uc_current;
3235 int err;
3236
3237 err = iwm_read_firmware(sc);
3238 if (err)
3239 return err;
3240
3241 sc->sc_uc_current = ucode_type;
3242 err = iwm_start_fw(sc, ucode_type);
3243 if (err) {
3244 sc->sc_uc_current = old_type;
3245 return err;
3246 }
3247
3248 return iwm_post_alive(sc);
3249 }
3250
/*
 * Boot the INIT ucode image.  With justnvm set, only read the NVM
 * (to obtain the MAC address) and return; otherwise also run the
 * firmware's internal calibrations and wait for the init-complete
 * notification.
 */
static int
iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
{
	int err;

	/* Refuse to run calibrations while the RF kill switch is on. */
	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
		aprint_error_dev(sc->sc_dev,
		    "radio is disabled by hardware switch\n");
		return EPERM;
	}

	sc->sc_init_complete = 0;
	err = iwm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_INIT);
	if (err) {
		aprint_error_dev(sc->sc_dev, "failed to load init firmware\n");
		return err;
	}

	if (justnvm) {
		/* NVM access requires a running init image. */
		err = iwm_nvm_init(sc);
		if (err) {
			aprint_error_dev(sc->sc_dev, "failed to read nvm\n");
			return err;
		}

		/* Adopt the hardware MAC address from the NVM. */
		memcpy(&sc->sc_ic.ic_myaddr, &sc->sc_nvm.hw_addr,
		    ETHER_ADDR_LEN);
		return 0;
	}

	err = iwm_send_bt_init_conf(sc);
	if (err)
		return err;

	/* Disable smart FIFO during init. */
	err = iwm_sf_config(sc, IWM_SF_INIT_OFF);
	if (err)
		return err;

	err = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc));
	if (err)
		return err;

	/*
	 * Send phy configurations command to init uCode
	 * to start the 16.0 uCode init image internal calibrations.
	 */
	err = iwm_send_phy_cfg_cmd(sc);
	if (err)
		return err;

	/*
	 * Nothing to do but wait for the init complete notification
	 * from the firmware
	 */
	while (!sc->sc_init_complete) {
		/* 2s timeout per wait; interrupt handler wakes us. */
		err = tsleep(&sc->sc_init_complete, 0, "iwminit", mstohz(2000));
		if (err)
			break;
	}

	return err;
}
3313
/*
 * Allocate an mbuf of at least 'size' bytes, DMA-map it, and install
 * it in RX ring slot 'idx', updating the hardware RX descriptor.
 * Returns 0 on success or ENOBUFS / a bus_dma errno.
 */
static int
iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
{
	struct iwm_rx_ring *ring = &sc->rxq;
	struct iwm_rx_data *data = &ring->data[idx];
	struct mbuf *m;
	int err;
	int fatal = 0;

	m = m_gethdr(M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;

	/* Use a cluster when it fits, otherwise external storage. */
	if (size <= MCLBYTES) {
		MCLGET(m, M_DONTWAIT);
	} else {
		MEXTMALLOC(m, IWM_RBUF_SIZE, M_DONTWAIT);
	}
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return ENOBUFS;
	}

	/*
	 * If this slot already had a buffer we must unload it first.
	 * From here on a load failure leaves the slot without a mapped
	 * buffer, which we cannot recover from (hence 'fatal').
	 */
	if (data->m != NULL) {
		bus_dmamap_unload(sc->sc_dmat, data->map);
		fatal = 1;
	}

	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (err) {
		/* XXX */
		if (fatal)
			panic("iwm: could not load RX mbuf");
		m_freem(m);
		return err;
	}
	data->m = m;
	bus_dmamap_sync(sc->sc_dmat, data->map, 0, size, BUS_DMASYNC_PREREAD);

	/* Update RX descriptor. */
	/* The hardware stores RX buffer addresses shifted right by 8. */
	ring->desc[idx] = htole32(data->map->dm_segs[0].ds_addr >> 8);
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
	    idx * sizeof(uint32_t), sizeof(uint32_t), BUS_DMASYNC_PREWRITE);

	return 0;
}
3362
3363 #define IWM_RSSI_OFFSET 50
3364 static int
3365 iwm_calc_rssi(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3366 {
3367 int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
3368 uint32_t agc_a, agc_b;
3369 uint32_t val;
3370
3371 val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_AGC_IDX]);
3372 agc_a = (val & IWM_OFDM_AGC_A_MSK) >> IWM_OFDM_AGC_A_POS;
3373 agc_b = (val & IWM_OFDM_AGC_B_MSK) >> IWM_OFDM_AGC_B_POS;
3374
3375 val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_RSSI_AB_IDX]);
3376 rssi_a = (val & IWM_OFDM_RSSI_INBAND_A_MSK) >> IWM_OFDM_RSSI_A_POS;
3377 rssi_b = (val & IWM_OFDM_RSSI_INBAND_B_MSK) >> IWM_OFDM_RSSI_B_POS;
3378
3379 /*
3380 * dBm = rssi dB - agc dB - constant.
3381 * Higher AGC (higher radio gain) means lower signal.
3382 */
3383 rssi_a_dbm = rssi_a - IWM_RSSI_OFFSET - agc_a;
3384 rssi_b_dbm = rssi_b - IWM_RSSI_OFFSET - agc_b;
3385 max_rssi_dbm = MAX(rssi_a_dbm, rssi_b_dbm);
3386
3387 DPRINTF(("Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
3388 rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b));
3389
3390 return max_rssi_dbm;
3391 }
3392
3393 /*
3394 * RSSI values are reported by the FW as positive values - need to negate
3395 * to obtain their dBM. Account for missing antennas by replacing 0
3396 * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
3397 */
3398 static int
3399 iwm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3400 {
3401 int energy_a, energy_b, energy_c, max_energy;
3402 uint32_t val;
3403
3404 val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
3405 energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3406 IWM_RX_INFO_ENERGY_ANT_A_POS;
3407 energy_a = energy_a ? -energy_a : -256;
3408 energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3409 IWM_RX_INFO_ENERGY_ANT_B_POS;
3410 energy_b = energy_b ? -energy_b : -256;
3411 energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3412 IWM_RX_INFO_ENERGY_ANT_C_POS;
3413 energy_c = energy_c ? -energy_c : -256;
3414 max_energy = MAX(energy_a, energy_b);
3415 max_energy = MAX(max_energy, energy_c);
3416
3417 DPRINTFN(12, ("energy In A %d B %d C %d, and max %d\n",
3418 energy_a, energy_b, energy_c, max_energy));
3419
3420 return max_energy;
3421 }
3422
3423 static void
3424 iwm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3425 struct iwm_rx_data *data)
3426 {
3427 struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
3428
3429 DPRINTFN(20, ("received PHY stats\n"));
3430 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
3431 sizeof(*phy_info), BUS_DMASYNC_POSTREAD);
3432
3433 memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3434 }
3435
3436 /*
3437 * Retrieve the average noise (in dBm) among receivers.
3438 */
3439 static int
3440 iwm_get_noise(const struct iwm_statistics_rx_non_phy *stats)
3441 {
3442 int i, total, nbant, noise;
3443
3444 total = nbant = noise = 0;
3445 for (i = 0; i < 3; i++) {
3446 noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
3447 if (noise) {
3448 total += noise;
3449 nbant++;
3450 }
3451 }
3452
3453 /* There should be at least one antenna but check anyway. */
3454 return (nbant == 0) ? -127 : (total / nbant) - 107;
3455 }
3456
/*
 * Handle an RX MPDU notification: validate the frame, compute RSSI
 * from the previously stashed PHY info, replenish the RX ring slot,
 * feed radiotap if active, and pass the frame to net80211.
 */
static void
iwm_rx_rx_mpdu(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
    struct iwm_rx_data *data)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_frame *wh;
	struct ieee80211_node *ni;
	struct ieee80211_channel *c = NULL;
	struct mbuf *m;
	struct iwm_rx_phy_info *phy_info;
	struct iwm_rx_mpdu_res_start *rx_res;
	int device_timestamp;
	uint32_t len;
	uint32_t rx_pkt_status;
	int rssi;
	int s;

	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
	    BUS_DMASYNC_POSTREAD);

	/* PHY info was saved by the preceding RX_PHY notification. */
	phy_info = &sc->sc_last_phy_info;
	rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
	wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
	len = le16toh(rx_res->byte_count);
	/* The 32-bit RX status word follows the frame payload. */
	rx_pkt_status = le32toh(*(uint32_t *)(pkt->data +
	    sizeof(*rx_res) + len));

	/* Reuse the ring mbuf for the stack; point it at the frame. */
	m = data->m;
	m->m_data = pkt->data + sizeof(*rx_res);
	m->m_pkthdr.len = m->m_len = len;

	if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
		DPRINTF(("dsp size out of range [0,20]: %d\n",
		    phy_info->cfg_phy_cnt));
		return;
	}

	if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
	    !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
		DPRINTF(("Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status));
		return; /* drop */
	}

	device_timestamp = le32toh(phy_info->system_timestamp);

	/* Newer firmware reports energy; older firmware reports AGC/RSSI. */
	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_RX_ENERGY_API) {
		rssi = iwm_get_signal_strength(sc, phy_info);
	} else {
		rssi = iwm_calc_rssi(sc, phy_info);
	}
	/* net80211 expects a positive quantity here. */
	rssi = -rssi;

	if (ic->ic_state == IEEE80211_S_SCAN)
		iwm_fix_channel(sc, m);

	/* Give the ring slot a fresh buffer; drop the frame on failure. */
	if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0)
		return;

	m_set_rcvif(m, IC2IFP(ic));

	if (le32toh(phy_info->channel) < __arraycount(ic->ic_channels))
		c = &ic->ic_channels[le32toh(phy_info->channel)];

	s = splnet();

	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
	if (c)
		ni->ni_chan = c;

	/* Build and tap a radiotap header if anyone is listening. */
	if (__predict_false(sc->sc_drvbpf != NULL)) {
		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;

		tap->wr_flags = 0;
		if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
		tap->wr_chan_freq =
		    htole16(ic->ic_channels[phy_info->channel].ic_freq);
		tap->wr_chan_flags =
		    htole16(ic->ic_channels[phy_info->channel].ic_flags);
		tap->wr_dbm_antsignal = (int8_t)rssi;
		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
		tap->wr_tsft = phy_info->system_timestamp;
		if (phy_info->phy_flags &
		    htole16(IWM_RX_RES_PHY_FLAGS_OFDM_HT)) {
			uint8_t mcs = (phy_info->rate_n_flags &
			    htole32(IWM_RATE_HT_MCS_RATE_CODE_MSK));
			/* Radiotap convention: 0x80 | MCS for HT rates. */
			tap->wr_rate = (0x80 | mcs);
		} else {
			uint8_t rate = (phy_info->rate_n_flags &
			    htole32(IWM_RATE_LEGACY_RATE_MSK));
			/* Map hardware PLCP rate codes to 500kbps units. */
			switch (rate) {
			/* CCK rates. */
			case 10: tap->wr_rate = 2; break;
			case 20: tap->wr_rate = 4; break;
			case 55: tap->wr_rate = 11; break;
			case 110: tap->wr_rate = 22; break;
			/* OFDM rates. */
			case 0xd: tap->wr_rate = 12; break;
			case 0xf: tap->wr_rate = 18; break;
			case 0x5: tap->wr_rate = 24; break;
			case 0x7: tap->wr_rate = 36; break;
			case 0x9: tap->wr_rate = 48; break;
			case 0xb: tap->wr_rate = 72; break;
			case 0x1: tap->wr_rate = 96; break;
			case 0x3: tap->wr_rate = 108; break;
			/* Unknown rate: should not happen. */
			default: tap->wr_rate = 0;
			}
		}

		bpf_mtap2(sc->sc_drvbpf, tap, sc->sc_rxtap_len, m);
	}
	/* Hand the frame to net80211; it consumes the mbuf. */
	ieee80211_input(ic, m, ni, rssi, device_timestamp);
	ieee80211_free_node(ni);

	splx(s);
}
3574
3575 static void
3576 iwm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3577 struct iwm_node *in)
3578 {
3579 struct ieee80211com *ic = &sc->sc_ic;
3580 struct ifnet *ifp = IC2IFP(ic);
3581 struct iwm_tx_resp *tx_resp = (void *)pkt->data;
3582 int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3583 int failack = tx_resp->failure_frame;
3584
3585 KASSERT(tx_resp->frame_count == 1);
3586
3587 /* Update rate control statistics. */
3588 in->in_amn.amn_txcnt++;
3589 if (failack > 0) {
3590 in->in_amn.amn_retrycnt++;
3591 }
3592
3593 if (status != IWM_TX_STATUS_SUCCESS &&
3594 status != IWM_TX_STATUS_DIRECT_DONE)
3595 ifp->if_oerrors++;
3596 else
3597 ifp->if_opackets++;
3598 }
3599
/*
 * Handle a TX command completion: update statistics, free the mbuf
 * and node reference held by the TX slot, and restart the interface
 * queue if the ring drained below the low watermark.
 */
static void
iwm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
    struct iwm_rx_data *data)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = IC2IFP(ic);
	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
	int idx = cmd_hdr->idx;
	int qid = cmd_hdr->qid;
	struct iwm_tx_ring *ring = &sc->txq[qid];
	struct iwm_tx_data *txd = &ring->data[idx];
	struct iwm_node *in = txd->in;

	if (txd->done) {
		DPRINTF(("%s: got tx interrupt that's already been handled!\n",
		    DEVNAME(sc)));
		return;
	}

	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
	    BUS_DMASYNC_POSTREAD);

	/* Firmware responded; reset the watchdog. */
	sc->sc_tx_timer = 0;

	iwm_rx_tx_cmd_single(sc, pkt, in);

	/* Release the transmitted mbuf and its DMA mapping. */
	bus_dmamap_sync(sc->sc_dmat, txd->map, 0, txd->map->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, txd->map);
	m_freem(txd->m);

	DPRINTFN(8, ("free txd %p, in %p\n", txd, txd->in));
	KASSERT(txd->done == 0);
	txd->done = 1;
	KASSERT(txd->in);

	/* Drop the node reference taken at TX time. */
	txd->m = NULL;
	txd->in = NULL;
	ieee80211_free_node(&in->in_ni);

	/* Unclog the interface queue once the ring has drained enough. */
	if (--ring->queued < IWM_TX_RING_LOMARK) {
		sc->qfullmsk &= ~(1 << ring->qid);
		if (sc->qfullmsk == 0 && (ifp->if_flags & IFF_OACTIVE)) {
			ifp->if_flags &= ~IFF_OACTIVE;
			if_start_lock(ifp);
		}
	}
}
3648
3649 static int
3650 iwm_binding_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action)
3651 {
3652 struct iwm_binding_cmd cmd;
3653 struct iwm_phy_ctxt *phyctxt = in->in_phyctxt;
3654 int i, err;
3655 uint32_t status;
3656
3657 memset(&cmd, 0, sizeof(cmd));
3658
3659 cmd.id_and_color
3660 = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
3661 cmd.action = htole32(action);
3662 cmd.phy = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
3663
3664 cmd.macs[0] = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
3665 for (i = 1; i < IWM_MAX_MACS_IN_BINDING; i++)
3666 cmd.macs[i] = htole32(IWM_FW_CTXT_INVALID);
3667
3668 status = 0;
3669 err = iwm_send_cmd_pdu_status(sc, IWM_BINDING_CONTEXT_CMD,
3670 sizeof(cmd), &cmd, &status);
3671 if (err == 0 && status != 0)
3672 err = EIO;
3673
3674 return err;
3675 }
3676
3677 static void
3678 iwm_phy_ctxt_cmd_hdr(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
3679 struct iwm_phy_context_cmd *cmd, uint32_t action, uint32_t apply_time)
3680 {
3681 memset(cmd, 0, sizeof(struct iwm_phy_context_cmd));
3682
3683 cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(ctxt->id,
3684 ctxt->color));
3685 cmd->action = htole32(action);
3686 cmd->apply_time = htole32(apply_time);
3687 }
3688
3689 static void
3690 iwm_phy_ctxt_cmd_data(struct iwm_softc *sc, struct iwm_phy_context_cmd *cmd,
3691 struct ieee80211_channel *chan, uint8_t chains_static,
3692 uint8_t chains_dynamic)
3693 {
3694 struct ieee80211com *ic = &sc->sc_ic;
3695 uint8_t active_cnt, idle_cnt;
3696
3697 cmd->ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
3698 IWM_PHY_BAND_24 : IWM_PHY_BAND_5;
3699
3700 cmd->ci.channel = ieee80211_chan2ieee(ic, chan);
3701 cmd->ci.width = IWM_PHY_VHT_CHANNEL_MODE20;
3702 cmd->ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
3703
3704 /* Set rx the chains */
3705 idle_cnt = chains_static;
3706 active_cnt = chains_dynamic;
3707
3708 cmd->rxchain_info = htole32(iwm_fw_valid_rx_ant(sc) <<
3709 IWM_PHY_RX_CHAIN_VALID_POS);
3710 cmd->rxchain_info |= htole32(idle_cnt << IWM_PHY_RX_CHAIN_CNT_POS);
3711 cmd->rxchain_info |= htole32(active_cnt <<
3712 IWM_PHY_RX_CHAIN_MIMO_CNT_POS);
3713
3714 cmd->txchain_info = htole32(iwm_fw_valid_tx_ant(sc));
3715 }
3716
3717 static int
3718 iwm_phy_ctxt_cmd(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
3719 uint8_t chains_static, uint8_t chains_dynamic, uint32_t action,
3720 uint32_t apply_time)
3721 {
3722 struct iwm_phy_context_cmd cmd;
3723
3724 iwm_phy_ctxt_cmd_hdr(sc, ctxt, &cmd, action, apply_time);
3725
3726 iwm_phy_ctxt_cmd_data(sc, &cmd, ctxt->channel,
3727 chains_static, chains_dynamic);
3728
3729 return iwm_send_cmd_pdu(sc, IWM_PHY_CONTEXT_CMD, 0,
3730 sizeof(struct iwm_phy_context_cmd), &cmd);
3731 }
3732
/*
 * Send a host command to the firmware via the command queue.
 * Small commands are placed in the ring's pre-allocated command
 * buffer; oversized ones get their own DMA-mapped mbuf.  For
 * synchronous commands this sleeps until the completion interrupt
 * wakes us (see iwm_cmd_done()); with IWM_CMD_WANT_SKB the response
 * packet is returned in hcmd->resp_pkt and the caller must release
 * it with iwm_free_resp().
 */
static int
iwm_send_cmd(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
{
	struct iwm_tx_ring *ring = &sc->txq[IWM_CMD_QUEUE];
	struct iwm_tfd *desc;
	struct iwm_tx_data *txdata;
	struct iwm_device_cmd *cmd;
	struct mbuf *m;
	bus_addr_t paddr;
	uint32_t addr_lo;
	int err = 0, i, paylen, off, s;
	int code;
	int async, wantresp;
	int group_id;
	size_t hdrlen, datasz;
	uint8_t *data;

	code = hcmd->id;
	async = hcmd->flags & IWM_CMD_ASYNC;
	wantresp = hcmd->flags & IWM_CMD_WANT_SKB;

	/* Total payload size across all data buffers. */
	for (i = 0, paylen = 0; i < __arraycount(hcmd->len); i++) {
		paylen += hcmd->len[i];
	}

	/* if the command wants an answer, busy sc_cmd_resp */
	if (wantresp) {
		KASSERT(!async);
		/* Only one response buffer exists; serialize its users. */
		while (sc->sc_wantresp != IWM_CMD_RESP_IDLE)
			tsleep(&sc->sc_wantresp, 0, "iwmcmdsl", 0);
		sc->sc_wantresp = ring->qid << 16 | ring->cur;
	}

	/*
	 * Is the hardware still available? (after e.g. above wait).
	 */
	s = splnet();
	if (sc->sc_flags & IWM_FLAG_STOPPED) {
		err = ENXIO;
		goto out;
	}

	desc = &ring->desc[ring->cur];
	txdata = &ring->data[ring->cur];

	/* Wide (grouped) commands use a larger header, less payload room. */
	group_id = iwm_cmd_groupid(code);
	if (group_id != 0) {
		hdrlen = sizeof(cmd->hdr_wide);
		datasz = sizeof(cmd->data_wide);
	} else {
		hdrlen = sizeof(cmd->hdr);
		datasz = sizeof(cmd->data);
	}

	if (paylen > datasz) {
		/* Command is too large to fit in pre-allocated space. */
		size_t totlen = hdrlen + paylen;
		if (paylen > IWM_MAX_CMD_PAYLOAD_SIZE) {
			aprint_error_dev(sc->sc_dev,
			    "firmware command too long (%zd bytes)\n", totlen);
			err = EINVAL;
			goto out;
		}
		m = m_gethdr(M_DONTWAIT, MT_DATA);
		if (m == NULL) {
			err = ENOMEM;
			goto out;
		}
		MEXTMALLOC(m, IWM_RBUF_SIZE, M_DONTWAIT);
		if (!(m->m_flags & M_EXT)) {
			aprint_error_dev(sc->sc_dev,
			    "could not get fw cmd mbuf (%zd bytes)\n", totlen);
			m_freem(m);
			err = ENOMEM;
			goto out;
		}
		cmd = mtod(m, struct iwm_device_cmd *);
		err = bus_dmamap_load(sc->sc_dmat, txdata->map, cmd,
		    totlen, NULL, BUS_DMA_NOWAIT | BUS_DMA_WRITE);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could not load fw cmd mbuf (%zd bytes)\n", totlen);
			m_freem(m);
			goto out;
		}
		/* The mbuf is freed in iwm_cmd_done() on completion. */
		txdata->m = m;
		paddr = txdata->map->dm_segs[0].ds_addr;
	} else {
		/* Small command: use the ring's pre-mapped command slot. */
		cmd = &ring->cmd[ring->cur];
		paddr = txdata->cmd_paddr;
	}

	if (group_id != 0) {
		cmd->hdr_wide.opcode = iwm_cmd_opcode(code);
		cmd->hdr_wide.group_id = group_id;
		cmd->hdr_wide.qid = ring->qid;
		cmd->hdr_wide.idx = ring->cur;
		cmd->hdr_wide.length = htole16(paylen);
		cmd->hdr_wide.version = iwm_cmd_version(code);
		data = cmd->data_wide;
	} else {
		cmd->hdr.code = code;
		cmd->hdr.flags = 0;
		cmd->hdr.qid = ring->qid;
		cmd->hdr.idx = ring->cur;
		data = cmd->data;
	}

	/* Gather the caller's scattered payload buffers. */
	for (i = 0, off = 0; i < __arraycount(hcmd->data); i++) {
		if (hcmd->len[i] == 0)
			continue;
		memcpy(data + off, hcmd->data[i], hcmd->len[i]);
		off += hcmd->len[i];
	}
	KASSERT(off == paylen);

	/* lo field is not aligned */
	addr_lo = htole32((uint32_t)paddr);
	memcpy(&desc->tbs[0].lo, &addr_lo, sizeof(uint32_t));
	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(paddr)
	    | ((hdrlen + paylen) << 4));
	desc->num_tbs = 1;

	DPRINTFN(8, ("iwm_send_cmd 0x%x size=%zu %s\n",
	    code, hdrlen + paylen, async ? " (async)" : ""));

	/* Flush the command and its TFD descriptor to the device. */
	if (paylen > datasz) {
		bus_dmamap_sync(sc->sc_dmat, txdata->map, 0,
		    hdrlen + paylen, BUS_DMASYNC_PREWRITE);
	} else {
		bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
		    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
		    hdrlen + paylen, BUS_DMASYNC_PREWRITE);
	}
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
	    sizeof(*desc), BUS_DMASYNC_PREWRITE);

	/* Wake the device's MAC clock before touching the ring pointer. */
	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	if (!iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
	    (IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
	     IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000)) {
		aprint_error_dev(sc->sc_dev, "acquiring device failed\n");
		err = EBUSY;
		goto out;
	}

#if 0
	iwm_update_sched(sc, ring->qid, ring->cur, 0, 0);
#endif
	DPRINTF(("sending command 0x%x qid %d, idx %d\n",
	    code, ring->qid, ring->cur));

	/* Kick command ring. */
	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	if (!async) {
		/* Sleep on the descriptor; iwm_cmd_done() wakes us. */
		int generation = sc->sc_generation;
		err = tsleep(desc, PCATCH, "iwmcmd", mstohz(1000));
		if (err == 0) {
			/* if hardware is no longer up, return error */
			if (generation != sc->sc_generation) {
				err = ENXIO;
			} else {
				hcmd->resp_pkt = (void *)sc->sc_cmd_resp;
			}
		}
	}
 out:
	/* On failure, release the response buffer we reserved above. */
	if (wantresp && err) {
		iwm_free_resp(sc, hcmd);
	}
	splx(s);

	return err;
}
3912
3913 static int
3914 iwm_send_cmd_pdu(struct iwm_softc *sc, uint32_t id, uint32_t flags,
3915 uint16_t len, const void *data)
3916 {
3917 struct iwm_host_cmd cmd = {
3918 .id = id,
3919 .len = { len, },
3920 .data = { data, },
3921 .flags = flags,
3922 };
3923
3924 return iwm_send_cmd(sc, &cmd);
3925 }
3926
3927 static int
3928 iwm_send_cmd_status(struct iwm_softc *sc, struct iwm_host_cmd *cmd,
3929 uint32_t *status)
3930 {
3931 struct iwm_rx_packet *pkt;
3932 struct iwm_cmd_response *resp;
3933 int err, resp_len;
3934
3935 KASSERT((cmd->flags & IWM_CMD_WANT_SKB) == 0);
3936 cmd->flags |= IWM_CMD_WANT_SKB;
3937
3938 err = iwm_send_cmd(sc, cmd);
3939 if (err)
3940 return err;
3941 pkt = cmd->resp_pkt;
3942
3943 /* Can happen if RFKILL is asserted */
3944 if (!pkt) {
3945 err = 0;
3946 goto out_free_resp;
3947 }
3948
3949 if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
3950 err = EIO;
3951 goto out_free_resp;
3952 }
3953
3954 resp_len = iwm_rx_packet_payload_len(pkt);
3955 if (resp_len != sizeof(*resp)) {
3956 err = EIO;
3957 goto out_free_resp;
3958 }
3959
3960 resp = (void *)pkt->data;
3961 *status = le32toh(resp->status);
3962 out_free_resp:
3963 iwm_free_resp(sc, cmd);
3964 return err;
3965 }
3966
3967 static int
3968 iwm_send_cmd_pdu_status(struct iwm_softc *sc, uint32_t id, uint16_t len,
3969 const void *data, uint32_t *status)
3970 {
3971 struct iwm_host_cmd cmd = {
3972 .id = id,
3973 .len = { len, },
3974 .data = { data, },
3975 };
3976
3977 return iwm_send_cmd_status(sc, &cmd, status);
3978 }
3979
/*
 * Release the single shared command-response buffer reserved in
 * iwm_send_cmd() and wake any sender waiting for it.
 */
static void
iwm_free_resp(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
{
	KASSERT(sc->sc_wantresp != IWM_CMD_RESP_IDLE);
	KASSERT((hcmd->flags & IWM_CMD_WANT_SKB) == IWM_CMD_WANT_SKB);
	sc->sc_wantresp = IWM_CMD_RESP_IDLE;
	wakeup(&sc->sc_wantresp);
}
3988
3989 static void
3990 iwm_cmd_done(struct iwm_softc *sc, int qid, int idx)
3991 {
3992 struct iwm_tx_ring *ring = &sc->txq[IWM_CMD_QUEUE];
3993 struct iwm_tx_data *data;
3994
3995 if (qid != IWM_CMD_QUEUE) {
3996 return; /* Not a command ack. */
3997 }
3998
3999 data = &ring->data[idx];
4000
4001 if (data->m != NULL) {
4002 bus_dmamap_sync(sc->sc_dmat, data->map, 0,
4003 data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
4004 bus_dmamap_unload(sc->sc_dmat, data->map);
4005 m_freem(data->m);
4006 data->m = NULL;
4007 }
4008 wakeup(&ring->desc[idx]);
4009 }
4010
#if 0
/*
 * necessary only for block ack mode
 *
 * Mirror the byte count of TFD 'idx' on queue 'qid' into the TX
 * scheduler's byte-count table, including the hardware's duplicate
 * wrap-around entry.
 *
 * NOTE(review): the previous version referenced an undeclared
 * variable 'w' in both bus_dmamap_sync() offset computations and
 * never actually stored 'w_val' into the table; fixed by computing
 * the target entry's address explicitly.
 */
void
iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
    uint16_t len)
{
	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
	uint16_t *w;
	uint16_t w_val;

	scd_bc_tbl = sc->sched_dma.vaddr;

	len += 8; /* magic numbers came naturally from paris */
	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
		len = roundup(len, 4) / 4;

	w_val = htole16(sta_id << 12 | len);

	/* Update TX scheduler. */
	w = &scd_bc_tbl[qid].tfd_offset[idx];
	*w = w_val;
	bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
	    (char *)(void *)w - (char *)(void *)sc->sched_dma.vaddr,
	    sizeof(uint16_t), BUS_DMASYNC_PREWRITE);

	/* I really wonder what this is ?!? */
	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
		w = &scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx];
		*w = w_val;
		bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
		    (char *)(void *)w - (char *)(void *)sc->sched_dma.vaddr,
		    sizeof (uint16_t), BUS_DMASYNC_PREWRITE);
	}
}
#endif
4046
4047 /*
4048 * Fill in various bit for management frames, and leave them
4049 * unfilled for data frames (firmware takes care of that).
4050 * Return the selected TX rate.
4051 */
4052 static const struct iwm_rate *
4053 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
4054 struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
4055 {
4056 struct ieee80211com *ic = &sc->sc_ic;
4057 struct ieee80211_node *ni = &in->in_ni;
4058 const struct iwm_rate *rinfo;
4059 int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4060 int ridx, rate_flags, i;
4061 int nrates = ni->ni_rates.rs_nrates;
4062
4063 tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
4064 tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
4065
4066 if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
4067 type != IEEE80211_FC0_TYPE_DATA) {
4068 /* for non-data, use the lowest supported rate */
4069 ridx = (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) ?
4070 IWM_RIDX_OFDM : IWM_RIDX_CCK;
4071 tx->data_retry_limit = IWM_MGMT_DFAULT_RETRY_LIMIT;
4072 #ifndef IEEE80211_NO_HT
4073 } else if (ic->ic_fixed_mcs != -1) {
4074 ridx = sc->sc_fixed_ridx;
4075 #endif
4076 } else if (ic->ic_fixed_rate != -1) {
4077 ridx = sc->sc_fixed_ridx;
4078 } else {
4079 /* for data frames, use RS table */
4080 tx->initial_rate_index = 0;
4081 tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
4082 DPRINTFN(12, ("start with txrate %d\n",
4083 tx->initial_rate_index));
4084 #ifndef IEEE80211_NO_HT
4085 if (ni->ni_flags & IEEE80211_NODE_HT) {
4086 ridx = iwm_mcs2ridx[ni->ni_txmcs];
4087 return &iwm_rates[ridx];
4088 }
4089 #endif
4090 ridx = (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) ?
4091 IWM_RIDX_OFDM : IWM_RIDX_CCK;
4092 for (i = 0; i < nrates; i++) {
4093 if (iwm_rates[i].rate == (ni->ni_txrate &
4094 IEEE80211_RATE_VAL)) {
4095 ridx = i;
4096 break;
4097 }
4098 }
4099 return &iwm_rates[ridx];
4100 }
4101
4102 rinfo = &iwm_rates[ridx];
4103 rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
4104 if (IWM_RIDX_IS_CCK(ridx))
4105 rate_flags |= IWM_RATE_MCS_CCK_MSK;
4106 #ifndef IEEE80211_NO_HT
4107 if ((ni->ni_flags & IEEE80211_NODE_HT) &&
4108 rinfo->ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP) {
4109 rate_flags |= IWM_RATE_MCS_HT_MSK;
4110 tx->rate_n_flags = htole32(rate_flags | rinfo->ht_plcp);
4111 } else
4112 #endif
4113 tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
4114
4115 return rinfo;
4116 }
4117
4118 #define TB0_SIZE 16
/*
 * Queue an 802.11 frame for transmission on the TX ring selected by
 * access category 'ac'.  Builds the firmware TX command (rate, flags,
 * power-save timeouts), DMA-maps the payload, fills the TFD descriptor
 * and kicks the hardware write pointer.
 *
 * Consumes 'm' on both success and failure paths.
 * Returns 0 on success or an errno (ENOBUFS, EFBIG-mapped errors, ...).
 */
static int
iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (struct iwm_node *)ni;
	struct iwm_tx_ring *ring;
	struct iwm_tx_data *data;
	struct iwm_tfd *desc;
	struct iwm_device_cmd *cmd;
	struct iwm_tx_cmd *tx;
	struct ieee80211_frame *wh;
	struct ieee80211_key *k = NULL;
	struct mbuf *m1;
	const struct iwm_rate *rinfo;
	uint32_t flags;
	u_int hdrlen;
	bus_dma_segment_t *seg;
	uint8_t tid, type;
	int i, totlen, err, pad;

	wh = mtod(m, struct ieee80211_frame *);
	hdrlen = ieee80211_anyhdrsize(wh);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;

	/* No QoS/TID support here; everything goes out as TID 0. */
	tid = 0;

	/* Pick up the ring slot at the current producer index. */
	ring = &sc->txq[ac];
	desc = &ring->desc[ring->cur];
	memset(desc, 0, sizeof(*desc));
	data = &ring->data[ring->cur];

	/* TX command header: tells firmware which queue/slot this is. */
	cmd = &ring->cmd[ring->cur];
	cmd->hdr.code = IWM_TX_CMD;
	cmd->hdr.flags = 0;
	cmd->hdr.qid = ring->qid;
	cmd->hdr.idx = ring->cur;

	tx = (void *)cmd->data;
	memset(tx, 0, sizeof(*tx));

	/* Select TX rate and fill rate_n_flags in the command. */
	rinfo = iwm_tx_fill_cmd(sc, in, wh, tx);

	/* Radiotap capture, before encryption so the header is readable. */
	if (__predict_false(sc->sc_drvbpf != NULL)) {
		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;

		tap->wt_flags = 0;
		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
#ifndef IEEE80211_NO_HT
		if ((ni->ni_flags & IEEE80211_NODE_HT) &&
		    !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
		    type == IEEE80211_FC0_TYPE_DATA &&
		    rinfo->plcp == IWM_RATE_INVM_PLCP) {
			tap->wt_rate = (0x80 | rinfo->ht_plcp);
		} else
#endif
		tap->wt_rate = rinfo->rate;
		tap->wt_hwqueue = ac;
		if (wh->i_fc[1] & IEEE80211_FC1_WEP)
			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;

		bpf_mtap2(sc->sc_drvbpf, tap, sc->sc_txtap_len, m);
	}

	/* Encrypt the frame if need be. */
	if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
		k = ieee80211_crypto_encap(ic, ni, m);
		if (k == NULL) {
			m_freem(m);
			return ENOBUFS;
		}
		/* Packet header may have moved, reset our local pointer. */
		wh = mtod(m, struct ieee80211_frame *);
	}
	totlen = m->m_pkthdr.len;

	flags = 0;
	/* Unicast frames expect an ACK from the peer. */
	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_ACK;
	}

	/* RTS/CTS protection for large frames or when ERP protection is on. */
	if (type == IEEE80211_FC0_TYPE_DATA &&
	    !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
	    (totlen + IEEE80211_CRC_LEN > ic->ic_rtsthreshold ||
	     (ic->ic_flags & IEEE80211_F_USEPROT)))
		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;

	/* Non-data and multicast frames go via the auxiliary station. */
	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
	    type != IEEE80211_FC0_TYPE_DATA)
		tx->sta_id = IWM_AUX_STA_ID;
	else
		tx->sta_id = IWM_STATION_ID;

	/*
	 * Power-save frame timeout: association frames get a longer
	 * window than other management frames; data frames get none.
	 */
	if (type == IEEE80211_FC0_TYPE_MGT) {
		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
		else
			tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
	} else {
		tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
	}

	if (hdrlen & 3) {
		/* First segment length must be a multiple of 4. */
		flags |= IWM_TX_CMD_FLG_MH_PAD;
		pad = 4 - (hdrlen & 3);
	} else
		pad = 0;

	tx->driver_txop = 0;
	tx->next_frame_len = 0;

	tx->len = htole16(totlen);
	tx->tid_tspec = tid;
	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);

	/* Set physical address of "scratch area". */
	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);

	/* Copy 802.11 header in TX command. */
	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);

	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;

	tx->sec_ctl = 0;
	tx->tx_flags |= htole32(flags);

	/* Trim 802.11 header. */
	m_adj(m, hdrlen);

	err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
	    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
	if (err) {
		if (err != EFBIG) {
			aprint_error_dev(sc->sc_dev,
			    "can't map mbuf (error %d)\n", err);
			m_freem(m);
			return err;
		}
		/* Too many DMA segments, linearize mbuf. */
		MGETHDR(m1, M_DONTWAIT, MT_DATA);
		if (m1 == NULL) {
			m_freem(m);
			return ENOBUFS;
		}
		if (m->m_pkthdr.len > MHLEN) {
			MCLGET(m1, M_DONTWAIT);
			if (!(m1->m_flags & M_EXT)) {
				m_freem(m);
				m_freem(m1);
				return ENOBUFS;
			}
		}
		m_copydata(m, 0, m->m_pkthdr.len, mtod(m1, void *));
		m1->m_pkthdr.len = m1->m_len = m->m_pkthdr.len;
		m_freem(m);
		m = m1;

		err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
		    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "can't map mbuf (error %d)\n", err);
			m_freem(m);
			return err;
		}
	}
	data->m = m;
	data->in = in;
	data->done = 0;

	DPRINTFN(8, ("sending txd %p, in %p\n", data, data->in));
	KASSERT(data->in != NULL);

	DPRINTFN(8, ("sending data: qid=%d idx=%d len=%d nsegs=%d\n",
	    ring->qid, ring->cur, totlen, data->map->dm_nsegs));

	/* Fill TX descriptor. */
	desc->num_tbs = 2 + data->map->dm_nsegs;

	/* TB0/TB1 cover the TX command + copied 802.11 header (+pad). */
	desc->tbs[0].lo = htole32(data->cmd_paddr);
	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    (TB0_SIZE << 4);
	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
	      + hdrlen + pad - TB0_SIZE) << 4);

	/* Other DMA segments are for data payload. */
	seg = data->map->dm_segs;
	for (i = 0; i < data->map->dm_nsegs; i++, seg++) {
		desc->tbs[i+2].lo = htole32(seg->ds_addr);
		desc->tbs[i+2].hi_n_len =
		    htole16(iwm_get_dma_hi_addr(seg->ds_addr))
		    | ((seg->ds_len) << 4);
	}

	/* Flush payload, command and descriptor to memory before the kick. */
	bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
	    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
	    sizeof (*cmd), BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
	    sizeof (*desc), BUS_DMASYNC_PREWRITE);

#if 0
	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id,
	    le16toh(tx->len));
#endif

	/* Kick TX ring. */
	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	/* Mark TX ring as full if we reach a certain threshold. */
	if (++ring->queued > IWM_TX_RING_HIMARK) {
		sc->qfullmsk |= 1 << ring->qid;
	}

	return 0;
}
4345
#if 0
/* not necessary? */
/*
 * Ask the firmware to flush the TX FIFOs selected by 'tfd_msk'.
 * Currently compiled out; kept for reference against upstream.
 */
static int
iwm_flush_tx_path(struct iwm_softc *sc, int tfd_msk, int sync)
{
	struct iwm_tx_path_flush_cmd flush_cmd = {
		.queues_ctl = htole32(tfd_msk),
		.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
	};
	int err;

	/* 'sync' selects a blocking command; async otherwise. */
	err = iwm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, sync ? 0 : IWM_CMD_ASYNC,
	    sizeof(flush_cmd), &flush_cmd);
	if (err)
		aprint_error_dev(sc->sc_dev, "Flushing tx queue failed: %d\n",
		    err);
	return err;
}
#endif
4365
/* Turn the device LED on via the CSR LED register. */
static void
iwm_led_enable(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_LED_REG, IWM_CSR_LED_REG_TURN_ON);
}
4371
/* Turn the device LED off via the CSR LED register. */
static void
iwm_led_disable(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_LED_REG, IWM_CSR_LED_REG_TURN_OFF);
}
4377
4378 static int
4379 iwm_led_is_enabled(struct iwm_softc *sc)
4380 {
4381 return (IWM_READ(sc, IWM_CSR_LED_REG) == IWM_CSR_LED_REG_TURN_ON);
4382 }
4383
4384 static void
4385 iwm_led_blink_timeout(void *arg)
4386 {
4387 struct iwm_softc *sc = arg;
4388
4389 if (iwm_led_is_enabled(sc))
4390 iwm_led_disable(sc);
4391 else
4392 iwm_led_enable(sc);
4393
4394 callout_schedule(&sc->sc_led_blink_to, mstohz(200));
4395 }
4396
/* Start the LED blink cycle; iwm_led_blink_timeout() keeps it going. */
static void
iwm_led_blink_start(struct iwm_softc *sc)
{
	callout_schedule(&sc->sc_led_blink_to, mstohz(200));
}
4402
/* Stop the LED blink callout and leave the LED off. */
static void
iwm_led_blink_stop(struct iwm_softc *sc)
{
	callout_stop(&sc->sc_led_blink_to);
	iwm_led_disable(sc);
}
4409
4410 #define IWM_POWER_KEEP_ALIVE_PERIOD_SEC 25
4411
4412 static int
4413 iwm_beacon_filter_send_cmd(struct iwm_softc *sc,
4414 struct iwm_beacon_filter_cmd *cmd)
4415 {
4416 return iwm_send_cmd_pdu(sc, IWM_REPLY_BEACON_FILTERING_CMD,
4417 0, sizeof(struct iwm_beacon_filter_cmd), cmd);
4418 }
4419
/*
 * Fill the CQM-related field of a beacon filter command from the
 * cached beacon-abort state in the softc.
 */
static void
iwm_beacon_filter_set_cqm_params(struct iwm_softc *sc, struct iwm_node *in,
    struct iwm_beacon_filter_cmd *cmd)
{
	cmd->ba_enable_beacon_abort = htole32(sc->sc_bf.ba_enabled);
}
4426
/*
 * Enable or disable firmware beacon abort.  A no-op (returns 0)
 * unless beacon filtering is currently enabled.
 */
static int
iwm_update_beacon_abort(struct iwm_softc *sc, struct iwm_node *in, int enable)
{
	struct iwm_beacon_filter_cmd cmd = {
		IWM_BF_CMD_CONFIG_DEFAULTS,
		.bf_enable_beacon_filter = htole32(1),
		.ba_enable_beacon_abort = htole32(enable),
	};

	if (!sc->sc_bf.bf_enabled)
		return 0;

	/* Record the new state before refreshing the CQM parameters. */
	sc->sc_bf.ba_enabled = enable;
	iwm_beacon_filter_set_cqm_params(sc, in, &cmd);
	return iwm_beacon_filter_send_cmd(sc, &cmd);
}
4443
4444 static void
4445 iwm_power_build_cmd(struct iwm_softc *sc, struct iwm_node *in,
4446 struct iwm_mac_power_cmd *cmd)
4447 {
4448 struct ieee80211_node *ni = &in->in_ni;
4449 int dtim_period, dtim_msec, keep_alive;
4450
4451 cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
4452 in->in_color));
4453 if (ni->ni_dtim_period)
4454 dtim_period = ni->ni_dtim_period;
4455 else
4456 dtim_period = 1;
4457
4458 /*
4459 * Regardless of power management state the driver must set
4460 * keep alive period. FW will use it for sending keep alive NDPs
4461 * immediately after association. Check that keep alive period
4462 * is at least 3 * DTIM.
4463 */
4464 dtim_msec = dtim_period * ni->ni_intval;
4465 keep_alive = MAX(3 * dtim_msec, 1000 * IWM_POWER_KEEP_ALIVE_PERIOD_SEC);
4466 keep_alive = roundup(keep_alive, 1000) / 1000;
4467 cmd->keep_alive_seconds = htole16(keep_alive);
4468
4469 #ifdef notyet
4470 cmd->flags = htole16(IWM_POWER_FLAGS_POWER_SAVE_ENA_MSK);
4471 cmd->rx_data_timeout = IWM_DEFAULT_PS_RX_DATA_TIMEOUT;
4472 cmd->tx_data_timeout = IWM_DEFAULT_PS_TX_DATA_TIMEOUT;
4473 #endif
4474 }
4475
4476 static int
4477 iwm_power_mac_update_mode(struct iwm_softc *sc, struct iwm_node *in)
4478 {
4479 int err;
4480 int ba_enable;
4481 struct iwm_mac_power_cmd cmd;
4482
4483 memset(&cmd, 0, sizeof(cmd));
4484
4485 iwm_power_build_cmd(sc, in, &cmd);
4486
4487 err = iwm_send_cmd_pdu(sc, IWM_MAC_PM_POWER_TABLE, 0,
4488 sizeof(cmd), &cmd);
4489 if (err)
4490 return err;
4491
4492 ba_enable = !!(cmd.flags &
4493 htole16(IWM_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
4494 return iwm_update_beacon_abort(sc, in, ba_enable);
4495 }
4496
/*
 * Send the device-wide power table command.  A no-op (returns 0) if
 * the firmware does not advertise device power-save command support.
 * CAM (continuously active mode) is always requested for now.
 */
static int
iwm_power_update_device(struct iwm_softc *sc)
{
	struct iwm_device_power_cmd cmd = {
#ifdef notyet
		.flags = htole16(IWM_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK),
#else
		.flags = 0,
#endif
	};

	if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
		return 0;

	cmd.flags |= htole16(IWM_DEVICE_POWER_FLAGS_CAM_MSK);
	DPRINTF(("Sending device power command with flags = 0x%X\n",
	    cmd.flags));

	return iwm_send_cmd_pdu(sc, IWM_POWER_TABLE_CMD, 0, sizeof(cmd), &cmd);
}
4517
#ifdef notyet
/*
 * Enable firmware beacon filtering and record the new state on
 * success.  Currently compiled out ('notyet').
 */
static int
iwm_enable_beacon_filter(struct iwm_softc *sc, struct iwm_node *in)
{
	struct iwm_beacon_filter_cmd cmd = {
		IWM_BF_CMD_CONFIG_DEFAULTS,
		.bf_enable_beacon_filter = htole32(1),
	};
	int err;

	iwm_beacon_filter_set_cqm_params(sc, in, &cmd);
	err = iwm_beacon_filter_send_cmd(sc, &cmd);

	if (err == 0)
		sc->sc_bf.bf_enabled = 1;

	return err;
}
#endif
4537
4538 static int
4539 iwm_disable_beacon_filter(struct iwm_softc *sc)
4540 {
4541 struct iwm_beacon_filter_cmd cmd;
4542 int err;
4543
4544 memset(&cmd, 0, sizeof(cmd));
4545 if ((sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_BF_UPDATED) == 0)
4546 return 0;
4547
4548 err = iwm_beacon_filter_send_cmd(sc, &cmd);
4549 if (err == 0)
4550 sc->sc_bf.bf_enabled = 0;
4551
4552 return err;
4553 }
4554
4555 static int
4556 iwm_add_sta_cmd(struct iwm_softc *sc, struct iwm_node *in, int update)
4557 {
4558 struct iwm_add_sta_cmd_v7 add_sta_cmd;
4559 int err;
4560 uint32_t status;
4561
4562 memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
4563
4564 add_sta_cmd.sta_id = IWM_STATION_ID;
4565 add_sta_cmd.mac_id_n_color
4566 = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
4567 if (!update) {
4568 int ac;
4569 for (ac = 0; ac < WME_NUM_AC; ac++) {
4570 add_sta_cmd.tfd_queue_msk |=
4571 htole32(__BIT(iwm_ac_to_tx_fifo[ac]));
4572 }
4573 IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
4574 }
4575 add_sta_cmd.add_modify = update ? 1 : 0;
4576 add_sta_cmd.station_flags_msk
4577 |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
4578 add_sta_cmd.tid_disable_tx = htole16(0xffff);
4579 if (update)
4580 add_sta_cmd.modify_mask |= (IWM_STA_MODIFY_TID_DISABLE_TX);
4581
4582 #ifndef IEEE80211_NO_HT
4583 if (in->in_ni.ni_flags & IEEE80211_NODE_HT) {
4584 add_sta_cmd.station_flags_msk
4585 |= htole32(IWM_STA_FLG_MAX_AGG_SIZE_MSK |
4586 IWM_STA_FLG_AGG_MPDU_DENS_MSK);
4587
4588 add_sta_cmd.station_flags
4589 |= htole32(IWM_STA_FLG_MAX_AGG_SIZE_64K);
4590 switch (ic->ic_ampdu_params & IEEE80211_AMPDU_PARAM_SS) {
4591 case IEEE80211_AMPDU_PARAM_SS_2:
4592 add_sta_cmd.station_flags
4593 |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_2US);
4594 break;
4595 case IEEE80211_AMPDU_PARAM_SS_4:
4596 add_sta_cmd.station_flags
4597 |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_4US);
4598 break;
4599 case IEEE80211_AMPDU_PARAM_SS_8:
4600 add_sta_cmd.station_flags
4601 |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_8US);
4602 break;
4603 case IEEE80211_AMPDU_PARAM_SS_16:
4604 add_sta_cmd.station_flags
4605 |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_16US);
4606 break;
4607 default:
4608 break;
4609 }
4610 }
4611 #endif
4612
4613 status = IWM_ADD_STA_SUCCESS;
4614 err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(add_sta_cmd),
4615 &add_sta_cmd, &status);
4616 if (err == 0 && status != IWM_ADD_STA_SUCCESS)
4617 err = EIO;
4618
4619 return err;
4620 }
4621
4622 static int
4623 iwm_add_aux_sta(struct iwm_softc *sc)
4624 {
4625 struct iwm_add_sta_cmd_v7 cmd;
4626 int err;
4627 uint32_t status;
4628
4629 err = iwm_enable_txq(sc, 0, IWM_AUX_QUEUE, IWM_TX_FIFO_MCAST);
4630 if (err)
4631 return err;
4632
4633 memset(&cmd, 0, sizeof(cmd));
4634 cmd.sta_id = IWM_AUX_STA_ID;
4635 cmd.mac_id_n_color =
4636 htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_MAC_INDEX_AUX, 0));
4637 cmd.tfd_queue_msk = htole32(1 << IWM_AUX_QUEUE);
4638 cmd.tid_disable_tx = htole16(0xffff);
4639
4640 status = IWM_ADD_STA_SUCCESS;
4641 err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(cmd), &cmd,
4642 &status);
4643 if (err == 0 && status != IWM_ADD_STA_SUCCESS)
4644 err = EIO;
4645
4646 return err;
4647 }
4648
4649 #define IWM_PLCP_QUIET_THRESH 1
4650 #define IWM_ACTIVE_QUIET_TIME 10
4651 #define LONG_OUT_TIME_PERIOD 600
4652 #define SHORT_OUT_TIME_PERIOD 200
4653 #define SUSPEND_TIME_PERIOD 100
4654
4655 static uint16_t
4656 iwm_scan_rx_chain(struct iwm_softc *sc)
4657 {
4658 uint16_t rx_chain;
4659 uint8_t rx_ant;
4660
4661 rx_ant = iwm_fw_valid_rx_ant(sc);
4662 rx_chain = rx_ant << IWM_PHY_RX_CHAIN_VALID_POS;
4663 rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
4664 rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_SEL_POS;
4665 rx_chain |= 0x1 << IWM_PHY_RX_CHAIN_DRIVER_FORCE_POS;
4666 return htole16(rx_chain);
4667 }
4668
/*
 * Pick the rate_n_flags value for scan probe transmissions: rotate
 * through the valid TX antennas (cached in sc_scan_last_antenna so
 * successive scans spread wear) and use 1Mbps CCK on 2GHz unless
 * no_cck is set, 6Mbps OFDM otherwise.
 * Returns the value in little-endian byte order.
 */
static uint32_t
iwm_scan_rate_n_flags(struct iwm_softc *sc, int flags, int no_cck)
{
	uint32_t tx_ant;
	int i, ind;

	/* Advance to the next valid antenna after the one used last. */
	for (i = 0, ind = sc->sc_scan_last_antenna;
	    i < IWM_RATE_MCS_ANT_NUM; i++) {
		ind = (ind + 1) % IWM_RATE_MCS_ANT_NUM;
		if (iwm_fw_valid_tx_ant(sc) & (1 << ind)) {
			sc->sc_scan_last_antenna = ind;
			break;
		}
	}
	tx_ant = (1 << sc->sc_scan_last_antenna) << IWM_RATE_MCS_ANT_POS;

	if ((flags & IEEE80211_CHAN_2GHZ) && !no_cck)
		return htole32(IWM_RATE_1M_PLCP | IWM_RATE_MCS_CCK_MSK |
		    tx_ant);
	else
		return htole32(IWM_RATE_6M_PLCP | tx_ant);
}
4691
4692 #ifdef notyet
4693 /*
4694 * If req->n_ssids > 0, it means we should do an active scan.
4695 * In case of active scan w/o directed scan, we receive a zero-length SSID
4696 * just to notify that this scan is active and not passive.
4697 * In order to notify the FW of the number of SSIDs we wish to scan (including
4698 * the zero-length one), we need to set the corresponding bits in chan->type,
4699 * one for each SSID, and set the active bit (first). If the first SSID is
4700 * already included in the probe template, so we need to set only
4701 * req->n_ssids - 1 bits in addition to the first bit.
4702 */
4703 static uint16_t
4704 iwm_get_active_dwell(struct iwm_softc *sc, int flags, int n_ssids)
4705 {
4706 if (flags & IEEE80211_CHAN_2GHZ)
4707 return 30 + 3 * (n_ssids + 1);
4708 return 20 + 2 * (n_ssids + 1);
4709 }
4710
4711 static uint16_t
4712 iwm_get_passive_dwell(struct iwm_softc *sc, int flags)
4713 {
4714 return (flags & IEEE80211_CHAN_2GHZ) ? 100 + 20 : 100 + 10;
4715 }
4716 #endif
4717
/*
 * Fill the LMAC scan channel list from the net80211 channel table,
 * skipping unconfigured channels and capping at the firmware's
 * advertised channel limit.  Returns the number of entries written.
 */
static uint8_t
iwm_lmac_scan_fill_channels(struct iwm_softc *sc,
    struct iwm_scan_channel_cfg_lmac *chan, int n_ssids)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_channel *c;
	uint8_t nchan;

	for (nchan = 0, c = &ic->ic_channels[1];
	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
	    nchan < sc->sc_capa_n_scan_channels;
	    c++) {
		/* ic_flags == 0 marks an unused channel slot. */
		if (c->ic_flags == 0)
			continue;

		chan->channel_num = htole16(ieee80211_mhz2ieee(c->ic_freq, 0));
		chan->iter_count = htole16(1);
		chan->iter_interval = 0;
		chan->flags = htole32(IWM_UNIFIED_SCAN_CHANNEL_PARTIAL);
#if 0 /* makes scanning while associated less useful */
		if (n_ssids != 0)
			chan->flags |= htole32(1 << 1); /* select SSID 0 */
#endif
		chan++;
		nchan++;
	}

	return nchan;
}
4747
/*
 * Fill the UMAC scan channel list from the net80211 channel table,
 * skipping unconfigured channels and capping at the firmware's
 * advertised channel limit.  Returns the number of entries written.
 * (Unlike the LMAC variant, channel_num/iter_count are single bytes.)
 */
static uint8_t
iwm_umac_scan_fill_channels(struct iwm_softc *sc,
    struct iwm_scan_channel_cfg_umac *chan, int n_ssids)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_channel *c;
	uint8_t nchan;

	for (nchan = 0, c = &ic->ic_channels[1];
	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
	    nchan < sc->sc_capa_n_scan_channels;
	    c++) {
		/* ic_flags == 0 marks an unused channel slot. */
		if (c->ic_flags == 0)
			continue;
		chan->channel_num = ieee80211_mhz2ieee(c->ic_freq, 0);
		chan->iter_count = 1;
		chan->iter_interval = htole16(0);
#if 0 /* makes scanning while associated less useful */
		if (n_ssids != 0)
			chan->flags = htole32(1 << 0); /* select SSID 0 */
#endif
		chan++;
		nchan++;
	}

	return nchan;
}
4775
/*
 * Build the probe request template the firmware transmits during
 * active scans: an 802.11 header, the desired SSID, and the supported
 * rate IEs for each enabled band, with offset/length bookkeeping so
 * the firmware can patch the frame per band.
 * Returns 0 on success or ENOBUFS if the template buffer is too small.
 */
static int
iwm_fill_probe_req(struct iwm_softc *sc, struct iwm_scan_probe_req *preq)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_frame *wh = (struct ieee80211_frame *)preq->buf;
	struct ieee80211_rateset *rs;
	size_t remain = sizeof(preq->buf);
	uint8_t *frm, *pos;

	memset(preq, 0, sizeof(*preq));

	/* Room for the header plus the SSID element (2-byte IE header). */
	if (remain < sizeof(*wh) + 2 + ic->ic_des_esslen)
		return ENOBUFS;

	/*
	 * Build a probe request frame. Most of the following code is a
	 * copy & paste of what is done in net80211.
	 */
	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
	IEEE80211_ADDR_COPY(wh->i_addr1, etherbroadcastaddr);
	IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_myaddr);
	IEEE80211_ADDR_COPY(wh->i_addr3, etherbroadcastaddr);
	*(uint16_t *)&wh->i_dur[0] = 0;	/* filled by HW */
	*(uint16_t *)&wh->i_seq[0] = 0;	/* filled by HW */

	frm = (uint8_t *)(wh + 1);
	frm = ieee80211_add_ssid(frm, ic->ic_des_essid, ic->ic_des_esslen);

	/* Tell the firmware where the MAC header is. */
	preq->mac_header.offset = 0;
	preq->mac_header.len = htole16(frm - (uint8_t *)wh);
	remain -= frm - (uint8_t *)wh;

	/* Fill in 2GHz IEs and tell firmware where they are. */
	rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
	if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
		/* Rates + extended rates IEs: two 2-byte IE headers. */
		if (remain < 4 + rs->rs_nrates)
			return ENOBUFS;
	} else if (remain < 2 + rs->rs_nrates)
		return ENOBUFS;
	preq->band_data[0].offset = htole16(frm - (uint8_t *)wh);
	pos = frm;
	frm = ieee80211_add_rates(frm, rs);
	if (rs->rs_nrates > IEEE80211_RATE_SIZE)
		frm = ieee80211_add_xrates(frm, rs);
	preq->band_data[0].len = htole16(frm - pos);
	remain -= frm - pos;

	if (isset(sc->sc_enabled_capa,
	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)) {
		/* Empty DS params element; firmware fills the channel. */
		if (remain < 3)
			return ENOBUFS;
		*frm++ = IEEE80211_ELEMID_DSPARMS;
		*frm++ = 1;
		*frm++ = 0;
		remain -= 3;
	}

	if (sc->sc_nvm.sku_cap_band_52GHz_enable) {
		/* Fill in 5GHz IEs. */
		rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
		if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
			if (remain < 4 + rs->rs_nrates)
				return ENOBUFS;
		} else if (remain < 2 + rs->rs_nrates)
			return ENOBUFS;
		preq->band_data[1].offset = htole16(frm - (uint8_t *)wh);
		pos = frm;
		frm = ieee80211_add_rates(frm, rs);
		if (rs->rs_nrates > IEEE80211_RATE_SIZE)
			frm = ieee80211_add_xrates(frm, rs);
		preq->band_data[1].len = htole16(frm - pos);
		remain -= frm - pos;
	}

#ifndef IEEE80211_NO_HT
	/* Send 11n IEs on both 2GHz and 5GHz bands. */
	preq->common_data.offset = htole16(frm - (uint8_t *)wh);
	pos = frm;
	if (ic->ic_flags & IEEE80211_F_HTON) {
		if (remain < 28)
			return ENOBUFS;
		frm = ieee80211_add_htcaps(frm, ic);
		/* XXX add WME info? */
	}
#endif

	/*
	 * NOTE(review): with IEEE80211_NO_HT defined (as at the top of
	 * this file), common_data.offset is left at 0 and 'pos' still
	 * points at the last band's IEs here, so common_data.len covers
	 * those IEs rather than being 0 — presumably benign since it
	 * matches the upstream layout; confirm against OpenBSD if_iwm.c.
	 */
	preq->common_data.len = htole16(frm - pos);

	return 0;
}
4869
/*
 * Start a one-shot scan using the older LMAC scan API
 * (IWM_SCAN_OFFLOAD_REQUEST_CMD).  Builds the request — dwell times,
 * scan flags, per-band TX parameters, channel list and probe request
 * template — in a temporary buffer and submits it synchronously.
 * Returns 0 on success or an errno.
 */
static int
iwm_lmac_scan(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_host_cmd hcmd = {
		.id = IWM_SCAN_OFFLOAD_REQUEST_CMD,
		.len = { 0, },
		.data = { NULL, },
		.flags = 0,
	};
	struct iwm_scan_req_lmac *req;
	size_t req_len;
	int err;

	DPRINTF(("%s: %s\n", DEVNAME(sc), __func__));

	/* Request = fixed header + per-channel configs + probe template. */
	req_len = sizeof(struct iwm_scan_req_lmac) +
	    (sizeof(struct iwm_scan_channel_cfg_lmac) *
	    sc->sc_capa_n_scan_channels) + sizeof(struct iwm_scan_probe_req);
	if (req_len > IWM_MAX_CMD_PAYLOAD_SIZE)
		return ENOMEM;
	req = kmem_zalloc(req_len, KM_SLEEP);
	if (req == NULL)
		return ENOMEM;

	hcmd.len[0] = (uint16_t)req_len;
	hcmd.data[0] = (void *)req;

	/* These timings correspond to iwlwifi's UNASSOC scan. */
	req->active_dwell = 10;
	req->passive_dwell = 110;
	req->fragmented_dwell = 44;
	req->extended_dwell = 90;
	req->max_out_time = 0;
	req->suspend_time = 0;

	req->scan_prio = htole32(IWM_SCAN_PRIORITY_HIGH);
	req->rx_chain_select = iwm_scan_rx_chain(sc);
	req->iter_num = htole32(1);
	req->delay = 0;

	req->scan_flags = htole32(IWM_LMAC_SCAN_FLAG_PASS_ALL |
	    IWM_LMAC_SCAN_FLAG_ITER_COMPLETE |
	    IWM_LMAC_SCAN_FLAG_EXTENDED_DWELL);
	/* Passive scan unless a desired ESSID was configured. */
	if (ic->ic_des_esslen == 0)
		req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAG_PASSIVE);
	else
		req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAG_PRE_CONNECTION);
	if (isset(sc->sc_enabled_capa,
	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
		req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAGS_RRM_ENABLED);

	req->flags = htole32(IWM_PHY_BAND_24);
	if (sc->sc_nvm.sku_cap_band_52GHz_enable)
		req->flags |= htole32(IWM_PHY_BAND_5);
	req->filter_flags =
	    htole32(IWM_MAC_FILTER_ACCEPT_GRP | IWM_MAC_FILTER_IN_BEACON);

	/* Tx flags 2 GHz. */
	req->tx_cmd[0].tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
	    IWM_TX_CMD_FLG_BT_DIS);
	req->tx_cmd[0].rate_n_flags =
	    iwm_scan_rate_n_flags(sc, IEEE80211_CHAN_2GHZ, 1/*XXX*/);
	req->tx_cmd[0].sta_id = IWM_AUX_STA_ID;

	/* Tx flags 5 GHz. */
	req->tx_cmd[1].tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
	    IWM_TX_CMD_FLG_BT_DIS);
	req->tx_cmd[1].rate_n_flags =
	    iwm_scan_rate_n_flags(sc, IEEE80211_CHAN_5GHZ, 1/*XXX*/);
	req->tx_cmd[1].sta_id = IWM_AUX_STA_ID;

	/* Check if we're doing an active directed scan. */
	if (ic->ic_des_esslen != 0) {
		req->direct_scan[0].id = IEEE80211_ELEMID_SSID;
		req->direct_scan[0].len = ic->ic_des_esslen;
		memcpy(req->direct_scan[0].ssid, ic->ic_des_essid,
		    ic->ic_des_esslen);
	}

	req->n_channels = iwm_lmac_scan_fill_channels(sc,
	    (struct iwm_scan_channel_cfg_lmac *)req->data,
	    ic->ic_des_esslen != 0);

	/* Probe request template lives after the channel configs. */
	err = iwm_fill_probe_req(sc,
	    (struct iwm_scan_probe_req *)(req->data +
	    (sizeof(struct iwm_scan_channel_cfg_lmac) *
	    sc->sc_capa_n_scan_channels)));
	if (err) {
		kmem_free(req, req_len);
		return err;
	}

	/* Specify the scan plan: We'll do one iteration. */
	req->schedule[0].iterations = 1;
	req->schedule[0].full_scan_mul = 1;

	/* Disable EBS. */
	req->channel_opt[0].non_ebs_ratio = 1;
	req->channel_opt[1].non_ebs_ratio = 1;

	err = iwm_send_cmd(sc, &hcmd);
	kmem_free(req, req_len);
	return err;
}
4975
/*
 * Send the one-time UMAC scan configuration (IWM_SCAN_CFG_CMD):
 * antenna chains, legacy rate set, dwell times, our MAC address and
 * the full channel list.  Must precede iwm_umac_scan().
 * Returns 0 on success or an errno.
 */
static int
iwm_config_umac_scan(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_scan_config *scan_config;
	int err, nchan;
	size_t cmd_size;
	struct ieee80211_channel *c;
	struct iwm_host_cmd hcmd = {
		.id = iwm_cmd_id(IWM_SCAN_CFG_CMD, IWM_ALWAYS_LONG_GROUP, 0),
		.flags = 0,
	};
	static const uint32_t rates = (IWM_SCAN_CONFIG_RATE_1M |
	    IWM_SCAN_CONFIG_RATE_2M | IWM_SCAN_CONFIG_RATE_5M |
	    IWM_SCAN_CONFIG_RATE_11M | IWM_SCAN_CONFIG_RATE_6M |
	    IWM_SCAN_CONFIG_RATE_9M | IWM_SCAN_CONFIG_RATE_12M |
	    IWM_SCAN_CONFIG_RATE_18M | IWM_SCAN_CONFIG_RATE_24M |
	    IWM_SCAN_CONFIG_RATE_36M | IWM_SCAN_CONFIG_RATE_48M |
	    IWM_SCAN_CONFIG_RATE_54M);

	/* One trailing byte per channel for the channel_array. */
	cmd_size = sizeof(*scan_config) + sc->sc_capa_n_scan_channels;

	scan_config = kmem_zalloc(cmd_size, KM_SLEEP);
	if (scan_config == NULL)
		return ENOMEM;

	scan_config->tx_chains = htole32(iwm_fw_valid_tx_ant(sc));
	scan_config->rx_chains = htole32(iwm_fw_valid_rx_ant(sc));
	scan_config->legacy_rates = htole32(rates |
	    IWM_SCAN_CONFIG_SUPPORTED_RATE(rates));

	/* These timings correspond to iwlwifi's UNASSOC scan. */
	scan_config->dwell_active = 10;
	scan_config->dwell_passive = 110;
	scan_config->dwell_fragmented = 44;
	scan_config->dwell_extended = 90;
	scan_config->out_of_channel_time = htole32(0);
	scan_config->suspend_time = htole32(0);

	IEEE80211_ADDR_COPY(scan_config->mac_addr, sc->sc_ic.ic_myaddr);

	scan_config->bcast_sta_id = IWM_AUX_STA_ID;
	scan_config->channel_flags = IWM_CHANNEL_FLAG_EBS |
	    IWM_CHANNEL_FLAG_ACCURATE_EBS | IWM_CHANNEL_FLAG_EBS_ADD |
	    IWM_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE;

	/* Collect all configured channels, up to the firmware's limit. */
	for (c = &ic->ic_channels[1], nchan = 0;
	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
	    nchan < sc->sc_capa_n_scan_channels; c++) {
		if (c->ic_flags == 0)
			continue;
		scan_config->channel_array[nchan++] =
		    ieee80211_mhz2ieee(c->ic_freq, 0);
	}

	scan_config->flags = htole32(IWM_SCAN_CONFIG_FLAG_ACTIVATE |
	    IWM_SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
	    IWM_SCAN_CONFIG_FLAG_SET_TX_CHAINS |
	    IWM_SCAN_CONFIG_FLAG_SET_RX_CHAINS |
	    IWM_SCAN_CONFIG_FLAG_SET_AUX_STA_ID |
	    IWM_SCAN_CONFIG_FLAG_SET_ALL_TIMES |
	    IWM_SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
	    IWM_SCAN_CONFIG_FLAG_SET_MAC_ADDR |
	    IWM_SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS|
	    IWM_SCAN_CONFIG_N_CHANNELS(nchan) |
	    IWM_SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED);

	hcmd.data[0] = scan_config;
	hcmd.len[0] = cmd_size;

	err = iwm_send_cmd(sc, &hcmd);
	kmem_free(scan_config, cmd_size);
	return err;
}
5050
/*
 * Start a one-shot scan using the newer UMAC scan API
 * (IWM_SCAN_REQ_UMAC).  The request layout is: fixed header,
 * per-channel configs, then a tail holding the directed-scan SSIDs,
 * schedule and probe request template.
 * Returns 0 on success or an errno.
 */
static int
iwm_umac_scan(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_host_cmd hcmd = {
		.id = iwm_cmd_id(IWM_SCAN_REQ_UMAC, IWM_ALWAYS_LONG_GROUP, 0),
		.len = { 0, },
		.data = { NULL, },
		.flags = 0,
	};
	struct iwm_scan_req_umac *req;
	struct iwm_scan_req_umac_tail *tail;
	size_t req_len;
	int err;

	DPRINTF(("%s: %s\n", DEVNAME(sc), __func__));

	req_len = sizeof(struct iwm_scan_req_umac) +
	    (sizeof(struct iwm_scan_channel_cfg_umac) *
	    sc->sc_capa_n_scan_channels) +
	    sizeof(struct iwm_scan_req_umac_tail);
	if (req_len > IWM_MAX_CMD_PAYLOAD_SIZE)
		return ENOMEM;
	req = kmem_zalloc(req_len, KM_SLEEP);
	if (req == NULL)
		return ENOMEM;

	hcmd.len[0] = (uint16_t)req_len;
	hcmd.data[0] = (void *)req;

	/* These timings correspond to iwlwifi's UNASSOC scan. */
	req->active_dwell = 10;
	req->passive_dwell = 110;
	req->fragmented_dwell = 44;
	req->extended_dwell = 90;
	req->max_out_time = 0;
	req->suspend_time = 0;

	req->scan_priority = htole32(IWM_SCAN_PRIORITY_HIGH);
	req->ooc_priority = htole32(IWM_SCAN_PRIORITY_HIGH);

	req->n_channels = iwm_umac_scan_fill_channels(sc,
	    (struct iwm_scan_channel_cfg_umac *)req->data,
	    ic->ic_des_esslen != 0);

	req->general_flags = htole32(IWM_UMAC_SCAN_GEN_FLAGS_PASS_ALL |
	    IWM_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE |
	    IWM_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL);

	/* The tail follows the channel config array. */
	tail = (struct iwm_scan_req_umac_tail *)(req->data +
	    sizeof(struct iwm_scan_channel_cfg_umac) *
	    sc->sc_capa_n_scan_channels);

	/* Check if we're doing an active directed scan. */
	if (ic->ic_des_esslen != 0) {
		tail->direct_scan[0].id = IEEE80211_ELEMID_SSID;
		tail->direct_scan[0].len = ic->ic_des_esslen;
		memcpy(tail->direct_scan[0].ssid, ic->ic_des_essid,
		    ic->ic_des_esslen);
		req->general_flags |=
		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT);
	} else
		req->general_flags |= htole32(IWM_UMAC_SCAN_GEN_FLAGS_PASSIVE);

	if (isset(sc->sc_enabled_capa,
	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
		req->general_flags |=
		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED);

	err = iwm_fill_probe_req(sc, &tail->preq);
	if (err) {
		kmem_free(req, req_len);
		return err;
	}

	/* Specify the scan plan: We'll do one iteration. */
	tail->schedule[0].interval = 0;
	tail->schedule[0].iter_count = 1;

	err = iwm_send_cmd(sc, &hcmd);
	kmem_free(req, req_len);
	return err;
}
5134
5135 static uint8_t
5136 iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
5137 {
5138 int i;
5139 uint8_t rval;
5140
5141 for (i = 0; i < rs->rs_nrates; i++) {
5142 rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
5143 if (rval == iwm_rates[ridx].rate)
5144 return rs->rs_rates[i];
5145 }
5146 return 0;
5147 }
5148
/*
 * Compute the CCK and OFDM basic-rate bitmaps used for control frame
 * responses, starting from the BSS's basic rates and then adding the
 * lower mandatory rates 802.11 requires (see the long comments below).
 * Results are returned through *cck_rates and *ofdm_rates.
 */
static void
iwm_ack_rates(struct iwm_softc *sc, struct iwm_node *in, int *cck_rates,
    int *ofdm_rates)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211_rateset *rs = &ni->ni_rates;
	int lowest_present_ofdm = 100;
	int lowest_present_cck = 100;
	uint8_t cck = 0;
	uint8_t ofdm = 0;
	int i;

	/* CCK rates only exist on 2GHz (or unknown channel). */
	if (ni->ni_chan == IEEE80211_CHAN_ANYC ||
	    IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) {
		for (i = IWM_FIRST_CCK_RATE; i < IWM_FIRST_OFDM_RATE; i++) {
			if ((iwm_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
				continue;
			cck |= (1 << i);
			if (lowest_present_cck > i)
				lowest_present_cck = i;
		}
	}
	for (i = IWM_FIRST_OFDM_RATE; i <= IWM_LAST_NON_HT_RATE; i++) {
		if ((iwm_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
			continue;
		ofdm |= (1 << (i - IWM_FIRST_OFDM_RATE));
		if (lowest_present_ofdm > i)
			lowest_present_ofdm = i;
	}

	/*
	 * Now we've got the basic rates as bitmaps in the ofdm and cck
	 * variables. This isn't sufficient though, as there might not
	 * be all the right rates in the bitmap. E.g. if the only basic
	 * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
	 * and 6 Mbps because the 802.11-2007 standard says in 9.6:
	 *
	 * [...] a STA responding to a received frame shall transmit
	 * its Control Response frame [...] at the highest rate in the
	 * BSSBasicRateSet parameter that is less than or equal to the
	 * rate of the immediately previous frame in the frame exchange
	 * sequence ([...]) and that is of the same modulation class
	 * ([...]) as the received frame. If no rate contained in the
	 * BSSBasicRateSet parameter meets these conditions, then the
	 * control frame sent in response to a received frame shall be
	 * transmitted at the highest mandatory rate of the PHY that is
	 * less than or equal to the rate of the received frame, and
	 * that is of the same modulation class as the received frame.
	 *
	 * As a consequence, we need to add all mandatory rates that are
	 * lower than all of the basic rates to these bitmaps.
	 */

	if (IWM_RATE_24M_INDEX < lowest_present_ofdm)
		ofdm |= IWM_RATE_BIT_MSK(24) >> IWM_FIRST_OFDM_RATE;
	if (IWM_RATE_12M_INDEX < lowest_present_ofdm)
		ofdm |= IWM_RATE_BIT_MSK(12) >> IWM_FIRST_OFDM_RATE;
	/* 6M already there or needed so always add */
	ofdm |= IWM_RATE_BIT_MSK(6) >> IWM_FIRST_OFDM_RATE;

	/*
	 * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
	 * Note, however:
	 *  - if no CCK rates are basic, it must be ERP since there must
	 *    be some basic rates at all, so they're OFDM => ERP PHY
	 *    (or we're in 5 GHz, and the cck bitmap will never be used)
	 *  - if 11M is a basic rate, it must be ERP as well, so add 5.5M
	 *  - if 5.5M is basic, 1M and 2M are mandatory
	 *  - if 2M is basic, 1M is mandatory
	 *  - if 1M is basic, that's the only valid ACK rate.
	 * As a consequence, it's not as complicated as it sounds, just add
	 * any lower rates to the ACK rate bitmap.
	 */
	if (IWM_RATE_11M_INDEX < lowest_present_cck)
		cck |= IWM_RATE_BIT_MSK(11) >> IWM_FIRST_CCK_RATE;
	if (IWM_RATE_5M_INDEX < lowest_present_cck)
		cck |= IWM_RATE_BIT_MSK(5) >> IWM_FIRST_CCK_RATE;
	if (IWM_RATE_2M_INDEX < lowest_present_cck)
		cck |= IWM_RATE_BIT_MSK(2) >> IWM_FIRST_CCK_RATE;
	/* 1M already there or needed so always add */
	cck |= IWM_RATE_BIT_MSK(1) >> IWM_FIRST_CCK_RATE;

	*cck_rates = cck;
	*ofdm_rates = ofdm;
}
5234
/*
 * Fill the fields of a MAC context command which are common to all
 * actions (add/modify): addresses, ACK rates, preamble/slot flags,
 * per-AC EDCA parameters and protection flags.
 */
static void
iwm_mac_ctxt_cmd_common(struct iwm_softc *sc, struct iwm_node *in,
    struct iwm_mac_ctx_cmd *cmd, uint32_t action, int assoc)
{
#define IWM_EXP2(x)	((1 << (x)) - 1)	/* CWmin = 2^ECWmin - 1 */
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = ic->ic_bss;
	int cck_ack_rates, ofdm_ack_rates;
	int i;

	/* Identify which MAC context this command applies to. */
	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
	    in->in_color));
	cmd->action = htole32(action);

	/* This driver only operates as a BSS (infrastructure) station. */
	cmd->mac_type = htole32(IWM_FW_MAC_TYPE_BSS_STA);
	cmd->tsf_id = htole32(IWM_TSF_ID_A);

	IEEE80211_ADDR_COPY(cmd->node_addr, ic->ic_myaddr);
	IEEE80211_ADDR_COPY(cmd->bssid_addr, ni->ni_bssid);

	/* Basic (control-response) rate bitmaps, see iwm_ack_rates(). */
	iwm_ack_rates(sc, in, &cck_ack_rates, &ofdm_ack_rates);
	cmd->cck_rates = htole32(cck_ack_rates);
	cmd->ofdm_rates = htole32(ofdm_ack_rates);

	cmd->cck_short_preamble
	    = htole32((ic->ic_flags & IEEE80211_F_SHPREAMBLE)
	      ? IWM_MAC_FLG_SHORT_PREAMBLE : 0);
	cmd->short_slot
	    = htole32((ic->ic_flags & IEEE80211_F_SHSLOT)
	      ? IWM_MAC_FLG_SHORT_SLOT : 0);

	/* Map net80211 WME parameters onto the firmware's EDCA slots. */
	for (i = 0; i < WME_NUM_AC; i++) {
		struct wmeParams *wmep = &ic->ic_wme.wme_params[i];
		int txf = iwm_ac_to_tx_fifo[i];

		cmd->ac[txf].cw_min = htole16(IWM_EXP2(wmep->wmep_logcwmin));
		cmd->ac[txf].cw_max = htole16(IWM_EXP2(wmep->wmep_logcwmax));
		cmd->ac[txf].aifsn = wmep->wmep_aifsn;
		cmd->ac[txf].fifos_mask = (1 << txf);
		/* TXOP limit is expressed in units of 32 usec. */
		cmd->ac[txf].edca_txop = htole16(wmep->wmep_txopLimit * 32);
	}
	if (ni->ni_flags & IEEE80211_NODE_QOS)
		cmd->qos_flags |= htole32(IWM_MAC_QOS_FLG_UPDATE_EDCA);

#ifndef IEEE80211_NO_HT
	if (ni->ni_flags & IEEE80211_NODE_HT) {
		enum ieee80211_htprot htprot =
		    (ni->ni_htop1 & IEEE80211_HTOP1_PROT_MASK);
		switch (htprot) {
		case IEEE80211_HTPROT_NONE:
			break;
		case IEEE80211_HTPROT_NONMEMBER:
		case IEEE80211_HTPROT_NONHT_MIXED:
			cmd->protection_flags |=
			    htole32(IWM_MAC_PROT_FLG_HT_PROT);
		/*
		 * NOTE(review): falls through into the 20MHZ case below,
		 * which additionally sets FAT_PROT -- confirm a `break'
		 * is not missing here.  (Currently compiled out because
		 * IEEE80211_NO_HT is defined at the top of this file.)
		 */
		case IEEE80211_HTPROT_20MHZ:
			cmd->protection_flags |=
			    htole32(IWM_MAC_PROT_FLG_HT_PROT |
			    IWM_MAC_PROT_FLG_FAT_PROT);
			break;
		default:
			break;
		}

		cmd->qos_flags |= htole32(IWM_MAC_QOS_FLG_TGN);
	}
#endif

	if (ic->ic_flags & IEEE80211_F_USEPROT)
		cmd->protection_flags |= htole32(IWM_MAC_PROT_FLG_TGG_PROTECT);

	/* Accept group-addressed (multicast) frames. */
	cmd->filter_flags = htole32(IWM_MAC_FILTER_ACCEPT_GRP);
#undef IWM_EXP2
}
5309
5310 static void
5311 iwm_mac_ctxt_cmd_fill_sta(struct iwm_softc *sc, struct iwm_node *in,
5312 struct iwm_mac_data_sta *sta, int assoc)
5313 {
5314 struct ieee80211_node *ni = &in->in_ni;
5315 uint32_t dtim_off;
5316 uint64_t tsf;
5317
5318 dtim_off = ni->ni_dtim_count * ni->ni_intval * IEEE80211_DUR_TU;
5319 tsf = le64toh(ni->ni_tstamp.tsf);
5320
5321 sta->is_assoc = htole32(assoc);
5322 sta->dtim_time = htole32(ni->ni_rstamp + dtim_off);
5323 sta->dtim_tsf = htole64(tsf + dtim_off);
5324 sta->bi = htole32(ni->ni_intval);
5325 sta->bi_reciprocal = htole32(iwm_reciprocal(ni->ni_intval));
5326 sta->dtim_interval = htole32(ni->ni_intval * ni->ni_dtim_period);
5327 sta->dtim_reciprocal = htole32(iwm_reciprocal(sta->dtim_interval));
5328 sta->listen_interval = htole32(10);
5329 sta->assoc_id = htole32(ni->ni_associd);
5330 sta->assoc_beacon_arrive_time = htole32(ni->ni_rstamp);
5331 }
5332
5333 static int
5334 iwm_mac_ctxt_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action,
5335 int assoc)
5336 {
5337 struct ieee80211_node *ni = &in->in_ni;
5338 struct iwm_mac_ctx_cmd cmd;
5339
5340 memset(&cmd, 0, sizeof(cmd));
5341
5342 iwm_mac_ctxt_cmd_common(sc, in, &cmd, action, assoc);
5343
5344 /* Allow beacons to pass through as long as we are not associated or we
5345 * do not have dtim period information */
5346 if (!assoc || !ni->ni_associd || !ni->ni_dtim_period)
5347 cmd.filter_flags |= htole32(IWM_MAC_FILTER_IN_BEACON);
5348 else
5349 iwm_mac_ctxt_cmd_fill_sta(sc, in, &cmd.sta, assoc);
5350
5351 return iwm_send_cmd_pdu(sc, IWM_MAC_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
5352 }
5353
5354 #define IWM_MISSED_BEACONS_THRESHOLD 8
5355
5356 static void
5357 iwm_rx_missed_beacons_notif(struct iwm_softc *sc,
5358 struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
5359 {
5360 struct iwm_missed_beacons_notif *mb = (void *)pkt->data;
5361
5362 DPRINTF(("missed bcn mac_id=%u, consecutive=%u (%u, %u, %u)\n",
5363 le32toh(mb->mac_id),
5364 le32toh(mb->consec_missed_beacons),
5365 le32toh(mb->consec_missed_beacons_since_last_rx),
5366 le32toh(mb->num_recvd_beacons),
5367 le32toh(mb->num_expected_beacons)));
5368
5369 /*
5370 * TODO: the threshold should be adjusted based on latency conditions,
5371 * and/or in case of a CS flow on one of the other AP vifs.
5372 */
5373 if (le32toh(mb->consec_missed_beacons_since_last_rx) >
5374 IWM_MISSED_BEACONS_THRESHOLD)
5375 ieee80211_beacon_miss(&sc->sc_ic);
5376 }
5377
5378 static int
5379 iwm_update_quotas(struct iwm_softc *sc, struct iwm_node *in)
5380 {
5381 struct iwm_time_quota_cmd cmd;
5382 int i, idx, num_active_macs, quota, quota_rem;
5383 int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
5384 int n_ifs[IWM_MAX_BINDINGS] = {0, };
5385 uint16_t id;
5386
5387 memset(&cmd, 0, sizeof(cmd));
5388
5389 /* currently, PHY ID == binding ID */
5390 if (in) {
5391 id = in->in_phyctxt->id;
5392 KASSERT(id < IWM_MAX_BINDINGS);
5393 colors[id] = in->in_phyctxt->color;
5394
5395 if (1)
5396 n_ifs[id] = 1;
5397 }
5398
5399 /*
5400 * The FW's scheduling session consists of
5401 * IWM_MAX_QUOTA fragments. Divide these fragments
5402 * equally between all the bindings that require quota
5403 */
5404 num_active_macs = 0;
5405 for (i = 0; i < IWM_MAX_BINDINGS; i++) {
5406 cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
5407 num_active_macs += n_ifs[i];
5408 }
5409
5410 quota = 0;
5411 quota_rem = 0;
5412 if (num_active_macs) {
5413 quota = IWM_MAX_QUOTA / num_active_macs;
5414 quota_rem = IWM_MAX_QUOTA % num_active_macs;
5415 }
5416
5417 for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
5418 if (colors[i] < 0)
5419 continue;
5420
5421 cmd.quotas[idx].id_and_color =
5422 htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
5423
5424 if (n_ifs[i] <= 0) {
5425 cmd.quotas[idx].quota = htole32(0);
5426 cmd.quotas[idx].max_duration = htole32(0);
5427 } else {
5428 cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
5429 cmd.quotas[idx].max_duration = htole32(0);
5430 }
5431 idx++;
5432 }
5433
5434 /* Give the remainder of the session to the first binding */
5435 cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
5436
5437 return iwm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, 0, sizeof(cmd), &cmd);
5438 }
5439
/*
 * Prepare the firmware for authentication with the selected BSS:
 * smart-FIFO on, multicast filter, PHY context tuned to the BSS
 * channel, MAC context, binding and station added, and the session
 * protected by a time event so the FW stays on-channel.  The order
 * of these commands matters to the firmware.
 */
static int
iwm_auth(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
	uint32_t duration;
	int err;

	err = iwm_sf_config(sc, IWM_SF_FULL_ON);
	if (err)
		return err;

	err = iwm_allow_mcast(sc);
	if (err)
		return err;

	/* Retune PHY context 0 to the BSS channel. */
	sc->sc_phyctxt[0].channel = in->in_ni.ni_chan;
	err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[0], 1, 1,
	    IWM_FW_CTXT_ACTION_MODIFY, 0);
	if (err)
		return err;
	in->in_phyctxt = &sc->sc_phyctxt[0];

	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD, 0);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not add MAC context (error %d)\n", err);
		return err;
	}

	/* Tie the MAC context to the PHY context. */
	err = iwm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD);
	if (err)
		return err;

	/* Add the station entry (not yet marked associated). */
	err = iwm_add_sta_cmd(sc, in, 0);
	if (err)
		return err;

	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 0);
	if (err) {
		aprint_error_dev(sc->sc_dev, "failed to update MAC\n");
		return err;
	}

	/*
	 * Prevent the FW from wandering off channel during association
	 * by "protecting" the session with a time event.
	 */
	if (in->in_ni.ni_intval)
		duration = in->in_ni.ni_intval * 2;
	else
		duration = IEEE80211_DUR_TU;
	iwm_protect_session(sc, in, duration, in->in_ni.ni_intval / 2);
	DELAY(100);

	return 0;
}
5497
5498 static int
5499 iwm_assoc(struct iwm_softc *sc)
5500 {
5501 struct ieee80211com *ic = &sc->sc_ic;
5502 struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
5503 int err;
5504
5505 err = iwm_add_sta_cmd(sc, in, 1);
5506 if (err)
5507 return err;
5508
5509 return 0;
5510 }
5511
5512 static struct ieee80211_node *
5513 iwm_node_alloc(struct ieee80211_node_table *nt)
5514 {
5515 return malloc(sizeof(struct iwm_node), M_80211_NODE, M_NOWAIT | M_ZERO);
5516 }
5517
/*
 * Periodic (500 ms) rate-adaptation tick: let AMRR pick a new TX rate
 * for the BSS node unless a fixed rate/MCS has been configured.
 */
static void
iwm_calib_timeout(void *arg)
{
	struct iwm_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
#ifndef IEEE80211_NO_HT
	struct ieee80211_node *ni = &in->in_ni;
	int otxrate;
#endif
	int s;

	s = splnet();
	/* Only adapt when no fixed rate (or MCS) is forced and we're a STA. */
	if ((ic->ic_fixed_rate == -1
#ifndef IEEE80211_NO_HT
	    || ic->ic_fixed_mcs == -1
#endif
	    ) &&
	    ic->ic_opmode == IEEE80211_M_STA && ic->ic_bss) {
#ifndef IEEE80211_NO_HT
		/* Remember the old rate to detect an AMRR change below. */
		if (ni->ni_flags & IEEE80211_NODE_HT)
			otxrate = ni->ni_txmcs;
		else
			otxrate = ni->ni_txrate;
#endif
		ieee80211_amrr_choose(&sc->sc_amrr, &in->in_ni, &in->in_amn);

#ifndef IEEE80211_NO_HT
		/*
		 * If AMRR has chosen a new TX rate we must update
		 * the firmware's LQ rate table from process context.
		 */
		if ((ni->ni_flags & IEEE80211_NODE_HT) &&
		    otxrate != ni->ni_txmcs)
			softint_schedule(sc->setrates_task);
		else if (otxrate != ni->ni_txrate)
			softint_schedule(sc->setrates_task);
#endif
	}
	splx(s);

	/* Re-arm ourselves; stopped by iwm_stop()/iwm_newstate(). */
	callout_schedule(&sc->sc_calib_to, mstohz(500));
}
5561
5562 #ifndef IEEE80211_NO_HT
5563 static void
5564 iwm_setrates_task(void *arg)
5565 {
5566 struct iwm_softc *sc = arg;
5567 struct ieee80211com *ic = &sc->sc_ic;
5568 struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
5569
5570 /* Update rates table based on new TX rate determined by AMRR. */
5571 iwm_setrates(in);
5572 }
5573
/*
 * Build the firmware link-quality command for this node: a table of
 * TX rates in descending order starting from the node's current rate,
 * then send it with IWM_LQ_CMD.
 */
static int
iwm_setrates(struct iwm_node *in)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211com *ic = ni->ni_ic;
	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
	struct iwm_lq_cmd *lq = &in->in_lq;
	struct ieee80211_rateset *rs = &ni->ni_rates;
	int i, j, ridx, ridx_min, tab = 0;
#ifndef IEEE80211_NO_HT
	int sgi_ok;
#endif
	struct iwm_host_cmd cmd = {
		.id = IWM_LQ_CMD,
		.len = { sizeof(in->in_lq), },
	};

	memset(lq, 0, sizeof(*lq));
	lq->sta_id = IWM_STATION_ID;

	if (ic->ic_flags & IEEE80211_F_USEPROT)
		lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;

#ifndef IEEE80211_NO_HT
	/* Short guard interval is usable if both we and the peer do SGI20. */
	sgi_ok = ((ni->ni_flags & IEEE80211_NODE_HT) &&
	    (ni->ni_htcaps & IEEE80211_HTCAP_SGI20));
#endif


	/*
	 * Fill the LQ rate selection table with legacy and/or HT rates
	 * in descending order, i.e. with the node's current TX rate first.
	 * In cases where throughput of an HT rate corresponds to a legacy
	 * rate it makes no sense to add both. We rely on the fact that
	 * iwm_rates is laid out such that equivalent HT/legacy rates share
	 * the same IWM_RATE_*_INDEX value. Also, rates not applicable to
	 * legacy/HT are assumed to be marked with an 'invalid' PLCP value.
	 */
	j = 0;
	/* No CCK rates on 5GHz. */
	ridx_min = (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) ?
	    IWM_RIDX_OFDM : IWM_RIDX_CCK;
	for (ridx = IWM_RIDX_MAX; ridx >= ridx_min; ridx--) {
		if (j >= __arraycount(lq->rs_table))
			break;
		tab = 0;
#ifndef IEEE80211_NO_HT
		/* Prefer an HT entry for this index if one applies. */
		if ((ni->ni_flags & IEEE80211_NODE_HT) &&
		    iwm_rates[ridx].ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP) {
			for (i = ni->ni_txmcs; i >= 0; i--) {
				if (isclr(ni->ni_rxmcs, i))
					continue;
				if (ridx == iwm_mcs2ridx[i]) {
					tab = iwm_rates[ridx].ht_plcp;
					tab |= IWM_RATE_MCS_HT_MSK;
					if (sgi_ok)
						tab |= IWM_RATE_MCS_SGI_MSK;
					break;
				}
			}
		}
#endif
		/* Otherwise fall back to a matching legacy rate. */
		if (tab == 0 && iwm_rates[ridx].plcp != IWM_RATE_INVM_PLCP) {
			for (i = ni->ni_txrate; i >= 0; i--) {
				if (iwm_rates[ridx].rate == (rs->rs_rates[i] &
				    IEEE80211_RATE_VAL)) {
					tab = iwm_rates[ridx].plcp;
					break;
				}
			}
		}

		if (tab == 0)
			continue;

		/* Transmit on antenna A. */
		tab |= 1 << IWM_RATE_MCS_ANT_POS;
		if (IWM_RIDX_IS_CCK(ridx))
			tab |= IWM_RATE_MCS_CCK_MSK;
		DPRINTFN(2, ("station rate %d %x\n", i, tab));
		lq->rs_table[j++] = htole32(tab);
	}

	/* Fill the rest with the lowest possible rate */
	i = j > 0 ? j - 1 : 0;
	while (j < __arraycount(lq->rs_table))
		lq->rs_table[j++] = lq->rs_table[i];

	lq->single_stream_ant_msk = IWM_ANT_A;
	lq->dual_stream_ant_msk = IWM_ANT_AB;

	lq->agg_time_limit = htole16(4000); /* 4ms */
	lq->agg_disable_start_th = 3;
#ifdef notyet
	lq->agg_frame_cnt_limit = 0x3f;
#else
	lq->agg_frame_cnt_limit = 1; /* tx agg disabled */
#endif

	cmd.data[0] = &in->in_lq;
	return iwm_send_cmd(sc, &cmd);
}
5674 #endif
5675
/*
 * ifmedia change handler: capture any fixed rate/MCS the user selected
 * and restart the interface so the new setting takes effect.
 */
static int
iwm_media_change(struct ifnet *ifp)
{
	struct iwm_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	uint8_t rate, ridx;
	int err;

	err = ieee80211_media_change(ifp);
	if (err != ENETRESET)
		return err;

	/*
	 * Note: the `else' below pairs with this #ifndef branch, so with
	 * IEEE80211_NO_HT defined only the plain `if' statement remains.
	 */
#ifndef IEEE80211_NO_HT
	if (ic->ic_fixed_mcs != -1)
		sc->sc_fixed_ridx = iwm_mcs2ridx[ic->ic_fixed_mcs];
	else
#endif
	if (ic->ic_fixed_rate != -1) {
		rate = ic->ic_sup_rates[ic->ic_curmode].
		    rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL;
		/* Map 802.11 rate to HW rate index. */
		/* NOTE(review): if no entry matches, ridx ends up as
		 * IWM_RIDX_MAX + 1 -- confirm callers tolerate that. */
		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
			if (iwm_rates[ridx].rate == rate)
				break;
		sc->sc_fixed_ridx = ridx;
	}

	/* Restart the interface to apply the new media settings. */
	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
	    (IFF_UP | IFF_RUNNING)) {
		iwm_stop(ifp, 0);
		err = iwm_init(ifp);
	}
	return err;
}
5710
/*
 * Workqueue callback performing the actual net80211 state transition.
 * Runs in process context because most transitions require sleeping
 * on firmware command completion.  The generation counter detects a
 * device reset/stop that happened after the work was queued.
 */
static void
iwm_newstate_cb(struct work *wk, void *v)
{
	struct iwm_softc *sc = v;
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_newstate_state *iwmns = (struct iwm_newstate_state *)wk;
	enum ieee80211_state nstate = iwmns->ns_nstate;
	enum ieee80211_state ostate = ic->ic_state;
	int generation = iwmns->ns_generation;
	struct iwm_node *in;
	int arg = iwmns->ns_arg;
	int err;

	/* The work item was allocated by iwm_newstate(); we own it now. */
	kmem_free(iwmns, sizeof(*iwmns));

	DPRINTF(("Prepare to switch state %d->%d\n", ostate, nstate));
	if (sc->sc_generation != generation) {
		DPRINTF(("newstate_cb: someone pulled the plug meanwhile\n"));
		if (nstate == IEEE80211_S_INIT) {
			DPRINTF(("newstate_cb: nstate == IEEE80211_S_INIT: calling sc_newstate()\n"));
			sc->sc_newstate(ic, nstate, arg);
		}
		return;
	}

	DPRINTF(("switching state %s->%s\n", ieee80211_state_name[ostate],
	    ieee80211_state_name[nstate]));

	/* Stop SCAN-state LED blinking once we leave SCAN. */
	if (ostate == IEEE80211_S_SCAN && nstate != ostate)
		iwm_led_blink_stop(sc);

	/* Leaving RUN: let beacons reach the host again. */
	if (ostate == IEEE80211_S_RUN && nstate != ostate)
		iwm_disable_beacon_filter(sc);

	/* Reset the device if moving out of AUTH, ASSOC, or RUN. */
	/* XXX Is there a way to switch states without a full reset? */
	if (ostate > IEEE80211_S_SCAN && nstate < ostate) {
		iwm_stop_device(sc);
		iwm_init_hw(sc);

		/*
		 * Upon receiving a deauth frame from AP the net80211 stack
		 * puts the driver into AUTH state. This will fail with this
		 * driver so bring the FSM from RUN to SCAN in this case.
		 */
		if (nstate == IEEE80211_S_SCAN ||
		    nstate == IEEE80211_S_AUTH ||
		    nstate == IEEE80211_S_ASSOC) {
			DPRINTF(("Force transition to INIT; MGT=%d\n", arg));
			/* Always pass arg as -1 since we can't Tx right now. */
			sc->sc_newstate(ic, IEEE80211_S_INIT, -1);
			DPRINTF(("Going INIT->SCAN\n"));
			nstate = IEEE80211_S_SCAN;
		}
	}

	switch (nstate) {
	case IEEE80211_S_INIT:
		break;

	case IEEE80211_S_SCAN:
		/* Ignore a redundant request while a scan is running. */
		if (ostate == nstate &&
		    ISSET(sc->sc_flags, IWM_FLAG_SCANNING))
			return;
		if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
			err = iwm_umac_scan(sc);
		else
			err = iwm_lmac_scan(sc);
		if (err) {
			DPRINTF(("%s: could not initiate scan\n", DEVNAME(sc)));
			return;
		}
		SET(sc->sc_flags, IWM_FLAG_SCANNING);
		ic->ic_state = nstate;
		iwm_led_blink_start(sc);
		/* sc_newstate() is deferred until the scan completes. */
		return;

	case IEEE80211_S_AUTH:
		err = iwm_auth(sc);
		if (err) {
			DPRINTF(("%s: could not move to auth state: %d\n",
			    DEVNAME(sc), err));
			return;
		}
		break;

	case IEEE80211_S_ASSOC:
		err = iwm_assoc(sc);
		if (err) {
			DPRINTF(("%s: failed to associate: %d\n", DEVNAME(sc),
			    err));
			return;
		}
		break;

	case IEEE80211_S_RUN:
		in = (struct iwm_node *)ic->ic_bss;

		/* We have now been assigned an associd by the AP. */
		err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 1);
		if (err) {
			aprint_error_dev(sc->sc_dev, "failed to update MAC\n");
			return;
		}

		err = iwm_power_update_device(sc);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could send power command (error %d)\n", err);
			return;
		}
#ifdef notyet
		/*
		 * Disabled for now. Default beacon filter settings
		 * prevent net80211 from getting ERP and HT protection
		 * updates from beacons.
		 */
		err = iwm_enable_beacon_filter(sc, in);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could not enable beacon filter\n");
			return;
		}
#endif
		err = iwm_power_mac_update_mode(sc, in);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could not update MAC power (error %d)\n", err);
			return;
		}

		err = iwm_update_quotas(sc, in);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could not update quotas (error %d)\n", err);
			return;
		}

		ieee80211_amrr_node_init(&sc->sc_amrr, &in->in_amn);

		/* Start at lowest available bit-rate, AMRR will raise. */
		in->in_ni.ni_txrate = 0;
#ifndef IEEE80211_NO_HT
		in->in_ni.ni_txmcs = 0;
		iwm_setrates(in);
#endif

		/* Kick off periodic rate adaptation. */
		callout_schedule(&sc->sc_calib_to, mstohz(500));
		iwm_led_enable(sc);
		break;

	default:
		break;
	}

	sc->sc_newstate(ic, nstate, arg);
}
5868
5869 static int
5870 iwm_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
5871 {
5872 struct iwm_newstate_state *iwmns;
5873 struct ifnet *ifp = IC2IFP(ic);
5874 struct iwm_softc *sc = ifp->if_softc;
5875
5876 callout_stop(&sc->sc_calib_to);
5877
5878 iwmns = kmem_intr_alloc(sizeof(*iwmns), KM_NOSLEEP);
5879 if (!iwmns) {
5880 DPRINTF(("%s: allocating state cb mem failed\n", DEVNAME(sc)));
5881 return ENOMEM;
5882 }
5883
5884 iwmns->ns_nstate = nstate;
5885 iwmns->ns_arg = arg;
5886 iwmns->ns_generation = sc->sc_generation;
5887
5888 workqueue_enqueue(sc->sc_nswq, &iwmns->ns_wk, NULL);
5889
5890 return 0;
5891 }
5892
5893 static void
5894 iwm_endscan(struct iwm_softc *sc)
5895 {
5896 struct ieee80211com *ic = &sc->sc_ic;
5897
5898 DPRINTF(("scan ended\n"));
5899
5900 CLR(sc->sc_flags, IWM_FLAG_SCANNING);
5901 ieee80211_end_scan(ic);
5902 }
5903
5904 /*
5905 * Aging and idle timeouts for the different possible scenarios
5906 * in default configuration
5907 */
5908 static const uint32_t
5909 iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
5910 {
5911 htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
5912 htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
5913 },
5914 {
5915 htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF),
5916 htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)
5917 },
5918 {
5919 htole32(IWM_SF_MCAST_AGING_TIMER_DEF),
5920 htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)
5921 },
5922 {
5923 htole32(IWM_SF_BA_AGING_TIMER_DEF),
5924 htole32(IWM_SF_BA_IDLE_TIMER_DEF)
5925 },
5926 {
5927 htole32(IWM_SF_TX_RE_AGING_TIMER_DEF),
5928 htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)
5929 },
5930 };
5931
5932 /*
5933 * Aging and idle timeouts for the different possible scenarios
5934 * in single BSS MAC configuration.
5935 */
5936 static const uint32_t
5937 iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
5938 {
5939 htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER),
5940 htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)
5941 },
5942 {
5943 htole32(IWM_SF_AGG_UNICAST_AGING_TIMER),
5944 htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)
5945 },
5946 {
5947 htole32(IWM_SF_MCAST_AGING_TIMER),
5948 htole32(IWM_SF_MCAST_IDLE_TIMER)
5949 },
5950 {
5951 htole32(IWM_SF_BA_AGING_TIMER),
5952 htole32(IWM_SF_BA_IDLE_TIMER)
5953 },
5954 {
5955 htole32(IWM_SF_TX_RE_AGING_TIMER),
5956 htole32(IWM_SF_TX_RE_IDLE_TIMER)
5957 },
5958 };
5959
/*
 * Populate a smart-FIFO configuration command: pick a watermark based
 * on the AP's antenna capabilities (or a default when unassociated)
 * and copy in the appropriate timeout table.
 */
static void
iwm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
    struct ieee80211_node *ni)
{
	int i, j, watermark;

	sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN);

	/*
	 * If we are in association flow - check antenna configuration
	 * capabilities of the AP station, and choose the watermark accordingly.
	 */
	if (ni) {
#ifndef IEEE80211_NO_HT
		if (ni->ni_flags & IEEE80211_NODE_HT) {
#ifdef notyet
			if (ni->ni_rxmcs[2] != 0)
				watermark = IWM_SF_W_MARK_MIMO3;
			else if (ni->ni_rxmcs[1] != 0)
				watermark = IWM_SF_W_MARK_MIMO2;
			else
#endif
				watermark = IWM_SF_W_MARK_SISO;
		} else
#endif
			watermark = IWM_SF_W_MARK_LEGACY;
	/* default watermark value for unassociated mode. */
	} else {
		watermark = IWM_SF_W_MARK_MIMO2;
	}
	sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark);

	/* Long-delay timeouts use one fixed aging timer for all scenarios. */
	for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) {
		for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) {
			sf_cmd->long_delay_timeouts[i][j] =
			    htole32(IWM_SF_LONG_DELAY_AGING_TIMER);
		}
	}

	/* Tables are already little-endian; copy them in wholesale. */
	if (ni) {
		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
		       sizeof(iwm_sf_full_timeout));
	} else {
		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
		       sizeof(iwm_sf_full_timeout_def));
	}
}
6007
6008 static int
6009 iwm_sf_config(struct iwm_softc *sc, int new_state)
6010 {
6011 struct ieee80211com *ic = &sc->sc_ic;
6012 struct iwm_sf_cfg_cmd sf_cmd = {
6013 .state = htole32(IWM_SF_FULL_ON),
6014 };
6015
6016 if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
6017 sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF);
6018
6019 switch (new_state) {
6020 case IWM_SF_UNINIT:
6021 case IWM_SF_INIT_OFF:
6022 iwm_fill_sf_command(sc, &sf_cmd, NULL);
6023 break;
6024 case IWM_SF_FULL_ON:
6025 iwm_fill_sf_command(sc, &sf_cmd, ic->ic_bss);
6026 break;
6027 default:
6028 return EINVAL;
6029 }
6030
6031 return iwm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
6032 sizeof(sf_cmd), &sf_cmd);
6033 }
6034
6035 static int
6036 iwm_send_bt_init_conf(struct iwm_softc *sc)
6037 {
6038 struct iwm_bt_coex_cmd bt_cmd;
6039
6040 bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
6041 bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
6042
6043 return iwm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd), &bt_cmd);
6044 }
6045
6046 static int
6047 iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
6048 {
6049 struct iwm_mcc_update_cmd mcc_cmd;
6050 struct iwm_host_cmd hcmd = {
6051 .id = IWM_MCC_UPDATE_CMD,
6052 .flags = IWM_CMD_WANT_SKB,
6053 .data = { &mcc_cmd },
6054 };
6055 int resp_v2 = isset(sc->sc_enabled_capa,
6056 IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
6057 int err;
6058
6059 memset(&mcc_cmd, 0, sizeof(mcc_cmd));
6060 mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
6061 if ((sc->sc_ucode_api & IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
6062 isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC))
6063 mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
6064 else
6065 mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
6066
6067 if (resp_v2)
6068 hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
6069 else
6070 hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
6071
6072 err = iwm_send_cmd(sc, &hcmd);
6073 if (err)
6074 return err;
6075
6076 iwm_free_resp(sc, &hcmd);
6077
6078 return 0;
6079 }
6080
6081 static void
6082 iwm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
6083 {
6084 struct iwm_host_cmd cmd = {
6085 .id = IWM_REPLY_THERMAL_MNG_BACKOFF,
6086 .len = { sizeof(uint32_t), },
6087 .data = { &backoff, },
6088 };
6089
6090 iwm_send_cmd(sc, &cmd);
6091 }
6092
6093 static int
6094 iwm_init_hw(struct iwm_softc *sc)
6095 {
6096 struct ieee80211com *ic = &sc->sc_ic;
6097 int err, i, ac;
6098
6099 err = iwm_preinit(sc);
6100 if (err)
6101 return err;
6102
6103 err = iwm_start_hw(sc);
6104 if (err) {
6105 aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
6106 return err;
6107 }
6108
6109 err = iwm_run_init_mvm_ucode(sc, 0);
6110 if (err)
6111 return err;
6112
6113 /* Should stop and start HW since INIT image just loaded. */
6114 iwm_stop_device(sc);
6115 err = iwm_start_hw(sc);
6116 if (err) {
6117 aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
6118 return err;
6119 }
6120
6121 /* Restart, this time with the regular firmware */
6122 err = iwm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_REGULAR);
6123 if (err) {
6124 aprint_error_dev(sc->sc_dev, "could not load firmware\n");
6125 goto err;
6126 }
6127
6128 err = iwm_send_bt_init_conf(sc);
6129 if (err) {
6130 aprint_error_dev(sc->sc_dev,
6131 "could not init bt coex (error %d)\n", err);
6132 goto err;
6133 }
6134
6135 err = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc));
6136 if (err) {
6137 aprint_error_dev(sc->sc_dev,
6138 "could not init tx ant config (error %d)\n", err);
6139 goto err;
6140 }
6141
6142 /* Send phy db control command and then phy db calibration*/
6143 err = iwm_send_phy_db_data(sc);
6144 if (err) {
6145 aprint_error_dev(sc->sc_dev,
6146 "could not init phy db (error %d)\n", err);
6147 goto err;
6148 }
6149
6150 err = iwm_send_phy_cfg_cmd(sc);
6151 if (err) {
6152 aprint_error_dev(sc->sc_dev,
6153 "could not send phy config (error %d)\n", err);
6154 goto err;
6155 }
6156
6157 /* Add auxiliary station for scanning */
6158 err = iwm_add_aux_sta(sc);
6159 if (err) {
6160 aprint_error_dev(sc->sc_dev,
6161 "could not add aux station (error %d)\n", err);
6162 goto err;
6163 }
6164
6165 for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
6166 /*
6167 * The channel used here isn't relevant as it's
6168 * going to be overwritten in the other flows.
6169 * For now use the first channel we have.
6170 */
6171 sc->sc_phyctxt[i].channel = &ic->ic_channels[1];
6172 err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[i], 1, 1,
6173 IWM_FW_CTXT_ACTION_ADD, 0);
6174 if (err) {
6175 aprint_error_dev(sc->sc_dev,
6176 "could not add phy context %d (error %d)\n",
6177 i, err);
6178 goto err;
6179 }
6180 }
6181
6182 /* Initialize tx backoffs to the minimum. */
6183 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
6184 iwm_tt_tx_backoff(sc, 0);
6185
6186 err = iwm_power_update_device(sc);
6187 if (err) {
6188 aprint_error_dev(sc->sc_dev,
6189 "could send power command (error %d)\n", err);
6190 goto err;
6191 }
6192
6193 if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_SUPPORT)) {
6194 err = iwm_send_update_mcc_cmd(sc, "ZZ");
6195 if (err) {
6196 aprint_error_dev(sc->sc_dev,
6197 "could not init LAR (error %d)\n", err);
6198 goto err;
6199 }
6200 }
6201
6202 if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
6203 err = iwm_config_umac_scan(sc);
6204 if (err) {
6205 aprint_error_dev(sc->sc_dev,
6206 "could not configure scan (error %d)\n", err);
6207 goto err;
6208 }
6209 }
6210
6211 for (ac = 0; ac < WME_NUM_AC; ac++) {
6212 err = iwm_enable_txq(sc, IWM_STATION_ID, ac,
6213 iwm_ac_to_tx_fifo[ac]);
6214 if (err) {
6215 aprint_error_dev(sc->sc_dev,
6216 "could not enable Tx queue %d (error %d)\n",
6217 i, err);
6218 goto err;
6219 }
6220 }
6221
6222 err = iwm_disable_beacon_filter(sc);
6223 if (err) {
6224 aprint_error_dev(sc->sc_dev,
6225 "could not disable beacon filter (error %d)\n", err);
6226 goto err;
6227 }
6228
6229 return 0;
6230
6231 err:
6232 iwm_stop_device(sc);
6233 return err;
6234 }
6235
6236 /* Allow multicast from our BSSID. */
6237 static int
6238 iwm_allow_mcast(struct iwm_softc *sc)
6239 {
6240 struct ieee80211com *ic = &sc->sc_ic;
6241 struct ieee80211_node *ni = ic->ic_bss;
6242 struct iwm_mcast_filter_cmd *cmd;
6243 size_t size;
6244 int err;
6245
6246 size = roundup(sizeof(*cmd), 4);
6247 cmd = kmem_intr_zalloc(size, KM_NOSLEEP);
6248 if (cmd == NULL)
6249 return ENOMEM;
6250 cmd->filter_own = 1;
6251 cmd->port_id = 0;
6252 cmd->count = 0;
6253 cmd->pass_all = 1;
6254 IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
6255
6256 err = iwm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD, 0, size, cmd);
6257 kmem_intr_free(cmd, size);
6258 return err;
6259 }
6260
6261 static int
6262 iwm_init(struct ifnet *ifp)
6263 {
6264 struct iwm_softc *sc = ifp->if_softc;
6265 int err;
6266
6267 if (ISSET(sc->sc_flags, IWM_FLAG_HW_INITED))
6268 return 0;
6269
6270 sc->sc_generation++;
6271 sc->sc_flags &= ~IWM_FLAG_STOPPED;
6272
6273 err = iwm_init_hw(sc);
6274 if (err) {
6275 iwm_stop(ifp, 1);
6276 return err;
6277 }
6278
6279 ifp->if_flags &= ~IFF_OACTIVE;
6280 ifp->if_flags |= IFF_RUNNING;
6281
6282 ieee80211_begin_scan(&sc->sc_ic, 0);
6283 SET(sc->sc_flags, IWM_FLAG_HW_INITED);
6284
6285 return 0;
6286 }
6287
/*
 * Interface start routine: drain the management queue and (in RUN
 * state) the data send queue, encapsulating and handing frames to
 * iwm_tx().  Sets IFF_OACTIVE while the hardware Tx queues are full.
 */
static void
iwm_start(struct ifnet *ifp)
{
	struct iwm_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni;
	struct ether_header *eh;
	struct mbuf *m;
	int ac;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		/* why isn't this done per-queue? */
		if (sc->qfullmsk != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* need to send management frames even if we're not RUNning */
		IF_DEQUEUE(&ic->ic_mgtq, m);
		if (m) {
			/* Destination node was stashed in the mbuf ctx. */
			ni = M_GETCTX(m, struct ieee80211_node *);
			M_CLEARCTX(m);
			ac = WME_AC_BE;
			goto sendit;
		}
		if (ic->ic_state != IEEE80211_S_RUN) {
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		/* Make sure the Ethernet header is contiguous. */
		if (m->m_len < sizeof (*eh) &&
		    (m = m_pullup(m, sizeof (*eh))) == NULL) {
			ifp->if_oerrors++;
			continue;
		}

		eh = mtod(m, struct ether_header *);
		ni = ieee80211_find_txnode(ic, eh->ether_dhost);
		if (ni == NULL) {
			m_freem(m);
			ifp->if_oerrors++;
			continue;
		}

		/* classify mbuf so we can find which tx ring to use */
		if (ieee80211_classify(ic, m, ni) != 0) {
			m_freem(m);
			ieee80211_free_node(ni);
			ifp->if_oerrors++;
			continue;
		}

		/* No QoS encapsulation for EAPOL frames. */
		ac = (eh->ether_type != htons(ETHERTYPE_PAE)) ?
		    M_WME_GETAC(m) : WME_AC_BE;

		/* Tap before 802.11 encapsulation. */
		bpf_mtap(ifp, m);

		if ((m = ieee80211_encap(ic, m, ni)) == NULL) {
			ieee80211_free_node(ni);
			ifp->if_oerrors++;
			continue;
		}

 sendit:
		/* Tap the fully-encapsulated 802.11 frame. */
		bpf_mtap3(ic->ic_rawbpf, m);

		if (iwm_tx(sc, m, ni, ac) != 0) {
			ieee80211_free_node(ni);
			ifp->if_oerrors++;
			continue;
		}

		/* Arm the Tx watchdog for this frame. */
		if (ifp->if_flags & IFF_UP) {
			sc->sc_tx_timer = 15;
			ifp->if_timer = 1;
		}
	}
}
6373
6374 static void
6375 iwm_stop(struct ifnet *ifp, int disable)
6376 {
6377 struct iwm_softc *sc = ifp->if_softc;
6378 struct ieee80211com *ic = &sc->sc_ic;
6379 struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
6380
6381 sc->sc_flags &= ~IWM_FLAG_HW_INITED;
6382 sc->sc_flags |= IWM_FLAG_STOPPED;
6383 sc->sc_generation++;
6384 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
6385
6386 if (in)
6387 in->in_phyctxt = NULL;
6388
6389 if (ic->ic_state != IEEE80211_S_INIT)
6390 ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
6391
6392 callout_stop(&sc->sc_calib_to);
6393 iwm_led_blink_stop(sc);
6394 ifp->if_timer = sc->sc_tx_timer = 0;
6395 iwm_stop_device(sc);
6396 }
6397
6398 static void
6399 iwm_watchdog(struct ifnet *ifp)
6400 {
6401 struct iwm_softc *sc = ifp->if_softc;
6402
6403 ifp->if_timer = 0;
6404 if (sc->sc_tx_timer > 0) {
6405 if (--sc->sc_tx_timer == 0) {
6406 aprint_error_dev(sc->sc_dev, "device timeout\n");
6407 #ifdef IWM_DEBUG
6408 iwm_nic_error(sc);
6409 #endif
6410 ifp->if_flags &= ~IFF_UP;
6411 iwm_stop(ifp, 1);
6412 ifp->if_oerrors++;
6413 return;
6414 }
6415 ifp->if_timer = 1;
6416 }
6417
6418 ieee80211_watchdog(&sc->sc_ic);
6419 }
6420
6421 static int
6422 iwm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
6423 {
6424 struct iwm_softc *sc = ifp->if_softc;
6425 struct ieee80211com *ic = &sc->sc_ic;
6426 const struct sockaddr *sa;
6427 int s, err = 0;
6428
6429 s = splnet();
6430
6431 switch (cmd) {
6432 case SIOCSIFADDR:
6433 ifp->if_flags |= IFF_UP;
6434 /* FALLTHROUGH */
6435 case SIOCSIFFLAGS:
6436 err = ifioctl_common(ifp, cmd, data);
6437 if (err)
6438 break;
6439 if (ifp->if_flags & IFF_UP) {
6440 if (!(ifp->if_flags & IFF_RUNNING)) {
6441 err = iwm_init(ifp);
6442 if (err)
6443 ifp->if_flags &= ~IFF_UP;
6444 }
6445 } else {
6446 if (ifp->if_flags & IFF_RUNNING)
6447 iwm_stop(ifp, 1);
6448 }
6449 break;
6450
6451 case SIOCADDMULTI:
6452 case SIOCDELMULTI:
6453 if (!ISSET(sc->sc_flags, IWM_FLAG_ATTACHED)) {
6454 err = ENXIO;
6455 break;
6456 }
6457 sa = ifreq_getaddr(SIOCADDMULTI, (struct ifreq *)data);
6458 err = (cmd == SIOCADDMULTI) ?
6459 ether_addmulti(sa, &sc->sc_ec) :
6460 ether_delmulti(sa, &sc->sc_ec);
6461 if (err == ENETRESET)
6462 err = 0;
6463 break;
6464
6465 default:
6466 if (!ISSET(sc->sc_flags, IWM_FLAG_ATTACHED)) {
6467 err = ether_ioctl(ifp, cmd, data);
6468 break;
6469 }
6470 err = ieee80211_ioctl(ic, cmd, data);
6471 break;
6472 }
6473
6474 if (err == ENETRESET) {
6475 err = 0;
6476 if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
6477 (IFF_UP | IFF_RUNNING)) {
6478 iwm_stop(ifp, 0);
6479 err = iwm_init(ifp);
6480 }
6481 }
6482
6483 splx(s);
6484 return err;
6485 }
6486
6487 /*
6488 * Note: This structure is read from the device with IO accesses,
6489 * and the reading already does the endian conversion. As it is
6490 * read with uint32_t-sized accesses, any members with a different size
6491 * need to be ordered correctly though!
6492 */
6493 struct iwm_error_event_table {
6494 uint32_t valid; /* (nonzero) valid, (0) log is empty */
6495 uint32_t error_id; /* type of error */
6496 uint32_t trm_hw_status0; /* TRM HW status */
6497 uint32_t trm_hw_status1; /* TRM HW status */
6498 uint32_t blink2; /* branch link */
6499 uint32_t ilink1; /* interrupt link */
6500 uint32_t ilink2; /* interrupt link */
6501 uint32_t data1; /* error-specific data */
6502 uint32_t data2; /* error-specific data */
6503 uint32_t data3; /* error-specific data */
6504 uint32_t bcon_time; /* beacon timer */
6505 uint32_t tsf_low; /* network timestamp function timer */
6506 uint32_t tsf_hi; /* network timestamp function timer */
6507 uint32_t gp1; /* GP1 timer register */
6508 uint32_t gp2; /* GP2 timer register */
6509 uint32_t fw_rev_type; /* firmware revision type */
6510 uint32_t major; /* uCode version major */
6511 uint32_t minor; /* uCode version minor */
6512 uint32_t hw_ver; /* HW Silicon version */
6513 uint32_t brd_ver; /* HW board version */
6514 uint32_t log_pc; /* log program counter */
6515 uint32_t frame_ptr; /* frame pointer */
6516 uint32_t stack_ptr; /* stack pointer */
6517 uint32_t hcmd; /* last host command header */
6518 uint32_t isr0; /* isr status register LMPM_NIC_ISR0:
6519 * rxtx_flag */
6520 uint32_t isr1; /* isr status register LMPM_NIC_ISR1:
6521 * host_flag */
6522 uint32_t isr2; /* isr status register LMPM_NIC_ISR2:
6523 * enc_flag */
6524 uint32_t isr3; /* isr status register LMPM_NIC_ISR3:
6525 * time_flag */
6526 uint32_t isr4; /* isr status register LMPM_NIC_ISR4:
6527 * wico interrupt */
6528 uint32_t last_cmd_id; /* last HCMD id handled by the firmware */
6529 uint32_t wait_event; /* wait event() caller address */
6530 uint32_t l2p_control; /* L2pControlField */
6531 uint32_t l2p_duration; /* L2pDurationField */
6532 uint32_t l2p_mhvalid; /* L2pMhValidBits */
6533 uint32_t l2p_addr_match; /* L2pAddrMatchStat */
6534 uint32_t lmpm_pmg_sel; /* indicate which clocks are turned on
6535 * (LMPM_PMG_SEL) */
6536 uint32_t u_timestamp; /* indicate when the date and time of the
6537 * compilation */
6538 uint32_t flow_handler; /* FH read/write pointers, RX credit */
6539 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
6540
6541 /*
6542 * UMAC error struct - relevant starting from family 8000 chip.
6543 * Note: This structure is read from the device with IO accesses,
6544 * and the reading already does the endian conversion. As it is
6545 * read with u32-sized accesses, any members with a different size
6546 * need to be ordered correctly though!
6547 */
6548 struct iwm_umac_error_event_table {
6549 uint32_t valid; /* (nonzero) valid, (0) log is empty */
6550 uint32_t error_id; /* type of error */
6551 uint32_t blink1; /* branch link */
6552 uint32_t blink2; /* branch link */
6553 uint32_t ilink1; /* interrupt link */
6554 uint32_t ilink2; /* interrupt link */
6555 uint32_t data1; /* error-specific data */
6556 uint32_t data2; /* error-specific data */
6557 uint32_t data3; /* error-specific data */
6558 uint32_t umac_major;
6559 uint32_t umac_minor;
6560 uint32_t frame_pointer; /* core register 27 */
6561 uint32_t stack_pointer; /* core register 28 */
6562 uint32_t cmd_header; /* latest host cmd sent to UMAC */
6563 uint32_t nic_isr_pref; /* ISR status register */
6564 } __packed;
6565
/* Layout constants used when interpreting the firmware error log. */
#define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
#define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
6568
#ifdef IWM_DEBUG
/*
 * Firmware error ID -> name table.  The last entry,
 * "ADVANCED_SYSASSERT", must remain last: iwm_desc_lookup()
 * returns it as the catch-all when no ID matches.
 */
static const struct {
	const char *name;
	uint8_t num;
} advanced_lookup[] = {
	{ "NMI_INTERRUPT_WDG", 0x34 },
	{ "SYSASSERT", 0x35 },
	{ "UCODE_VERSION_MISMATCH", 0x37 },
	{ "BAD_COMMAND", 0x38 },
	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
	{ "FATAL_ERROR", 0x3D },
	{ "NMI_TRM_HW_ERR", 0x46 },
	{ "NMI_INTERRUPT_TRM", 0x4C },
	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
	{ "NMI_INTERRUPT_HOST", 0x66 },
	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
	{ "ADVANCED_SYSASSERT", 0 },
};
6591
6592 static const char *
6593 iwm_desc_lookup(uint32_t num)
6594 {
6595 int i;
6596
6597 for (i = 0; i < __arraycount(advanced_lookup) - 1; i++)
6598 if (advanced_lookup[i].num == num)
6599 return advanced_lookup[i].name;
6600
6601 /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
6602 return advanced_lookup[i].name;
6603 }
6604
6605 /*
6606 * Support for dumping the error log seemed like a good idea ...
6607 * but it's mostly hex junk and the only sensible thing is the
6608 * hw/ucode revision (which we know anyway). Since it's here,
6609 * I'll just leave it in, just in case e.g. the Intel guys want to
6610 * help us decipher some "ADVANCED_SYSASSERT" later.
6611 */
6612 static void
6613 iwm_nic_error(struct iwm_softc *sc)
6614 {
6615 struct iwm_error_event_table t;
6616 uint32_t base;
6617
6618 aprint_error_dev(sc->sc_dev, "dumping device error log\n");
6619 base = sc->sc_uc.uc_error_event_table;
6620 if (base < 0x800000) {
6621 aprint_error_dev(sc->sc_dev,
6622 "Invalid error log pointer 0x%08x\n", base);
6623 return;
6624 }
6625
6626 if (iwm_read_mem(sc, base, &t, sizeof(t)/sizeof(uint32_t))) {
6627 aprint_error_dev(sc->sc_dev, "reading errlog failed\n");
6628 return;
6629 }
6630
6631 if (!t.valid) {
6632 aprint_error_dev(sc->sc_dev, "errlog not found, skipping\n");
6633 return;
6634 }
6635
6636 if (ERROR_START_OFFSET <= t.valid * ERROR_ELEM_SIZE) {
6637 aprint_error_dev(sc->sc_dev, "Start Error Log Dump:\n");
6638 aprint_error_dev(sc->sc_dev, "Status: 0x%x, count: %d\n",
6639 sc->sc_flags, t.valid);
6640 }
6641
6642 aprint_error_dev(sc->sc_dev, "%08X | %-28s\n", t.error_id,
6643 iwm_desc_lookup(t.error_id));
6644 aprint_error_dev(sc->sc_dev, "%08X | trm_hw_status0\n",
6645 t.trm_hw_status0);
6646 aprint_error_dev(sc->sc_dev, "%08X | trm_hw_status1\n",
6647 t.trm_hw_status1);
6648 aprint_error_dev(sc->sc_dev, "%08X | branchlink2\n", t.blink2);
6649 aprint_error_dev(sc->sc_dev, "%08X | interruptlink1\n", t.ilink1);
6650 aprint_error_dev(sc->sc_dev, "%08X | interruptlink2\n", t.ilink2);
6651 aprint_error_dev(sc->sc_dev, "%08X | data1\n", t.data1);
6652 aprint_error_dev(sc->sc_dev, "%08X | data2\n", t.data2);
6653 aprint_error_dev(sc->sc_dev, "%08X | data3\n", t.data3);
6654 aprint_error_dev(sc->sc_dev, "%08X | beacon time\n", t.bcon_time);
6655 aprint_error_dev(sc->sc_dev, "%08X | tsf low\n", t.tsf_low);
6656 aprint_error_dev(sc->sc_dev, "%08X | tsf hi\n", t.tsf_hi);
6657 aprint_error_dev(sc->sc_dev, "%08X | time gp1\n", t.gp1);
6658 aprint_error_dev(sc->sc_dev, "%08X | time gp2\n", t.gp2);
6659 aprint_error_dev(sc->sc_dev, "%08X | uCode revision type\n",
6660 t.fw_rev_type);
6661 aprint_error_dev(sc->sc_dev, "%08X | uCode version major\n",
6662 t.major);
6663 aprint_error_dev(sc->sc_dev, "%08X | uCode version minor\n",
6664 t.minor);
6665 aprint_error_dev(sc->sc_dev, "%08X | hw version\n", t.hw_ver);
6666 aprint_error_dev(sc->sc_dev, "%08X | board version\n", t.brd_ver);
6667 aprint_error_dev(sc->sc_dev, "%08X | hcmd\n", t.hcmd);
6668 aprint_error_dev(sc->sc_dev, "%08X | isr0\n", t.isr0);
6669 aprint_error_dev(sc->sc_dev, "%08X | isr1\n", t.isr1);
6670 aprint_error_dev(sc->sc_dev, "%08X | isr2\n", t.isr2);
6671 aprint_error_dev(sc->sc_dev, "%08X | isr3\n", t.isr3);
6672 aprint_error_dev(sc->sc_dev, "%08X | isr4\n", t.isr4);
6673 aprint_error_dev(sc->sc_dev, "%08X | last cmd Id\n", t.last_cmd_id);
6674 aprint_error_dev(sc->sc_dev, "%08X | wait_event\n", t.wait_event);
6675 aprint_error_dev(sc->sc_dev, "%08X | l2p_control\n", t.l2p_control);
6676 aprint_error_dev(sc->sc_dev, "%08X | l2p_duration\n", t.l2p_duration);
6677 aprint_error_dev(sc->sc_dev, "%08X | l2p_mhvalid\n", t.l2p_mhvalid);
6678 aprint_error_dev(sc->sc_dev, "%08X | l2p_addr_match\n",
6679 t.l2p_addr_match);
6680 aprint_error_dev(sc->sc_dev, "%08X | lmpm_pmg_sel\n", t.lmpm_pmg_sel);
6681 aprint_error_dev(sc->sc_dev, "%08X | timestamp\n", t.u_timestamp);
6682 aprint_error_dev(sc->sc_dev, "%08X | flow_handler\n", t.flow_handler);
6683
6684 if (sc->sc_uc.uc_umac_error_event_table)
6685 iwm_nic_umac_error(sc);
6686 }
6687
/*
 * Dump the UMAC error log (family 8000 and later firmware; see
 * struct iwm_umac_error_event_table above).
 */
static void
iwm_nic_umac_error(struct iwm_softc *sc)
{
	struct iwm_umac_error_event_table t;
	uint32_t base;

	base = sc->sc_uc.uc_umac_error_event_table;

	/* Sanity check: a pointer below 0x800000 is treated as bogus. */
	if (base < 0x800000) {
		aprint_error_dev(sc->sc_dev,
		    "Invalid error log pointer 0x%08x\n", base);
		return;
	}

	/* NB: iwm_read_mem() takes a count of 32-bit words, not bytes. */
	if (iwm_read_mem(sc, base, &t, sizeof(t)/sizeof(uint32_t))) {
		aprint_error_dev(sc->sc_dev, "reading errlog failed\n");
		return;
	}

	if (ERROR_START_OFFSET <= t.valid * ERROR_ELEM_SIZE) {
		aprint_error_dev(sc->sc_dev, "Start UMAC Error Log Dump:\n");
		aprint_error_dev(sc->sc_dev, "Status: 0x%x, count: %d\n",
		    sc->sc_flags, t.valid);
	}

	aprint_error_dev(sc->sc_dev, "0x%08X | %s\n", t.error_id,
	    iwm_desc_lookup(t.error_id));
	aprint_error_dev(sc->sc_dev, "0x%08X | umac branchlink1\n", t.blink1);
	aprint_error_dev(sc->sc_dev, "0x%08X | umac branchlink2\n", t.blink2);
	aprint_error_dev(sc->sc_dev, "0x%08X | umac interruptlink1\n",
	    t.ilink1);
	aprint_error_dev(sc->sc_dev, "0x%08X | umac interruptlink2\n",
	    t.ilink2);
	aprint_error_dev(sc->sc_dev, "0x%08X | umac data1\n", t.data1);
	aprint_error_dev(sc->sc_dev, "0x%08X | umac data2\n", t.data2);
	aprint_error_dev(sc->sc_dev, "0x%08X | umac data3\n", t.data3);
	aprint_error_dev(sc->sc_dev, "0x%08X | umac major\n", t.umac_major);
	aprint_error_dev(sc->sc_dev, "0x%08X | umac minor\n", t.umac_minor);
	aprint_error_dev(sc->sc_dev, "0x%08X | frame pointer\n",
	    t.frame_pointer);
	aprint_error_dev(sc->sc_dev, "0x%08X | stack pointer\n",
	    t.stack_pointer);
	aprint_error_dev(sc->sc_dev, "0x%08X | last host cmd\n", t.cmd_header);
	aprint_error_dev(sc->sc_dev, "0x%08X | isr status reg\n",
	    t.nic_isr_pref);
}
6734 #endif
6735
/*
 * Sync the portion of an RX buffer that holds a response structure
 * following the packet header, then point _var_ at it.
 * Both macros rely on `sc' and `data' being in scope at the call site.
 */
#define SYNC_RESP_STRUCT(_var_, _pkt_)					\
do {									\
	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
	    sizeof(*(_var_)), BUS_DMASYNC_POSTREAD);			\
	_var_ = (void *)((_pkt_)+1);					\
} while (/*CONSTCOND*/0)

/*
 * Like SYNC_RESP_STRUCT, but for a variable-length payload.
 * Fixed to sync (_len_) bytes; the old code synced sizeof(len),
 * i.e. the size of whatever variable `len' happened to be at the
 * call site, instead of the requested length.
 */
#define SYNC_RESP_PTR(_ptr_, _len_, _pkt_)				\
do {									\
	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
	    (_len_), BUS_DMASYNC_POSTREAD);				\
	_ptr_ = (void *)((_pkt_)+1);					\
} while (/*CONSTCOND*/0)

/*
 * Advance the RX ring read index.  No trailing semicolon: callers
 * supply their own, so the macro is safe in unbraced if/else bodies.
 */
#define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % IWM_RX_RING_COUNT)
6751
/*
 * Process all pending firmware notifications on the RX ring, from the
 * current read index up to the hardware's closed_rb_num write index.
 * Each entry is dispatched on its (group, command) code; responses to
 * driver-issued commands (bit 0x80 clear in the qid) additionally
 * complete the corresponding TX ring slot via iwm_cmd_done().
 */
static void
iwm_notif_intr(struct iwm_softc *sc)
{
	uint16_t hw;

	bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
	    0, sc->rxq.stat_dma.size, BUS_DMASYNC_POSTREAD);

	/* Hardware write pointer, modulo the 4096-entry counter. */
	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
	while (sc->rxq.cur != hw) {
		struct iwm_rx_data *data = &sc->rxq.data[sc->rxq.cur];
		struct iwm_rx_packet *pkt;
		struct iwm_cmd_response *cresp;
		int orig_qid, qid, idx, code;

		/* Sync just the packet header first; payloads are synced
		 * by the per-command handlers as needed. */
		bus_dmamap_sync(sc->sc_dmat, data->map, 0, sizeof(*pkt),
		    BUS_DMASYNC_POSTREAD);
		pkt = mtod(data->m, struct iwm_rx_packet *);

		orig_qid = pkt->hdr.qid;
		qid = orig_qid & ~0x80;
		idx = pkt->hdr.idx;

		code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);

		/*
		 * randomly get these from the firmware, no idea why.
		 * they at least seem harmless, so just ignore them for now
		 */
		if (__predict_false((pkt->hdr.code == 0 && qid == 0 && idx == 0)
		    || pkt->len_n_flags == htole32(0x55550000))) {
			ADVANCE_RXQ(sc);
			continue;
		}

		switch (code) {
		case IWM_REPLY_RX_PHY_CMD:
			iwm_rx_rx_phy_cmd(sc, pkt, data);
			break;

		case IWM_REPLY_RX_MPDU_CMD:
			iwm_rx_rx_mpdu(sc, pkt, data);
			break;

		case IWM_TX_CMD:
			iwm_rx_tx_cmd(sc, pkt, data);
			break;

		case IWM_MISSED_BEACONS_NOTIFICATION:
			iwm_rx_missed_beacons_notif(sc, pkt, data);
			break;

		case IWM_MFUART_LOAD_NOTIFICATION:
			break;

		/*
		 * Firmware boot notification.  The ALIVE response layout
		 * differs per firmware version; distinguish by payload
		 * length and record the error/log table pointers and the
		 * scheduler base for later use.
		 */
		case IWM_ALIVE: {
			struct iwm_alive_resp_v1 *resp1;
			struct iwm_alive_resp_v2 *resp2;
			struct iwm_alive_resp_v3 *resp3;

			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp1)) {
				SYNC_RESP_STRUCT(resp1, pkt);
				sc->sc_uc.uc_error_event_table
				    = le32toh(resp1->error_event_table_ptr);
				sc->sc_uc.uc_log_event_table
				    = le32toh(resp1->log_event_table_ptr);
				sc->sched_base = le32toh(resp1->scd_base_ptr);
				if (resp1->status == IWM_ALIVE_STATUS_OK)
					sc->sc_uc.uc_ok = 1;
				else
					sc->sc_uc.uc_ok = 0;
			}
			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp2)) {
				SYNC_RESP_STRUCT(resp2, pkt);
				sc->sc_uc.uc_error_event_table
				    = le32toh(resp2->error_event_table_ptr);
				sc->sc_uc.uc_log_event_table
				    = le32toh(resp2->log_event_table_ptr);
				sc->sched_base = le32toh(resp2->scd_base_ptr);
				sc->sc_uc.uc_umac_error_event_table
				    = le32toh(resp2->error_info_addr);
				if (resp2->status == IWM_ALIVE_STATUS_OK)
					sc->sc_uc.uc_ok = 1;
				else
					sc->sc_uc.uc_ok = 0;
			}
			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp3)) {
				SYNC_RESP_STRUCT(resp3, pkt);
				sc->sc_uc.uc_error_event_table
				    = le32toh(resp3->error_event_table_ptr);
				sc->sc_uc.uc_log_event_table
				    = le32toh(resp3->log_event_table_ptr);
				sc->sched_base = le32toh(resp3->scd_base_ptr);
				sc->sc_uc.uc_umac_error_event_table
				    = le32toh(resp3->error_info_addr);
				if (resp3->status == IWM_ALIVE_STATUS_OK)
					sc->sc_uc.uc_ok = 1;
				else
					sc->sc_uc.uc_ok = 0;
			}

			/* Wake anyone sleeping on firmware boot. */
			sc->sc_uc.uc_intr = 1;
			wakeup(&sc->sc_uc);
			break;
		}

		case IWM_CALIB_RES_NOTIF_PHY_DB: {
			struct iwm_calib_res_notif_phy_db *phy_db_notif;
			SYNC_RESP_STRUCT(phy_db_notif, pkt);
			uint16_t size = le16toh(phy_db_notif->length);
			/* Sync the variable-length PHY DB payload too. */
			bus_dmamap_sync(sc->sc_dmat, data->map,
			    sizeof(*pkt) + sizeof(*phy_db_notif),
			    size, BUS_DMASYNC_POSTREAD);
			iwm_phy_db_set_section(sc, phy_db_notif, size);
			break;
		}

		case IWM_STATISTICS_NOTIFICATION: {
			struct iwm_notif_statistics *stats;
			SYNC_RESP_STRUCT(stats, pkt);
			memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
			sc->sc_noise = iwm_get_noise(&stats->rx.general);
			break;
		}

		/* Copy the full response for a synchronous waiter. */
		case IWM_NVM_ACCESS_CMD:
		case IWM_MCC_UPDATE_CMD:
			if (sc->sc_wantresp == ((qid << 16) | idx)) {
				bus_dmamap_sync(sc->sc_dmat, data->map, 0,
				    sizeof(sc->sc_cmd_resp),
				    BUS_DMASYNC_POSTREAD);
				memcpy(sc->sc_cmd_resp,
				    pkt, sizeof(sc->sc_cmd_resp));
			}
			break;

		case IWM_MCC_CHUB_UPDATE_CMD: {
			struct iwm_mcc_chub_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);

			/* Store the firmware's country code as a string. */
			sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
			sc->sc_fw_mcc[1] = notif->mcc & 0xff;
			sc->sc_fw_mcc[2] = '\0';
			break;
		}

		case IWM_DTS_MEASUREMENT_NOTIFICATION:
			break;

		/*
		 * Generic command responses: copy the status back for a
		 * synchronous waiter identified by (qid, idx).
		 */
		case IWM_PHY_CONFIGURATION_CMD:
		case IWM_TX_ANT_CONFIGURATION_CMD:
		case IWM_ADD_STA:
		case IWM_MAC_CONTEXT_CMD:
		case IWM_REPLY_SF_CFG_CMD:
		case IWM_POWER_TABLE_CMD:
		case IWM_PHY_CONTEXT_CMD:
		case IWM_BINDING_CONTEXT_CMD:
		case IWM_TIME_EVENT_CMD:
		case IWM_SCAN_REQUEST_CMD:
		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
		case IWM_SCAN_OFFLOAD_REQUEST_CMD:
		case IWM_REPLY_BEACON_FILTERING_CMD:
		case IWM_MAC_PM_POWER_TABLE:
		case IWM_TIME_QUOTA_CMD:
		case IWM_REMOVE_STA:
		case IWM_TXPATH_FLUSH:
		case IWM_LQ_CMD:
		case IWM_BT_CONFIG:
		case IWM_REPLY_THERMAL_MNG_BACKOFF:
			SYNC_RESP_STRUCT(cresp, pkt);
			if (sc->sc_wantresp == ((qid << 16) | idx)) {
				memcpy(sc->sc_cmd_resp,
				    pkt, sizeof(*pkt) + sizeof(*cresp));
			}
			break;

		/* ignore */
		case 0x6c: /* IWM_PHY_DB_CMD */
			break;

		case IWM_INIT_COMPLETE_NOTIF:
			sc->sc_init_complete = 1;
			wakeup(&sc->sc_init_complete);
			break;

		case IWM_SCAN_OFFLOAD_COMPLETE: {
			struct iwm_periodic_scan_complete *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			break;
		}

		case IWM_SCAN_ITERATION_COMPLETE: {
			struct iwm_lmac_scan_complete_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			iwm_endscan(sc);
			break;
		}

		case IWM_SCAN_COMPLETE_UMAC: {
			struct iwm_umac_scan_complete *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			iwm_endscan(sc);
			break;
		}

		case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
			struct iwm_umac_scan_iter_complete_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			iwm_endscan(sc);
			break;
		}

		case IWM_REPLY_ERROR: {
			struct iwm_error_resp *resp;
			SYNC_RESP_STRUCT(resp, pkt);
			aprint_error_dev(sc->sc_dev,
			    "firmware error 0x%x, cmd 0x%x\n",
			    le32toh(resp->error_type), resp->cmd_id);
			break;
		}

		case IWM_TIME_EVENT_NOTIFICATION: {
			struct iwm_time_event_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			break;
		}

		case IWM_MCAST_FILTER_CMD:
			break;

		case IWM_SCD_QUEUE_CFG: {
			struct iwm_scd_txq_cfg_rsp *rsp;
			SYNC_RESP_STRUCT(rsp, pkt);
			break;
		}

		default:
			aprint_error_dev(sc->sc_dev,
			    "unhandled firmware response 0x%x 0x%x/0x%x "
			    "rx ring %d[%d]\n",
			    code, pkt->hdr.code, pkt->len_n_flags, qid, idx);
			break;
		}

		/*
		 * uCode sets bit 0x80 when it originates the notification,
		 * i.e. when the notification is not a direct response to a
		 * command sent by the driver.
		 * For example, uCode issues IWM_REPLY_RX when it sends a
		 * received frame to the driver.
		 */
		if (!(orig_qid & (1 << 7))) {
			iwm_cmd_done(sc, qid, idx);
		}

		ADVANCE_RXQ(sc);
	}

	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/*
	 * Seems like the hardware gets upset unless we align the write by 8??
	 */
	hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
}
7020
/*
 * Soft interrupt handler: services the interrupt causes latched into
 * sc_soft_flags by iwm_intr().  Handles fatal firmware/hardware errors,
 * firmware-load completion, rfkill, and RX notifications.  Loops until
 * no new causes have been posted, then re-enables device interrupts.
 */
static void
iwm_softintr(void *arg)
{
	struct iwm_softc *sc = arg;
	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
	uint32_t r1;
	int isperiodic = 0;

	/* Atomically claim all causes posted so far. */
	r1 = atomic_swap_32(&sc->sc_soft_flags, 0);

 restart:
	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
#ifdef IWM_DEBUG
		int i;

		iwm_nic_error(sc);

		/* Dump driver status (TX and RX rings) while we're here. */
		DPRINTF(("driver status:\n"));
		for (i = 0; i < IWM_MAX_QUEUES; i++) {
			struct iwm_tx_ring *ring = &sc->txq[i];
			DPRINTF(("  tx ring %2d: qid=%-2d cur=%-3d "
			    "queued=%-3d\n",
			    i, ring->qid, ring->cur, ring->queued));
		}
		DPRINTF(("  rx ring: cur=%d\n", sc->rxq.cur));
		DPRINTF(("  802.11 state %s\n",
		    ieee80211_state_name[sc->sc_ic.ic_state]));
#endif

		aprint_error_dev(sc->sc_dev, "fatal firmware error\n");
 fatal:		/* also reached via goto from the HW_ERR case below */
		ifp->if_flags &= ~IFF_UP;
		iwm_stop(ifp, 1);
		/* Don't restore interrupt mask */
		return;

	}

	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
		aprint_error_dev(sc->sc_dev,
		    "hardware error, stopping device\n");
		goto fatal;
	}

	/* firmware chunk loaded */
	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
		sc->sc_fw_chunk_done = 1;
		wakeup(&sc->sc_fw);
	}

	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
		/* Radio switch flipped: take the interface down. */
		if (iwm_check_rfkill(sc) && (ifp->if_flags & IFF_UP)) {
			ifp->if_flags &= ~IFF_UP;
			iwm_stop(ifp, 1);
		}
	}

	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
		/* Disable periodic RX interrupts while no RX is pending. */
		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
			IWM_WRITE_1(sc,
			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
		isperiodic = 1;
	}

	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) ||
	    isperiodic) {
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);

		iwm_notif_intr(sc);

		/* enable periodic interrupt, see above */
		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) &&
		    !isperiodic)
			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
			    IWM_CSR_INT_PERIODIC_ENA);
	}

	/* New causes may have arrived while we worked; process them too. */
	r1 = atomic_swap_32(&sc->sc_soft_flags, 0);
	if (r1 != 0)
		goto restart;

	iwm_restore_interrupts(sc);
}
7107
/*
 * Hard interrupt handler.  Reads the interrupt causes (from the ICT
 * table when enabled, otherwise from the INT/FH_INT_STATUS CSRs),
 * acknowledges them, and defers all real work to iwm_softintr() via
 * sc_soft_flags.  Returns 1 if the interrupt was ours.
 */
static int
iwm_intr(void *arg)
{
	struct iwm_softc *sc = arg;
	int r1, r2;

	/* Mask further device interrupts until the softint is done. */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
		uint32_t *ict = sc->ict_dma.vaddr;
		int tmp;

		bus_dmamap_sync(sc->sc_dmat, sc->ict_dma.map,
		    0, sc->ict_dma.size, BUS_DMASYNC_POSTREAD);
		/*
		 * NOTE(review): htole32 on a value read from the device
		 * looks like it should be le32toh; the two are the same
		 * transformation on any byte order, so behavior is
		 * unaffected either way.
		 */
		tmp = htole32(ict[sc->ict_cur]);
		if (!tmp)
			goto out_ena;

		/*
		 * ok, there was something.  keep plowing until we have all.
		 */
		r1 = r2 = 0;
		while (tmp) {
			r1 |= tmp;
			ict[sc->ict_cur] = 0;	/* Acknowledge. */
			bus_dmamap_sync(sc->sc_dmat, sc->ict_dma.map,
			    &ict[sc->ict_cur] - ict, sizeof(*ict),
			    BUS_DMASYNC_PREWRITE);
			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
			tmp = htole32(ict[sc->ict_cur]);
		}

		/* this is where the fun begins.  don't ask */
		if (r1 == 0xffffffff)
			r1 = 0;

		/* i am not expected to understand this */
		if (r1 & 0xc0000)
			r1 |= 0x8000;
		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
	} else {
		r1 = IWM_READ(sc, IWM_CSR_INT);
		/* All-ones or 0xa5a5a5ax reads mean the hardware is gone. */
		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
			goto out;
		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
	}
	if (r1 == 0 && r2 == 0) {
		goto out_ena;	/* not ours; unmask and bail */
	}

	/* Acknowledge the causes we are about to handle. */
	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);

	/* Hand the causes to the soft interrupt handler. */
	atomic_or_32(&sc->sc_soft_flags, r1);
	softint_schedule(sc->sc_soft_ih);
	return 1;

 out_ena:
	iwm_restore_interrupts(sc);
 out:
	return 0;
}
7169
7170 /*
7171 * Autoconf glue-sniffing
7172 */
7173
7174 static const pci_product_id_t iwm_devices[] = {
7175 PCI_PRODUCT_INTEL_WIFI_LINK_7260_1,
7176 PCI_PRODUCT_INTEL_WIFI_LINK_7260_2,
7177 PCI_PRODUCT_INTEL_WIFI_LINK_3160_1,
7178 PCI_PRODUCT_INTEL_WIFI_LINK_3160_2,
7179 PCI_PRODUCT_INTEL_WIFI_LINK_7265_1,
7180 PCI_PRODUCT_INTEL_WIFI_LINK_7265_2,
7181 #if 0
7182 PCI_PRODUCT_INTEL_WIFI_LINK_3165_1,
7183 PCI_PRODUCT_INTEL_WIFI_LINK_3165_2,
7184 PCI_PRODUCT_INTEL_WIFI_LINK_8260_1,
7185 PCI_PRODUCT_INTEL_WIFI_LINK_8260_2,
7186 #endif
7187 };
7188
7189 static int
7190 iwm_match(device_t parent, cfdata_t match __unused, void *aux)
7191 {
7192 struct pci_attach_args *pa = aux;
7193
7194 if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_INTEL)
7195 return 0;
7196
7197 for (size_t i = 0; i < __arraycount(iwm_devices); i++)
7198 if (PCI_PRODUCT(pa->pa_id) == iwm_devices[i])
7199 return 1;
7200
7201 return 0;
7202 }
7203
/*
 * One-time late initialization, run once the firmware is available:
 * start the hardware, run the init firmware to read the NVM, then
 * attach to the 802.11 layer.  Idempotent via IWM_FLAG_ATTACHED.
 * Returns 0 on success or an errno.
 */
static int
iwm_preinit(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	int err;

	if (ISSET(sc->sc_flags, IWM_FLAG_ATTACHED))
		return 0;

	err = iwm_start_hw(sc);
	if (err) {
		aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
		return err;
	}

	/* Run the init firmware just long enough to read the NVM. */
	err = iwm_run_init_mvm_ucode(sc, 1);
	iwm_stop_device(sc);
	if (err)
		return err;

	sc->sc_flags |= IWM_FLAG_ATTACHED;

	aprint_normal_dev(sc->sc_dev, "hw rev 0x%x, fw ver %s, address %s\n",
	    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK, sc->sc_fwver,
	    ether_sprintf(sc->sc_nvm.hw_addr));

#ifndef IEEE80211_NO_HT
	if (sc->sc_nvm.sku_cap_11n_enable)
		iwm_setup_ht_rates(sc);
#endif

	/* not all hardware can do 5GHz band */
	if (sc->sc_nvm.sku_cap_band_52GHz_enable)
		ic->ic_sup_rates[IEEE80211_MODE_11A] = ieee80211_std_rateset_11a;

	ieee80211_ifattach(ic);

	ic->ic_node_alloc = iwm_node_alloc;

	/* Override 802.11 state transition machine. */
	sc->sc_newstate = ic->ic_newstate;
	ic->ic_newstate = iwm_newstate;
	ieee80211_media_init(ic, iwm_media_change, ieee80211_media_status);
	ieee80211_announce(ic);

	iwm_radiotap_attach(sc);

	return 0;
}
7253
7254 static void
7255 iwm_attach_hook(device_t dev)
7256 {
7257 struct iwm_softc *sc = device_private(dev);
7258
7259 iwm_preinit(sc);
7260 }
7261
7262 static void
7263 iwm_attach(device_t parent, device_t self, void *aux)
7264 {
7265 struct iwm_softc *sc = device_private(self);
7266 struct pci_attach_args *pa = aux;
7267 struct ieee80211com *ic = &sc->sc_ic;
7268 struct ifnet *ifp = &sc->sc_ec.ec_if;
7269 pcireg_t reg, memtype;
7270 char intrbuf[PCI_INTRSTR_LEN];
7271 const char *intrstr;
7272 int err;
7273 int txq_i;
7274 const struct sysctlnode *node;
7275
7276 sc->sc_dev = self;
7277 sc->sc_pct = pa->pa_pc;
7278 sc->sc_pcitag = pa->pa_tag;
7279 sc->sc_dmat = pa->pa_dmat;
7280 sc->sc_pciid = pa->pa_id;
7281
7282 pci_aprint_devinfo(pa, NULL);
7283
7284 if (workqueue_create(&sc->sc_nswq, "iwmns",
7285 iwm_newstate_cb, sc, PRI_NONE, IPL_NET, 0))
7286 panic("%s: could not create workqueue: newstate",
7287 device_xname(self));
7288 sc->sc_soft_ih = softint_establish(SOFTINT_NET, iwm_softintr, sc);
7289 if (sc->sc_soft_ih == NULL)
7290 panic("%s: could not establish softint", device_xname(self));
7291
7292 /*
7293 * Get the offset of the PCI Express Capability Structure in PCI
7294 * Configuration Space.
7295 */
7296 err = pci_get_capability(sc->sc_pct, sc->sc_pcitag,
7297 PCI_CAP_PCIEXPRESS, &sc->sc_cap_off, NULL);
7298 if (err == 0) {
7299 aprint_error_dev(self,
7300 "PCIe capability structure not found!\n");
7301 return;
7302 }
7303
7304 /* Clear device-specific "PCI retry timeout" register (41h). */
7305 reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
7306 pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
7307
7308 /* Enable bus-mastering */
7309 reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
7310 reg |= PCI_COMMAND_MASTER_ENABLE;
7311 pci_conf_write(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, reg);
7312
7313 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START);
7314 err = pci_mapreg_map(pa, PCI_MAPREG_START, memtype, 0,
7315 &sc->sc_st, &sc->sc_sh, NULL, &sc->sc_sz);
7316 if (err) {
7317 aprint_error_dev(self, "can't map mem space\n");
7318 return;
7319 }
7320
7321 /* Install interrupt handler. */
7322 err = pci_intr_alloc(pa, &sc->sc_pihp, NULL, 0);
7323 if (err) {
7324 aprint_error_dev(self, "can't allocate interrupt\n");
7325 return;
7326 }
7327 if (pci_intr_type(sc->sc_pct, sc->sc_pihp[0]) == PCI_INTR_TYPE_INTX) {
7328 reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
7329 PCI_COMMAND_STATUS_REG);
7330 if (ISSET(reg, PCI_COMMAND_INTERRUPT_DISABLE)) {
7331 CLR(reg, PCI_COMMAND_INTERRUPT_DISABLE);
7332 pci_conf_write(sc->sc_pct, sc->sc_pcitag,
7333 PCI_COMMAND_STATUS_REG, reg);
7334 }
7335 }
7336 intrstr = pci_intr_string(sc->sc_pct, sc->sc_pihp[0], intrbuf,
7337 sizeof(intrbuf));
7338 sc->sc_ih = pci_intr_establish_xname(sc->sc_pct, sc->sc_pihp[0],
7339 IPL_NET, iwm_intr, sc, device_xname(self));
7340 if (sc->sc_ih == NULL) {
7341 aprint_error_dev(self, "can't establish interrupt");
7342 if (intrstr != NULL)
7343 aprint_error(" at %s", intrstr);
7344 aprint_error("\n");
7345 return;
7346 }
7347 aprint_normal_dev(self, "interrupting at %s\n", intrstr);
7348
7349 sc->sc_wantresp = IWM_CMD_RESP_IDLE;
7350
7351 sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
7352 switch (PCI_PRODUCT(sc->sc_pciid)) {
7353 case PCI_PRODUCT_INTEL_WIFI_LINK_3160_1:
7354 case PCI_PRODUCT_INTEL_WIFI_LINK_3160_2:
7355 sc->sc_fwname = "iwlwifi-3160-16.ucode";
7356 sc->host_interrupt_operation_mode = 1;
7357 sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
7358 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
7359 break;
7360 case PCI_PRODUCT_INTEL_WIFI_LINK_3165_1:
7361 case PCI_PRODUCT_INTEL_WIFI_LINK_3165_2:
7362 sc->sc_fwname = "iwlwifi-7265D-16.ucode";
7363 sc->host_interrupt_operation_mode = 0;
7364 sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
7365 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
7366 break;
7367 case PCI_PRODUCT_INTEL_WIFI_LINK_7260_1:
7368 case PCI_PRODUCT_INTEL_WIFI_LINK_7260_2:
7369 sc->sc_fwname = "iwlwifi-7260-16.ucode";
7370 sc->host_interrupt_operation_mode = 1;
7371 sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
7372 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
7373 break;
7374 case PCI_PRODUCT_INTEL_WIFI_LINK_7265_1:
7375 case PCI_PRODUCT_INTEL_WIFI_LINK_7265_2:
7376 sc->sc_fwname = (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) ==
7377 IWM_CSR_HW_REV_TYPE_7265D ?
7378 "iwlwifi-7265D-16.ucode": "iwlwifi-7265-16.ucode";
7379 sc->host_interrupt_operation_mode = 0;
7380 sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
7381 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
7382 break;
7383 case PCI_PRODUCT_INTEL_WIFI_LINK_8260_1:
7384 case PCI_PRODUCT_INTEL_WIFI_LINK_8260_2:
7385 sc->sc_fwname = "iwlwifi-8000C-16.ucode";
7386 sc->host_interrupt_operation_mode = 0;
7387 sc->sc_device_family = IWM_DEVICE_FAMILY_8000;
7388 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
7389 break;
7390 default:
7391 aprint_error_dev(self, "unknown product %#x",
7392 PCI_PRODUCT(sc->sc_pciid));
7393 return;
7394 }
7395 DPRINTF(("%s: firmware=%s\n", DEVNAME(sc), sc->sc_fwname));
7396
7397 /*
7398 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
7399 * changed, and now the revision step also includes bit 0-1 (no more
7400 * "dash" value). To keep hw_rev backwards compatible - we'll store it
7401 * in the old format.
7402 */
7403
7404 if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
7405 sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
7406 (IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
7407
7408 if (iwm_prepare_card_hw(sc) != 0) {
7409 aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
7410 return;
7411 }
7412
7413 if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
7414 uint32_t hw_step;
7415
7416 /*
7417 * In order to recognize C step the driver should read the
7418 * chip version id located at the AUX bus MISC address.
7419 */
7420 IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
7421 IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
7422 DELAY(2);
7423
7424 err = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
7425 IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
7426 IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
7427 25000);
7428 if (!err) {
7429 aprint_error_dev(sc->sc_dev,
7430 "failed to wake up the nic\n");
7431 return;
7432 }
7433
7434 if (iwm_nic_lock(sc)) {
7435 hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
7436 hw_step |= IWM_ENABLE_WFPM;
7437 iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
7438 hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
7439 hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
7440 if (hw_step == 0x3)
7441 sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
7442 (IWM_SILICON_C_STEP << 2);
7443 iwm_nic_unlock(sc);
7444 } else {
7445 aprint_error_dev(sc->sc_dev,
7446 "failed to lock the nic\n");
7447 return;
7448 }
7449 }
7450
7451 /*
7452 * Allocate DMA memory for firmware transfers.
7453 * Must be aligned on a 16-byte boundary.
7454 */
7455 err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma, sc->sc_fwdmasegsz,
7456 16);
7457 if (err) {
7458 aprint_error_dev(sc->sc_dev,
7459 "could not allocate memory for firmware\n");
7460 return;
7461 }
7462
7463 /* Allocate "Keep Warm" page, used internally by the card. */
7464 err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
7465 if (err) {
7466 aprint_error_dev(sc->sc_dev,
7467 "could not allocate keep warm page\n");
7468 goto fail1;
7469 }
7470
7471 /* Allocate interrupt cause table (ICT).*/
7472 err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma, IWM_ICT_SIZE,
7473 1 << IWM_ICT_PADDR_SHIFT);
7474 if (err) {
7475 aprint_error_dev(sc->sc_dev, "could not allocate ICT table\n");
7476 goto fail2;
7477 }
7478
7479 /* TX scheduler rings must be aligned on a 1KB boundary. */
7480 err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
7481 __arraycount(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
7482 if (err) {
7483 aprint_error_dev(sc->sc_dev,
7484 "could not allocate TX scheduler rings\n");
7485 goto fail3;
7486 }
7487
7488 for (txq_i = 0; txq_i < __arraycount(sc->txq); txq_i++) {
7489 err = iwm_alloc_tx_ring(sc, &sc->txq[txq_i], txq_i);
7490 if (err) {
7491 aprint_error_dev(sc->sc_dev,
7492 "could not allocate TX ring %d\n", txq_i);
7493 goto fail4;
7494 }
7495 }
7496
7497 err = iwm_alloc_rx_ring(sc, &sc->rxq);
7498 if (err) {
7499 aprint_error_dev(sc->sc_dev, "could not allocate RX ring\n");
7500 goto fail4;
7501 }
7502
7503 /* Clear pending interrupts. */
7504 IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
7505
7506 if ((err = sysctl_createv(&sc->sc_clog, 0, NULL, &node,
7507 0, CTLTYPE_NODE, device_xname(sc->sc_dev),
7508 SYSCTL_DESCR("iwm per-controller controls"),
7509 NULL, 0, NULL, 0,
7510 CTL_HW, iwm_sysctl_root_num, CTL_CREATE,
7511 CTL_EOL)) != 0) {
7512 aprint_normal_dev(sc->sc_dev,
7513 "couldn't create iwm per-controller sysctl node\n");
7514 }
7515 if (err == 0) {
7516 int iwm_nodenum = node->sysctl_num;
7517
7518 /* Reload firmware sysctl node */
7519 if ((err = sysctl_createv(&sc->sc_clog, 0, NULL, &node,
7520 CTLFLAG_READWRITE, CTLTYPE_INT, "fw_loaded",
7521 SYSCTL_DESCR("Reload firmware"),
7522 iwm_sysctl_fw_loaded_handler, 0, (void *)sc, 0,
7523 CTL_HW, iwm_sysctl_root_num, iwm_nodenum, CTL_CREATE,
7524 CTL_EOL)) != 0) {
7525 aprint_normal_dev(sc->sc_dev,
7526 "couldn't create load_fw sysctl node\n");
7527 }
7528 }
7529
7530 /*
7531 * Attach interface
7532 */
7533 ic->ic_ifp = ifp;
7534 ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */
7535 ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */
7536 ic->ic_state = IEEE80211_S_INIT;
7537
7538 /* Set device capabilities. */
7539 ic->ic_caps =
7540 IEEE80211_C_WEP | /* WEP */
7541 IEEE80211_C_WPA | /* 802.11i */
7542 #ifdef notyet
7543 IEEE80211_C_SCANALL | /* device scans all channels at once */
7544 IEEE80211_C_SCANALLBAND | /* device scans all bands at once */
7545 #endif
7546 IEEE80211_C_SHSLOT | /* short slot time supported */
7547 IEEE80211_C_SHPREAMBLE; /* short preamble supported */
7548
7549 #ifndef IEEE80211_NO_HT
7550 ic->ic_htcaps = IEEE80211_HTCAP_SGI20;
7551 ic->ic_htxcaps = 0;
7552 ic->ic_txbfcaps = 0;
7553 ic->ic_aselcaps = 0;
7554 ic->ic_ampdu_params = (IEEE80211_AMPDU_PARAM_SS_4 | 0x3 /* 64k */);
7555 #endif
7556
7557 /* all hardware can do 2.4GHz band */
7558 ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b;
7559 ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g;
7560
7561 for (int i = 0; i < __arraycount(sc->sc_phyctxt); i++) {
7562 sc->sc_phyctxt[i].id = i;
7563 }
7564
7565 sc->sc_amrr.amrr_min_success_threshold = 1;
7566 sc->sc_amrr.amrr_max_success_threshold = 15;
7567
7568 /* IBSS channel undefined for now. */
7569 ic->ic_ibss_chan = &ic->ic_channels[1];
7570
7571 #if 0
7572 ic->ic_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
7573 #endif
7574
7575 ifp->if_softc = sc;
7576 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
7577 ifp->if_init = iwm_init;
7578 ifp->if_stop = iwm_stop;
7579 ifp->if_ioctl = iwm_ioctl;
7580 ifp->if_start = iwm_start;
7581 ifp->if_watchdog = iwm_watchdog;
7582 IFQ_SET_READY(&ifp->if_snd);
7583 memcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
7584
7585 if_initialize(ifp);
7586 #if 0
7587 ieee80211_ifattach(ic);
7588 #else
7589 ether_ifattach(ifp, ic->ic_myaddr); /* XXX */
7590 #endif
7591 /* Use common softint-based if_input */
7592 ifp->if_percpuq = if_percpuq_create(ifp);
7593 if_deferred_start_init(ifp, NULL);
7594 if_register(ifp);
7595
7596 callout_init(&sc->sc_calib_to, 0);
7597 callout_setfunc(&sc->sc_calib_to, iwm_calib_timeout, sc);
7598 callout_init(&sc->sc_led_blink_to, 0);
7599 callout_setfunc(&sc->sc_led_blink_to, iwm_led_blink_timeout, sc);
7600 #ifndef IEEE80211_NO_HT
7601 if (workqueue_create(&sc->sc_setratewq, "iwmsr",
7602 iwm_setrates_task, sc, PRI_NONE, IPL_NET, 0))
7603 panic("%s: could not create workqueue: setrates",
7604 device_xname(self));
7605 if (workqueue_create(&sc->sc_bawq, "iwmba",
7606 iwm_ba_task, sc, PRI_NONE, IPL_NET, 0))
7607 panic("%s: could not create workqueue: blockack",
7608 device_xname(self));
7609 if (workqueue_create(&sc->sc_htprowq, "iwmhtpro",
7610 iwm_htprot_task, sc, PRI_NONE, IPL_NET, 0))
7611 panic("%s: could not create workqueue: htprot",
7612 device_xname(self));
7613 #endif
7614
7615 if (pmf_device_register(self, NULL, NULL))
7616 pmf_class_network_register(self, ifp);
7617 else
7618 aprint_error_dev(self, "couldn't establish power handler\n");
7619
7620 /*
7621 * We can't do normal attach before the file system is mounted
7622 * because we cannot read the MAC address without loading the
7623 * firmware from disk. So we postpone until mountroot is done.
7624 * Notably, this will require a full driver unload/load cycle
7625 * (or reboot) in case the firmware is not present when the
7626 * hook runs.
7627 */
7628 config_mountroot(self, iwm_attach_hook);
7629
7630 return;
7631
7632 fail4: while (--txq_i >= 0)
7633 iwm_free_tx_ring(sc, &sc->txq[txq_i]);
7634 iwm_free_rx_ring(sc, &sc->rxq);
7635 iwm_dma_contig_free(&sc->sched_dma);
7636 fail3: if (sc->ict_dma.vaddr != NULL)
7637 iwm_dma_contig_free(&sc->ict_dma);
7638 fail2: iwm_dma_contig_free(&sc->kw_dma);
7639 fail1: iwm_dma_contig_free(&sc->fw_dma);
7640 }
7641
7642 void
7643 iwm_radiotap_attach(struct iwm_softc *sc)
7644 {
7645 struct ifnet *ifp = IC2IFP(&sc->sc_ic);
7646
7647 bpf_attach2(ifp, DLT_IEEE802_11_RADIO,
7648 sizeof (struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN,
7649 &sc->sc_drvbpf);
7650
7651 sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
7652 sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
7653 sc->sc_rxtap.wr_ihdr.it_present = htole32(IWM_RX_RADIOTAP_PRESENT);
7654
7655 sc->sc_txtap_len = sizeof sc->sc_txtapu;
7656 sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
7657 sc->sc_txtap.wt_ihdr.it_present = htole32(IWM_TX_RADIOTAP_PRESENT);
7658 }
7659
#if 0
/*
 * NOTE(review): this entire region is compiled out (#if 0).  It looks
 * like template code for suspend/resume and autoconf deactivation
 * support that was never wired up — confirm before enabling.
 */
static void
iwm_init_task(void *arg)
{
	struct iwm_softc *sc = arg;
	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
	int s;

	/* Serialize against ioctl and block network interrupts. */
	rw_enter_write(&sc->ioctl_rwl);
	s = splnet();

	/* Always stop; restart only if administratively up but not running. */
	iwm_stop(ifp, 0);
	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP)
		iwm_init(ifp);

	splx(s);
	rw_exit(&sc->ioctl_rwl);
}

/* Resume path: redo the PCI quirk fixup, then reinitialize. */
static void
iwm_wakeup(struct iwm_softc *sc)
{
	pcireg_t reg;

	/* Clear device-specific "PCI retry timeout" register (41h). */
	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);

	iwm_init_task(sc);
}

/* Autoconf activation hook: stop the interface on deactivation. */
static int
iwm_activate(device_t self, enum devact act)
{
	struct iwm_softc *sc = device_private(self);
	struct ifnet *ifp = IC2IFP(&sc->sc_ic);

	switch (act) {
	case DVACT_DEACTIVATE:
		if (ifp->if_flags & IFF_RUNNING)
			iwm_stop(ifp, 0);
		return 0;
	default:
		return EOPNOTSUPP;
	}
}
#endif
7707
/* Autoconfiguration glue: detach and activate hooks are not provided (NULL). */
CFATTACH_DECL_NEW(iwm, sizeof(struct iwm_softc), iwm_match, iwm_attach,
    NULL, NULL);
7710
7711 static int
7712 iwm_sysctl_fw_loaded_handler(SYSCTLFN_ARGS)
7713 {
7714 struct sysctlnode node;
7715 struct iwm_softc *sc;
7716 int err, t;
7717
7718 node = *rnode;
7719 sc = node.sysctl_data;
7720 t = ISSET(sc->sc_flags, IWM_FLAG_FW_LOADED) ? 1 : 0;
7721 node.sysctl_data = &t;
7722 err = sysctl_lookup(SYSCTLFN_CALL(&node));
7723 if (err || newp == NULL)
7724 return err;
7725
7726 if (t == 0)
7727 CLR(sc->sc_flags, IWM_FLAG_FW_LOADED);
7728 return 0;
7729 }
7730
/*
 * Create the global hw.iwm sysctl subtree at boot and record its node
 * number in iwm_sysctl_root_num so per-controller nodes can attach
 * under it.  With IWM_DEBUG, also create hw.iwm.debug to toggle
 * debugging output at run time.
 */
SYSCTL_SETUP(sysctl_iwm, "sysctl iwm(4) subtree setup")
{
	const struct sysctlnode *rnode;
#ifdef IWM_DEBUG
	const struct sysctlnode *cnode;
#endif /* IWM_DEBUG */
	int rc;

	/* hw.iwm: root of all iwm(4) controls. */
	if ((rc = sysctl_createv(clog, 0, NULL, &rnode,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "iwm",
	    SYSCTL_DESCR("iwm global controls"),
	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
		goto err;

	/* Remember the dynamically assigned node number for attach time. */
	iwm_sysctl_root_num = rnode->sysctl_num;

#ifdef IWM_DEBUG
	/* control debugging printfs */
	if ((rc = sysctl_createv(clog, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT,
	    "debug", SYSCTL_DESCR("Enable debugging output"),
	    NULL, 0, &iwm_debug, 0, CTL_CREATE, CTL_EOL)) != 0)
		goto err;
#endif /* IWM_DEBUG */

	return;

err:
	aprint_error("%s: sysctl_createv failed (rc = %d)\n", __func__, rc);
}
7761