if_iwm.c revision 1.46 1 /* $NetBSD: if_iwm.c,v 1.46 2017/01/04 03:05:24 nonaka Exp $ */
2 /* OpenBSD: if_iwm.c,v 1.147 2016/11/17 14:12:33 stsp Exp */
3 #define IEEE80211_NO_HT
4 /*
5 * Copyright (c) 2014, 2016 genua gmbh <info (at) genua.de>
6 * Author: Stefan Sperling <stsp (at) openbsd.org>
7 * Copyright (c) 2014 Fixup Software Ltd.
8 *
9 * Permission to use, copy, modify, and distribute this software for any
10 * purpose with or without fee is hereby granted, provided that the above
11 * copyright notice and this permission notice appear in all copies.
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
14 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
15 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
16 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
17 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 */
21
22 /*-
23 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
24 * which were used as the reference documentation for this implementation.
25 *
26 ***********************************************************************
27 *
28 * This file is provided under a dual BSD/GPLv2 license. When using or
29 * redistributing this file, you may do so under either license.
30 *
31 * GPL LICENSE SUMMARY
32 *
33 * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
34 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
35 * Copyright(c) 2016 Intel Deutschland GmbH
36 *
37 * This program is free software; you can redistribute it and/or modify
38 * it under the terms of version 2 of the GNU General Public License as
39 * published by the Free Software Foundation.
40 *
41 * This program is distributed in the hope that it will be useful, but
42 * WITHOUT ANY WARRANTY; without even the implied warranty of
43 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
44 * General Public License for more details.
45 *
46 * You should have received a copy of the GNU General Public License
47 * along with this program; if not, write to the Free Software
48 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
49 * USA
50 *
51 * The full GNU General Public License is included in this distribution
52 * in the file called COPYING.
53 *
54 * Contact Information:
55 * Intel Linux Wireless <ilw (at) linux.intel.com>
56 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
57 *
58 *
59 * BSD LICENSE
60 *
61 * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
62 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
63 * Copyright(c) 2016 Intel Deutschland GmbH
64 * All rights reserved.
65 *
66 * Redistribution and use in source and binary forms, with or without
67 * modification, are permitted provided that the following conditions
68 * are met:
69 *
70 * * Redistributions of source code must retain the above copyright
71 * notice, this list of conditions and the following disclaimer.
72 * * Redistributions in binary form must reproduce the above copyright
73 * notice, this list of conditions and the following disclaimer in
74 * the documentation and/or other materials provided with the
75 * distribution.
76 * * Neither the name Intel Corporation nor the names of its
77 * contributors may be used to endorse or promote products derived
78 * from this software without specific prior written permission.
79 *
80 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
81 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
82 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
83 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
84 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
85 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
86 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
87 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
88 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
89 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
90 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
91 */
92
93 /*-
94 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini (at) free.fr>
95 *
96 * Permission to use, copy, modify, and distribute this software for any
97 * purpose with or without fee is hereby granted, provided that the above
98 * copyright notice and this permission notice appear in all copies.
99 *
100 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
101 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
102 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
103 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
104 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
105 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
106 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
107 */
108
109 #include <sys/cdefs.h>
110 __KERNEL_RCSID(0, "$NetBSD: if_iwm.c,v 1.46 2017/01/04 03:05:24 nonaka Exp $");
111
112 #include <sys/param.h>
113 #include <sys/conf.h>
114 #include <sys/kernel.h>
115 #include <sys/kmem.h>
116 #include <sys/mbuf.h>
117 #include <sys/mutex.h>
118 #include <sys/proc.h>
119 #include <sys/socket.h>
120 #include <sys/sockio.h>
121 #include <sys/sysctl.h>
122 #include <sys/systm.h>
123
124 #include <sys/cpu.h>
125 #include <sys/bus.h>
126 #include <sys/workqueue.h>
127 #include <machine/endian.h>
128 #include <machine/intr.h>
129
130 #include <dev/pci/pcireg.h>
131 #include <dev/pci/pcivar.h>
132 #include <dev/pci/pcidevs.h>
133 #include <dev/firmload.h>
134
135 #include <net/bpf.h>
136 #include <net/if.h>
137 #include <net/if_dl.h>
138 #include <net/if_media.h>
139 #include <net/if_ether.h>
140
141 #include <netinet/in.h>
142 #include <netinet/ip.h>
143
144 #include <net80211/ieee80211_var.h>
145 #include <net80211/ieee80211_amrr.h>
146 #include <net80211/ieee80211_radiotap.h>
147
148 #define DEVNAME(_s) device_xname((_s)->sc_dev)
149 #define IC2IFP(_ic_) ((_ic_)->ic_ifp)
150
151 #define le16_to_cpup(_a_) (le16toh(*(const uint16_t *)(_a_)))
152 #define le32_to_cpup(_a_) (le32toh(*(const uint32_t *)(_a_)))
153
154 #ifdef IWM_DEBUG
155 #define DPRINTF(x) do { if (iwm_debug > 0) printf x; } while (0)
156 #define DPRINTFN(n, x) do { if (iwm_debug >= (n)) printf x; } while (0)
157 int iwm_debug = 0;
158 #else
159 #define DPRINTF(x) do { ; } while (0)
160 #define DPRINTFN(n, x) do { ; } while (0)
161 #endif
162
163 #include <dev/pci/if_iwmreg.h>
164 #include <dev/pci/if_iwmvar.h>
165
/*
 * Channel numbers understood by the device NVM (presumably the
 * 7000-series set; the wider list below is the _8000 variant --
 * TODO confirm against if_iwmreg.h).  The first IWM_NUM_2GHZ_CHANNELS
 * entries are 2.4 GHz channels, the remainder 5 GHz.
 */
static const uint8_t iwm_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};
174
/*
 * Extended NVM channel list used by 8000-series devices; includes
 * additional 5 GHz channels (68-92, 169-181) absent from the base list.
 */
static const uint8_t iwm_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};

/* Count of 2.4 GHz entries at the head of both channel tables above. */
#define IWM_NUM_2GHZ_CHANNELS	14
185
/*
 * Rate table mapping a rate index to its 802.11 rate value and the
 * corresponding legacy and HT PLCP codes.  'rate' is in units of
 * 500 kb/s (so 2 == 1 Mb/s, 108 == 54 Mb/s); rates with no HT
 * equivalent carry the invalid-MCS PLCP sentinel.
 */
static const struct iwm_rate {
	uint8_t rate;		/* rate in 500 kb/s units */
	uint8_t plcp;		/* legacy (CCK/OFDM) PLCP code */
	uint8_t ht_plcp;	/* HT SISO MCS PLCP code, or INV */
} iwm_rates[] = {
		/* Legacy */		/* HT */
	{   2,	IWM_RATE_1M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP  },
	{   4,	IWM_RATE_2M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{  11,	IWM_RATE_5M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP  },
	{  22,	IWM_RATE_11M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{  12,	IWM_RATE_6M_PLCP,	IWM_RATE_HT_SISO_MCS_0_PLCP },
	{  18,	IWM_RATE_9M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{  24,	IWM_RATE_12M_PLCP,	IWM_RATE_HT_SISO_MCS_1_PLCP },
	{  36,	IWM_RATE_18M_PLCP,	IWM_RATE_HT_SISO_MCS_2_PLCP },
	{  48,	IWM_RATE_24M_PLCP,	IWM_RATE_HT_SISO_MCS_3_PLCP },
	{  72,	IWM_RATE_36M_PLCP,	IWM_RATE_HT_SISO_MCS_4_PLCP },
	{  96,	IWM_RATE_48M_PLCP,	IWM_RATE_HT_SISO_MCS_5_PLCP },
	{ 108,	IWM_RATE_54M_PLCP,	IWM_RATE_HT_SISO_MCS_6_PLCP },
	{ 128,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_SISO_MCS_7_PLCP },
};
/* Indices into iwm_rates[]: CCK rates come first, OFDM rates follow. */
#define IWM_RIDX_CCK	0
#define IWM_RIDX_OFDM	4
#define IWM_RIDX_MAX	(__arraycount(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
211
#ifndef IEEE80211_NO_HT
/* Convert an MCS index into an iwm_rates[] index. */
static const int iwm_mcs2ridx[] = {
	IWM_RATE_MCS_0_INDEX,
	IWM_RATE_MCS_1_INDEX,
	IWM_RATE_MCS_2_INDEX,
	IWM_RATE_MCS_3_INDEX,
	IWM_RATE_MCS_4_INDEX,
	IWM_RATE_MCS_5_INDEX,
	IWM_RATE_MCS_6_INDEX,
	IWM_RATE_MCS_7_INDEX,
};
#endif
225
/* One raw NVM section image as read back from the device. */
struct iwm_nvm_section {
	uint16_t length;	/* number of valid bytes in 'data' */
	uint8_t *data;		/* section contents */
};
230
/*
 * Deferred-work context for an 802.11 state transition, queued to a
 * workqueue and handled by iwm_newstate_cb().
 */
struct iwm_newstate_state {
	struct work ns_wk;		/* workqueue linkage */
	enum ieee80211_state ns_nstate;	/* target net80211 state */
	int ns_arg;			/* opaque argument for the transition */
	int ns_generation;		/* NOTE(review): presumably used to
					 * discard stale transitions -- confirm
					 * against iwm_newstate_cb() */
};
237
238 static int iwm_store_cscheme(struct iwm_softc *, uint8_t *, size_t);
239 static int iwm_firmware_store_section(struct iwm_softc *,
240 enum iwm_ucode_type, uint8_t *, size_t);
241 static int iwm_set_default_calib(struct iwm_softc *, const void *);
242 static int iwm_read_firmware(struct iwm_softc *);
243 static uint32_t iwm_read_prph(struct iwm_softc *, uint32_t);
244 static void iwm_write_prph(struct iwm_softc *, uint32_t, uint32_t);
245 #ifdef IWM_DEBUG
246 static int iwm_read_mem(struct iwm_softc *, uint32_t, void *, int);
247 #endif
248 static int iwm_write_mem(struct iwm_softc *, uint32_t, const void *, int);
249 static int iwm_write_mem32(struct iwm_softc *, uint32_t, uint32_t);
250 static int iwm_poll_bit(struct iwm_softc *, int, uint32_t, uint32_t, int);
251 static int iwm_nic_lock(struct iwm_softc *);
252 static void iwm_nic_unlock(struct iwm_softc *);
253 static void iwm_set_bits_mask_prph(struct iwm_softc *, uint32_t, uint32_t,
254 uint32_t);
255 static void iwm_set_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
256 static void iwm_clear_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
257 static int iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *,
258 bus_size_t, bus_size_t);
259 static void iwm_dma_contig_free(struct iwm_dma_info *);
260 static int iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
261 static void iwm_disable_rx_dma(struct iwm_softc *);
262 static void iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
263 static void iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
264 static int iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
265 int);
266 static void iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
267 static void iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
268 static void iwm_enable_rfkill_int(struct iwm_softc *);
269 static int iwm_check_rfkill(struct iwm_softc *);
270 static void iwm_enable_interrupts(struct iwm_softc *);
271 static void iwm_restore_interrupts(struct iwm_softc *);
272 static void iwm_disable_interrupts(struct iwm_softc *);
273 static void iwm_ict_reset(struct iwm_softc *);
274 static int iwm_set_hw_ready(struct iwm_softc *);
275 static int iwm_prepare_card_hw(struct iwm_softc *);
276 static void iwm_apm_config(struct iwm_softc *);
277 static int iwm_apm_init(struct iwm_softc *);
278 static void iwm_apm_stop(struct iwm_softc *);
279 static int iwm_allow_mcast(struct iwm_softc *);
280 static int iwm_start_hw(struct iwm_softc *);
281 static void iwm_stop_device(struct iwm_softc *);
282 static void iwm_nic_config(struct iwm_softc *);
283 static int iwm_nic_rx_init(struct iwm_softc *);
284 static int iwm_nic_tx_init(struct iwm_softc *);
285 static int iwm_nic_init(struct iwm_softc *);
286 static int iwm_enable_txq(struct iwm_softc *, int, int, int);
287 static int iwm_post_alive(struct iwm_softc *);
288 static struct iwm_phy_db_entry *
289 iwm_phy_db_get_section(struct iwm_softc *,
290 enum iwm_phy_db_section_type, uint16_t);
291 static int iwm_phy_db_set_section(struct iwm_softc *,
292 struct iwm_calib_res_notif_phy_db *, uint16_t);
293 static int iwm_is_valid_channel(uint16_t);
294 static uint8_t iwm_ch_id_to_ch_index(uint16_t);
295 static uint16_t iwm_channel_id_to_papd(uint16_t);
296 static uint16_t iwm_channel_id_to_txp(struct iwm_softc *, uint16_t);
297 static int iwm_phy_db_get_section_data(struct iwm_softc *, uint32_t,
298 uint8_t **, uint16_t *, uint16_t);
299 static int iwm_send_phy_db_cmd(struct iwm_softc *, uint16_t, uint16_t,
300 void *);
301 static int iwm_phy_db_send_all_channel_groups(struct iwm_softc *,
302 enum iwm_phy_db_section_type, uint8_t);
303 static int iwm_send_phy_db_data(struct iwm_softc *);
304 static void iwm_te_v2_to_v1(const struct iwm_time_event_cmd_v2 *,
305 struct iwm_time_event_cmd_v1 *);
306 static int iwm_send_time_event_cmd(struct iwm_softc *,
307 const struct iwm_time_event_cmd_v2 *);
308 static void iwm_protect_session(struct iwm_softc *, struct iwm_node *,
309 uint32_t, uint32_t);
310 static int iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
311 uint16_t, uint8_t *, uint16_t *);
312 static int iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
313 uint16_t *, size_t);
314 static void iwm_init_channel_map(struct iwm_softc *, const uint16_t * const,
315 const uint8_t *, size_t);
316 #ifndef IEEE80211_NO_HT
317 static void iwm_setup_ht_rates(struct iwm_softc *);
318 static void iwm_htprot_task(void *);
319 static void iwm_update_htprot(struct ieee80211com *,
320 struct ieee80211_node *);
321 static int iwm_ampdu_rx_start(struct ieee80211com *,
322 struct ieee80211_node *, uint8_t);
323 static void iwm_ampdu_rx_stop(struct ieee80211com *,
324 struct ieee80211_node *, uint8_t);
325 static void iwm_sta_rx_agg(struct iwm_softc *, struct ieee80211_node *,
326 uint8_t, uint16_t, int);
327 #ifdef notyet
328 static int iwm_ampdu_tx_start(struct ieee80211com *,
329 struct ieee80211_node *, uint8_t);
330 static void iwm_ampdu_tx_stop(struct ieee80211com *,
331 struct ieee80211_node *, uint8_t);
332 #endif
333 static void iwm_ba_task(void *);
334 #endif
335
336 static int iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
337 const uint16_t *, const uint16_t *, const uint16_t *,
338 const uint16_t *, const uint16_t *);
339 static void iwm_set_hw_address_8000(struct iwm_softc *,
340 struct iwm_nvm_data *, const uint16_t *, const uint16_t *);
341 static int iwm_parse_nvm_sections(struct iwm_softc *,
342 struct iwm_nvm_section *);
343 static int iwm_nvm_init(struct iwm_softc *);
344 static int iwm_firmware_load_sect(struct iwm_softc *, uint32_t,
345 const uint8_t *, uint32_t);
346 static int iwm_firmware_load_chunk(struct iwm_softc *, uint32_t,
347 const uint8_t *, uint32_t);
348 static int iwm_load_firmware_7000(struct iwm_softc *, enum iwm_ucode_type);
349 static int iwm_load_cpu_sections_8000(struct iwm_softc *,
350 struct iwm_fw_sects *, int , int *);
351 static int iwm_load_firmware_8000(struct iwm_softc *, enum iwm_ucode_type);
352 static int iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
353 static int iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
354 static int iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
355 static int iwm_send_phy_cfg_cmd(struct iwm_softc *);
356 static int iwm_load_ucode_wait_alive(struct iwm_softc *,
357 enum iwm_ucode_type);
358 static int iwm_run_init_mvm_ucode(struct iwm_softc *, int);
359 static int iwm_rx_addbuf(struct iwm_softc *, int, int);
360 static int iwm_calc_rssi(struct iwm_softc *, struct iwm_rx_phy_info *);
361 static int iwm_get_signal_strength(struct iwm_softc *,
362 struct iwm_rx_phy_info *);
363 static void iwm_rx_rx_phy_cmd(struct iwm_softc *,
364 struct iwm_rx_packet *, struct iwm_rx_data *);
365 static int iwm_get_noise(const struct iwm_statistics_rx_non_phy *);
366 static void iwm_rx_rx_mpdu(struct iwm_softc *, struct iwm_rx_packet *,
367 struct iwm_rx_data *);
368 static void iwm_rx_tx_cmd_single(struct iwm_softc *, struct iwm_rx_packet *, struct iwm_node *);
369 static void iwm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
370 struct iwm_rx_data *);
371 static int iwm_binding_cmd(struct iwm_softc *, struct iwm_node *,
372 uint32_t);
373 #if 0
374 static int iwm_binding_update(struct iwm_softc *, struct iwm_node *, int);
375 static int iwm_binding_add_vif(struct iwm_softc *, struct iwm_node *);
376 #endif
377 static void iwm_phy_ctxt_cmd_hdr(struct iwm_softc *, struct iwm_phy_ctxt *,
378 struct iwm_phy_context_cmd *, uint32_t, uint32_t);
379 static void iwm_phy_ctxt_cmd_data(struct iwm_softc *,
380 struct iwm_phy_context_cmd *, struct ieee80211_channel *,
381 uint8_t, uint8_t);
382 static int iwm_phy_ctxt_cmd(struct iwm_softc *, struct iwm_phy_ctxt *,
383 uint8_t, uint8_t, uint32_t, uint32_t);
384 static int iwm_send_cmd(struct iwm_softc *, struct iwm_host_cmd *);
385 static int iwm_send_cmd_pdu(struct iwm_softc *, uint32_t, uint32_t,
386 uint16_t, const void *);
387 static int iwm_send_cmd_status(struct iwm_softc *, struct iwm_host_cmd *,
388 uint32_t *);
389 static int iwm_send_cmd_pdu_status(struct iwm_softc *, uint32_t, uint16_t,
390 const void *, uint32_t *);
391 static void iwm_free_resp(struct iwm_softc *, struct iwm_host_cmd *);
392 static void iwm_cmd_done(struct iwm_softc *, int qid, int idx);
393 #if 0
394 static void iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
395 uint16_t);
396 #endif
397 static const struct iwm_rate *
398 iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
399 struct ieee80211_frame *, struct iwm_tx_cmd *);
400 static int iwm_tx(struct iwm_softc *, struct mbuf *,
401 struct ieee80211_node *, int);
402 static void iwm_led_enable(struct iwm_softc *);
403 static void iwm_led_disable(struct iwm_softc *);
404 static int iwm_led_is_enabled(struct iwm_softc *);
405 static void iwm_led_blink_timeout(void *);
406 static void iwm_led_blink_start(struct iwm_softc *);
407 static void iwm_led_blink_stop(struct iwm_softc *);
408 static int iwm_beacon_filter_send_cmd(struct iwm_softc *,
409 struct iwm_beacon_filter_cmd *);
410 static void iwm_beacon_filter_set_cqm_params(struct iwm_softc *,
411 struct iwm_node *, struct iwm_beacon_filter_cmd *);
412 static int iwm_update_beacon_abort(struct iwm_softc *, struct iwm_node *,
413 int);
414 static void iwm_power_build_cmd(struct iwm_softc *, struct iwm_node *,
415 struct iwm_mac_power_cmd *);
416 static int iwm_power_mac_update_mode(struct iwm_softc *,
417 struct iwm_node *);
418 static int iwm_power_update_device(struct iwm_softc *);
419 #ifdef notyet
420 static int iwm_enable_beacon_filter(struct iwm_softc *, struct iwm_node *);
421 #endif
422 static int iwm_disable_beacon_filter(struct iwm_softc *);
423 static int iwm_add_sta_cmd(struct iwm_softc *, struct iwm_node *, int);
424 static int iwm_add_aux_sta(struct iwm_softc *);
425 static uint16_t iwm_scan_rx_chain(struct iwm_softc *);
426 static uint32_t iwm_scan_rate_n_flags(struct iwm_softc *, int, int);
427 #ifdef notyet
428 static uint16_t iwm_get_active_dwell(struct iwm_softc *, int, int);
429 static uint16_t iwm_get_passive_dwell(struct iwm_softc *, int);
430 #endif
431 static uint8_t iwm_lmac_scan_fill_channels(struct iwm_softc *,
432 struct iwm_scan_channel_cfg_lmac *, int);
433 static int iwm_fill_probe_req(struct iwm_softc *,
434 struct iwm_scan_probe_req *);
435 static int iwm_lmac_scan(struct iwm_softc *);
436 static int iwm_config_umac_scan(struct iwm_softc *);
437 static int iwm_umac_scan(struct iwm_softc *);
438 static uint8_t iwm_ridx2rate(struct ieee80211_rateset *, int);
439 static void iwm_ack_rates(struct iwm_softc *, struct iwm_node *, int *,
440 int *);
441 static void iwm_mac_ctxt_cmd_common(struct iwm_softc *, struct iwm_node *,
442 struct iwm_mac_ctx_cmd *, uint32_t, int);
443 static void iwm_mac_ctxt_cmd_fill_sta(struct iwm_softc *, struct iwm_node *,
444 struct iwm_mac_data_sta *, int);
445 static int iwm_mac_ctxt_cmd(struct iwm_softc *, struct iwm_node *,
446 uint32_t, int);
447 static int iwm_update_quotas(struct iwm_softc *, struct iwm_node *);
448 static int iwm_auth(struct iwm_softc *);
449 static int iwm_assoc(struct iwm_softc *);
450 static void iwm_calib_timeout(void *);
451 #ifndef IEEE80211_NO_HT
452 static void iwm_setrates_task(void *);
453 static int iwm_setrates(struct iwm_node *);
454 #endif
455 static int iwm_media_change(struct ifnet *);
456 static void iwm_newstate_cb(struct work *, void *);
457 static int iwm_newstate(struct ieee80211com *, enum ieee80211_state, int);
458 static void iwm_endscan_cb(struct work *, void *);
459 static void iwm_fill_sf_command(struct iwm_softc *, struct iwm_sf_cfg_cmd *,
460 struct ieee80211_node *);
461 static int iwm_sf_config(struct iwm_softc *, int);
462 static int iwm_send_bt_init_conf(struct iwm_softc *);
463 static int iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
464 static void iwm_tt_tx_backoff(struct iwm_softc *, uint32_t);
465 static int iwm_init_hw(struct iwm_softc *);
466 static int iwm_init(struct ifnet *);
467 static void iwm_start(struct ifnet *);
468 static void iwm_stop(struct ifnet *, int);
469 static void iwm_watchdog(struct ifnet *);
470 static int iwm_ioctl(struct ifnet *, u_long, void *);
471 #ifdef IWM_DEBUG
472 static const char *iwm_desc_lookup(uint32_t);
473 static void iwm_nic_error(struct iwm_softc *);
474 static void iwm_nic_umac_error(struct iwm_softc *);
475 #endif
476 static void iwm_notif_intr(struct iwm_softc *);
477 static int iwm_intr(void *);
478 static int iwm_preinit(struct iwm_softc *);
479 static void iwm_attach_hook(device_t);
480 static void iwm_attach(device_t, device_t, void *);
481 #if 0
482 static void iwm_init_task(void *);
483 static int iwm_activate(device_t, enum devact);
484 static void iwm_wakeup(struct iwm_softc *);
485 #endif
486 static void iwm_radiotap_attach(struct iwm_softc *);
487 static int iwm_sysctl_fw_loaded_handler(SYSCTLFN_PROTO);
488
489 static int iwm_sysctl_root_num;
490
491 static int
492 iwm_firmload(struct iwm_softc *sc)
493 {
494 struct iwm_fw_info *fw = &sc->sc_fw;
495 firmware_handle_t fwh;
496 int err;
497
498 if (ISSET(sc->sc_flags, IWM_FLAG_FW_LOADED))
499 return 0;
500
501 /* Open firmware image. */
502 err = firmware_open("if_iwm", sc->sc_fwname, &fwh);
503 if (err) {
504 aprint_error_dev(sc->sc_dev,
505 "could not get firmware handle %s\n", sc->sc_fwname);
506 return err;
507 }
508
509 if (fw->fw_rawdata != NULL && fw->fw_rawsize > 0) {
510 kmem_free(fw->fw_rawdata, fw->fw_rawsize);
511 fw->fw_rawdata = NULL;
512 }
513
514 fw->fw_rawsize = firmware_get_size(fwh);
515 /*
516 * Well, this is how the Linux driver checks it ....
517 */
518 if (fw->fw_rawsize < sizeof(uint32_t)) {
519 aprint_error_dev(sc->sc_dev,
520 "firmware too short: %zd bytes\n", fw->fw_rawsize);
521 err = EINVAL;
522 goto out;
523 }
524
525 /* some sanity */
526 if (fw->fw_rawsize > IWM_FWMAXSIZE) {
527 aprint_error_dev(sc->sc_dev,
528 "firmware size is ridiculous: %zd bytes\n", fw->fw_rawsize);
529 err = EINVAL;
530 goto out;
531 }
532
533 /* Read the firmware. */
534 fw->fw_rawdata = kmem_alloc(fw->fw_rawsize, KM_SLEEP);
535 if (fw->fw_rawdata == NULL) {
536 aprint_error_dev(sc->sc_dev,
537 "not enough memory to stock firmware %s\n", sc->sc_fwname);
538 err = ENOMEM;
539 goto out;
540 }
541 err = firmware_read(fwh, 0, fw->fw_rawdata, fw->fw_rawsize);
542 if (err) {
543 aprint_error_dev(sc->sc_dev,
544 "could not read firmware %s\n", sc->sc_fwname);
545 goto out;
546 }
547
548 SET(sc->sc_flags, IWM_FLAG_FW_LOADED);
549 out:
550 /* caller will release memory, if necessary */
551
552 firmware_close(fwh);
553 return err;
554 }
555
556 /*
557 * just maintaining status quo.
558 */
559 static void
560 iwm_fix_channel(struct iwm_softc *sc, struct mbuf *m)
561 {
562 struct ieee80211com *ic = &sc->sc_ic;
563 struct ieee80211_frame *wh;
564 uint8_t subtype;
565
566 wh = mtod(m, struct ieee80211_frame *);
567
568 if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_MGT)
569 return;
570
571 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
572
573 if (subtype != IEEE80211_FC0_SUBTYPE_BEACON &&
574 subtype != IEEE80211_FC0_SUBTYPE_PROBE_RESP)
575 return;
576
577 int chan = le32toh(sc->sc_last_phy_info.channel);
578 if (chan < __arraycount(ic->ic_channels))
579 ic->ic_curchan = &ic->ic_channels[chan];
580 }
581
582 static int
583 iwm_store_cscheme(struct iwm_softc *sc, uint8_t *data, size_t dlen)
584 {
585 struct iwm_fw_cscheme_list *l = (struct iwm_fw_cscheme_list *)data;
586
587 if (dlen < sizeof(*l) ||
588 dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
589 return EINVAL;
590
591 /* we don't actually store anything for now, always use s/w crypto */
592
593 return 0;
594 }
595
596 static int
597 iwm_firmware_store_section(struct iwm_softc *sc, enum iwm_ucode_type type,
598 uint8_t *data, size_t dlen)
599 {
600 struct iwm_fw_sects *fws;
601 struct iwm_fw_onesect *fwone;
602
603 if (type >= IWM_UCODE_TYPE_MAX)
604 return EINVAL;
605 if (dlen < sizeof(uint32_t))
606 return EINVAL;
607
608 fws = &sc->sc_fw.fw_sects[type];
609 if (fws->fw_count >= IWM_UCODE_SECT_MAX)
610 return EINVAL;
611
612 fwone = &fws->fw_sect[fws->fw_count];
613
614 /* first 32bit are device load offset */
615 memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));
616
617 /* rest is data */
618 fwone->fws_data = data + sizeof(uint32_t);
619 fwone->fws_len = dlen - sizeof(uint32_t);
620
621 /* for freeing the buffer during driver unload */
622 fwone->fws_alloc = data;
623 fwone->fws_allocsize = dlen;
624
625 fws->fw_count++;
626 fws->fw_totlen += fwone->fws_len;
627
628 return 0;
629 }
630
/*
 * Wire format of an IWM_UCODE_TLV_DEF_CALIB record: the ucode image
 * the calibration defaults apply to, followed by the trigger masks.
 * Little-endian on the wire; see iwm_set_default_calib().
 */
struct iwm_tlv_calib_data {
	uint32_t ucode_type;		/* enum iwm_ucode_type, LE32 */
	struct iwm_tlv_calib_ctrl calib;
} __packed;
635
636 static int
637 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
638 {
639 const struct iwm_tlv_calib_data *def_calib = data;
640 uint32_t ucode_type = le32toh(def_calib->ucode_type);
641
642 if (ucode_type >= IWM_UCODE_TYPE_MAX) {
643 DPRINTF(("%s: Wrong ucode_type %u for default calibration.\n",
644 DEVNAME(sc), ucode_type));
645 return EINVAL;
646 }
647
648 sc->sc_default_calib[ucode_type].flow_trigger =
649 def_calib->calib.flow_trigger;
650 sc->sc_default_calib[ucode_type].event_trigger =
651 def_calib->calib.event_trigger;
652
653 return 0;
654 }
655
656 static int
657 iwm_read_firmware(struct iwm_softc *sc)
658 {
659 struct iwm_fw_info *fw = &sc->sc_fw;
660 struct iwm_tlv_ucode_header *uhdr;
661 struct iwm_ucode_tlv tlv;
662 enum iwm_ucode_tlv_type tlv_type;
663 uint8_t *data;
664 int err, status;
665 size_t len;
666
667 if (fw->fw_status == IWM_FW_STATUS_NONE) {
668 fw->fw_status = IWM_FW_STATUS_INPROGRESS;
669 } else {
670 while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
671 tsleep(&sc->sc_fw, 0, "iwmfwp", 0);
672 }
673 status = fw->fw_status;
674
675 if (status == IWM_FW_STATUS_DONE)
676 return 0;
677
678 err = iwm_firmload(sc);
679 if (err) {
680 aprint_error_dev(sc->sc_dev,
681 "could not read firmware %s (error %d)\n",
682 sc->sc_fwname, err);
683 goto out;
684 }
685
686 sc->sc_capaflags = 0;
687 sc->sc_capa_n_scan_channels = IWM_MAX_NUM_SCAN_CHANNELS;
688 memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
689 memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));
690
691 uhdr = (void *)fw->fw_rawdata;
692 if (*(uint32_t *)fw->fw_rawdata != 0
693 || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
694 aprint_error_dev(sc->sc_dev, "invalid firmware %s\n",
695 sc->sc_fwname);
696 err = EINVAL;
697 goto out;
698 }
699
700 snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%d.%d (API ver %d)",
701 IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
702 IWM_UCODE_MINOR(le32toh(uhdr->ver)),
703 IWM_UCODE_API(le32toh(uhdr->ver)));
704 data = uhdr->data;
705 len = fw->fw_rawsize - sizeof(*uhdr);
706
707 while (len >= sizeof(tlv)) {
708 size_t tlv_len;
709 void *tlv_data;
710
711 memcpy(&tlv, data, sizeof(tlv));
712 tlv_len = le32toh(tlv.length);
713 tlv_type = le32toh(tlv.type);
714
715 len -= sizeof(tlv);
716 data += sizeof(tlv);
717 tlv_data = data;
718
719 if (len < tlv_len) {
720 aprint_error_dev(sc->sc_dev,
721 "firmware too short: %zu bytes\n", len);
722 err = EINVAL;
723 goto parse_out;
724 }
725
726 switch (tlv_type) {
727 case IWM_UCODE_TLV_PROBE_MAX_LEN:
728 if (tlv_len < sizeof(uint32_t)) {
729 err = EINVAL;
730 goto parse_out;
731 }
732 sc->sc_capa_max_probe_len
733 = le32toh(*(uint32_t *)tlv_data);
734 /* limit it to something sensible */
735 if (sc->sc_capa_max_probe_len >
736 IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
737 err = EINVAL;
738 goto parse_out;
739 }
740 break;
741 case IWM_UCODE_TLV_PAN:
742 if (tlv_len) {
743 err = EINVAL;
744 goto parse_out;
745 }
746 sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
747 break;
748 case IWM_UCODE_TLV_FLAGS:
749 if (tlv_len < sizeof(uint32_t)) {
750 err = EINVAL;
751 goto parse_out;
752 }
753 /*
754 * Apparently there can be many flags, but Linux driver
755 * parses only the first one, and so do we.
756 *
757 * XXX: why does this override IWM_UCODE_TLV_PAN?
758 * Intentional or a bug? Observations from
759 * current firmware file:
760 * 1) TLV_PAN is parsed first
761 * 2) TLV_FLAGS contains TLV_FLAGS_PAN
762 * ==> this resets TLV_PAN to itself... hnnnk
763 */
764 sc->sc_capaflags = le32toh(*(uint32_t *)tlv_data);
765 break;
766 case IWM_UCODE_TLV_CSCHEME:
767 err = iwm_store_cscheme(sc, tlv_data, tlv_len);
768 if (err)
769 goto parse_out;
770 break;
771 case IWM_UCODE_TLV_NUM_OF_CPU: {
772 uint32_t num_cpu;
773 if (tlv_len != sizeof(uint32_t)) {
774 err = EINVAL;
775 goto parse_out;
776 }
777 num_cpu = le32toh(*(uint32_t *)tlv_data);
778 if (num_cpu < 1 || num_cpu > 2) {
779 err = EINVAL;
780 goto parse_out;
781 }
782 break;
783 }
784 case IWM_UCODE_TLV_SEC_RT:
785 err = iwm_firmware_store_section(sc,
786 IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len);
787 if (err)
788 goto parse_out;
789 break;
790 case IWM_UCODE_TLV_SEC_INIT:
791 err = iwm_firmware_store_section(sc,
792 IWM_UCODE_TYPE_INIT, tlv_data, tlv_len);
793 if (err)
794 goto parse_out;
795 break;
796 case IWM_UCODE_TLV_SEC_WOWLAN:
797 err = iwm_firmware_store_section(sc,
798 IWM_UCODE_TYPE_WOW, tlv_data, tlv_len);
799 if (err)
800 goto parse_out;
801 break;
802 case IWM_UCODE_TLV_DEF_CALIB:
803 if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
804 err = EINVAL;
805 goto parse_out;
806 }
807 err = iwm_set_default_calib(sc, tlv_data);
808 if (err)
809 goto parse_out;
810 break;
811 case IWM_UCODE_TLV_PHY_SKU:
812 if (tlv_len != sizeof(uint32_t)) {
813 err = EINVAL;
814 goto parse_out;
815 }
816 sc->sc_fw_phy_config = le32toh(*(uint32_t *)tlv_data);
817 break;
818
819 case IWM_UCODE_TLV_API_CHANGES_SET: {
820 struct iwm_ucode_api *api;
821 if (tlv_len != sizeof(*api)) {
822 err = EINVAL;
823 goto parse_out;
824 }
825 api = (struct iwm_ucode_api *)tlv_data;
826 /* Flags may exceed 32 bits in future firmware. */
827 if (le32toh(api->api_index) > 0) {
828 goto parse_out;
829 }
830 sc->sc_ucode_api = le32toh(api->api_flags);
831 break;
832 }
833
834 case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
835 struct iwm_ucode_capa *capa;
836 int idx, i;
837 if (tlv_len != sizeof(*capa)) {
838 err = EINVAL;
839 goto parse_out;
840 }
841 capa = (struct iwm_ucode_capa *)tlv_data;
842 idx = le32toh(capa->api_index);
843 if (idx >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
844 goto parse_out;
845 }
846 for (i = 0; i < 32; i++) {
847 if (!ISSET(le32toh(capa->api_capa), __BIT(i)))
848 continue;
849 setbit(sc->sc_enabled_capa, i + (32 * idx));
850 }
851 break;
852 }
853
854 case IWM_UCODE_TLV_FW_UNDOCUMENTED1:
855 case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
856 case IWM_UCODE_TLV_FW_GSCAN_CAPA:
857 /* ignore, not used by current driver */
858 break;
859
860 case IWM_UCODE_TLV_SEC_RT_USNIFFER:
861 err = iwm_firmware_store_section(sc,
862 IWM_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
863 tlv_len);
864 if (err)
865 goto parse_out;
866 break;
867
868 case IWM_UCODE_TLV_N_SCAN_CHANNELS:
869 if (tlv_len != sizeof(uint32_t)) {
870 err = EINVAL;
871 goto parse_out;
872 }
873 sc->sc_capa_n_scan_channels =
874 le32toh(*(uint32_t *)tlv_data);
875 break;
876
877 case IWM_UCODE_TLV_FW_VERSION:
878 if (tlv_len != sizeof(uint32_t) * 3) {
879 err = EINVAL;
880 goto parse_out;
881 }
882 snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
883 "%d.%d.%d",
884 le32toh(((uint32_t *)tlv_data)[0]),
885 le32toh(((uint32_t *)tlv_data)[1]),
886 le32toh(((uint32_t *)tlv_data)[2]));
887 break;
888
889 default:
890 DPRINTF(("%s: unknown firmware section %d, abort\n",
891 DEVNAME(sc), tlv_type));
892 err = EINVAL;
893 goto parse_out;
894 }
895
896 len -= roundup(tlv_len, 4);
897 data += roundup(tlv_len, 4);
898 }
899
900 KASSERT(err == 0);
901
902 parse_out:
903 if (err) {
904 aprint_error_dev(sc->sc_dev,
905 "firmware parse error, section type %d\n", tlv_type);
906 }
907
908 if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
909 aprint_error_dev(sc->sc_dev,
910 "device uses unsupported power ops\n");
911 err = ENOTSUP;
912 }
913
914 out:
915 if (err)
916 fw->fw_status = IWM_FW_STATUS_NONE;
917 else
918 fw->fw_status = IWM_FW_STATUS_DONE;
919 wakeup(&sc->sc_fw);
920
921 if (err && fw->fw_rawdata != NULL) {
922 kmem_free(fw->fw_rawdata, fw->fw_rawsize);
923 fw->fw_rawdata = NULL;
924 CLR(sc->sc_flags, IWM_FLAG_FW_LOADED);
925 /* don't touch fw->fw_status */
926 memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
927 }
928 return err;
929 }
930
/*
 * Read a periphery (PRPH) register via the indirect HBUS access
 * registers: latch the 20-bit address (plus the 3<<24 access-mode
 * bits) into RADDR, then fetch RDAT.  Callers in this file take the
 * NIC lock (iwm_nic_lock) around PRPH accesses.
 */
static uint32_t
iwm_read_prph(struct iwm_softc *sc, uint32_t addr)
{
	IWM_WRITE(sc,
	    IWM_HBUS_TARG_PRPH_RADDR, ((addr & 0x000fffff) | (3 << 24)));
	/* Order the address write before the data read. */
	IWM_BARRIER_READ_WRITE(sc);
	return IWM_READ(sc, IWM_HBUS_TARG_PRPH_RDAT);
}
939
/*
 * Write a periphery (PRPH) register via the indirect HBUS access
 * registers: latch the 20-bit address into WADDR, then store the
 * value through WDAT.  Counterpart of iwm_read_prph().
 */
static void
iwm_write_prph(struct iwm_softc *sc, uint32_t addr, uint32_t val)
{
	IWM_WRITE(sc,
	    IWM_HBUS_TARG_PRPH_WADDR, ((addr & 0x000fffff) | (3 << 24)));
	/* Order the address write before the data write. */
	IWM_BARRIER_WRITE(sc);
	IWM_WRITE(sc, IWM_HBUS_TARG_PRPH_WDAT, val);
}
948
949 #ifdef IWM_DEBUG
950 static int
951 iwm_read_mem(struct iwm_softc *sc, uint32_t addr, void *buf, int dwords)
952 {
953 int offs, err = 0;
954 uint32_t *vals = buf;
955
956 if (iwm_nic_lock(sc)) {
957 IWM_WRITE(sc, IWM_HBUS_TARG_MEM_RADDR, addr);
958 for (offs = 0; offs < dwords; offs++)
959 vals[offs] = IWM_READ(sc, IWM_HBUS_TARG_MEM_RDAT);
960 iwm_nic_unlock(sc);
961 } else {
962 err = EBUSY;
963 }
964 return err;
965 }
966 #endif
967
968 static int
969 iwm_write_mem(struct iwm_softc *sc, uint32_t addr, const void *buf, int dwords)
970 {
971 int offs;
972 const uint32_t *vals = buf;
973
974 if (iwm_nic_lock(sc)) {
975 IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WADDR, addr);
976 /* WADDR auto-increments */
977 for (offs = 0; offs < dwords; offs++) {
978 uint32_t val = vals ? vals[offs] : 0;
979 IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WDAT, val);
980 }
981 iwm_nic_unlock(sc);
982 } else {
983 return EBUSY;
984 }
985 return 0;
986 }
987
/* Write a single 32-bit word of device SRAM at 'addr'. */
static int
iwm_write_mem32(struct iwm_softc *sc, uint32_t addr, uint32_t val)
{
	return iwm_write_mem(sc, addr, &val, 1);
}
993
994 static int
995 iwm_poll_bit(struct iwm_softc *sc, int reg, uint32_t bits, uint32_t mask,
996 int timo)
997 {
998 for (;;) {
999 if ((IWM_READ(sc, reg) & mask) == (bits & mask)) {
1000 return 1;
1001 }
1002 if (timo < 10) {
1003 return 0;
1004 }
1005 timo -= 10;
1006 DELAY(10);
1007 }
1008 }
1009
/*
 * Request MAC access (raise MAC_ACCESS_REQ) and poll until the MAC
 * clock is ready.  Returns 1 on success, 0 on timeout; on timeout an
 * NMI is forced to kick the device.  Must be balanced with
 * iwm_nic_unlock() on success.
 */
static int
iwm_nic_lock(struct iwm_softc *sc)
{
	int rv = 0;

	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* 8000-family devices get a short settling delay before polling. */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
		DELAY(2);

	/* Up to 15ms for the clock-ready/not-going-to-sleep condition. */
	if (iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
	     | IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP, 15000)) {
		rv = 1;
	} else {
		aprint_error_dev(sc->sc_dev, "device timeout\n");
		/* Force an NMI to try to recover the device. */
		IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_FORCE_NMI);
	}

	return rv;
}
1033
/* Drop the MAC access request taken by iwm_nic_lock(). */
static void
iwm_nic_unlock(struct iwm_softc *sc)
{
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}
1040
1041 static void
1042 iwm_set_bits_mask_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits,
1043 uint32_t mask)
1044 {
1045 uint32_t val;
1046
1047 /* XXX: no error path? */
1048 if (iwm_nic_lock(sc)) {
1049 val = iwm_read_prph(sc, reg) & mask;
1050 val |= bits;
1051 iwm_write_prph(sc, reg, val);
1052 iwm_nic_unlock(sc);
1053 }
1054 }
1055
/* OR 'bits' into PRPH register 'reg' (read-modify-write). */
static void
iwm_set_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
{
	iwm_set_bits_mask_prph(sc, reg, bits, ~0);
}
1061
/* Clear 'bits' in PRPH register 'reg' (read-modify-write). */
static void
iwm_clear_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
{
	iwm_set_bits_mask_prph(sc, reg, 0, ~bits);
}
1067
1068 static int
1069 iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
1070 bus_size_t size, bus_size_t alignment)
1071 {
1072 int nsegs, err;
1073 void *va;
1074
1075 dma->tag = tag;
1076 dma->size = size;
1077
1078 err = bus_dmamap_create(tag, size, 1, size, 0, BUS_DMA_NOWAIT,
1079 &dma->map);
1080 if (err)
1081 goto fail;
1082
1083 err = bus_dmamem_alloc(tag, size, alignment, 0, &dma->seg, 1, &nsegs,
1084 BUS_DMA_NOWAIT);
1085 if (err)
1086 goto fail;
1087
1088 err = bus_dmamem_map(tag, &dma->seg, 1, size, &va, BUS_DMA_NOWAIT);
1089 if (err)
1090 goto fail;
1091 dma->vaddr = va;
1092
1093 err = bus_dmamap_load(tag, dma->map, dma->vaddr, size, NULL,
1094 BUS_DMA_NOWAIT);
1095 if (err)
1096 goto fail;
1097
1098 memset(dma->vaddr, 0, size);
1099 bus_dmamap_sync(tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE);
1100 dma->paddr = dma->map->dm_segs[0].ds_addr;
1101
1102 return 0;
1103
1104 fail: iwm_dma_contig_free(dma);
1105 return err;
1106 }
1107
/*
 * Release a DMA area set up by iwm_dma_contig_alloc().  Safe to call
 * on an already-freed iwm_dma_info (map is NULLed after destroy).
 *
 * NOTE(review): dma->vaddr != NULL is taken to imply dma->seg is
 * valid; a structure where the segment was allocated but never
 * mapped would leak the segment here -- confirm callers never leave
 * it in that state.
 */
static void
iwm_dma_contig_free(struct iwm_dma_info *dma)
{
	if (dma->map != NULL) {
		if (dma->vaddr != NULL) {
			bus_dmamap_sync(dma->tag, dma->map, 0, dma->size,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(dma->tag, dma->map);
			bus_dmamem_unmap(dma->tag, dma->vaddr, dma->size);
			bus_dmamem_free(dma->tag, &dma->seg, 1);
			dma->vaddr = NULL;
		}
		bus_dmamap_destroy(dma->tag, dma->map);
		dma->map = NULL;
	}
}
1124
/*
 * Allocate the RX descriptor ring, the RX status area, and a DMA map
 * plus an initial receive buffer for each of the IWM_RX_RING_COUNT
 * slots.  On any failure everything allocated so far is released via
 * iwm_free_rx_ring().  Returns 0 or an errno.
 */
static int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	bus_size_t size;
	int i, err;

	ring->cur = 0;

	/* Allocate RX descriptors (256-byte aligned). */
	size = IWM_RX_RING_COUNT * sizeof(uint32_t);
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate RX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/* Allocate RX status area (16-byte aligned). */
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
	    sizeof(*ring->stat), 16);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate RX status DMA memory\n");
		goto fail;
	}
	ring->stat = ring->stat_dma.vaddr;

	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];

		memset(data, 0, sizeof(*data));
		err = bus_dmamap_create(sc->sc_dmat, IWM_RBUF_SIZE, 1,
		    IWM_RBUF_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &data->map);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could not create RX buf DMA map\n");
			goto fail;
		}

		/* Populate the slot with a receive buffer. */
		err = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i);
		if (err)
			goto fail;
	}
	return 0;

fail:	iwm_free_rx_ring(sc, ring);
	return err;
}
1175
1176 static void
1177 iwm_disable_rx_dma(struct iwm_softc *sc)
1178 {
1179 int ntries;
1180
1181 if (iwm_nic_lock(sc)) {
1182 IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
1183 for (ntries = 0; ntries < 1000; ntries++) {
1184 if (IWM_READ(sc, IWM_FH_MEM_RSSR_RX_STATUS_REG) &
1185 IWM_FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE)
1186 break;
1187 DELAY(10);
1188 }
1189 iwm_nic_unlock(sc);
1190 }
1191 }
1192
/*
 * Rewind the RX ring cursor and clear the status area (flushed for
 * the hardware).  The mbufs attached to the ring slots are kept.
 */
void
iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	ring->cur = 0;
	memset(ring->stat, 0, sizeof(*ring->stat));
	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
	    ring->stat_dma.size, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}
1201
1202 static void
1203 iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1204 {
1205 int i;
1206
1207 iwm_dma_contig_free(&ring->desc_dma);
1208 iwm_dma_contig_free(&ring->stat_dma);
1209
1210 for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1211 struct iwm_rx_data *data = &ring->data[i];
1212
1213 if (data->m != NULL) {
1214 bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1215 data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1216 bus_dmamap_unload(sc->sc_dmat, data->map);
1217 m_freem(data->m);
1218 data->m = NULL;
1219 }
1220 if (data->map != NULL) {
1221 bus_dmamap_destroy(sc->sc_dmat, data->map);
1222 data->map = NULL;
1223 }
1224 }
1225 }
1226
/*
 * Allocate a TX ring: the descriptor array for every queue, and --
 * for queues up to and including the command queue -- the per-slot
 * device command buffers and DMA maps.  Returns 0 or an errno; on
 * failure everything is released via iwm_free_tx_ring().
 */
static int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	int i, err;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate TX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/*
	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
	 * to allocate commands space for other rings.
	 */
	if (qid > IWM_CMD_QUEUE)
		return 0;

	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate TX cmd DMA memory\n");
		goto fail;
	}
	ring->cmd = ring->cmd_dma.vaddr;

	/* Precompute each slot's bus addresses into the command array. */
	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];
		size_t mapsize;

		data->cmd_paddr = paddr;
		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
		    + offsetof(struct iwm_tx_cmd, scratch);
		paddr += sizeof(struct iwm_device_cmd);

		/* FW commands may require more mapped space than packets. */
		if (qid == IWM_CMD_QUEUE)
			mapsize = (sizeof(struct iwm_cmd_header) +
			    IWM_MAX_CMD_PAYLOAD_SIZE);
		else
			mapsize = MCLBYTES;
		err = bus_dmamap_create(sc->sc_dmat, mapsize,
		    IWM_NUM_OF_TBS - 2, mapsize, 0, BUS_DMA_NOWAIT, &data->map);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could not create TX buf DMA map\n");
			goto fail;
		}
	}
	/* All slots consumed exactly the command array. */
	KASSERT(paddr == ring->cmd_dma.paddr + size);
	return 0;

fail:	iwm_free_tx_ring(sc, ring);
	return err;
}
1294
1295 static void
1296 iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1297 {
1298 int i;
1299
1300 for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1301 struct iwm_tx_data *data = &ring->data[i];
1302
1303 if (data->m != NULL) {
1304 bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1305 data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1306 bus_dmamap_unload(sc->sc_dmat, data->map);
1307 m_freem(data->m);
1308 data->m = NULL;
1309 }
1310 }
1311 /* Clear TX descriptors. */
1312 memset(ring->desc, 0, ring->desc_dma.size);
1313 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0,
1314 ring->desc_dma.size, BUS_DMASYNC_PREWRITE);
1315 sc->qfullmsk &= ~(1 << ring->qid);
1316 ring->queued = 0;
1317 ring->cur = 0;
1318 }
1319
1320 static void
1321 iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1322 {
1323 int i;
1324
1325 iwm_dma_contig_free(&ring->desc_dma);
1326 iwm_dma_contig_free(&ring->cmd_dma);
1327
1328 for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1329 struct iwm_tx_data *data = &ring->data[i];
1330
1331 if (data->m != NULL) {
1332 bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1333 data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1334 bus_dmamap_unload(sc->sc_dmat, data->map);
1335 m_freem(data->m);
1336 }
1337 if (data->map != NULL) {
1338 bus_dmamap_destroy(sc->sc_dmat, data->map);
1339 data->map = NULL;
1340 }
1341 }
1342 }
1343
/* Restrict the interrupt mask to RF-kill state changes only. */
static void
iwm_enable_rfkill_int(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INT_BIT_RF_KILL;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1350
1351 static int
1352 iwm_check_rfkill(struct iwm_softc *sc)
1353 {
1354 uint32_t v;
1355 int s;
1356 int rv;
1357
1358 s = splnet();
1359
1360 /*
1361 * "documentation" is not really helpful here:
1362 * 27: HW_RF_KILL_SW
1363 * Indicates state of (platform's) hardware RF-Kill switch
1364 *
1365 * But apparently when it's off, it's on ...
1366 */
1367 v = IWM_READ(sc, IWM_CSR_GP_CNTRL);
1368 rv = (v & IWM_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) == 0;
1369 if (rv) {
1370 sc->sc_flags |= IWM_FLAG_RFKILL;
1371 } else {
1372 sc->sc_flags &= ~IWM_FLAG_RFKILL;
1373 }
1374
1375 splx(s);
1376 return rv;
1377 }
1378
/* Enable the default interrupt set and cache it in sc_intmask. */
static void
iwm_enable_interrupts(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1385
/* Re-program the hardware mask from the cached sc_intmask. */
static void
iwm_restore_interrupts(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1391
/*
 * Mask all interrupts and acknowledge anything pending, under splnet
 * so the interrupt handler cannot race the mask update.
 */
static void
iwm_disable_interrupts(struct iwm_softc *sc)
{
	int s = splnet();

	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	/* acknowledge all interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);

	splx(s);
}
1405
/*
 * Reset the interrupt cause table (ICT): clear it, hand its physical
 * address to the hardware, and switch the driver to ICT interrupt
 * mode with interrupts re-enabled.
 */
static void
iwm_ict_reset(struct iwm_softc *sc)
{
	iwm_disable_interrupts(sc);

	/* Zero the table and flush it before the hardware uses it. */
	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
	bus_dmamap_sync(sc->sc_dmat, sc->ict_dma.map, 0, IWM_ICT_SIZE,
	    BUS_DMASYNC_PREWRITE);
	sc->ict_cur = 0;

	/* Set physical address of ICT (4KB aligned). */
	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
	    IWM_CSR_DRAM_INT_TBL_ENABLE
	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWM_FLAG_USE_ICT;

	/* Acknowledge anything pending, then re-enable. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);
}
1429
1430 #define IWM_HW_READY_TIMEOUT 50
1431 static int
1432 iwm_set_hw_ready(struct iwm_softc *sc)
1433 {
1434 int ready;
1435
1436 IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1437 IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
1438
1439 ready = iwm_poll_bit(sc, IWM_CSR_HW_IF_CONFIG_REG,
1440 IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
1441 IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
1442 IWM_HW_READY_TIMEOUT);
1443 if (ready)
1444 IWM_SETBITS(sc, IWM_CSR_MBOX_SET_REG,
1445 IWM_CSR_MBOX_SET_REG_OS_ALIVE);
1446
1447 return ready;
1448 }
1449 #undef IWM_HW_READY_TIMEOUT
1450
1451 static int
1452 iwm_prepare_card_hw(struct iwm_softc *sc)
1453 {
1454 int t = 0;
1455
1456 if (iwm_set_hw_ready(sc))
1457 return 0;
1458
1459 DELAY(100);
1460
1461 /* If HW is not ready, prepare the conditions to check again */
1462 IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1463 IWM_CSR_HW_IF_CONFIG_REG_PREPARE);
1464
1465 do {
1466 if (iwm_set_hw_ready(sc))
1467 return 0;
1468 DELAY(200);
1469 t += 200;
1470 } while (t < 150000);
1471
1472 return ETIMEDOUT;
1473 }
1474
1475 static void
1476 iwm_apm_config(struct iwm_softc *sc)
1477 {
1478 pcireg_t reg;
1479
1480 reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
1481 sc->sc_cap_off + PCIE_LCSR);
1482 if (reg & PCIE_LCSR_ASPM_L1) {
1483 /* Um the Linux driver prints "Disabling L0S for this one ... */
1484 IWM_SETBITS(sc, IWM_CSR_GIO_REG,
1485 IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
1486 } else {
1487 /* ... and "Enabling" here */
1488 IWM_CLRBITS(sc, IWM_CSR_GIO_REG,
1489 IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
1490 }
1491 }
1492
/*
 * Start up NIC's basic functionality after it has been reset
 * e.g. after platform boot or shutdown.
 * NOTE: This does not load uCode nor start the embedded processor
 *
 * Returns 0 on success or ETIMEDOUT if the MAC clock never
 * stabilizes.
 */
static int
iwm_apm_init(struct iwm_softc *sc)
{
	int err = 0;

	/* Disable L0S exit timer (platform NMI workaround) */
	if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
		IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
		    IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/*
	 * Disable L0s without affecting L1;
	 * don't wait for ICH L0s (ICH bug W/A)
	 */
	IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
	    IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	IWM_SETBITS(sc, IWM_CSR_DBG_HPET_MEM_REG, IWM_CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 */
	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	iwm_apm_config(sc);

#if 0 /* not for 7k/8k */
	/* Configure analog phase-lock-loop before activating to D0A */
	if (trans->cfg->base_params->pll_cfg_val)
		IWM_SETBITS(trans, IWM_CSR_ANA_PLL_CFG,
		    trans->cfg->base_params->pll_cfg_val);
#endif

	/*
	 * Set "initialization complete" bit to move adapter from
	 * D0U* --> D0A* (powered-up active) state.
	 */
	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL, IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * Wait for clock stabilization; once stabilized, access to
	 * device-internal resources is supported, e.g. iwm_write_prph()
	 * and accesses to uCode SRAM.
	 */
	if (!iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
		aprint_error_dev(sc->sc_dev,
		    "timeout waiting for clock stabilization\n");
		err = ETIMEDOUT;
		goto out;
	}

	if (sc->host_interrupt_operation_mode) {
		/*
		 * This is a bit of an abuse - This is needed for 7260 / 3160
		 * only check host_interrupt_operation_mode even if this is
		 * not related to host_interrupt_operation_mode.
		 *
		 * Enable the oscillator to count wake up time for L1 exit. This
		 * consumes slightly more power (100uA) - but allows to be sure
		 * that we wake up from L1 on time.
		 *
		 * This looks weird: read twice the same register, discard the
		 * value, set a bit, and yet again, read that same register
		 * just to discard the value. But that's the way the hardware
		 * seems to like it.
		 */
		iwm_read_prph(sc, IWM_OSC_CLK);
		iwm_read_prph(sc, IWM_OSC_CLK);
		iwm_set_bits_prph(sc, IWM_OSC_CLK, IWM_OSC_CLK_FORCE_CONTROL);
		iwm_read_prph(sc, IWM_OSC_CLK);
		iwm_read_prph(sc, IWM_OSC_CLK);
	}

	/*
	 * Enable DMA clock and wait for it to stabilize.
	 *
	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
	 * do not disable clocks. This preserves any hardware bits already
	 * set by default in "CLK_CTRL_REG" after reset.
	 */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
		iwm_write_prph(sc, IWM_APMG_CLK_EN_REG,
		    IWM_APMG_CLK_VAL_DMA_CLK_RQT);
		DELAY(20);

		/* Disable L1-Active */
		iwm_set_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

		/* Clear the interrupt in APMG if the NIC is in RFKILL */
		iwm_write_prph(sc, IWM_APMG_RTC_INT_STT_REG,
		    IWM_APMG_RTC_INT_STT_RFKILL);
	}
 out:
	if (err)
		aprint_error_dev(sc->sc_dev, "apm init error %d\n", err);
	return err;
}
1601
/*
 * Halt the device's busmaster DMA activity and wait (up to 100us)
 * for the master-disabled acknowledgement.
 */
static void
iwm_apm_stop(struct iwm_softc *sc)
{
	/* stop device's busmaster DMA activity */
	IWM_SETBITS(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_STOP_MASTER);

	if (!iwm_poll_bit(sc, IWM_CSR_RESET,
	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED,
	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100))
		aprint_error_dev(sc->sc_dev, "timeout waiting for master\n");
	DPRINTF(("iwm apm stop\n"));
}
1614
1615 static int
1616 iwm_start_hw(struct iwm_softc *sc)
1617 {
1618 int err;
1619
1620 err = iwm_prepare_card_hw(sc);
1621 if (err)
1622 return err;
1623
1624 /* Reset the entire device */
1625 IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
1626 DELAY(10);
1627
1628 err = iwm_apm_init(sc);
1629 if (err)
1630 return err;
1631
1632 iwm_enable_rfkill_int(sc);
1633 iwm_check_rfkill(sc);
1634
1635 return 0;
1636 }
1637
/*
 * Stop the adapter: mask interrupts, halt the TX scheduler and DMA
 * channels, drain all rings, power down the busmaster DMA clocks and
 * leave the device in reset.  The RF-kill interrupt stays armed so
 * radio-switch changes are still delivered.
 */
static void
iwm_stop_device(struct iwm_softc *sc)
{
	int chnl, ntries;
	int qid;

	iwm_disable_interrupts(sc);
	sc->sc_flags &= ~IWM_FLAG_USE_ICT;

	/* Deactivate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Stop all DMA channels. */
	if (iwm_nic_lock(sc)) {
		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
			IWM_WRITE(sc,
			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
			/* Wait (up to 4ms) for the channel to go idle. */
			for (ntries = 0; ntries < 200; ntries++) {
				uint32_t r;

				r = IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG);
				if (r & IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(
				    chnl))
					break;
				DELAY(20);
			}
		}
		iwm_nic_unlock(sc);
	}
	iwm_disable_rx_dma(sc);

	iwm_reset_rx_ring(sc, &sc->rxq);

	for (qid = 0; qid < __arraycount(sc->txq); qid++)
		iwm_reset_tx_ring(sc, &sc->txq[qid]);

	/*
	 * Power-down device's busmaster DMA clocks
	 */
	iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG, IWM_APMG_CLK_VAL_DMA_CLK_RQT);
	DELAY(5);

	/* Make sure (redundant) we've released our request to stay awake */
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwm_apm_stop(sc);

	/*
	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * Clean again the interrupt here
	 */
	iwm_disable_interrupts(sc);

	/* Reset the on-board processor. */
	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);

	/* Even though we stop the HW we still want the RF kill interrupt. */
	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);
}
1700
/*
 * Program the hardware-interface configuration register with the MAC
 * step/dash taken from the HW revision and the radio type/step/dash
 * decoded from the firmware PHY configuration word.
 */
static void
iwm_nic_config(struct iwm_softc *sc)
{
	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
	uint32_t reg_val = 0;

	/* Decode the radio configuration from the firmware PHY_SKU TLV. */
	radio_cfg_type = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
	radio_cfg_step = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
	radio_cfg_dash = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
	    IWM_FW_PHY_CFG_RADIO_DASH_POS;

	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

	/* radio configuration */
	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);

	DPRINTF(("Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
	    radio_cfg_step, radio_cfg_dash));

	/*
	 * W/A : NIC is stuck in a reset state after Early PCIe power off
	 * (PCIe power is lost before PERST# is asserted), causing ME FW
	 * to lose ownership and not being able to obtain it back.
	 */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
		iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
		    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
		    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
}
1739
/*
 * Initialize the RX DMA engine: clear the status area, point the
 * hardware at the RX descriptor ring and status area, and enable the
 * RX channel.  Returns 0 on success, EBUSY if the NIC lock cannot be
 * taken.
 */
static int
iwm_nic_rx_init(struct iwm_softc *sc)
{
	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Clear the RX status area and flush it for the hardware. */
	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
	bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
	    0, sc->rxq.stat_dma.size,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Stop RX DMA and reset the channel's pointers. */
	iwm_disable_rx_dma(sc);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Set physical address of RX ring (256-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);

	/* Set physical address of RX status (16-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);

	/* Enable RX. */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY | /* HW bug */
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
	    (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);

	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (sc->host_interrupt_operation_mode)
		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);

	/*
	 * This value should initially be 0 (before preparing any RBs),
	 * and should be 8 after preparing the first 8 RBs (for example).
	 */
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);

	iwm_nic_unlock(sc);

	return 0;
}
1791
/*
 * Initialize the TX side: point the hardware at the "keep warm" page
 * and every TX ring's descriptor array, with the scheduler kept
 * inactive.  Returns 0 on success, EBUSY if the NIC lock cannot be
 * taken.
 */
static int
iwm_nic_tx_init(struct iwm_softc *sc)
{
	int qid;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Deactivate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Set physical address of "keep warm" page (16-byte aligned). */
	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);

	for (qid = 0; qid < __arraycount(sc->txq); qid++) {
		struct iwm_tx_ring *txq = &sc->txq[qid];

		/* Set physical address of TX ring (256-byte aligned). */
		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
		    txq->desc_dma.paddr >> 8);
		DPRINTF(("loading ring %d descriptors (%p) at %"PRIxMAX"\n",
		    qid, txq->desc, (uintmax_t)(txq->desc_dma.paddr >> 8)));
	}

	iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);

	iwm_nic_unlock(sc);

	return 0;
}
1822
1823 static int
1824 iwm_nic_init(struct iwm_softc *sc)
1825 {
1826 int err;
1827
1828 iwm_apm_init(sc);
1829 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
1830 iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
1831 IWM_APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
1832 ~IWM_APMG_PS_CTRL_MSK_PWR_SRC);
1833
1834 iwm_nic_config(sc);
1835
1836 err = iwm_nic_rx_init(sc);
1837 if (err)
1838 return err;
1839
1840 err = iwm_nic_tx_init(sc);
1841 if (err)
1842 return err;
1843
1844 DPRINTF(("shadow registers enabled\n"));
1845 IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
1846
1847 return 0;
1848 }
1849
/*
 * Map an EDCA access-category index to the corresponding hardware
 * TX FIFO number.
 */
static const uint8_t iwm_ac_to_tx_fifo[] = {
	IWM_TX_FIFO_VO,
	IWM_TX_FIFO_VI,
	IWM_TX_FIFO_BE,
	IWM_TX_FIFO_BK,
};
1856
/*
 * Activate TX queue 'qid' attached to FIFO 'fifo'.  The command
 * queue is configured directly via scheduler registers and SRAM;
 * every other queue is configured by sending an IWM_SCD_QUEUE_CFG
 * command to the firmware.  Returns 0 on success or an errno.
 */
static int
iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
{
	if (!iwm_nic_lock(sc)) {
		DPRINTF(("%s: cannot enable txq %d\n", DEVNAME(sc), qid));
		return EBUSY;
	}

	/* Reset the queue's write pointer. */
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);

	if (qid == IWM_CMD_QUEUE) {
		/* Deactivate the queue before reconfiguring it. */
		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
		    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
		    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));

		iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));

		iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);

		iwm_write_mem32(sc,
		    sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);

		/* Set scheduler window size and frame limit. */
		iwm_write_mem32(sc,
		    sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
		    sizeof(uint32_t),
		    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
		    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
		    ((IWM_FRAME_LIMIT
		    << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
		    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

		/* Mark the queue active on its FIFO. */
		iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
		    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
		    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
		    IWM_SCD_QUEUE_STTS_REG_MSK);
	} else {
		struct iwm_scd_txq_cfg_cmd cmd;
		int err;

		/* The firmware command path must not hold the NIC lock. */
		iwm_nic_unlock(sc);

		memset(&cmd, 0, sizeof(cmd));
		cmd.scd_queue = qid;
		cmd.enable = 1;
		cmd.sta_id = sta_id;
		cmd.tx_fifo = fifo;
		cmd.aggregate = 0;
		cmd.window = IWM_FRAME_LIMIT;

		err = iwm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, 0, sizeof(cmd),
		    &cmd);
		if (err)
			return err;

		if (!iwm_nic_lock(sc))
			return EBUSY;
	}

	/*
	 * NOTE(review): this ORs the raw qid value into IWM_SCD_EN_CTRL.
	 * If that register is a per-queue bitmask, (1 << qid) would be
	 * intended instead -- confirm against the iwlwifi reference
	 * before changing.
	 */
	iwm_write_prph(sc, IWM_SCD_EN_CTRL,
	    iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);

	iwm_nic_unlock(sc);

	DPRINTF(("enabled txq %d FIFO %d\n", qid, fifo));

	return 0;
}
1926
/*
 * Setup performed after the firmware's "alive" notification: verify
 * the scheduler SRAM base address, reset the ICT table, clear the
 * scheduler context in SRAM, enable the command queue and the TX DMA
 * channels.  Returns 0 on success or an errno.
 */
static int
iwm_post_alive(struct iwm_softc *sc)
{
	int nwords;
	int err, chnl;
	uint32_t base;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* The firmware's idea of the scheduler base must match ours. */
	base = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
	if (sc->sched_base != base) {
		DPRINTF(("%s: sched addr mismatch: 0x%08x != 0x%08x\n",
		    DEVNAME(sc), sc->sched_base, base));
		err = EINVAL;
		goto out;
	}

	iwm_ict_reset(sc);

	/* Clear TX scheduler state in SRAM. */
	nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND)
	    / sizeof(uint32_t);
	err = iwm_write_mem(sc,
	    sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
	    NULL, nwords);
	if (err)
		goto out;

	/* Set physical address of TX scheduler rings (1KB aligned). */
	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);

	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);

	/* iwm_enable_txq() takes/drops the NIC lock itself. */
	iwm_nic_unlock(sc);

	/* enable command channel */
	err = iwm_enable_txq(sc, 0 /* unused */, IWM_CMD_QUEUE, 7);
	if (err)
		return err;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Activate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);

	/* Enable DMA channels. */
	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
	}

	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Enable L1-Active */
	if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
		iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
		    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

 out:
	iwm_nic_unlock(sc);
	return err;
}
1994
1995 static struct iwm_phy_db_entry *
1996 iwm_phy_db_get_section(struct iwm_softc *sc, enum iwm_phy_db_section_type type,
1997 uint16_t chg_id)
1998 {
1999 struct iwm_phy_db *phy_db = &sc->sc_phy_db;
2000
2001 if (type >= IWM_PHY_DB_MAX)
2002 return NULL;
2003
2004 switch (type) {
2005 case IWM_PHY_DB_CFG:
2006 return &phy_db->cfg;
2007 case IWM_PHY_DB_CALIB_NCH:
2008 return &phy_db->calib_nch;
2009 case IWM_PHY_DB_CALIB_CHG_PAPD:
2010 if (chg_id >= IWM_NUM_PAPD_CH_GROUPS)
2011 return NULL;
2012 return &phy_db->calib_ch_group_papd[chg_id];
2013 case IWM_PHY_DB_CALIB_CHG_TXP:
2014 if (chg_id >= IWM_NUM_TXP_CH_GROUPS)
2015 return NULL;
2016 return &phy_db->calib_ch_group_txp[chg_id];
2017 default:
2018 return NULL;
2019 }
2020 return NULL;
2021 }
2022
/*
 * Store a calibration result notification from the firmware into the
 * in-memory PHY database so it can later be replayed to the runtime
 * firmware image.  Uses interrupt-safe allocators (kmem_intr_* with
 * KM_NOSLEEP).  Returns 0, EINVAL for an unknown section, or ENOMEM.
 */
static int
iwm_phy_db_set_section(struct iwm_softc *sc,
    struct iwm_calib_res_notif_phy_db *phy_db_notif, uint16_t size)
{
	struct iwm_phy_db_entry *entry;
	enum iwm_phy_db_section_type type = le16toh(phy_db_notif->type);
	uint16_t chg_id = 0;

	/* Channel-group sections carry their group id in the first word. */
	if (type == IWM_PHY_DB_CALIB_CHG_PAPD ||
	    type == IWM_PHY_DB_CALIB_CHG_TXP)
		chg_id = le16toh(*(uint16_t *)phy_db_notif->data);

	entry = iwm_phy_db_get_section(sc, type, chg_id);
	if (!entry)
		return EINVAL;

	/* Replace any previously stored copy of this section. */
	if (entry->data)
		kmem_intr_free(entry->data, entry->size);
	entry->data = kmem_intr_alloc(size, KM_NOSLEEP);
	if (!entry->data) {
		/* Keep size consistent with the (now absent) data. */
		entry->size = 0;
		return ENOMEM;
	}
	memcpy(entry->data, phy_db_notif->data, size);
	entry->size = size;

	DPRINTFN(10, ("%s(%d): [PHYDB]SET: Type %d, Size: %d, data: %p\n",
	    __func__, __LINE__, type, size, entry->data));

	return 0;
}
2054
/*
 * Return 1 if 'ch_id' is a channel number the PHY database knows about,
 * 0 otherwise.  Covers 2 GHz channels up to 14 and the 5 GHz channel
 * ranges used by the calibration tables.
 */
static int
iwm_is_valid_channel(uint16_t ch_id)
{
	/* 2 GHz band (the <= test also tolerates 0). */
	if (ch_id <= 14)
		return 1;
	/* 5 GHz, 36-64: multiples of 4 only. */
	if (36 <= ch_id && ch_id <= 64)
		return ch_id % 4 == 0;
	/* 5 GHz, 100-140: multiples of 4 only. */
	if (100 <= ch_id && ch_id <= 140)
		return ch_id % 4 == 0;
	/* 5 GHz, 145-165: numbers congruent to 1 mod 4 (149, 153, ...). */
	if (145 <= ch_id && ch_id <= 165)
		return ch_id % 4 == 1;
	return 0;
}
2065
/*
 * Map a channel number to its index in the flattened calibration
 * channel table; returns 0xff for invalid channels.
 */
static uint8_t
iwm_ch_id_to_ch_index(uint16_t ch_id)
{
	uint16_t index;

	if (!iwm_is_valid_channel(ch_id))
		return 0xff;

	if (ch_id <= 14)
		index = ch_id - 1;		/* 2 GHz: 1..14 -> 0..13 */
	else if (ch_id <= 64)
		index = (ch_id + 20) / 4;	/* 36..64 -> 14..21 */
	else if (ch_id <= 140)
		index = (ch_id - 12) / 4;	/* 100..140 -> 22..32 */
	else
		index = (ch_id - 13) / 4;	/* 149..165 -> 34..38 */

	return index;
}
2080
2081
/*
 * Map a channel number to its PAPD calibration channel group (0-3);
 * returns 0xff for invalid channels.
 */
static uint16_t
iwm_channel_id_to_papd(uint16_t ch_id)
{
	uint16_t group;

	if (!iwm_is_valid_channel(ch_id))
		return 0xff;

	/* Note: keeps the lower bound of 1, so ch_id == 0 falls through. */
	if (1 <= ch_id && ch_id <= 14)
		group = 0;
	else if (36 <= ch_id && ch_id <= 64)
		group = 1;
	else if (100 <= ch_id && ch_id <= 140)
		group = 2;
	else
		group = 3;

	return group;
}
2096
/*
 * Map a channel number to the TX-power calibration channel group that
 * covers it, by scanning the stored TXP groups for the first one whose
 * max_channel_idx reaches the channel's index.  Returns the group
 * number, or 0xff if the channel is invalid or calibration data for a
 * group has not been stored yet.
 */
static uint16_t
iwm_channel_id_to_txp(struct iwm_softc *sc, uint16_t ch_id)
{
	struct iwm_phy_db *phy_db = &sc->sc_phy_db;
	struct iwm_phy_db_chg_txp *txp_chg;
	int i;
	uint8_t ch_index = iwm_ch_id_to_ch_index(ch_id);

	if (ch_index == 0xff)
		return 0xff;

	for (i = 0; i < IWM_NUM_TXP_CH_GROUPS; i++) {
		/* Missing group data means no calibration arrived yet. */
		txp_chg = (void *)phy_db->calib_ch_group_txp[i].data;
		if (!txp_chg)
			return 0xff;
		/*
		 * Looking for the first channel group the max channel
		 * of which is higher than the requested channel.
		 */
		if (le16toh(txp_chg->max_channel_idx) >= ch_index)
			return i;
	}
	return 0xff;
}
2121
/*
 * Look up a PHY database section and return pointers to its stored
 * data and size.  For channel-group sections, 'ch_id' is first mapped
 * to the corresponding PAPD or TXP group number.  Returns 0, or EINVAL
 * if the section does not exist.
 */
static int
iwm_phy_db_get_section_data(struct iwm_softc *sc, uint32_t type, uint8_t **data,
    uint16_t *size, uint16_t ch_id)
{
	struct iwm_phy_db_entry *entry;
	uint16_t ch_group_id = 0;

	/* Translate the channel number to a calibration group if needed. */
	if (type == IWM_PHY_DB_CALIB_CHG_PAPD)
		ch_group_id = iwm_channel_id_to_papd(ch_id);
	else if (type == IWM_PHY_DB_CALIB_CHG_TXP)
		ch_group_id = iwm_channel_id_to_txp(sc, ch_id);

	entry = iwm_phy_db_get_section(sc, type, ch_group_id);
	if (!entry)
		return EINVAL;

	*data = entry->data;
	*size = entry->size;

	DPRINTFN(10, ("%s(%d): [PHYDB] GET: Type %d , Size: %d\n",
	    __func__, __LINE__, type, *size));

	return 0;
}
2146
2147 static int
2148 iwm_send_phy_db_cmd(struct iwm_softc *sc, uint16_t type, uint16_t length,
2149 void *data)
2150 {
2151 struct iwm_phy_db_cmd phy_db_cmd;
2152 struct iwm_host_cmd cmd = {
2153 .id = IWM_PHY_DB_CMD,
2154 .flags = IWM_CMD_ASYNC,
2155 };
2156
2157 DPRINTFN(10, ("Sending PHY-DB hcmd of type %d, of length %d\n",
2158 type, length));
2159
2160 phy_db_cmd.type = le16toh(type);
2161 phy_db_cmd.length = le16toh(length);
2162
2163 cmd.data[0] = &phy_db_cmd;
2164 cmd.len[0] = sizeof(struct iwm_phy_db_cmd);
2165 cmd.data[1] = data;
2166 cmd.len[1] = length;
2167
2168 return iwm_send_cmd(sc, &cmd);
2169 }
2170
/*
 * Send all stored channel-group calibration sections of the given type
 * (PAPD or TXP) to the running firmware.  Groups with no stored data
 * are skipped; the first send error aborts and is returned.
 */
static int
iwm_phy_db_send_all_channel_groups(struct iwm_softc *sc,
    enum iwm_phy_db_section_type type, uint8_t max_ch_groups)
{
	uint16_t i;
	int err;
	struct iwm_phy_db_entry *entry;

	/* Send all the channel-specific groups to operational fw */
	for (i = 0; i < max_ch_groups; i++) {
		entry = iwm_phy_db_get_section(sc, type, i);
		if (!entry)
			return EINVAL;

		/* Nothing stored for this group — skip it. */
		if (!entry->size)
			continue;

		err = iwm_send_phy_db_cmd(sc, type, entry->size, entry->data);
		if (err) {
			DPRINTF(("%s: Can't SEND phy_db section %d (%d), "
			    "err %d\n", DEVNAME(sc), type, i, err));
			return err;
		}

		DPRINTFN(10, ("%s: Sent PHY_DB HCMD, type = %d num = %d\n",
		    DEVNAME(sc), type, i));

		/*
		 * The commands are sent async (IWM_CMD_ASYNC); presumably
		 * this pause keeps the firmware from being flooded.
		 */
		DELAY(1000);
	}

	return 0;
}
2203
2204 static int
2205 iwm_send_phy_db_data(struct iwm_softc *sc)
2206 {
2207 uint8_t *data = NULL;
2208 uint16_t size = 0;
2209 int err;
2210
2211 err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CFG, &data, &size, 0);
2212 if (err)
2213 return err;
2214
2215 err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CFG, size, data);
2216 if (err)
2217 return err;
2218
2219 err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CALIB_NCH,
2220 &data, &size, 0);
2221 if (err)
2222 return err;
2223
2224 err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CALIB_NCH, size, data);
2225 if (err)
2226 return err;
2227
2228 err = iwm_phy_db_send_all_channel_groups(sc,
2229 IWM_PHY_DB_CALIB_CHG_PAPD, IWM_NUM_PAPD_CH_GROUPS);
2230 if (err)
2231 return err;
2232
2233 err = iwm_phy_db_send_all_channel_groups(sc,
2234 IWM_PHY_DB_CALIB_CHG_TXP, IWM_NUM_TXP_CH_GROUPS);
2235 if (err)
2236 return err;
2237
2238 return 0;
2239 }
2240
2241 /*
2242 * For the high priority TE use a time event type that has similar priority to
2243 * the FW's action scan priority.
2244 */
2245 #define IWM_ROC_TE_TYPE_NORMAL IWM_TE_P2P_DEVICE_DISCOVERABLE
2246 #define IWM_ROC_TE_TYPE_MGMT_TX IWM_TE_P2P_CLIENT_ASSOC
2247
2248 /* used to convert from time event API v2 to v1 */
2249 #define IWM_TE_V2_DEP_POLICY_MSK (IWM_TE_V2_DEP_OTHER | IWM_TE_V2_DEP_TSF |\
2250 IWM_TE_V2_EVENT_SOCIOPATHIC)
/* Extract the notification bits from a little-endian v2 policy field. */
static inline uint16_t
iwm_te_v2_get_notify(uint16_t policy)
{
	return le16toh(policy) & IWM_TE_V2_NOTIF_MSK;
}
2256
/*
 * Extract the dependency-policy bits from a little-endian v2 policy
 * field.  NOTE(review): the shift uses IWM_TE_V2_PLACEMENT_POS; this
 * matches the code as imported, but verify it is the intended position
 * constant for the dependency-policy mask.
 */
static inline uint16_t
iwm_te_v2_get_dep_policy(uint16_t policy)
{
	return (le16toh(policy) & IWM_TE_V2_DEP_POLICY_MSK) >>
	    IWM_TE_V2_PLACEMENT_POS;
}
2263
/* Extract the absence bit (0 or 1) from a little-endian v2 policy field. */
static inline uint16_t
iwm_te_v2_get_absence(uint16_t policy)
{
	return (le16toh(policy) & IWM_TE_V2_ABSENCE) >> IWM_TE_V2_ABSENCE_POS;
}
2269
/*
 * Convert a time-event command from the v2 layout to the v1 layout, for
 * firmware without IWM_UCODE_TLV_FLAGS_TIME_EVENT_API_V2.
 */
static void
iwm_te_v2_to_v1(const struct iwm_time_event_cmd_v2 *cmd_v2,
    struct iwm_time_event_cmd_v1 *cmd_v1)
{
	cmd_v1->id_and_color = cmd_v2->id_and_color;
	cmd_v1->action = cmd_v2->action;
	cmd_v1->id = cmd_v2->id;
	cmd_v1->apply_time = cmd_v2->apply_time;
	cmd_v1->max_delay = cmd_v2->max_delay;
	cmd_v1->depends_on = cmd_v2->depends_on;
	cmd_v1->interval = cmd_v2->interval;
	cmd_v1->duration = cmd_v2->duration;
	/* v1 and v2 encode "repeat forever" differently. */
	if (cmd_v2->repeat == IWM_TE_V2_REPEAT_ENDLESS)
		cmd_v1->repeat = htole32(IWM_TE_V1_REPEAT_ENDLESS);
	else
		cmd_v1->repeat = htole32(cmd_v2->repeat);
	cmd_v1->max_frags = htole32(cmd_v2->max_frags);
	cmd_v1->interval_reciprocal = 0; /* unused */

	/* v2 packs these three into the single policy bit field. */
	cmd_v1->dep_policy = htole32(iwm_te_v2_get_dep_policy(cmd_v2->policy));
	cmd_v1->is_present = htole32(!iwm_te_v2_get_absence(cmd_v2->policy));
	cmd_v1->notify = htole32(iwm_te_v2_get_notify(cmd_v2->policy));
}
2293
2294 static int
2295 iwm_send_time_event_cmd(struct iwm_softc *sc,
2296 const struct iwm_time_event_cmd_v2 *cmd)
2297 {
2298 struct iwm_time_event_cmd_v1 cmd_v1;
2299
2300 if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_TIME_EVENT_API_V2)
2301 return iwm_send_cmd_pdu(sc, IWM_TIME_EVENT_CMD, 0, sizeof(*cmd),
2302 cmd);
2303
2304 iwm_te_v2_to_v1(cmd, &cmd_v1);
2305 return iwm_send_cmd_pdu(sc, IWM_TIME_EVENT_CMD, 0, sizeof(cmd_v1),
2306 &cmd_v1);
2307 }
2308
/*
 * Ask the firmware to reserve time on the BSS channel (an
 * IWM_TE_BSS_STA_AGGRESSIVE_ASSOC time event) so that an association
 * exchange can complete without the device going off-channel.
 * The units of 'duration' and 'max_delay' are whatever the firmware
 * expects for time events — TODO confirm (presumably TU).
 */
static void
iwm_protect_session(struct iwm_softc *sc, struct iwm_node *in,
    uint32_t duration, uint32_t max_delay)
{
	struct iwm_time_event_cmd_v2 time_cmd;

	memset(&time_cmd, 0, sizeof(time_cmd));

	time_cmd.action = htole32(IWM_FW_CTXT_ACTION_ADD);
	time_cmd.id_and_color =
	    htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
	time_cmd.id = htole32(IWM_TE_BSS_STA_AGGRESSIVE_ASSOC);

	/* Start as soon as possible (no absolute apply time). */
	time_cmd.apply_time = htole32(0);

	time_cmd.max_frags = IWM_TE_V2_FRAG_NONE;
	time_cmd.max_delay = htole32(max_delay);
	/* TODO: why do we need to interval = bi if it is not periodic? */
	time_cmd.interval = htole32(1);
	time_cmd.duration = htole32(duration);
	time_cmd.repeat = 1;
	time_cmd.policy
	    = htole16(IWM_TE_V2_NOTIF_HOST_EVENT_START |
	    IWM_TE_V2_NOTIF_HOST_EVENT_END |
	    IWM_T2_V2_START_IMMEDIATELY);

	/* Fire and forget; the command's return value is ignored. */
	iwm_send_time_event_cmd(sc, &time_cmd);
}
2337
2338 /*
2339 * NVM read access and content parsing. We do not support
2340 * external NVM or writing NVM.
2341 */
2342
/*
 * List of NVM sections we are allowed/need to read.  Sections that a
 * given device family does not provide simply fail to read and are
 * skipped by iwm_nvm_init().
 */
static const int iwm_nvm_to_read[] = {
	IWM_NVM_SECTION_TYPE_HW,
	IWM_NVM_SECTION_TYPE_SW,
	IWM_NVM_SECTION_TYPE_REGULATORY,
	IWM_NVM_SECTION_TYPE_CALIBRATION,
	IWM_NVM_SECTION_TYPE_PRODUCTION,
	IWM_NVM_SECTION_TYPE_HW_8000,
	IWM_NVM_SECTION_TYPE_MAC_OVERRIDE,
	IWM_NVM_SECTION_TYPE_PHY_SKU,
};
2354
2355 /* Default NVM size to read */
2356 #define IWM_NVM_DEFAULT_CHUNK_SIZE (2*1024)
2357 #define IWM_MAX_NVM_SECTION_SIZE 8192
2358
2359 #define IWM_NVM_WRITE_OPCODE 1
2360 #define IWM_NVM_READ_OPCODE 0
2361
2362 static int
2363 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section, uint16_t offset,
2364 uint16_t length, uint8_t *data, uint16_t *len)
2365 {
2366 offset = 0;
2367 struct iwm_nvm_access_cmd nvm_access_cmd = {
2368 .offset = htole16(offset),
2369 .length = htole16(length),
2370 .type = htole16(section),
2371 .op_code = IWM_NVM_READ_OPCODE,
2372 };
2373 struct iwm_nvm_access_resp *nvm_resp;
2374 struct iwm_rx_packet *pkt;
2375 struct iwm_host_cmd cmd = {
2376 .id = IWM_NVM_ACCESS_CMD,
2377 .flags = (IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL),
2378 .data = { &nvm_access_cmd, },
2379 };
2380 int err, offset_read;
2381 size_t bytes_read;
2382 uint8_t *resp_data;
2383
2384 cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
2385
2386 err = iwm_send_cmd(sc, &cmd);
2387 if (err) {
2388 DPRINTF(("%s: Could not send NVM_ACCESS command (error=%d)\n",
2389 DEVNAME(sc), err));
2390 return err;
2391 }
2392
2393 pkt = cmd.resp_pkt;
2394 if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
2395 err = EIO;
2396 goto exit;
2397 }
2398
2399 /* Extract NVM response */
2400 nvm_resp = (void *)pkt->data;
2401
2402 err = le16toh(nvm_resp->status);
2403 bytes_read = le16toh(nvm_resp->length);
2404 offset_read = le16toh(nvm_resp->offset);
2405 resp_data = nvm_resp->data;
2406 if (err) {
2407 err = EINVAL;
2408 goto exit;
2409 }
2410
2411 if (offset_read != offset) {
2412 err = EINVAL;
2413 goto exit;
2414 }
2415 if (bytes_read > length) {
2416 err = EINVAL;
2417 goto exit;
2418 }
2419
2420 memcpy(data + offset, resp_data, bytes_read);
2421 *len = bytes_read;
2422
2423 exit:
2424 iwm_free_resp(sc, &cmd);
2425 return err;
2426 }
2427
2428 /*
2429 * Reads an NVM section completely.
2430 * NICs prior to 7000 family doesn't have a real NVM, but just read
2431 * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
2432 * by uCode, we need to manually check in this case that we don't
2433 * overflow and try to read more than the EEPROM size.
2434 */
2435 static int
2436 iwm_nvm_read_section(struct iwm_softc *sc, uint16_t section, uint8_t *data,
2437 uint16_t *len, size_t max_len)
2438 {
2439 uint16_t chunklen, seglen;
2440 int err;
2441
2442 chunklen = seglen = IWM_NVM_DEFAULT_CHUNK_SIZE;
2443 *len = 0;
2444
2445 /* Read NVM chunks until exhausted (reading less than requested) */
2446 while (seglen == chunklen && *len < max_len) {
2447 err = iwm_nvm_read_chunk(sc, section, *len, chunklen, data,
2448 &seglen);
2449 if (err) {
2450 DPRINTF(("%s:Cannot read NVM from section %d "
2451 "offset %d, length %d\n",
2452 DEVNAME(sc), section, *len, chunklen));
2453 return err;
2454 }
2455 *len += seglen;
2456 }
2457
2458 DPRINTFN(4, ("NVM section %d read completed\n", section));
2459 return 0;
2460 }
2461
2462 static uint8_t
2463 iwm_fw_valid_tx_ant(struct iwm_softc *sc)
2464 {
2465 uint8_t tx_ant;
2466
2467 tx_ant = ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_TX_CHAIN)
2468 >> IWM_FW_PHY_CFG_TX_CHAIN_POS);
2469
2470 if (sc->sc_nvm.valid_tx_ant)
2471 tx_ant &= sc->sc_nvm.valid_tx_ant;
2472
2473 return tx_ant;
2474 }
2475
2476 static uint8_t
2477 iwm_fw_valid_rx_ant(struct iwm_softc *sc)
2478 {
2479 uint8_t rx_ant;
2480
2481 rx_ant = ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RX_CHAIN)
2482 >> IWM_FW_PHY_CFG_RX_CHAIN_POS);
2483
2484 if (sc->sc_nvm.valid_rx_ant)
2485 rx_ant &= sc->sc_nvm.valid_rx_ant;
2486
2487 return rx_ant;
2488 }
2489
2490 static void
2491 iwm_init_channel_map(struct iwm_softc *sc, const uint16_t * const nvm_ch_flags,
2492 const uint8_t *nvm_channels, size_t nchan)
2493 {
2494 struct ieee80211com *ic = &sc->sc_ic;
2495 struct iwm_nvm_data *data = &sc->sc_nvm;
2496 int ch_idx;
2497 struct ieee80211_channel *channel;
2498 uint16_t ch_flags;
2499 int is_5ghz;
2500 int flags, hw_value;
2501
2502 for (ch_idx = 0; ch_idx < nchan; ch_idx++) {
2503 ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
2504
2505 if (ch_idx >= IWM_NUM_2GHZ_CHANNELS &&
2506 !data->sku_cap_band_52GHz_enable)
2507 ch_flags &= ~IWM_NVM_CHANNEL_VALID;
2508
2509 if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
2510 DPRINTF(("Ch. %d Flags %x [%sGHz] - No traffic\n",
2511 iwm_nvm_channels[ch_idx],
2512 ch_flags,
2513 (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
2514 "5.2" : "2.4"));
2515 continue;
2516 }
2517
2518 hw_value = nvm_channels[ch_idx];
2519 channel = &ic->ic_channels[hw_value];
2520
2521 is_5ghz = ch_idx >= IWM_NUM_2GHZ_CHANNELS;
2522 if (!is_5ghz) {
2523 flags = IEEE80211_CHAN_2GHZ;
2524 channel->ic_flags
2525 = IEEE80211_CHAN_CCK
2526 | IEEE80211_CHAN_OFDM
2527 | IEEE80211_CHAN_DYN
2528 | IEEE80211_CHAN_2GHZ;
2529 } else {
2530 flags = IEEE80211_CHAN_5GHZ;
2531 channel->ic_flags =
2532 IEEE80211_CHAN_A;
2533 }
2534 channel->ic_freq = ieee80211_ieee2mhz(hw_value, flags);
2535
2536 if (!(ch_flags & IWM_NVM_CHANNEL_ACTIVE))
2537 channel->ic_flags |= IEEE80211_CHAN_PASSIVE;
2538
2539 #ifndef IEEE80211_NO_HT
2540 if (data->sku_cap_11n_enable)
2541 channel->ic_flags |= IEEE80211_CHAN_HT;
2542 #endif
2543 }
2544 }
2545
2546 #ifndef IEEE80211_NO_HT
/*
 * Advertise the supported HT MCS set.  Only MCS 0-7 (one spatial
 * stream) for now; TX uses the same MCS set as RX.
 */
static void
iwm_setup_ht_rates(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;

	/* TX is supported with the same MCS as RX. */
	ic->ic_tx_mcs_set = IEEE80211_TX_MCS_SET_DEFINED;

	ic->ic_sup_mcs[0] = 0xff;		/* MCS 0-7 */

#ifdef notyet
	/* Additional streams require MIMO; disabled pending support. */
	if (sc->sc_nvm.sku_cap_mimo_disable)
		return;

	if (iwm_fw_valid_rx_ant(sc) > 1)
		ic->ic_sup_mcs[1] = 0xff;	/* MCS 8-15 */
	if (iwm_fw_valid_rx_ant(sc) > 2)
		ic->ic_sup_mcs[2] = 0xff;	/* MCS 16-23 */
#endif
}
2567
2568 #define IWM_MAX_RX_BA_SESSIONS 16
2569
/*
 * Start ('start' != 0) or stop an RX block-ack session for the given
 * TID by modifying the firmware station entry, then inform net80211 of
 * the outcome (accept/refuse) for the start case and maintain the
 * session count.
 */
static void
iwm_sta_rx_agg(struct iwm_softc *sc, struct ieee80211_node *ni, uint8_t tid,
    uint16_t ssn, int start)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_add_sta_cmd_v7 cmd;
	struct iwm_node *in = (struct iwm_node *)ni;
	int err, s;
	uint32_t status;

	/* Refuse new sessions beyond the supported limit. */
	if (start && sc->sc_rx_ba_sessions >= IWM_MAX_RX_BA_SESSIONS) {
		ieee80211_addba_req_refuse(ic, ni, tid);
		return;
	}

	memset(&cmd, 0, sizeof(cmd));

	cmd.sta_id = IWM_STATION_ID;
	cmd.mac_id_n_color
	    = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
	cmd.add_modify = IWM_STA_MODE_MODIFY;

	if (start) {
		cmd.add_immediate_ba_tid = (uint8_t)tid;
		/*
		 * NOTE(review): ssn is stored unconverted here while the
		 * caller (iwm_ba_task) passes sc->ba_ssn, which was saved
		 * with htole16() — confirm the intended byte order.
		 */
		cmd.add_immediate_ba_ssn = ssn;
	} else {
		cmd.remove_immediate_ba_tid = (uint8_t)tid;
	}
	cmd.modify_mask = start ? IWM_STA_MODIFY_ADD_BA_TID :
	    IWM_STA_MODIFY_REMOVE_BA_TID;

	status = IWM_ADD_STA_SUCCESS;
	err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(cmd), &cmd,
	    &status);

	/* Update the session count and report back under splnet. */
	s = splnet();
	if (err == 0 && status == IWM_ADD_STA_SUCCESS) {
		if (start) {
			sc->sc_rx_ba_sessions++;
			ieee80211_addba_req_accept(ic, ni, tid);
		} else if (sc->sc_rx_ba_sessions > 0)
			sc->sc_rx_ba_sessions--;
	} else if (start)
		ieee80211_addba_req_refuse(ic, ni, tid);

	splx(s);
}
2617
/*
 * Deferred task: push updated HT protection settings to the firmware
 * by re-sending the MAC context command for the current BSS.
 */
static void
iwm_htprot_task(void *arg)
{
	struct iwm_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
	int err;

	/* This call updates HT protection based on in->in_ni.ni_htop1. */
	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 1);
	if (err)
		aprint_error_dev(sc->sc_dev,
		    "could not change HT protection: error %d\n", err);
}
2632
/*
 * This function is called by upper layer when HT protection settings in
 * beacons have changed.  The firmware update is deferred to a task —
 * presumably because this may be called from a context where sending
 * firmware commands is not allowed.
 */
static void
iwm_update_htprot(struct ieee80211com *ic, struct ieee80211_node *ni)
{
	struct iwm_softc *sc = ic->ic_softc;

	/* assumes that ni == ic->ic_bss */
	task_add(systq, &sc->htprot_task);
}
2645
/*
 * Deferred task: apply the block-ack start/stop request recorded in
 * sc->ba_start/ba_tid/ba_ssn by iwm_ampdu_rx_start()/_stop().
 */
static void
iwm_ba_task(void *arg)
{
	struct iwm_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = ic->ic_bss;

	if (sc->ba_start)
		iwm_sta_rx_agg(sc, ni, sc->ba_tid, sc->ba_ssn, 1);
	else
		iwm_sta_rx_agg(sc, ni, sc->ba_tid, 0, 0);
}
2658
/*
 * This function is called by upper layer when an ADDBA request is received
 * from another STA and before the ADDBA response is sent.
 * NOTE(review): returning EBUSY appears to defer the ADDBA response
 * until iwm_ba_task() completes — confirm against net80211 semantics.
 */
static int
iwm_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
    uint8_t tid)
{
	struct ieee80211_rx_ba *ba = &ni->ni_rx_ba[tid];
	struct iwm_softc *sc = IC2IFP(ic)->if_softc;

	if (sc->sc_rx_ba_sessions >= IWM_MAX_RX_BA_SESSIONS)
		return ENOSPC;

	/* Record the request; the task does the firmware work. */
	sc->ba_start = 1;
	sc->ba_tid = tid;
	sc->ba_ssn = htole16(ba->ba_winstart);
	task_add(systq, &sc->ba_task);

	return EBUSY;
}
2680
/*
 * This function is called by upper layer on teardown of an HT-immediate
 * Block Ack agreement (eg. upon receipt of a DELBA frame).
 */
static void
iwm_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
    uint8_t tid)
{
	struct iwm_softc *sc = IC2IFP(ic)->if_softc;

	/* Record the stop request; iwm_ba_task() notifies the firmware. */
	sc->ba_start = 0;
	sc->ba_tid = tid;
	task_add(systq, &sc->ba_task);
}
2695 #endif
2696
/*
 * Determine the MAC address on 8000-family devices.  Preference order:
 * 1. the MAC_OVERRIDE NVM section, unless it holds the known reserved
 *    placeholder address or an obviously invalid one (all-zero,
 *    broadcast, or multicast);
 * 2. the OTP address read from the WFMP_MAC_ADDR PRPH registers;
 * otherwise the address is zeroed and an error is printed.
 */
static void
iwm_set_hw_address_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
    const uint16_t *mac_override, const uint16_t *nvm_hw)
{
	/* Reserved placeholder address some NVM images ship with. */
	static const uint8_t reserved_mac[ETHER_ADDR_LEN] = {
		0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
	};
	static const u_int8_t etheranyaddr[ETHER_ADDR_LEN] = {
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00
	};
	const uint8_t *hw_addr;

	if (mac_override) {
		hw_addr = (const uint8_t *)(mac_override +
		    IWM_MAC_ADDRESS_OVERRIDE_8000);

		/*
		 * Store the MAC address from MAO section.
		 * No byte swapping is required in MAO section
		 */
		memcpy(data->hw_addr, hw_addr, ETHER_ADDR_LEN);

		/*
		 * Force the use of the OTP MAC address in case of reserved MAC
		 * address in the NVM, or if address is given but invalid.
		 */
		if (memcmp(reserved_mac, hw_addr, ETHER_ADDR_LEN) != 0 &&
		    (memcmp(etherbroadcastaddr, data->hw_addr,
		    sizeof(etherbroadcastaddr)) != 0) &&
		    (memcmp(etheranyaddr, data->hw_addr,
		    sizeof(etheranyaddr)) != 0) &&
		    !ETHER_IS_MULTICAST(data->hw_addr))
			return;
	}

	if (nvm_hw) {
		/* Read the mac address from WFMP registers. */
		uint32_t mac_addr0 =
		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
		uint32_t mac_addr1 =
		    htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));

		/* Bytes are stored reversed within each 32-bit word. */
		hw_addr = (const uint8_t *)&mac_addr0;
		data->hw_addr[0] = hw_addr[3];
		data->hw_addr[1] = hw_addr[2];
		data->hw_addr[2] = hw_addr[1];
		data->hw_addr[3] = hw_addr[0];

		hw_addr = (const uint8_t *)&mac_addr1;
		data->hw_addr[4] = hw_addr[1];
		data->hw_addr[5] = hw_addr[0];

		return;
	}

	aprint_error_dev(sc->sc_dev, "mac address not found\n");
	memset(data->hw_addr, 0, sizeof(data->hw_addr));
}
2755
2756 static int
2757 iwm_parse_nvm_data(struct iwm_softc *sc, const uint16_t *nvm_hw,
2758 const uint16_t *nvm_sw, const uint16_t *nvm_calib,
2759 const uint16_t *mac_override, const uint16_t *phy_sku,
2760 const uint16_t *regulatory)
2761 {
2762 struct iwm_nvm_data *data = &sc->sc_nvm;
2763 uint8_t hw_addr[ETHER_ADDR_LEN];
2764 uint32_t sku;
2765
2766 data->nvm_version = le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
2767
2768 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
2769 uint16_t radio_cfg = le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
2770 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
2771 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
2772 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
2773 data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
2774
2775 sku = le16_to_cpup(nvm_sw + IWM_SKU);
2776 } else {
2777 uint32_t radio_cfg = le32_to_cpup(
2778 (const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
2779 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
2780 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
2781 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
2782 data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK_8000(radio_cfg);
2783 data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
2784 data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
2785
2786 sku = le32_to_cpup(
2787 (const uint32_t *)(phy_sku + IWM_SKU_8000));
2788 }
2789
2790 data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
2791 data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
2792 data->sku_cap_11n_enable = sku & IWM_NVM_SKU_CAP_11N_ENABLE;
2793 data->sku_cap_mimo_disable = sku & IWM_NVM_SKU_CAP_MIMO_DISABLE;
2794
2795 data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2796
2797 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
2798 memcpy(hw_addr, nvm_hw + IWM_HW_ADDR, ETHER_ADDR_LEN);
2799 data->hw_addr[0] = hw_addr[1];
2800 data->hw_addr[1] = hw_addr[0];
2801 data->hw_addr[2] = hw_addr[3];
2802 data->hw_addr[3] = hw_addr[2];
2803 data->hw_addr[4] = hw_addr[5];
2804 data->hw_addr[5] = hw_addr[4];
2805 } else
2806 iwm_set_hw_address_8000(sc, data, mac_override, nvm_hw);
2807
2808 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
2809 iwm_init_channel_map(sc, &nvm_sw[IWM_NVM_CHANNELS],
2810 iwm_nvm_channels, __arraycount(iwm_nvm_channels));
2811 else
2812 iwm_init_channel_map(sc, ®ulatory[IWM_NVM_CHANNELS_8000],
2813 iwm_nvm_channels_8000, __arraycount(iwm_nvm_channels_8000));
2814
2815 data->calib_version = 255; /* TODO:
2816 this value will prevent some checks from
2817 failing, we need to check if this
2818 field is still needed, and if it does,
2819 where is it in the NVM */
2820
2821 return 0;
2822 }
2823
/*
 * Validate that all NVM sections required for this device family were
 * read, then hand the raw section pointers to iwm_parse_nvm_data().
 * Returns ENOENT when a mandatory section is missing.
 */
static int
iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
{
	const uint16_t *hw, *sw, *calib, *mac_override = NULL, *phy_sku = NULL;
	const uint16_t *regulatory = NULL;

	/* Checking for required sections */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
		    !sections[IWM_NVM_SECTION_TYPE_HW].data) {
			return ENOENT;
		}

		hw = (const uint16_t *) sections[IWM_NVM_SECTION_TYPE_HW].data;
	} else if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
		/* SW and REGULATORY sections are mandatory */
		if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
		    !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
			return ENOENT;
		}
		/* MAC_OVERRIDE or at least HW section must exist */
		if (!sections[IWM_NVM_SECTION_TYPE_HW_8000].data &&
		    !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
			return ENOENT;
		}

		/* PHY_SKU section is mandatory in B0 */
		if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
			return ENOENT;
		}

		regulatory = (const uint16_t *)
		    sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
		hw = (const uint16_t *)
		    sections[IWM_NVM_SECTION_TYPE_HW_8000].data;
		mac_override =
		    (const uint16_t *)
		    sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
		phy_sku = (const uint16_t *)
		    sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
	} else {
		panic("unknown device family %d\n", sc->sc_device_family);
	}

	/* CALIBRATION is optional; its pointer may be NULL. */
	sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
	calib = (const uint16_t *)
	    sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;

	return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
	    phy_sku, regulatory);
}
2875
/*
 * Read every NVM section listed in iwm_nvm_to_read from the firmware
 * into per-section copies, parse them into sc->sc_nvm, then free the
 * copies.  Sections that fail to read are simply left absent.
 */
static int
iwm_nvm_init(struct iwm_softc *sc)
{
	struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
	int i, section, err;
	uint16_t len;
	uint8_t *buf;
	const size_t bufsz = IWM_MAX_NVM_SECTION_SIZE;

	/* Read From FW NVM */
	DPRINTF(("Read NVM\n"));

	memset(nvm_sections, 0, sizeof(nvm_sections));

	/* Scratch buffer, large enough for any single section. */
	buf = kmem_alloc(bufsz, KM_SLEEP);
	if (buf == NULL)
		return ENOMEM;

	for (i = 0; i < __arraycount(iwm_nvm_to_read); i++) {
		section = iwm_nvm_to_read[i];
		KASSERT(section <= IWM_NVM_NUM_OF_SECTIONS);

		/* A section that cannot be read is not fatal. */
		err = iwm_nvm_read_section(sc, section, buf, &len, bufsz);
		if (err) {
			err = 0;
			continue;
		}
		nvm_sections[section].data = kmem_alloc(len, KM_SLEEP);
		if (nvm_sections[section].data == NULL) {
			err = ENOMEM;
			break;
		}
		memcpy(nvm_sections[section].data, buf, len);
		nvm_sections[section].length = len;
	}
	kmem_free(buf, bufsz);
	if (err == 0)
		err = iwm_parse_nvm_sections(sc, nvm_sections);

	/* Release the per-section copies on both success and failure. */
	for (i = 0; i < IWM_NVM_NUM_OF_SECTIONS; i++) {
		if (nvm_sections[i].data != NULL)
			kmem_free(nvm_sections[i].data, nvm_sections[i].length);
	}

	return err;
}
2922
2923 static int
2924 iwm_firmware_load_sect(struct iwm_softc *sc, uint32_t dst_addr,
2925 const uint8_t *section, uint32_t byte_cnt)
2926 {
2927 int err = EINVAL;
2928 uint32_t chunk_sz, offset;
2929
2930 chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, byte_cnt);
2931
2932 for (offset = 0; offset < byte_cnt; offset += chunk_sz) {
2933 uint32_t addr, len;
2934 const uint8_t *data;
2935
2936 addr = dst_addr + offset;
2937 len = MIN(chunk_sz, byte_cnt - offset);
2938 data = section + offset;
2939
2940 err = iwm_firmware_load_chunk(sc, addr, data, len);
2941 if (err)
2942 break;
2943 }
2944
2945 return err;
2946 }
2947
/*
 * DMA one firmware chunk to device address 'dst_addr' over the
 * firmware-load service channel, then sleep until the chunk-done
 * interrupt fires or a 5 second timeout elapses.  Chunks destined for
 * the extended SRAM window additionally need the
 * IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE bit set for the duration of the
 * transfer.
 */
static int
iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
    const uint8_t *section, uint32_t byte_cnt)
{
	struct iwm_dma_info *dma = &sc->fw_dma;
	bool is_extended = false;
	int err;

	/* Copy firmware chunk into pre-allocated DMA-safe memory. */
	memcpy(dma->vaddr, section, byte_cnt);
	bus_dmamap_sync(sc->sc_dmat, dma->map, 0, byte_cnt,
	    BUS_DMASYNC_PREWRITE);

	if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
	    dst_addr <= IWM_FW_MEM_EXTENDED_END)
		is_extended = true;

	if (is_extended) {
		iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
		    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
	}

	sc->sc_fw_chunk_done = 0;

	if (!iwm_nic_lock(sc)) {
		/* Undo the chicken bit before bailing out. */
		if (is_extended)
			iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
			    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
		return EBUSY;
	}

	/* Pause the channel, program dest/source/len, then restart it. */
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
	    dst_addr);
	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
	    dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
	    (iwm_get_dma_hi_addr(dma->paddr)
	    << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

	iwm_nic_unlock(sc);

	/* Wait for this segment to load. */
	err = 0;
	while (!sc->sc_fw_chunk_done) {
		err = tsleep(&sc->sc_fw, 0, "iwmfw", mstohz(5000));
		if (err)
			break;
	}
	if (!sc->sc_fw_chunk_done) {
		aprint_error_dev(sc->sc_dev,
		    "fw chunk addr 0x%x len %d failed to load\n",
		    dst_addr, byte_cnt);
	}

	if (is_extended) {
		/*
		 * NOTE(review): the chicken bit is cleared regardless of
		 * lock success, and iwm_nic_unlock() is called only when
		 * the lock attempt FAILED (rv == 0) — verify this inverted
		 * check against iwm_nic_lock()'s return convention.
		 */
		int rv = iwm_nic_lock(sc);
		iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
		    IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
		if (rv == 0)
			iwm_nic_unlock(sc);
	}

	return err;
}
3022
/*
 * Load all sections of the requested ucode image on 7000-family
 * devices.  Sections larger than the preallocated firmware DMA buffer
 * are rejected with EFBIG.  NOTE(review): clearing IWM_CSR_RESET at the
 * end presumably starts the embedded CPU — confirm against the CSR
 * register documentation.
 */
static int
iwm_load_firmware_7000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
{
	struct iwm_fw_sects *fws;
	int err, i;
	void *data;
	uint32_t dlen;
	uint32_t offset;

	fws = &sc->sc_fw.fw_sects[ucode_type];
	for (i = 0; i < fws->fw_count; i++) {
		data = fws->fw_sect[i].fws_data;
		dlen = fws->fw_sect[i].fws_len;
		offset = fws->fw_sect[i].fws_devoff;
		/* Each section must fit in the firmware DMA segment. */
		if (dlen > sc->sc_fwdmasegsz) {
			err = EFBIG;
		} else
			err = iwm_firmware_load_sect(sc, offset, data, dlen);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could not load firmware chunk %u of %u\n",
			    i, fws->fw_count);
			return err;
		}
	}

	IWM_WRITE(sc, IWM_CSR_RESET, 0);

	return 0;
}
3053
/*
 * Load the firmware sections belonging to one CPU (1 or 2) of an
 * 8000-family device, stopping at the CPU1/CPU2 or paging separator
 * section.  After each section the ucode is told the loaded section
 * number via IWM_FH_UCODE_LOAD_STATUS.  *first_ucode_section is updated
 * so a subsequent call for CPU2 resumes where CPU1 stopped.
 */
static int
iwm_load_cpu_sections_8000(struct iwm_softc *sc, struct iwm_fw_sects *fws,
    int cpu, int *first_ucode_section)
{
	int shift_param;
	int i, err = 0, sec_num = 0x1;
	uint32_t val, last_read_idx = 0;
	void *data;
	uint32_t dlen;
	uint32_t offset;

	/* CPU2 status bits live in the upper half of the register. */
	if (cpu == 1) {
		shift_param = 0;
		*first_ucode_section = 0;
	} else {
		shift_param = 16;
		(*first_ucode_section)++;
	}

	for (i = *first_ucode_section; i < IWM_UCODE_SECT_MAX; i++) {
		last_read_idx = i;
		data = fws->fw_sect[i].fws_data;
		dlen = fws->fw_sect[i].fws_len;
		offset = fws->fw_sect[i].fws_devoff;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
		 * CPU1 to CPU2.
		 * PAGING_SEPARATOR_SECTION delimiter - separate between
		 * CPU2 non paged to CPU2 paging sec.
		 */
		if (!data || offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
		    offset == IWM_PAGING_SEPARATOR_SECTION)
			break;

		/* Each section must fit in the firmware DMA segment. */
		if (dlen > sc->sc_fwdmasegsz) {
			err = EFBIG;
		} else
			err = iwm_firmware_load_sect(sc, offset, data, dlen);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could not load firmware chunk %d (error %d)\n",
			    i, err);
			return err;
		}

		/* Notify the ucode of the loaded section number and status */
		if (iwm_nic_lock(sc)) {
			val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
			val = val | (sec_num << shift_param);
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
			sec_num = (sec_num << 1) | 0x1;
			iwm_nic_unlock(sc);

			/*
			 * The firmware won't load correctly without this delay.
			 */
			DELAY(8000);
		}
	}

	*first_ucode_section = last_read_idx;

	/* Mark this CPU's half (both halves after CPU2) as complete. */
	if (iwm_nic_lock(sc)) {
		if (cpu == 1)
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
		else
			IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
		iwm_nic_unlock(sc);
	}

	return 0;
}
3127
3128 static int
3129 iwm_load_firmware_8000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3130 {
3131 struct iwm_fw_sects *fws;
3132 int err = 0;
3133 int first_ucode_section;
3134
3135 fws = &sc->sc_fw.fw_sects[ucode_type];
3136
3137 /* configure the ucode to be ready to get the secured image */
3138 /* release CPU reset */
3139 iwm_write_prph(sc, IWM_RELEASE_CPU_RESET, IWM_RELEASE_CPU_RESET_BIT);
3140
3141 /* load to FW the binary Secured sections of CPU1 */
3142 err = iwm_load_cpu_sections_8000(sc, fws, 1, &first_ucode_section);
3143 if (err)
3144 return err;
3145
3146 /* load to FW the binary sections of CPU2 */
3147 return iwm_load_cpu_sections_8000(sc, fws, 2, &first_ucode_section);
3148 }
3149
3150 static int
3151 iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3152 {
3153 int err, w;
3154
3155 sc->sc_uc.uc_intr = 0;
3156
3157 if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
3158 err = iwm_load_firmware_8000(sc, ucode_type);
3159 else
3160 err = iwm_load_firmware_7000(sc, ucode_type);
3161
3162 if (err)
3163 return err;
3164
3165 /* wait for the firmware to load */
3166 for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++)
3167 err = tsleep(&sc->sc_uc, 0, "iwmuc", mstohz(100));
3168 if (err || !sc->sc_uc.uc_ok)
3169 aprint_error_dev(sc->sc_dev, "could not load firmware\n");
3170
3171 return err;
3172 }
3173
/*
 * Prepare the NIC and start downloading firmware: init the device,
 * clear the rfkill handshake bits, enable host interrupts and load
 * the requested ucode image.  Returns 0 or an errno.
 */
static int
iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
{
	int err;

	/* Ack all pending interrupts before reinitializing. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);

	err = iwm_nic_init(sc);
	if (err) {
		aprint_error_dev(sc->sc_dev, "Unable to init nic\n");
		return err;
	}

	/* make sure rfkill handshake bits are cleared */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);

	/* really make sure rfkill handshake bits are cleared */
	/* maybe we should write a few times more? just to make sure */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);

	return iwm_load_firmware(sc, ucode_type);
}
3203
3204 static int
3205 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
3206 {
3207 struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
3208 .valid = htole32(valid_tx_ant),
3209 };
3210
3211 return iwm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD, 0,
3212 sizeof(tx_ant_cmd), &tx_ant_cmd);
3213 }
3214
3215 static int
3216 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
3217 {
3218 struct iwm_phy_cfg_cmd phy_cfg_cmd;
3219 enum iwm_ucode_type ucode_type = sc->sc_uc_current;
3220
3221 phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
3222 phy_cfg_cmd.calib_control.event_trigger =
3223 sc->sc_default_calib[ucode_type].event_trigger;
3224 phy_cfg_cmd.calib_control.flow_trigger =
3225 sc->sc_default_calib[ucode_type].flow_trigger;
3226
3227 DPRINTFN(10, ("Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg));
3228 return iwm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, 0,
3229 sizeof(phy_cfg_cmd), &phy_cfg_cmd);
3230 }
3231
3232 static int
3233 iwm_load_ucode_wait_alive(struct iwm_softc *sc,
3234 enum iwm_ucode_type ucode_type)
3235 {
3236 enum iwm_ucode_type old_type = sc->sc_uc_current;
3237 int err;
3238
3239 err = iwm_read_firmware(sc);
3240 if (err)
3241 return err;
3242
3243 sc->sc_uc_current = ucode_type;
3244 err = iwm_start_fw(sc, ucode_type);
3245 if (err) {
3246 sc->sc_uc_current = old_type;
3247 return err;
3248 }
3249
3250 return iwm_post_alive(sc);
3251 }
3252
/*
 * Run the INIT ucode image.  With justnvm set, load only far enough
 * to read the NVM contents and the hardware MAC address, then return.
 * Otherwise configure BT coexistence, smart FIFO, TX antennas and PHY
 * calibration, and wait for the firmware's init-complete notification.
 */
static int
iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
{
	int err;

	/* Refuse to start the radio while the hardware kill switch is on. */
	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
		aprint_error_dev(sc->sc_dev,
		    "radio is disabled by hardware switch\n");
		return EPERM;
	}

	sc->sc_init_complete = 0;
	err = iwm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_INIT);
	if (err) {
		aprint_error_dev(sc->sc_dev, "failed to load init firmware\n");
		return err;
	}

	if (justnvm) {
		/* NVM read and MAC address only; no calibration. */
		err = iwm_nvm_init(sc);
		if (err) {
			aprint_error_dev(sc->sc_dev, "failed to read nvm\n");
			return err;
		}

		memcpy(&sc->sc_ic.ic_myaddr, &sc->sc_nvm.hw_addr,
		    ETHER_ADDR_LEN);
		return 0;
	}

	err = iwm_send_bt_init_conf(sc);
	if (err)
		return err;

	/* Smart FIFO stays off during init. */
	err = iwm_sf_config(sc, IWM_SF_INIT_OFF);
	if (err)
		return err;

	err = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc));
	if (err)
		return err;

	/*
	 * Send phy configurations command to init uCode
	 * to start the 16.0 uCode init image internal calibrations.
	 */
	err = iwm_send_phy_cfg_cmd(sc);
	if (err)
		return err;

	/*
	 * Nothing to do but wait for the init complete notification
	 * from the firmware.  The interrupt path sets sc_init_complete
	 * and wakes us; each tsleep times out after 2 seconds.
	 */
	while (!sc->sc_init_complete) {
		err = tsleep(&sc->sc_init_complete, 0, "iwminit", mstohz(2000));
		if (err)
			break;
	}

	return err;
}
3315
/*
 * Allocate a new RX buffer (mbuf with cluster or external storage),
 * DMA-map it and install it in RX ring slot `idx', updating the
 * ring's hardware descriptor.  Returns 0 or an errno.
 */
static int
iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
{
	struct iwm_rx_ring *ring = &sc->rxq;
	struct iwm_rx_data *data = &ring->data[idx];
	struct mbuf *m;
	int err;
	int fatal = 0;

	m = m_gethdr(M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;

	/* Pick storage large enough for `size' bytes. */
	if (size <= MCLBYTES) {
		MCLGET(m, M_DONTWAIT);
	} else {
		MEXTMALLOC(m, IWM_RBUF_SIZE, M_DONTWAIT);
	}
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return ENOBUFS;
	}

	/*
	 * If the slot already held a buffer, we are replacing it; once
	 * the old mapping is gone a load failure would leave the slot
	 * unusable, so it is treated as fatal below.
	 */
	if (data->m != NULL) {
		bus_dmamap_unload(sc->sc_dmat, data->map);
		fatal = 1;
	}

	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (err) {
		/* XXX */
		if (fatal)
			panic("iwm: could not load RX mbuf");
		m_freem(m);
		return err;
	}
	data->m = m;
	bus_dmamap_sync(sc->sc_dmat, data->map, 0, size, BUS_DMASYNC_PREREAD);

	/* Update RX descriptor. */
	/* The hardware takes the DMA address shifted right by 8 bits. */
	ring->desc[idx] = htole32(data->map->dm_segs[0].ds_addr >> 8);
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
	    idx * sizeof(uint32_t), sizeof(uint32_t), BUS_DMASYNC_PREWRITE);

	return 0;
}
3364
3365 #define IWM_RSSI_OFFSET 50
3366 static int
3367 iwm_calc_rssi(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3368 {
3369 int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
3370 uint32_t agc_a, agc_b;
3371 uint32_t val;
3372
3373 val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_AGC_IDX]);
3374 agc_a = (val & IWM_OFDM_AGC_A_MSK) >> IWM_OFDM_AGC_A_POS;
3375 agc_b = (val & IWM_OFDM_AGC_B_MSK) >> IWM_OFDM_AGC_B_POS;
3376
3377 val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_RSSI_AB_IDX]);
3378 rssi_a = (val & IWM_OFDM_RSSI_INBAND_A_MSK) >> IWM_OFDM_RSSI_A_POS;
3379 rssi_b = (val & IWM_OFDM_RSSI_INBAND_B_MSK) >> IWM_OFDM_RSSI_B_POS;
3380
3381 /*
3382 * dBm = rssi dB - agc dB - constant.
3383 * Higher AGC (higher radio gain) means lower signal.
3384 */
3385 rssi_a_dbm = rssi_a - IWM_RSSI_OFFSET - agc_a;
3386 rssi_b_dbm = rssi_b - IWM_RSSI_OFFSET - agc_b;
3387 max_rssi_dbm = MAX(rssi_a_dbm, rssi_b_dbm);
3388
3389 DPRINTF(("Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
3390 rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b));
3391
3392 return max_rssi_dbm;
3393 }
3394
3395 /*
3396 * RSSI values are reported by the FW as positive values - need to negate
3397 * to obtain their dBM. Account for missing antennas by replacing 0
3398 * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
3399 */
3400 static int
3401 iwm_get_signal_strength(struct iwm_softc *sc,
3402 struct iwm_rx_phy_info *phy_info)
3403 {
3404 int energy_a, energy_b, energy_c, max_energy;
3405 uint32_t val;
3406
3407 val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
3408 energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3409 IWM_RX_INFO_ENERGY_ANT_A_POS;
3410 energy_a = energy_a ? -energy_a : -256;
3411 energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3412 IWM_RX_INFO_ENERGY_ANT_B_POS;
3413 energy_b = energy_b ? -energy_b : -256;
3414 energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3415 IWM_RX_INFO_ENERGY_ANT_C_POS;
3416 energy_c = energy_c ? -energy_c : -256;
3417 max_energy = MAX(energy_a, energy_b);
3418 max_energy = MAX(max_energy, energy_c);
3419
3420 DPRINTFN(12, ("energy In A %d B %d C %d, and max %d\n",
3421 energy_a, energy_b, energy_c, max_energy));
3422
3423 return max_energy;
3424 }
3425
3426 static void
3427 iwm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3428 struct iwm_rx_data *data)
3429 {
3430 struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
3431
3432 DPRINTFN(20, ("received PHY stats\n"));
3433 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
3434 sizeof(*phy_info), BUS_DMASYNC_POSTREAD);
3435
3436 memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3437 }
3438
3439 /*
3440 * Retrieve the average noise (in dBm) among receivers.
3441 */
3442 static int
3443 iwm_get_noise(const struct iwm_statistics_rx_non_phy *stats)
3444 {
3445 int i, total, nbant, noise;
3446
3447 total = nbant = noise = 0;
3448 for (i = 0; i < 3; i++) {
3449 noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
3450 if (noise) {
3451 total += noise;
3452 nbant++;
3453 }
3454 }
3455
3456 /* There should be at least one antenna but check anyway. */
3457 return (nbant == 0) ? -127 : (total / nbant) - 107;
3458 }
3459
/*
 * Handle an RX MPDU notification: validate the frame, replenish the
 * ring slot, attach radiotap metadata if requested, and hand the
 * frame to net80211.  PHY metadata comes from the most recent
 * IWM_RX_PHY_CMD notification (sc_last_phy_info).
 */
static void
iwm_rx_rx_mpdu(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
    struct iwm_rx_data *data)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_frame *wh;
	struct ieee80211_node *ni;
	struct ieee80211_channel *c = NULL;
	struct mbuf *m;
	struct iwm_rx_phy_info *phy_info;
	struct iwm_rx_mpdu_res_start *rx_res;
	int device_timestamp;
	uint32_t len;
	uint32_t rx_pkt_status;
	int rssi;

	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
	    BUS_DMASYNC_POSTREAD);

	phy_info = &sc->sc_last_phy_info;
	rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
	wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
	len = le16toh(rx_res->byte_count);
	/* The RX status word follows the frame payload. */
	rx_pkt_status = le32toh(*(uint32_t *)(pkt->data +
	    sizeof(*rx_res) + len));

	/* Hand the ring's own mbuf up the stack; point it at the frame. */
	m = data->m;
	m->m_data = pkt->data + sizeof(*rx_res);
	m->m_pkthdr.len = m->m_len = len;

	if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
		DPRINTF(("dsp size out of range [0,20]: %d\n",
		    phy_info->cfg_phy_cnt));
		return;
	}

	if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
	    !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
		DPRINTF(("Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status));
		return; /* drop */
	}

	device_timestamp = le32toh(phy_info->system_timestamp);

	/* Newer firmware reports per-antenna energy instead of RSSI/AGC. */
	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_RX_ENERGY_API) {
		rssi = iwm_get_signal_strength(sc, phy_info);
	} else {
		rssi = iwm_calc_rssi(sc, phy_info);
	}
	rssi = -rssi;

	if (ic->ic_state == IEEE80211_S_SCAN)
		iwm_fix_channel(sc, m);

	/* Install a fresh buffer in the ring slot; drop the frame if we
	 * cannot, since the slot must never be left empty. */
	if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0)
		return;

	m_set_rcvif(m, IC2IFP(ic));

	if (le32toh(phy_info->channel) < __arraycount(ic->ic_channels))
		c = &ic->ic_channels[le32toh(phy_info->channel)];

	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
	if (c)
		ni->ni_chan = c;

	if (sc->sc_drvbpf != NULL) {
		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;

		tap->wr_flags = 0;
		if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
		/*
		 * NOTE(review): unlike the channel lookup above, these
		 * index ic_channels with the raw (little-endian) value
		 * and without a bounds check -- looks like le32toh()
		 * plus a range check is intended; confirm and fix
		 * separately.
		 */
		tap->wr_chan_freq =
		    htole16(ic->ic_channels[phy_info->channel].ic_freq);
		tap->wr_chan_flags =
		    htole16(ic->ic_channels[phy_info->channel].ic_flags);
		tap->wr_dbm_antsignal = (int8_t)rssi;
		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
		tap->wr_tsft = phy_info->system_timestamp;
		if (phy_info->phy_flags &
		    htole16(IWM_RX_RES_PHY_FLAGS_OFDM_HT)) {
			uint8_t mcs = (phy_info->rate_n_flags &
			    htole32(IWM_RATE_HT_MCS_RATE_CODE_MSK));
			tap->wr_rate = (0x80 | mcs);
		} else {
			/* Map firmware rate codes to 500kb/s units. */
			uint8_t rate = (phy_info->rate_n_flags &
			    htole32(IWM_RATE_LEGACY_RATE_MSK));
			switch (rate) {
			/* CCK rates. */
			case 10: tap->wr_rate = 2; break;
			case 20: tap->wr_rate = 4; break;
			case 55: tap->wr_rate = 11; break;
			case 110: tap->wr_rate = 22; break;
			/* OFDM rates. */
			case 0xd: tap->wr_rate = 12; break;
			case 0xf: tap->wr_rate = 18; break;
			case 0x5: tap->wr_rate = 24; break;
			case 0x7: tap->wr_rate = 36; break;
			case 0x9: tap->wr_rate = 48; break;
			case 0xb: tap->wr_rate = 72; break;
			case 0x1: tap->wr_rate = 96; break;
			case 0x3: tap->wr_rate = 108; break;
			/* Unknown rate: should not happen. */
			default: tap->wr_rate = 0;
			}
		}

		bpf_mtap2(sc->sc_drvbpf, tap, sc->sc_rxtap_len, m);
	}
	ieee80211_input(ic, m, ni, rssi, device_timestamp);
	ieee80211_free_node(ni);
}
3572
3573 static void
3574 iwm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3575 struct iwm_node *in)
3576 {
3577 struct ieee80211com *ic = &sc->sc_ic;
3578 struct ifnet *ifp = IC2IFP(ic);
3579 struct iwm_tx_resp *tx_resp = (void *)pkt->data;
3580 int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3581 int failack = tx_resp->failure_frame;
3582
3583 KASSERT(tx_resp->frame_count == 1);
3584
3585 /* Update rate control statistics. */
3586 in->in_amn.amn_txcnt++;
3587 if (failack > 0) {
3588 in->in_amn.amn_retrycnt++;
3589 }
3590
3591 if (status != IWM_TX_STATUS_SUCCESS &&
3592 status != IWM_TX_STATUS_DIRECT_DONE)
3593 ifp->if_oerrors++;
3594 else
3595 ifp->if_opackets++;
3596 }
3597
/*
 * Handle a TX command response: update rate control and counters,
 * release the transmitted frame's DMA mapping, mbuf and node
 * reference, and restart output if the ring drained below the low
 * watermark.
 */
static void
iwm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
    struct iwm_rx_data *data)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = IC2IFP(ic);
	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
	int idx = cmd_hdr->idx;
	int qid = cmd_hdr->qid;
	struct iwm_tx_ring *ring = &sc->txq[qid];
	struct iwm_tx_data *txd = &ring->data[idx];
	struct iwm_node *in = txd->in;

	if (txd->done) {
		DPRINTF(("%s: got tx interrupt that's already been handled!\n",
		    DEVNAME(sc)));
		return;
	}

	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
	    BUS_DMASYNC_POSTREAD);

	/* A completion arrived, so the watchdog can stand down. */
	sc->sc_tx_timer = 0;

	iwm_rx_tx_cmd_single(sc, pkt, in);

	/* Tear down the frame's DMA mapping and free its mbuf. */
	bus_dmamap_sync(sc->sc_dmat, txd->map, 0, txd->map->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, txd->map);
	m_freem(txd->m);

	DPRINTFN(8, ("free txd %p, in %p\n", txd, txd->in));
	KASSERT(txd->done == 0);
	txd->done = 1;
	KASSERT(txd->in);

	txd->m = NULL;
	txd->in = NULL;
	/* Drop the node reference held since the frame was queued. */
	ieee80211_free_node(&in->in_ni);

	if (--ring->queued < IWM_TX_RING_LOMARK) {
		sc->qfullmsk &= ~(1 << ring->qid);
		if (sc->qfullmsk == 0 && (ifp->if_flags & IFF_OACTIVE)) {
			ifp->if_flags &= ~IFF_OACTIVE;
			/*
			 * Well, we're in interrupt context, but then again
			 * I guess net80211 does all sorts of stunts in
			 * interrupt context, so maybe this is no biggie.
			 */
			if_schedule_deferred_start(ifp);
		}
	}
}
3651
3652 static int
3653 iwm_binding_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action)
3654 {
3655 struct iwm_binding_cmd cmd;
3656 struct iwm_phy_ctxt *phyctxt = in->in_phyctxt;
3657 int i, err;
3658 uint32_t status;
3659
3660 memset(&cmd, 0, sizeof(cmd));
3661
3662 cmd.id_and_color
3663 = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
3664 cmd.action = htole32(action);
3665 cmd.phy = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
3666
3667 cmd.macs[0] = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
3668 for (i = 1; i < IWM_MAX_MACS_IN_BINDING; i++)
3669 cmd.macs[i] = htole32(IWM_FW_CTXT_INVALID);
3670
3671 status = 0;
3672 err = iwm_send_cmd_pdu_status(sc, IWM_BINDING_CONTEXT_CMD,
3673 sizeof(cmd), &cmd, &status);
3674 if (err == 0 && status != 0)
3675 err = EIO;
3676
3677 return err;
3678 }
3679
3680 static void
3681 iwm_phy_ctxt_cmd_hdr(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
3682 struct iwm_phy_context_cmd *cmd, uint32_t action, uint32_t apply_time)
3683 {
3684 memset(cmd, 0, sizeof(struct iwm_phy_context_cmd));
3685
3686 cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(ctxt->id,
3687 ctxt->color));
3688 cmd->action = htole32(action);
3689 cmd->apply_time = htole32(apply_time);
3690 }
3691
3692 static void
3693 iwm_phy_ctxt_cmd_data(struct iwm_softc *sc, struct iwm_phy_context_cmd *cmd,
3694 struct ieee80211_channel *chan, uint8_t chains_static,
3695 uint8_t chains_dynamic)
3696 {
3697 struct ieee80211com *ic = &sc->sc_ic;
3698 uint8_t active_cnt, idle_cnt;
3699
3700 cmd->ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
3701 IWM_PHY_BAND_24 : IWM_PHY_BAND_5;
3702
3703 cmd->ci.channel = ieee80211_chan2ieee(ic, chan);
3704 cmd->ci.width = IWM_PHY_VHT_CHANNEL_MODE20;
3705 cmd->ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
3706
3707 /* Set rx the chains */
3708 idle_cnt = chains_static;
3709 active_cnt = chains_dynamic;
3710
3711 cmd->rxchain_info = htole32(iwm_fw_valid_rx_ant(sc) <<
3712 IWM_PHY_RX_CHAIN_VALID_POS);
3713 cmd->rxchain_info |= htole32(idle_cnt << IWM_PHY_RX_CHAIN_CNT_POS);
3714 cmd->rxchain_info |= htole32(active_cnt <<
3715 IWM_PHY_RX_CHAIN_MIMO_CNT_POS);
3716
3717 cmd->txchain_info = htole32(iwm_fw_valid_tx_ant(sc));
3718 }
3719
3720 static int
3721 iwm_phy_ctxt_cmd(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
3722 uint8_t chains_static, uint8_t chains_dynamic, uint32_t action,
3723 uint32_t apply_time)
3724 {
3725 struct iwm_phy_context_cmd cmd;
3726
3727 iwm_phy_ctxt_cmd_hdr(sc, ctxt, &cmd, action, apply_time);
3728
3729 iwm_phy_ctxt_cmd_data(sc, &cmd, ctxt->channel,
3730 chains_static, chains_dynamic);
3731
3732 return iwm_send_cmd_pdu(sc, IWM_PHY_CONTEXT_CMD, 0,
3733 sizeof(struct iwm_phy_context_cmd), &cmd);
3734 }
3735
/*
 * Send a host command to the firmware via the command queue.
 *
 * Small commands are copied into the ring's pre-allocated command
 * buffer; commands whose payload exceeds that buffer are sent from a
 * freshly allocated mbuf instead.  With IWM_CMD_ASYNC set the
 * function returns once the command is queued; otherwise it sleeps
 * until the firmware acknowledges it (1s timeout per tsleep).  With
 * IWM_CMD_WANT_SKB the caller gets the response in hcmd->resp_pkt
 * and must release it with iwm_free_resp().
 */
static int
iwm_send_cmd(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
{
	struct iwm_tx_ring *ring = &sc->txq[IWM_CMD_QUEUE];
	struct iwm_tfd *desc;
	struct iwm_tx_data *txdata;
	struct iwm_device_cmd *cmd;
	struct mbuf *m;
	bus_addr_t paddr;
	uint32_t addr_lo;
	int err = 0, i, paylen, off, s;
	int code;
	int async, wantresp;
	int group_id;
	size_t hdrlen, datasz;
	uint8_t *data;

	code = hcmd->id;
	async = hcmd->flags & IWM_CMD_ASYNC;
	wantresp = hcmd->flags & IWM_CMD_WANT_SKB;

	/* Total payload size across all fragments. */
	for (i = 0, paylen = 0; i < __arraycount(hcmd->len); i++) {
		paylen += hcmd->len[i];
	}

	/* if the command wants an answer, busy sc_cmd_resp */
	if (wantresp) {
		KASSERT(!async);
		while (sc->sc_wantresp != IWM_CMD_RESP_IDLE)
			tsleep(&sc->sc_wantresp, 0, "iwmcmdsl", 0);
		sc->sc_wantresp = ring->qid << 16 | ring->cur;
	}

	/*
	 * Is the hardware still available? (after e.g. above wait).
	 */
	s = splnet();
	if (sc->sc_flags & IWM_FLAG_STOPPED) {
		err = ENXIO;
		goto out;
	}

	desc = &ring->desc[ring->cur];
	txdata = &ring->data[ring->cur];

	/* Grouped ("wide") commands use a larger header. */
	group_id = iwm_cmd_groupid(code);
	if (group_id != 0) {
		hdrlen = sizeof(cmd->hdr_wide);
		datasz = sizeof(cmd->data_wide);
	} else {
		hdrlen = sizeof(cmd->hdr);
		datasz = sizeof(cmd->data);
	}

	if (paylen > datasz) {
		/* Command is too large to fit in pre-allocated space. */
		size_t totlen = hdrlen + paylen;
		if (paylen > IWM_MAX_CMD_PAYLOAD_SIZE) {
			aprint_error_dev(sc->sc_dev,
			    "firmware command too long (%zd bytes)\n", totlen);
			err = EINVAL;
			goto out;
		}
		m = m_gethdr(M_DONTWAIT, MT_DATA);
		if (m == NULL) {
			err = ENOMEM;
			goto out;
		}
		MEXTMALLOC(m, IWM_RBUF_SIZE, M_DONTWAIT);
		if (!(m->m_flags & M_EXT)) {
			aprint_error_dev(sc->sc_dev,
			    "could not get fw cmd mbuf (%zd bytes)\n", totlen);
			m_freem(m);
			err = ENOMEM;
			goto out;
		}
		cmd = mtod(m, struct iwm_device_cmd *);
		err = bus_dmamap_load(sc->sc_dmat, txdata->map, cmd,
		    totlen, NULL, BUS_DMA_NOWAIT | BUS_DMA_WRITE);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could not load fw cmd mbuf (%zd bytes)\n", totlen);
			m_freem(m);
			goto out;
		}
		/* The mbuf is released later, in iwm_cmd_done(). */
		txdata->m = m;
		paddr = txdata->map->dm_segs[0].ds_addr;
	} else {
		cmd = &ring->cmd[ring->cur];
		paddr = txdata->cmd_paddr;
	}

	/* Fill in the device command header. */
	if (group_id != 0) {
		cmd->hdr_wide.opcode = iwm_cmd_opcode(code);
		cmd->hdr_wide.group_id = group_id;
		cmd->hdr_wide.qid = ring->qid;
		cmd->hdr_wide.idx = ring->cur;
		cmd->hdr_wide.length = htole16(paylen);
		cmd->hdr_wide.version = iwm_cmd_version(code);
		data = cmd->data_wide;
	} else {
		cmd->hdr.code = code;
		cmd->hdr.flags = 0;
		cmd->hdr.qid = ring->qid;
		cmd->hdr.idx = ring->cur;
		data = cmd->data;
	}

	/* Gather the payload fragments into the command buffer. */
	for (i = 0, off = 0; i < __arraycount(hcmd->data); i++) {
		if (hcmd->len[i] == 0)
			continue;
		memcpy(data + off, hcmd->data[i], hcmd->len[i]);
		off += hcmd->len[i];
	}
	KASSERT(off == paylen);

	/* lo field is not aligned */
	addr_lo = htole32((uint32_t)paddr);
	memcpy(&desc->tbs[0].lo, &addr_lo, sizeof(uint32_t));
	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(paddr)
	    | ((hdrlen + paylen) << 4));
	desc->num_tbs = 1;

	DPRINTFN(8, ("iwm_send_cmd 0x%x size=%zu %s\n",
	    code, sizeof(cmd->hdr) + paylen, async ? " (async)" : ""));

	/* Flush command and descriptor before the device sees them. */
	if (paylen > datasz) {
		bus_dmamap_sync(sc->sc_dmat, txdata->map, 0,
		    hdrlen + paylen, BUS_DMASYNC_PREWRITE);
	} else {
		bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
		    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
		    hdrlen + paylen, BUS_DMASYNC_PREWRITE);
	}
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
	    sizeof(*desc), BUS_DMASYNC_PREWRITE);

	/* Wake the NIC before touching the write pointer. */
	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	if (!iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
	    (IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
	     IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000)) {
		aprint_error_dev(sc->sc_dev, "acquiring device failed\n");
		err = EBUSY;
		goto out;
	}

#if 0
	iwm_update_sched(sc, ring->qid, ring->cur, 0, 0);
#endif
	DPRINTF(("sending command 0x%x qid %d, idx %d\n",
	    code, ring->qid, ring->cur));

	/* Kick command ring. */
	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	if (!async) {
		/* iwm_cmd_done() wakes us up on the descriptor address. */
		int generation = sc->sc_generation;
		err = tsleep(desc, PCATCH, "iwmcmd", mstohz(1000));
		if (err == 0) {
			/* if hardware is no longer up, return error */
			if (generation != sc->sc_generation) {
				err = ENXIO;
			} else {
				hcmd->resp_pkt = (void *)sc->sc_cmd_resp;
			}
		}
	}
out:
	/* On failure, release the response-buffer claim made above. */
	if (wantresp && err) {
		iwm_free_resp(sc, hcmd);
	}
	splx(s);

	return err;
}
3915
3916 static int
3917 iwm_send_cmd_pdu(struct iwm_softc *sc, uint32_t id, uint32_t flags,
3918 uint16_t len, const void *data)
3919 {
3920 struct iwm_host_cmd cmd = {
3921 .id = id,
3922 .len = { len, },
3923 .data = { data, },
3924 .flags = flags,
3925 };
3926
3927 return iwm_send_cmd(sc, &cmd);
3928 }
3929
3930 static int
3931 iwm_send_cmd_status(struct iwm_softc *sc, struct iwm_host_cmd *cmd,
3932 uint32_t *status)
3933 {
3934 struct iwm_rx_packet *pkt;
3935 struct iwm_cmd_response *resp;
3936 int err, resp_len;
3937
3938 KASSERT((cmd->flags & IWM_CMD_WANT_SKB) == 0);
3939 cmd->flags |= IWM_CMD_WANT_SKB;
3940
3941 err = iwm_send_cmd(sc, cmd);
3942 if (err)
3943 return err;
3944 pkt = cmd->resp_pkt;
3945
3946 /* Can happen if RFKILL is asserted */
3947 if (!pkt) {
3948 err = 0;
3949 goto out_free_resp;
3950 }
3951
3952 if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
3953 err = EIO;
3954 goto out_free_resp;
3955 }
3956
3957 resp_len = iwm_rx_packet_payload_len(pkt);
3958 if (resp_len != sizeof(*resp)) {
3959 err = EIO;
3960 goto out_free_resp;
3961 }
3962
3963 resp = (void *)pkt->data;
3964 *status = le32toh(resp->status);
3965 out_free_resp:
3966 iwm_free_resp(sc, cmd);
3967 return err;
3968 }
3969
3970 static int
3971 iwm_send_cmd_pdu_status(struct iwm_softc *sc, uint32_t id, uint16_t len,
3972 const void *data, uint32_t *status)
3973 {
3974 struct iwm_host_cmd cmd = {
3975 .id = id,
3976 .len = { len, },
3977 .data = { data, },
3978 };
3979
3980 return iwm_send_cmd_status(sc, &cmd, status);
3981 }
3982
/*
 * Release ownership of the synchronous-command response buffer that
 * was claimed in iwm_send_cmd() for an IWM_CMD_WANT_SKB command, and
 * wake any thread waiting to issue such a command.
 */
static void
iwm_free_resp(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
{
	KASSERT(sc->sc_wantresp != IWM_CMD_RESP_IDLE);
	KASSERT((hcmd->flags & IWM_CMD_WANT_SKB) == IWM_CMD_WANT_SKB);
	sc->sc_wantresp = IWM_CMD_RESP_IDLE;
	wakeup(&sc->sc_wantresp);
}
3991
3992 static void
3993 iwm_cmd_done(struct iwm_softc *sc, int qid, int idx)
3994 {
3995 struct iwm_tx_ring *ring = &sc->txq[IWM_CMD_QUEUE];
3996 struct iwm_tx_data *data;
3997
3998 if (qid != IWM_CMD_QUEUE) {
3999 return; /* Not a command ack. */
4000 }
4001
4002 data = &ring->data[idx];
4003
4004 if (data->m != NULL) {
4005 bus_dmamap_sync(sc->sc_dmat, data->map, 0,
4006 data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
4007 bus_dmamap_unload(sc->sc_dmat, data->map);
4008 m_freem(data->m);
4009 data->m = NULL;
4010 }
4011 wakeup(&ring->desc[idx]);
4012 }
4013
#if 0
/*
 * Update the TX scheduler's byte-count table entry for the frame at
 * (qid, idx).  Necessary only for block ack mode.
 *
 * Fix: the original referenced an undeclared identifier `w' in both
 * bus_dmamap_sync() offset computations and would not compile if
 * enabled; `w' is now declared as a pointer to the table entry being
 * updated.
 */
void
iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
    uint16_t len)
{
	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
	uint16_t *w;
	uint16_t w_val;

	scd_bc_tbl = sc->sched_dma.vaddr;

	len += 8; /* magic numbers came naturally from paris */
	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
		len = roundup(len, 4) / 4;

	w_val = htole16(sta_id << 12 | len);

	/* Update TX scheduler. */
	w = &scd_bc_tbl[qid].tfd_offset[idx];
	*w = w_val;
	bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
	    (char *)(void *)w - (char *)(void *)sc->sched_dma.vaddr,
	    sizeof(uint16_t), BUS_DMASYNC_PREWRITE);

	/* I really wonder what this is ?!? */
	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
		bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
		    (char *)(void *)(w + IWM_TFD_QUEUE_SIZE_MAX) -
		    (char *)(void *)sc->sched_dma.vaddr,
		    sizeof (uint16_t), BUS_DMASYNC_PREWRITE);
	}
}
#endif
4049
/*
 * Fill in various bit for management frames, and leave them
 * unfilled for data frames (firmware takes care of that).
 * Return the selected TX rate.
 */
static const struct iwm_rate *
iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
    struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = &in->in_ni;
	const struct iwm_rate *rinfo;
	int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	int ridx, rate_flags, i;
	int nrates = ni->ni_rates.rs_nrates;

	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
	tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;

	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
	    type != IEEE80211_FC0_TYPE_DATA) {
		/* for non-data, use the lowest supported rate */
		ridx = (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) ?
		    IWM_RIDX_OFDM : IWM_RIDX_CCK;
		tx->data_retry_limit = IWM_MGMT_DFAULT_RETRY_LIMIT;
#ifndef IEEE80211_NO_HT
	} else if (ic->ic_fixed_mcs != -1) {
		/* user-fixed HT MCS */
		ridx = sc->sc_fixed_ridx;
#endif
	} else if (ic->ic_fixed_rate != -1) {
		/* user-fixed legacy rate */
		ridx = sc->sc_fixed_ridx;
	} else {
		/* for data frames, use RS table */
		/* Let firmware rate scaling take over, starting at entry 0. */
		tx->initial_rate_index = 0;
		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
		DPRINTFN(12, ("start with txrate %d\n",
		    tx->initial_rate_index));
#ifndef IEEE80211_NO_HT
		if (ni->ni_flags & IEEE80211_NODE_HT) {
			ridx = iwm_mcs2ridx[ni->ni_txmcs];
			return &iwm_rates[ridx];
		}
#endif
		/* Map net80211's current TX rate onto our rate table. */
		ridx = (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) ?
		    IWM_RIDX_OFDM : IWM_RIDX_CCK;
		for (i = 0; i < nrates; i++) {
			if (iwm_rates[i].rate == (ni->ni_txrate &
			    IEEE80211_RATE_VAL)) {
				ridx = i;
				break;
			}
		}
		return &iwm_rates[ridx];
	}

	/* Fixed or management rate: build rate_n_flags explicitly. */
	rinfo = &iwm_rates[ridx];
	rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
	if (IWM_RIDX_IS_CCK(ridx))
		rate_flags |= IWM_RATE_MCS_CCK_MSK;
#ifndef IEEE80211_NO_HT
	if ((ni->ni_flags & IEEE80211_NODE_HT) &&
	    rinfo->ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP) {
		rate_flags |= IWM_RATE_MCS_HT_MSK;
		tx->rate_n_flags = htole32(rate_flags | rinfo->ht_plcp);
	} else
#endif
		tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);

	return rinfo;
}
4120
#define TB0_SIZE 16
/*
 * Queue an 802.11 frame for transmission on the TX ring that serves
 * access category 'ac'.  The frame's TX command (including a copy of
 * the 802.11 header) goes into the ring's command slot, and the frame
 * body is DMA-mapped as additional TFD segments.  Consumes 'm' on
 * error.  Returns 0 on success or an errno.
 */
static int
iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (struct iwm_node *)ni;
	struct iwm_tx_ring *ring;
	struct iwm_tx_data *data;
	struct iwm_tfd *desc;
	struct iwm_device_cmd *cmd;
	struct iwm_tx_cmd *tx;
	struct ieee80211_frame *wh;
	struct ieee80211_key *k = NULL;
	struct mbuf *m1;
	const struct iwm_rate *rinfo;
	uint32_t flags;
	u_int hdrlen;
	bus_dma_segment_t *seg;
	uint8_t tid, type;
	int i, totlen, err, pad;

	wh = mtod(m, struct ieee80211_frame *);
	hdrlen = ieee80211_anyhdrsize(wh);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;

	/* No QoS/TID support here; everything uses TID 0. */
	tid = 0;

	/* Slot in the per-AC TX ring for this frame. */
	ring = &sc->txq[ac];
	desc = &ring->desc[ring->cur];
	memset(desc, 0, sizeof(*desc));
	data = &ring->data[ring->cur];

	/* Build the firmware command header for IWM_TX_CMD. */
	cmd = &ring->cmd[ring->cur];
	cmd->hdr.code = IWM_TX_CMD;
	cmd->hdr.flags = 0;
	cmd->hdr.qid = ring->qid;
	cmd->hdr.idx = ring->cur;

	tx = (void *)cmd->data;
	memset(tx, 0, sizeof(*tx));

	/* Select the TX rate and fill rate_n_flags in the command. */
	rinfo = iwm_tx_fill_cmd(sc, in, wh, tx);

	if (sc->sc_drvbpf != NULL) {
		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;

		tap->wt_flags = 0;
		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
#ifndef IEEE80211_NO_HT
		/*
		 * NOTE(review): this condition differs from the check used
		 * in iwm_tx_fill_cmd() (ht_plcp != INV_PLCP); it reads as
		 * "legacy PLCP invalid implies an HT rate was chosen".
		 * Compiled out while IEEE80211_NO_HT is defined -- confirm
		 * against upstream before enabling HT.
		 */
		if ((ni->ni_flags & IEEE80211_NODE_HT) &&
		    !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
		    type == IEEE80211_FC0_TYPE_DATA &&
		    rinfo->plcp == IWM_RATE_INVM_PLCP) {
			tap->wt_rate = (0x80 | rinfo->ht_plcp);
		} else
#endif
			tap->wt_rate = rinfo->rate;
		tap->wt_hwqueue = ac;
		if (wh->i_fc[1] & IEEE80211_FC1_WEP)
			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;

		bpf_mtap2(sc->sc_drvbpf, tap, sc->sc_txtap_len, m);
	}

	/* Encrypt the frame if need be. */
	if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
		k = ieee80211_crypto_encap(ic, ni, m);
		if (k == NULL) {
			m_freem(m);
			return ENOBUFS;
		}
		/* Packet header may have moved, reset our local pointer. */
		wh = mtod(m, struct ieee80211_frame *);
	}
	totlen = m->m_pkthdr.len;

	flags = 0;
	/* Unicast frames expect an ACK from the peer. */
	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_ACK;
	}

	/* RTS/CTS or CTS-to-self protection for long/protected data. */
	if (type == IEEE80211_FC0_TYPE_DATA &&
	    !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
	    (totlen + IEEE80211_CRC_LEN > ic->ic_rtsthreshold ||
	    (ic->ic_flags & IEEE80211_F_USEPROT)))
		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;

	/* Multicast and management frames go via the auxiliary station. */
	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
	    type != IEEE80211_FC0_TYPE_DATA)
		tx->sta_id = IWM_AUX_STA_ID;
	else
		tx->sta_id = IWM_STATION_ID;

	if (type == IEEE80211_FC0_TYPE_MGT) {
		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

		/* (Re)association frames get a slightly longer timeout. */
		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
			tx->pm_frame_timeout = htole16(3);
		else
			tx->pm_frame_timeout = htole16(2);
	} else {
		tx->pm_frame_timeout = htole16(0);
	}

	if (hdrlen & 3) {
		/* First segment length must be a multiple of 4. */
		flags |= IWM_TX_CMD_FLG_MH_PAD;
		pad = 4 - (hdrlen & 3);
	} else
		pad = 0;

	tx->driver_txop = 0;
	tx->next_frame_len = 0;

	tx->len = htole16(totlen);
	tx->tid_tspec = tid;
	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);

	/* Set physical address of "scratch area". */
	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);

	/* Copy 802.11 header in TX command. */
	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);

	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;

	tx->sec_ctl = 0;
	tx->tx_flags |= htole32(flags);

	/* Trim 802.11 header. */
	m_adj(m, hdrlen);

	err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
	    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
	if (err) {
		if (err != EFBIG) {
			aprint_error_dev(sc->sc_dev,
			    "can't map mbuf (error %d)\n", err);
			m_freem(m);
			return err;
		}
		/* Too many DMA segments, linearize mbuf. */
		MGETHDR(m1, M_DONTWAIT, MT_DATA);
		if (m1 == NULL) {
			m_freem(m);
			return ENOBUFS;
		}
		if (m->m_pkthdr.len > MHLEN) {
			MCLGET(m1, M_DONTWAIT);
			if (!(m1->m_flags & M_EXT)) {
				m_freem(m);
				m_freem(m1);
				return ENOBUFS;
			}
		}
		m_copydata(m, 0, m->m_pkthdr.len, mtod(m1, void *));
		m1->m_pkthdr.len = m1->m_len = m->m_pkthdr.len;
		m_freem(m);
		m = m1;

		/* Retry the DMA load with the now-contiguous mbuf. */
		err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
		    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "can't map mbuf (error %d)\n", err);
			m_freem(m);
			return err;
		}
	}
	data->m = m;
	data->in = in;
	data->done = 0;

	DPRINTFN(8, ("sending txd %p, in %p\n", data, data->in));
	KASSERT(data->in != NULL);

	DPRINTFN(8, ("sending data: qid=%d idx=%d len=%d nsegs=%d\n",
	    ring->qid, ring->cur, totlen, data->map->dm_nsegs));

	/* Fill TX descriptor. */
	desc->num_tbs = 2 + data->map->dm_nsegs;

	/*
	 * TB0 and TB1 point into the command slot: TB0 carries the first
	 * TB0_SIZE bytes, TB1 the rest of the command plus the (padded)
	 * 802.11 header that was copied after the TX command above.
	 */
	desc->tbs[0].lo = htole32(data->cmd_paddr);
	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    (TB0_SIZE << 4);
	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
	    + hdrlen + pad - TB0_SIZE) << 4);

	/* Other DMA segments are for data payload. */
	seg = data->map->dm_segs;
	for (i = 0; i < data->map->dm_nsegs; i++, seg++) {
		desc->tbs[i+2].lo = htole32(seg->ds_addr);
		desc->tbs[i+2].hi_n_len = \
		    htole16(iwm_get_dma_hi_addr(seg->ds_addr))
		    | ((seg->ds_len) << 4);
	}

	/* Make payload, command and descriptor visible to the device. */
	bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
	    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
	    sizeof (*cmd), BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
	    sizeof (*desc), BUS_DMASYNC_PREWRITE);

#if 0
	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id,
	    le16toh(tx->len));
#endif

	/* Kick TX ring. */
	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	/* Mark TX ring as full if we reach a certain threshold. */
	if (++ring->queued > IWM_TX_RING_HIMARK) {
		sc->qfullmsk |= 1 << ring->qid;
	}

	return 0;
}
4348
#if 0
/* not necessary? */
/*
 * Ask the firmware to flush pending frames from the TX queues selected
 * by 'tfd_msk'.  'sync' selects a synchronous or asynchronous command.
 * Currently compiled out.
 */
static int
iwm_flush_tx_path(struct iwm_softc *sc, int tfd_msk, int sync)
{
	struct iwm_tx_path_flush_cmd flush_cmd = {
		.queues_ctl = htole32(tfd_msk),
		.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
	};
	int err;

	err = iwm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, sync ? 0 : IWM_CMD_ASYNC,
	    sizeof(flush_cmd), &flush_cmd);
	if (err)
		aprint_error_dev(sc->sc_dev, "Flushing tx queue failed: %d\n",
		    err);
	return err;
}
#endif
4368
/* Turn the device LED on by writing the "turn on" value to the LED CSR. */
static void
iwm_led_enable(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_LED_REG, IWM_CSR_LED_REG_TURN_ON);
}
4374
/* Turn the device LED off by writing the "turn off" value to the LED CSR. */
static void
iwm_led_disable(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_LED_REG, IWM_CSR_LED_REG_TURN_OFF);
}
4380
4381 static int
4382 iwm_led_is_enabled(struct iwm_softc *sc)
4383 {
4384 return (IWM_READ(sc, IWM_CSR_LED_REG) == IWM_CSR_LED_REG_TURN_ON);
4385 }
4386
4387 static void
4388 iwm_led_blink_timeout(void *arg)
4389 {
4390 struct iwm_softc *sc = arg;
4391
4392 if (iwm_led_is_enabled(sc))
4393 iwm_led_disable(sc);
4394 else
4395 iwm_led_enable(sc);
4396
4397 callout_schedule(&sc->sc_led_blink_to, mstohz(200));
4398 }
4399
/* Kick off the blink cycle; iwm_led_blink_timeout() re-arms itself. */
static void
iwm_led_blink_start(struct iwm_softc *sc)
{
	callout_schedule(&sc->sc_led_blink_to, mstohz(200));
}
4405
/* Cancel the blink callout and leave the LED switched off. */
static void
iwm_led_blink_stop(struct iwm_softc *sc)
{
	callout_stop(&sc->sc_led_blink_to);
	iwm_led_disable(sc);
}
4412
4413 #define IWM_POWER_KEEP_ALIVE_PERIOD_SEC 25
4414
/* Send a beacon filtering command PDU to the firmware. */
static int
iwm_beacon_filter_send_cmd(struct iwm_softc *sc,
    struct iwm_beacon_filter_cmd *cmd)
{
	return iwm_send_cmd_pdu(sc, IWM_REPLY_BEACON_FILTERING_CMD,
	    0, sizeof(struct iwm_beacon_filter_cmd), cmd);
}
4422
/*
 * Fill the CQM-related fields of a beacon filter command from driver
 * state; currently only propagates the cached beacon-abort setting.
 * 'in' is unused here but kept for interface symmetry with callers.
 */
static void
iwm_beacon_filter_set_cqm_params(struct iwm_softc *sc, struct iwm_node *in,
    struct iwm_beacon_filter_cmd *cmd)
{
	cmd->ba_enable_beacon_abort = htole32(sc->sc_bf.ba_enabled);
}
4429
/*
 * Enable or disable firmware beacon abort.  A no-op unless beacon
 * filtering is currently enabled; otherwise records the new setting
 * and pushes an updated beacon filter command to the firmware.
 */
static int
iwm_update_beacon_abort(struct iwm_softc *sc, struct iwm_node *in, int enable)
{
	struct iwm_beacon_filter_cmd cmd = {
		IWM_BF_CMD_CONFIG_DEFAULTS,
		.bf_enable_beacon_filter = htole32(1),
		.ba_enable_beacon_abort = htole32(enable),
	};

	if (!sc->sc_bf.bf_enabled)
		return 0;

	sc->sc_bf.ba_enabled = enable;
	iwm_beacon_filter_set_cqm_params(sc, in, &cmd);
	return iwm_beacon_filter_send_cmd(sc, &cmd);
}
4446
4447 static void
4448 iwm_power_build_cmd(struct iwm_softc *sc, struct iwm_node *in,
4449 struct iwm_mac_power_cmd *cmd)
4450 {
4451 struct ieee80211_node *ni = &in->in_ni;
4452 int dtim_period, dtim_msec, keep_alive;
4453
4454 cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
4455 in->in_color));
4456 if (ni->ni_dtim_period)
4457 dtim_period = ni->ni_dtim_period;
4458 else
4459 dtim_period = 1;
4460
4461 /*
4462 * Regardless of power management state the driver must set
4463 * keep alive period. FW will use it for sending keep alive NDPs
4464 * immediately after association. Check that keep alive period
4465 * is at least 3 * DTIM.
4466 */
4467 dtim_msec = dtim_period * ni->ni_intval;
4468 keep_alive = MAX(3 * dtim_msec, 1000 * IWM_POWER_KEEP_ALIVE_PERIOD_SEC);
4469 keep_alive = roundup(keep_alive, 1000) / 1000;
4470 cmd->keep_alive_seconds = htole16(keep_alive);
4471
4472 #ifdef notyet
4473 cmd->flags = htole16(IWM_POWER_FLAGS_POWER_SAVE_ENA_MSK);
4474 cmd->rx_data_timeout = IWM_DEFAULT_PS_RX_DATA_TIMEOUT;
4475 cmd->tx_data_timeout = IWM_DEFAULT_PS_TX_DATA_TIMEOUT;
4476 #endif
4477 }
4478
4479 static int
4480 iwm_power_mac_update_mode(struct iwm_softc *sc, struct iwm_node *in)
4481 {
4482 int err;
4483 int ba_enable;
4484 struct iwm_mac_power_cmd cmd;
4485
4486 memset(&cmd, 0, sizeof(cmd));
4487
4488 iwm_power_build_cmd(sc, in, &cmd);
4489
4490 err = iwm_send_cmd_pdu(sc, IWM_MAC_PM_POWER_TABLE, 0,
4491 sizeof(cmd), &cmd);
4492 if (err)
4493 return err;
4494
4495 ba_enable = !!(cmd.flags &
4496 htole16(IWM_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
4497 return iwm_update_beacon_abort(sc, in, ba_enable);
4498 }
4499
/*
 * Send the device-wide power table command, if the firmware supports
 * it.  Power save itself stays disabled (the PS flag is under
 * "notyet"); the CAM flag is set -- presumably "continuously active
 * mode", i.e. no sleep -- TODO confirm against iwlwifi naming.
 */
static int
iwm_power_update_device(struct iwm_softc *sc)
{
	struct iwm_device_power_cmd cmd = {
#ifdef notyet
		.flags = htole16(IWM_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK),
#endif
	};

	if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
		return 0;

	cmd.flags |= htole16(IWM_DEVICE_POWER_FLAGS_CAM_MSK);
	DPRINTF(("Sending device power command with flags = 0x%X\n",
	    cmd.flags));

	return iwm_send_cmd_pdu(sc, IWM_POWER_TABLE_CMD, 0, sizeof(cmd), &cmd);
}
4518
#ifdef notyet
/*
 * Enable firmware beacon filtering for the given node and record the
 * new state on success.  Currently compiled out ("notyet").
 */
static int
iwm_enable_beacon_filter(struct iwm_softc *sc, struct iwm_node *in)
{
	struct iwm_beacon_filter_cmd cmd = {
		IWM_BF_CMD_CONFIG_DEFAULTS,
		.bf_enable_beacon_filter = htole32(1),
	};
	int err;

	iwm_beacon_filter_set_cqm_params(sc, in, &cmd);
	err = iwm_beacon_filter_send_cmd(sc, &cmd);

	if (err == 0)
		sc->sc_bf.bf_enabled = 1;

	return err;
}
#endif
4538
4539 static int
4540 iwm_disable_beacon_filter(struct iwm_softc *sc)
4541 {
4542 struct iwm_beacon_filter_cmd cmd;
4543 int err;
4544
4545 memset(&cmd, 0, sizeof(cmd));
4546 if ((sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_BF_UPDATED) == 0)
4547 return 0;
4548
4549 err = iwm_beacon_filter_send_cmd(sc, &cmd);
4550 if (err == 0)
4551 sc->sc_bf.bf_enabled = 0;
4552
4553 return err;
4554 }
4555
4556 static int
4557 iwm_add_sta_cmd(struct iwm_softc *sc, struct iwm_node *in, int update)
4558 {
4559 struct iwm_add_sta_cmd_v7 add_sta_cmd;
4560 int err;
4561 uint32_t status;
4562
4563 memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
4564
4565 add_sta_cmd.sta_id = IWM_STATION_ID;
4566 add_sta_cmd.mac_id_n_color
4567 = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
4568 if (!update) {
4569 int ac;
4570 for (ac = 0; ac < WME_NUM_AC; ac++) {
4571 add_sta_cmd.tfd_queue_msk |=
4572 htole32(__BIT(iwm_ac_to_tx_fifo[ac]));
4573 }
4574 IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
4575 }
4576 add_sta_cmd.add_modify = update ? 1 : 0;
4577 add_sta_cmd.station_flags_msk
4578 |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
4579 add_sta_cmd.tid_disable_tx = htole16(0xffff);
4580 if (update)
4581 add_sta_cmd.modify_mask |= (IWM_STA_MODIFY_TID_DISABLE_TX);
4582
4583 #ifndef IEEE80211_NO_HT
4584 if (in->in_ni.ni_flags & IEEE80211_NODE_HT) {
4585 add_sta_cmd.station_flags_msk
4586 |= htole32(IWM_STA_FLG_MAX_AGG_SIZE_MSK |
4587 IWM_STA_FLG_AGG_MPDU_DENS_MSK);
4588
4589 add_sta_cmd.station_flags
4590 |= htole32(IWM_STA_FLG_MAX_AGG_SIZE_64K);
4591 switch (ic->ic_ampdu_params & IEEE80211_AMPDU_PARAM_SS) {
4592 case IEEE80211_AMPDU_PARAM_SS_2:
4593 add_sta_cmd.station_flags
4594 |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_2US);
4595 break;
4596 case IEEE80211_AMPDU_PARAM_SS_4:
4597 add_sta_cmd.station_flags
4598 |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_4US);
4599 break;
4600 case IEEE80211_AMPDU_PARAM_SS_8:
4601 add_sta_cmd.station_flags
4602 |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_8US);
4603 break;
4604 case IEEE80211_AMPDU_PARAM_SS_16:
4605 add_sta_cmd.station_flags
4606 |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_16US);
4607 break;
4608 default:
4609 break;
4610 }
4611 }
4612 #endif
4613
4614 status = IWM_ADD_STA_SUCCESS;
4615 err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(add_sta_cmd),
4616 &add_sta_cmd, &status);
4617 if (err == 0 && status != IWM_ADD_STA_SUCCESS)
4618 err = EIO;
4619
4620 return err;
4621 }
4622
4623 static int
4624 iwm_add_aux_sta(struct iwm_softc *sc)
4625 {
4626 struct iwm_add_sta_cmd_v7 cmd;
4627 int err;
4628 uint32_t status;
4629
4630 err = iwm_enable_txq(sc, 0, IWM_AUX_QUEUE, IWM_TX_FIFO_MCAST);
4631 if (err)
4632 return err;
4633
4634 memset(&cmd, 0, sizeof(cmd));
4635 cmd.sta_id = IWM_AUX_STA_ID;
4636 cmd.mac_id_n_color =
4637 htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_MAC_INDEX_AUX, 0));
4638 cmd.tfd_queue_msk = htole32(1 << IWM_AUX_QUEUE);
4639 cmd.tid_disable_tx = htole16(0xffff);
4640
4641 status = IWM_ADD_STA_SUCCESS;
4642 err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(cmd), &cmd,
4643 &status);
4644 if (err == 0 && status != IWM_ADD_STA_SUCCESS)
4645 err = EIO;
4646
4647 return err;
4648 }
4649
4650 #define IWM_PLCP_QUIET_THRESH 1
4651 #define IWM_ACTIVE_QUIET_TIME 10
4652 #define LONG_OUT_TIME_PERIOD 600
4653 #define SHORT_OUT_TIME_PERIOD 200
4654 #define SUSPEND_TIME_PERIOD 100
4655
4656 static uint16_t
4657 iwm_scan_rx_chain(struct iwm_softc *sc)
4658 {
4659 uint16_t rx_chain;
4660 uint8_t rx_ant;
4661
4662 rx_ant = iwm_fw_valid_rx_ant(sc);
4663 rx_chain = rx_ant << IWM_PHY_RX_CHAIN_VALID_POS;
4664 rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
4665 rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_SEL_POS;
4666 rx_chain |= 0x1 << IWM_PHY_RX_CHAIN_DRIVER_FORCE_POS;
4667 return htole16(rx_chain);
4668 }
4669
4670 static uint32_t
4671 iwm_scan_rate_n_flags(struct iwm_softc *sc, int flags, int no_cck)
4672 {
4673 uint32_t tx_ant;
4674 int i, ind;
4675
4676 for (i = 0, ind = sc->sc_scan_last_antenna;
4677 i < IWM_RATE_MCS_ANT_NUM; i++) {
4678 ind = (ind + 1) % IWM_RATE_MCS_ANT_NUM;
4679 if (iwm_fw_valid_tx_ant(sc) & (1 << ind)) {
4680 sc->sc_scan_last_antenna = ind;
4681 break;
4682 }
4683 }
4684 tx_ant = (1 << sc->sc_scan_last_antenna) << IWM_RATE_MCS_ANT_POS;
4685
4686 if ((flags & IEEE80211_CHAN_2GHZ) && !no_cck)
4687 return htole32(IWM_RATE_1M_PLCP | IWM_RATE_MCS_CCK_MSK |
4688 tx_ant);
4689 else
4690 return htole32(IWM_RATE_6M_PLCP | tx_ant);
4691 }
4692
4693 #ifdef notyet
4694 /*
4695 * If req->n_ssids > 0, it means we should do an active scan.
4696 * In case of active scan w/o directed scan, we receive a zero-length SSID
4697 * just to notify that this scan is active and not passive.
4698 * In order to notify the FW of the number of SSIDs we wish to scan (including
4699 * the zero-length one), we need to set the corresponding bits in chan->type,
4700 * one for each SSID, and set the active bit (first). If the first SSID is
4701 * already included in the probe template, so we need to set only
4702 * req->n_ssids - 1 bits in addition to the first bit.
4703 */
/*
 * Active scan dwell time per channel, scaled by the number of SSIDs
 * probed (n_ssids + 1 accounts for the broadcast SSID).  2 GHz channels
 * dwell longer than 5 GHz ones.  Units presumably TU -- TODO confirm.
 */
static uint16_t
iwm_get_active_dwell(struct iwm_softc *sc, int flags, int n_ssids)
{
	if (flags & IEEE80211_CHAN_2GHZ)
		return 30 + 3 * (n_ssids + 1);
	return 20 + 2 * (n_ssids + 1);
}
4711
/* Passive scan dwell time per channel: 120 on 2 GHz, 110 otherwise. */
static uint16_t
iwm_get_passive_dwell(struct iwm_softc *sc, int flags)
{
	return (flags & IEEE80211_CHAN_2GHZ) ? 100 + 20 : 100 + 10;
}
4717 #endif
4718
/*
 * Fill the LMAC scan channel array from the net80211 channel table,
 * starting at index 1 (index 0 is unused in ic_channels), skipping
 * channels not present in this regulatory setup (ic_flags == 0), and
 * stopping when the firmware's channel limit is reached.  Returns the
 * number of channel entries written.
 */
static uint8_t
iwm_lmac_scan_fill_channels(struct iwm_softc *sc,
    struct iwm_scan_channel_cfg_lmac *chan, int n_ssids)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_channel *c;
	uint8_t nchan;

	for (nchan = 0, c = &ic->ic_channels[1];
	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
	    nchan < sc->sc_capa_n_scan_channels;
	    c++) {
		if (c->ic_flags == 0)
			continue;

		chan->channel_num = htole16(ieee80211_mhz2ieee(c->ic_freq, 0));
		chan->iter_count = htole16(1);
		chan->iter_interval = 0;
		chan->flags = htole32(IWM_UNIFIED_SCAN_CHANNEL_PARTIAL);
#if 0 /* makes scanning while associated less useful */
		if (n_ssids != 0)
			chan->flags |= htole32(1 << 1); /* select SSID 0 */
#endif
		/* 'chan' advances only for channels actually emitted. */
		chan++;
		nchan++;
	}

	return nchan;
}
4748
/*
 * UMAC counterpart of iwm_lmac_scan_fill_channels(): fill the UMAC
 * scan channel array from the net80211 channel table (index 1 up),
 * skipping unavailable channels, bounded by the firmware's channel
 * limit.  Returns the number of entries written.  Note channel_num and
 * iter_count are single-byte/host fields in the UMAC layout.
 */
static uint8_t
iwm_umac_scan_fill_channels(struct iwm_softc *sc,
    struct iwm_scan_channel_cfg_umac *chan, int n_ssids)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_channel *c;
	uint8_t nchan;

	for (nchan = 0, c = &ic->ic_channels[1];
	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
	    nchan < sc->sc_capa_n_scan_channels;
	    c++) {
		if (c->ic_flags == 0)
			continue;
		chan->channel_num = ieee80211_mhz2ieee(c->ic_freq, 0);
		chan->iter_count = 1;
		chan->iter_interval = htole16(0);
#if 0 /* makes scanning while associated less useful */
		if (n_ssids != 0)
			chan->flags = htole32(1 << 0); /* select SSID 0 */
#endif
		/* 'chan' advances only for channels actually emitted. */
		chan++;
		nchan++;
	}

	return nchan;
}
4776
/*
 * Construct the probe request template the firmware transmits during
 * scans.  The 802.11 header, per-band rate IEs and common IEs are laid
 * out sequentially in preq->buf, with their offsets/lengths recorded so
 * the firmware can patch the frame per channel.  'remain' tracks the
 * space left in the fixed-size buffer.  Returns 0 or ENOBUFS.
 */
static int
iwm_fill_probe_req(struct iwm_softc *sc, struct iwm_scan_probe_req *preq)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_frame *wh = (struct ieee80211_frame *)preq->buf;
	struct ieee80211_rateset *rs;
	size_t remain = sizeof(preq->buf);
	uint8_t *frm, *pos;

	memset(preq, 0, sizeof(*preq));

	/* Room for the header plus the SSID IE (2-byte IE header). */
	if (remain < sizeof(*wh) + 2 + ic->ic_des_esslen)
		return ENOBUFS;

	/*
	 * Build a probe request frame. Most of the following code is a
	 * copy & paste of what is done in net80211.
	 */
	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
	IEEE80211_ADDR_COPY(wh->i_addr1, etherbroadcastaddr);
	IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_myaddr);
	IEEE80211_ADDR_COPY(wh->i_addr3, etherbroadcastaddr);
	*(uint16_t *)&wh->i_dur[0] = 0;	/* filled by HW */
	*(uint16_t *)&wh->i_seq[0] = 0;	/* filled by HW */

	frm = (uint8_t *)(wh + 1);
	frm = ieee80211_add_ssid(frm, ic->ic_des_essid, ic->ic_des_esslen);

	/* Tell the firmware where the MAC header is. */
	preq->mac_header.offset = 0;
	preq->mac_header.len = htole16(frm - (uint8_t *)wh);
	remain -= frm - (uint8_t *)wh;

	/* Fill in 2GHz IEs and tell firmware where they are. */
	rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
	if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
		/* Rates plus extended rates: two IE headers needed. */
		if (remain < 4 + rs->rs_nrates)
			return ENOBUFS;
	} else if (remain < 2 + rs->rs_nrates)
		return ENOBUFS;
	preq->band_data[0].offset = htole16(frm - (uint8_t *)wh);
	pos = frm;
	frm = ieee80211_add_rates(frm, rs);
	if (rs->rs_nrates > IEEE80211_RATE_SIZE)
		frm = ieee80211_add_xrates(frm, rs);
	preq->band_data[0].len = htole16(frm - pos);
	remain -= frm - pos;

	/* Empty DS params IE; firmware fills in the channel per scan. */
	if (isset(sc->sc_enabled_capa,
	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)) {
		if (remain < 3)
			return ENOBUFS;
		*frm++ = IEEE80211_ELEMID_DSPARMS;
		*frm++ = 1;
		*frm++ = 0;
		remain -= 3;
	}

	if (sc->sc_nvm.sku_cap_band_52GHz_enable) {
		/* Fill in 5GHz IEs. */
		rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
		if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
			if (remain < 4 + rs->rs_nrates)
				return ENOBUFS;
		} else if (remain < 2 + rs->rs_nrates)
			return ENOBUFS;
		preq->band_data[1].offset = htole16(frm - (uint8_t *)wh);
		pos = frm;
		frm = ieee80211_add_rates(frm, rs);
		if (rs->rs_nrates > IEEE80211_RATE_SIZE)
			frm = ieee80211_add_xrates(frm, rs);
		preq->band_data[1].len = htole16(frm - pos);
		remain -= frm - pos;
	}

#ifndef IEEE80211_NO_HT
	/* Send 11n IEs on both 2GHz and 5GHz bands. */
	preq->common_data.offset = htole16(frm - (uint8_t *)wh);
	pos = frm;
	if (ic->ic_flags & IEEE80211_F_HTON) {
		if (remain < 28)
			return ENOBUFS;
		frm = ieee80211_add_htcaps(frm, ic);
		/* XXX add WME info? */
	}
#endif

	preq->common_data.len = htole16(frm - pos);

	return 0;
}
4870
/*
 * Start a scan using the older LMAC scan API
 * (IWM_SCAN_OFFLOAD_REQUEST_CMD): build a one-iteration scan request
 * covering all available channels, with a probe request template for
 * active scans, and submit it to the firmware.  Returns 0 or an errno.
 */
static int
iwm_lmac_scan(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_host_cmd hcmd = {
		.id = IWM_SCAN_OFFLOAD_REQUEST_CMD,
		.len = { 0, },
		.data = { NULL, },
		.flags = 0,
	};
	struct iwm_scan_req_lmac *req;
	size_t req_len;
	int err;

	DPRINTF(("%s: %s\n", DEVNAME(sc), __func__));

	/* Request = fixed part + per-channel configs + probe template. */
	req_len = sizeof(struct iwm_scan_req_lmac) +
	    (sizeof(struct iwm_scan_channel_cfg_lmac) *
	    sc->sc_capa_n_scan_channels) + sizeof(struct iwm_scan_probe_req);
	if (req_len > IWM_MAX_CMD_PAYLOAD_SIZE)
		return ENOMEM;
	/* NB: KM_SLEEP never returns NULL; the check below is belt and
	 * braces. */
	req = kmem_zalloc(req_len, KM_SLEEP);
	if (req == NULL)
		return ENOMEM;

	hcmd.len[0] = (uint16_t)req_len;
	hcmd.data[0] = (void *)req;

	/* These timings correspond to iwlwifi's UNASSOC scan. */
	req->active_dwell = 10;
	req->passive_dwell = 110;
	req->fragmented_dwell = 44;
	req->extended_dwell = 90;
	req->max_out_time = 0;
	req->suspend_time = 0;

	req->scan_prio = htole32(IWM_SCAN_PRIORITY_HIGH);
	req->rx_chain_select = iwm_scan_rx_chain(sc);
	req->iter_num = htole32(1);
	req->delay = 0;

	req->scan_flags = htole32(IWM_LMAC_SCAN_FLAG_PASS_ALL |
	    IWM_LMAC_SCAN_FLAG_ITER_COMPLETE |
	    IWM_LMAC_SCAN_FLAG_EXTENDED_DWELL);
	/* Passive scan unless a desired ESSID was configured. */
	if (ic->ic_des_esslen == 0)
		req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAG_PASSIVE);
	else
		req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAG_PRE_CONNECTION);
	if (isset(sc->sc_enabled_capa,
	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
		req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAGS_RRM_ENABLED);

	req->flags = htole32(IWM_PHY_BAND_24);
	if (sc->sc_nvm.sku_cap_band_52GHz_enable)
		req->flags |= htole32(IWM_PHY_BAND_5);
	req->filter_flags =
	    htole32(IWM_MAC_FILTER_ACCEPT_GRP | IWM_MAC_FILTER_IN_BEACON);

	/* Tx flags 2 GHz. */
	req->tx_cmd[0].tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
	    IWM_TX_CMD_FLG_BT_DIS);
	req->tx_cmd[0].rate_n_flags =
	    iwm_scan_rate_n_flags(sc, IEEE80211_CHAN_2GHZ, 1/*XXX*/);
	req->tx_cmd[0].sta_id = IWM_AUX_STA_ID;

	/* Tx flags 5 GHz. */
	req->tx_cmd[1].tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
	    IWM_TX_CMD_FLG_BT_DIS);
	req->tx_cmd[1].rate_n_flags =
	    iwm_scan_rate_n_flags(sc, IEEE80211_CHAN_5GHZ, 1/*XXX*/);
	req->tx_cmd[1].sta_id = IWM_AUX_STA_ID;

	/* Check if we're doing an active directed scan. */
	if (ic->ic_des_esslen != 0) {
		req->direct_scan[0].id = IEEE80211_ELEMID_SSID;
		req->direct_scan[0].len = ic->ic_des_esslen;
		memcpy(req->direct_scan[0].ssid, ic->ic_des_essid,
		    ic->ic_des_esslen);
	}

	req->n_channels = iwm_lmac_scan_fill_channels(sc,
	    (struct iwm_scan_channel_cfg_lmac *)req->data,
	    ic->ic_des_esslen != 0);

	/* Probe request template lives after the channel configs. */
	err = iwm_fill_probe_req(sc,
	    (struct iwm_scan_probe_req *)(req->data +
	    (sizeof(struct iwm_scan_channel_cfg_lmac) *
	    sc->sc_capa_n_scan_channels)));
	if (err) {
		kmem_free(req, req_len);
		return err;
	}

	/* Specify the scan plan: We'll do one iteration. */
	req->schedule[0].iterations = 1;
	req->schedule[0].full_scan_mul = 1;

	/* Disable EBS. */
	req->channel_opt[0].non_ebs_ratio = 1;
	req->channel_opt[1].non_ebs_ratio = 1;

	err = iwm_send_cmd(sc, &hcmd);
	kmem_free(req, req_len);
	return err;
}
4976
/*
 * Send the one-time UMAC scan configuration (IWM_SCAN_CFG_CMD):
 * antenna chains, legacy rate set, dwell times, our MAC address and
 * the list of available channel numbers.  Returns 0 or an errno.
 */
static int
iwm_config_umac_scan(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_scan_config *scan_config;
	int err, nchan;
	size_t cmd_size;
	struct ieee80211_channel *c;
	struct iwm_host_cmd hcmd = {
		.id = iwm_cmd_id(IWM_SCAN_CFG_CMD, IWM_ALWAYS_LONG_GROUP, 0),
		.flags = 0,
	};
	static const uint32_t rates = (IWM_SCAN_CONFIG_RATE_1M |
	    IWM_SCAN_CONFIG_RATE_2M | IWM_SCAN_CONFIG_RATE_5M |
	    IWM_SCAN_CONFIG_RATE_11M | IWM_SCAN_CONFIG_RATE_6M |
	    IWM_SCAN_CONFIG_RATE_9M | IWM_SCAN_CONFIG_RATE_12M |
	    IWM_SCAN_CONFIG_RATE_18M | IWM_SCAN_CONFIG_RATE_24M |
	    IWM_SCAN_CONFIG_RATE_36M | IWM_SCAN_CONFIG_RATE_48M |
	    IWM_SCAN_CONFIG_RATE_54M);

	/* One byte per channel entry appended after the fixed struct. */
	cmd_size = sizeof(*scan_config) + sc->sc_capa_n_scan_channels;

	/* NB: KM_SLEEP never returns NULL; the check below is belt and
	 * braces. */
	scan_config = kmem_zalloc(cmd_size, KM_SLEEP);
	if (scan_config == NULL)
		return ENOMEM;

	scan_config->tx_chains = htole32(iwm_fw_valid_tx_ant(sc));
	scan_config->rx_chains = htole32(iwm_fw_valid_rx_ant(sc));
	scan_config->legacy_rates = htole32(rates |
	    IWM_SCAN_CONFIG_SUPPORTED_RATE(rates));

	/* These timings correspond to iwlwifi's UNASSOC scan. */
	scan_config->dwell_active = 10;
	scan_config->dwell_passive = 110;
	scan_config->dwell_fragmented = 44;
	scan_config->dwell_extended = 90;
	scan_config->out_of_channel_time = htole32(0);
	scan_config->suspend_time = htole32(0);

	IEEE80211_ADDR_COPY(scan_config->mac_addr, sc->sc_ic.ic_myaddr);

	scan_config->bcast_sta_id = IWM_AUX_STA_ID;
	scan_config->channel_flags = IWM_CHANNEL_FLAG_EBS |
	    IWM_CHANNEL_FLAG_ACCURATE_EBS | IWM_CHANNEL_FLAG_EBS_ADD |
	    IWM_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE;

	/* Collect available channel numbers (ic_channels[0] is unused). */
	for (c = &ic->ic_channels[1], nchan = 0;
	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
	    nchan < sc->sc_capa_n_scan_channels; c++) {
		if (c->ic_flags == 0)
			continue;
		scan_config->channel_array[nchan++] =
		    ieee80211_mhz2ieee(c->ic_freq, 0);
	}

	scan_config->flags = htole32(IWM_SCAN_CONFIG_FLAG_ACTIVATE |
	    IWM_SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
	    IWM_SCAN_CONFIG_FLAG_SET_TX_CHAINS |
	    IWM_SCAN_CONFIG_FLAG_SET_RX_CHAINS |
	    IWM_SCAN_CONFIG_FLAG_SET_AUX_STA_ID |
	    IWM_SCAN_CONFIG_FLAG_SET_ALL_TIMES |
	    IWM_SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
	    IWM_SCAN_CONFIG_FLAG_SET_MAC_ADDR |
	    IWM_SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS|
	    IWM_SCAN_CONFIG_N_CHANNELS(nchan) |
	    IWM_SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED);

	hcmd.data[0] = scan_config;
	hcmd.len[0] = cmd_size;

	err = iwm_send_cmd(sc, &hcmd);
	kmem_free(scan_config, cmd_size);
	return err;
}
5051
/*
 * Start a scan using the newer UMAC scan API (IWM_SCAN_REQ_UMAC):
 * build a one-iteration scan request with per-channel configs and a
 * trailing "tail" carrying the directed SSID (if any), the schedule
 * and the probe request template.  Returns 0 or an errno.
 */
static int
iwm_umac_scan(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_host_cmd hcmd = {
		.id = iwm_cmd_id(IWM_SCAN_REQ_UMAC, IWM_ALWAYS_LONG_GROUP, 0),
		.len = { 0, },
		.data = { NULL, },
		.flags = 0,
	};
	struct iwm_scan_req_umac *req;
	struct iwm_scan_req_umac_tail *tail;
	size_t req_len;
	int err;

	DPRINTF(("%s: %s\n", DEVNAME(sc), __func__));

	/* Request = fixed part + per-channel configs + tail. */
	req_len = sizeof(struct iwm_scan_req_umac) +
	    (sizeof(struct iwm_scan_channel_cfg_umac) *
	    sc->sc_capa_n_scan_channels) +
	    sizeof(struct iwm_scan_req_umac_tail);
	if (req_len > IWM_MAX_CMD_PAYLOAD_SIZE)
		return ENOMEM;
	/* NB: KM_SLEEP never returns NULL; the check below is belt and
	 * braces. */
	req = kmem_zalloc(req_len, KM_SLEEP);
	if (req == NULL)
		return ENOMEM;

	hcmd.len[0] = (uint16_t)req_len;
	hcmd.data[0] = (void *)req;

	/* These timings correspond to iwlwifi's UNASSOC scan. */
	req->active_dwell = 10;
	req->passive_dwell = 110;
	req->fragmented_dwell = 44;
	req->extended_dwell = 90;
	req->max_out_time = 0;
	req->suspend_time = 0;

	req->scan_priority = htole32(IWM_SCAN_PRIORITY_HIGH);
	req->ooc_priority = htole32(IWM_SCAN_PRIORITY_HIGH);

	req->n_channels = iwm_umac_scan_fill_channels(sc,
	    (struct iwm_scan_channel_cfg_umac *)req->data,
	    ic->ic_des_esslen != 0);

	req->general_flags = htole32(IWM_UMAC_SCAN_GEN_FLAGS_PASS_ALL |
	    IWM_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE |
	    IWM_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL);

	/* The tail follows the full channel config array. */
	tail = (struct iwm_scan_req_umac_tail *)(req->data +
	    sizeof(struct iwm_scan_channel_cfg_umac) *
	    sc->sc_capa_n_scan_channels);

	/* Check if we're doing an active directed scan. */
	if (ic->ic_des_esslen != 0) {
		tail->direct_scan[0].id = IEEE80211_ELEMID_SSID;
		tail->direct_scan[0].len = ic->ic_des_esslen;
		memcpy(tail->direct_scan[0].ssid, ic->ic_des_essid,
		    ic->ic_des_esslen);
		req->general_flags |=
		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT);
	} else
		req->general_flags |= htole32(IWM_UMAC_SCAN_GEN_FLAGS_PASSIVE);

	if (isset(sc->sc_enabled_capa,
	    IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
		req->general_flags |=
		    htole32(IWM_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED);

	err = iwm_fill_probe_req(sc, &tail->preq);
	if (err) {
		kmem_free(req, req_len);
		return err;
	}

	/* Specify the scan plan: We'll do one iteration. */
	tail->schedule[0].interval = 0;
	tail->schedule[0].iter_count = 1;

	err = iwm_send_cmd(sc, &hcmd);
	kmem_free(req, req_len);
	return err;
}
5135
5136 static uint8_t
5137 iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
5138 {
5139 int i;
5140 uint8_t rval;
5141
5142 for (i = 0; i < rs->rs_nrates; i++) {
5143 rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
5144 if (rval == iwm_rates[ridx].rate)
5145 return rs->rs_rates[i];
5146 }
5147 return 0;
5148 }
5149
/*
 * Compute the CCK and OFDM basic-rate bitmaps to advertise to the
 * firmware for control-response (e.g. ACK/CTS) rate selection, based
 * on the node's negotiated basic rates plus the mandatory lower rates
 * required by 802.11-2007 9.6 (see the long comment below).  The CCK
 * bitmap is indexed from IWM_FIRST_CCK_RATE, the OFDM bitmap from
 * IWM_FIRST_OFDM_RATE.
 */
static void
iwm_ack_rates(struct iwm_softc *sc, struct iwm_node *in, int *cck_rates,
    int *ofdm_rates)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211_rateset *rs = &ni->ni_rates;
	int lowest_present_ofdm = 100;
	int lowest_present_cck = 100;
	uint8_t cck = 0;
	uint8_t ofdm = 0;
	int i;

	/* CCK rates only exist on 2 GHz (or unknown channel). */
	if (ni->ni_chan == IEEE80211_CHAN_ANYC ||
	    IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) {
		for (i = IWM_FIRST_CCK_RATE; i < IWM_FIRST_OFDM_RATE; i++) {
			if ((iwm_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
				continue;
			cck |= (1 << i);
			if (lowest_present_cck > i)
				lowest_present_cck = i;
		}
	}
	for (i = IWM_FIRST_OFDM_RATE; i <= IWM_LAST_NON_HT_RATE; i++) {
		if ((iwm_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
			continue;
		ofdm |= (1 << (i - IWM_FIRST_OFDM_RATE));
		if (lowest_present_ofdm > i)
			lowest_present_ofdm = i;
	}

	/*
	 * Now we've got the basic rates as bitmaps in the ofdm and cck
	 * variables. This isn't sufficient though, as there might not
	 * be all the right rates in the bitmap. E.g. if the only basic
	 * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
	 * and 6 Mbps because the 802.11-2007 standard says in 9.6:
	 *
	 * [...] a STA responding to a received frame shall transmit
	 * its Control Response frame [...] at the highest rate in the
	 * BSSBasicRateSet parameter that is less than or equal to the
	 * rate of the immediately previous frame in the frame exchange
	 * sequence ([...]) and that is of the same modulation class
	 * ([...]) as the received frame. If no rate contained in the
	 * BSSBasicRateSet parameter meets these conditions, then the
	 * control frame sent in response to a received frame shall be
	 * transmitted at the highest mandatory rate of the PHY that is
	 * less than or equal to the rate of the received frame, and
	 * that is of the same modulation class as the received frame.
	 *
	 * As a consequence, we need to add all mandatory rates that are
	 * lower than all of the basic rates to these bitmaps.
	 */

	if (IWM_RATE_24M_INDEX < lowest_present_ofdm)
		ofdm |= IWM_RATE_BIT_MSK(24) >> IWM_FIRST_OFDM_RATE;
	if (IWM_RATE_12M_INDEX < lowest_present_ofdm)
		ofdm |= IWM_RATE_BIT_MSK(12) >> IWM_FIRST_OFDM_RATE;
	/* 6M already there or needed so always add */
	ofdm |= IWM_RATE_BIT_MSK(6) >> IWM_FIRST_OFDM_RATE;

	/*
	 * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
	 * Note, however:
	 *  - if no CCK rates are basic, it must be ERP since there must
	 *    be some basic rates at all, so they're OFDM => ERP PHY
	 *    (or we're in 5 GHz, and the cck bitmap will never be used)
	 *  - if 11M is a basic rate, it must be ERP as well, so add 5.5M
	 *  - if 5.5M is basic, 1M and 2M are mandatory
	 *  - if 2M is basic, 1M is mandatory
	 *  - if 1M is basic, that's the only valid ACK rate.
	 * As a consequence, it's not as complicated as it sounds, just add
	 * any lower rates to the ACK rate bitmap.
	 */
	if (IWM_RATE_11M_INDEX < lowest_present_cck)
		cck |= IWM_RATE_BIT_MSK(11) >> IWM_FIRST_CCK_RATE;
	if (IWM_RATE_5M_INDEX < lowest_present_cck)
		cck |= IWM_RATE_BIT_MSK(5) >> IWM_FIRST_CCK_RATE;
	if (IWM_RATE_2M_INDEX < lowest_present_cck)
		cck |= IWM_RATE_BIT_MSK(2) >> IWM_FIRST_CCK_RATE;
	/* 1M already there or needed so always add */
	cck |= IWM_RATE_BIT_MSK(1) >> IWM_FIRST_CCK_RATE;

	*cck_rates = cck;
	*ofdm_rates = ofdm;
}
5235
5236 static void
5237 iwm_mac_ctxt_cmd_common(struct iwm_softc *sc, struct iwm_node *in,
5238 struct iwm_mac_ctx_cmd *cmd, uint32_t action, int assoc)
5239 {
5240 #define IWM_EXP2(x) ((1 << (x)) - 1) /* CWmin = 2^ECWmin - 1 */
5241 struct ieee80211com *ic = &sc->sc_ic;
5242 struct ieee80211_node *ni = ic->ic_bss;
5243 int cck_ack_rates, ofdm_ack_rates;
5244 int i;
5245
5246 cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
5247 in->in_color));
5248 cmd->action = htole32(action);
5249
5250 cmd->mac_type = htole32(IWM_FW_MAC_TYPE_BSS_STA);
5251 cmd->tsf_id = htole32(IWM_TSF_ID_A);
5252
5253 IEEE80211_ADDR_COPY(cmd->node_addr, ic->ic_myaddr);
5254 IEEE80211_ADDR_COPY(cmd->bssid_addr, ni->ni_bssid);
5255
5256 iwm_ack_rates(sc, in, &cck_ack_rates, &ofdm_ack_rates);
5257 cmd->cck_rates = htole32(cck_ack_rates);
5258 cmd->ofdm_rates = htole32(ofdm_ack_rates);
5259
5260 cmd->cck_short_preamble
5261 = htole32((ic->ic_flags & IEEE80211_F_SHPREAMBLE)
5262 ? IWM_MAC_FLG_SHORT_PREAMBLE : 0);
5263 cmd->short_slot
5264 = htole32((ic->ic_flags & IEEE80211_F_SHSLOT)
5265 ? IWM_MAC_FLG_SHORT_SLOT : 0);
5266
5267 for (i = 0; i < WME_NUM_AC; i++) {
5268 struct wmeParams *wmep = &ic->ic_wme.wme_params[i];
5269 int txf = iwm_ac_to_tx_fifo[i];
5270
5271 cmd->ac[txf].cw_min = htole16(IWM_EXP2(wmep->wmep_logcwmin));
5272 cmd->ac[txf].cw_max = htole16(IWM_EXP2(wmep->wmep_logcwmax));
5273 cmd->ac[txf].aifsn = wmep->wmep_aifsn;
5274 cmd->ac[txf].fifos_mask = (1 << txf);
5275 cmd->ac[txf].edca_txop = htole16(wmep->wmep_txopLimit * 32);
5276 }
5277 if (ni->ni_flags & IEEE80211_NODE_QOS)
5278 cmd->qos_flags |= htole32(IWM_MAC_QOS_FLG_UPDATE_EDCA);
5279
5280 #ifndef IEEE80211_NO_HT
5281 if (ni->ni_flags & IEEE80211_NODE_HT) {
5282 enum ieee80211_htprot htprot =
5283 (ni->ni_htop1 & IEEE80211_HTOP1_PROT_MASK);
5284 switch (htprot) {
5285 case IEEE80211_HTPROT_NONE:
5286 break;
5287 case IEEE80211_HTPROT_NONMEMBER:
5288 case IEEE80211_HTPROT_NONHT_MIXED:
5289 cmd->protection_flags |=
5290 htole32(IWM_MAC_PROT_FLG_HT_PROT);
5291 case IEEE80211_HTPROT_20MHZ:
5292 cmd->protection_flags |=
5293 htole32(IWM_MAC_PROT_FLG_HT_PROT |
5294 IWM_MAC_PROT_FLG_FAT_PROT);
5295 break;
5296 default:
5297 break;
5298 }
5299
5300 cmd->qos_flags |= htole32(IWM_MAC_QOS_FLG_TGN);
5301 }
5302 #endif
5303
5304 if (ic->ic_flags & IEEE80211_F_USEPROT)
5305 cmd->protection_flags |= htole32(IWM_MAC_PROT_FLG_TGG_PROTECT);
5306
5307 cmd->filter_flags = htole32(IWM_MAC_FILTER_ACCEPT_GRP);
5308 #undef IWM_EXP2
5309 }
5310
5311 static void
5312 iwm_mac_ctxt_cmd_fill_sta(struct iwm_softc *sc, struct iwm_node *in,
5313 struct iwm_mac_data_sta *sta, int assoc)
5314 {
5315 struct ieee80211_node *ni = &in->in_ni;
5316 uint32_t dtim_off;
5317 uint64_t tsf;
5318
5319 dtim_off = ni->ni_dtim_count * ni->ni_intval * IEEE80211_DUR_TU;
5320 tsf = le64toh(ni->ni_tstamp.tsf);
5321
5322 sta->is_assoc = htole32(assoc);
5323 sta->dtim_time = htole32(ni->ni_rstamp + dtim_off);
5324 sta->dtim_tsf = htole64(tsf + dtim_off);
5325 sta->bi = htole32(ni->ni_intval);
5326 sta->bi_reciprocal = htole32(iwm_reciprocal(ni->ni_intval));
5327 sta->dtim_interval = htole32(ni->ni_intval * ni->ni_dtim_period);
5328 sta->dtim_reciprocal = htole32(iwm_reciprocal(sta->dtim_interval));
5329 sta->listen_interval = htole32(10);
5330 sta->assoc_id = htole32(ni->ni_associd);
5331 sta->assoc_beacon_arrive_time = htole32(ni->ni_rstamp);
5332 }
5333
5334 static int
5335 iwm_mac_ctxt_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action,
5336 int assoc)
5337 {
5338 struct ieee80211_node *ni = &in->in_ni;
5339 struct iwm_mac_ctx_cmd cmd;
5340
5341 memset(&cmd, 0, sizeof(cmd));
5342
5343 iwm_mac_ctxt_cmd_common(sc, in, &cmd, action, assoc);
5344
5345 /* Allow beacons to pass through as long as we are not associated or we
5346 * do not have dtim period information */
5347 if (!assoc || !ni->ni_associd || !ni->ni_dtim_period)
5348 cmd.filter_flags |= htole32(IWM_MAC_FILTER_IN_BEACON);
5349 else
5350 iwm_mac_ctxt_cmd_fill_sta(sc, in, &cmd.sta, assoc);
5351
5352 return iwm_send_cmd_pdu(sc, IWM_MAC_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
5353 }
5354
5355 #define IWM_MISSED_BEACONS_THRESHOLD 8
5356
5357 static void
5358 iwm_rx_missed_beacons_notif(struct iwm_softc *sc,
5359 struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
5360 {
5361 struct iwm_missed_beacons_notif *mb = (void *)pkt->data;
5362
5363 DPRINTF(("missed bcn mac_id=%u, consecutive=%u (%u, %u, %u)\n",
5364 le32toh(mb->mac_id),
5365 le32toh(mb->consec_missed_beacons),
5366 le32toh(mb->consec_missed_beacons_since_last_rx),
5367 le32toh(mb->num_recvd_beacons),
5368 le32toh(mb->num_expected_beacons)));
5369
5370 /*
5371 * TODO: the threshold should be adjusted based on latency conditions,
5372 * and/or in case of a CS flow on one of the other AP vifs.
5373 */
5374 if (le32toh(mb->consec_missed_beacons_since_last_rx) >
5375 IWM_MISSED_BEACONS_THRESHOLD)
5376 ieee80211_beacon_miss(&sc->sc_ic);
5377 }
5378
5379 static int
5380 iwm_update_quotas(struct iwm_softc *sc, struct iwm_node *in)
5381 {
5382 struct iwm_time_quota_cmd cmd;
5383 int i, idx, num_active_macs, quota, quota_rem;
5384 int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
5385 int n_ifs[IWM_MAX_BINDINGS] = {0, };
5386 uint16_t id;
5387
5388 memset(&cmd, 0, sizeof(cmd));
5389
5390 /* currently, PHY ID == binding ID */
5391 if (in) {
5392 id = in->in_phyctxt->id;
5393 KASSERT(id < IWM_MAX_BINDINGS);
5394 colors[id] = in->in_phyctxt->color;
5395
5396 if (1)
5397 n_ifs[id] = 1;
5398 }
5399
5400 /*
5401 * The FW's scheduling session consists of
5402 * IWM_MAX_QUOTA fragments. Divide these fragments
5403 * equally between all the bindings that require quota
5404 */
5405 num_active_macs = 0;
5406 for (i = 0; i < IWM_MAX_BINDINGS; i++) {
5407 cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
5408 num_active_macs += n_ifs[i];
5409 }
5410
5411 quota = 0;
5412 quota_rem = 0;
5413 if (num_active_macs) {
5414 quota = IWM_MAX_QUOTA / num_active_macs;
5415 quota_rem = IWM_MAX_QUOTA % num_active_macs;
5416 }
5417
5418 for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
5419 if (colors[i] < 0)
5420 continue;
5421
5422 cmd.quotas[idx].id_and_color =
5423 htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
5424
5425 if (n_ifs[i] <= 0) {
5426 cmd.quotas[idx].quota = htole32(0);
5427 cmd.quotas[idx].max_duration = htole32(0);
5428 } else {
5429 cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
5430 cmd.quotas[idx].max_duration = htole32(0);
5431 }
5432 idx++;
5433 }
5434
5435 /* Give the remainder of the session to the first binding */
5436 cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
5437
5438 return iwm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, 0, sizeof(cmd), &cmd);
5439 }
5440
static int
iwm_auth(struct iwm_softc *sc)
{
	/*
	 * Prepare the firmware for authentication with the selected BSS:
	 * configure the smart FIFO, multicast filter, PHY context, MAC
	 * context, binding and station, then protect the session so the
	 * firmware stays on-channel during the association exchange.
	 * The order of these firmware commands matters.
	 */
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
	uint32_t duration;
	int err;

	err = iwm_sf_config(sc, IWM_SF_FULL_ON);
	if (err)
		return err;

	err = iwm_allow_mcast(sc);
	if (err)
		return err;

	/* Retune PHY context 0 to the BSS channel and bind the node to it. */
	sc->sc_phyctxt[0].channel = in->in_ni.ni_chan;
	err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[0], 1, 1,
	    IWM_FW_CTXT_ACTION_MODIFY, 0);
	if (err)
		return err;
	in->in_phyctxt = &sc->sc_phyctxt[0];

	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD, 0);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not add MAC context (error %d)\n", err);
		return err;
	}

	err = iwm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD);
	if (err)
		return err;

	err = iwm_add_sta_cmd(sc, in, 0);
	if (err)
		return err;

	err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 0);
	if (err) {
		aprint_error_dev(sc->sc_dev, "failed to update MAC\n");
		return err;
	}

	/*
	 * Prevent the FW from wandering off channel during association
	 * by "protecting" the session with a time event.
	 */
	if (in->in_ni.ni_intval)
		duration = in->in_ni.ni_intval * 2;
	else
		duration = IEEE80211_DUR_TU;
	iwm_protect_session(sc, in, duration, in->in_ni.ni_intval / 2);
	/* Give the firmware a moment to act on the time event. */
	DELAY(100);

	return 0;
}
5498
5499 static int
5500 iwm_assoc(struct iwm_softc *sc)
5501 {
5502 struct ieee80211com *ic = &sc->sc_ic;
5503 struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
5504 int err;
5505
5506 err = iwm_add_sta_cmd(sc, in, 1);
5507 if (err)
5508 return err;
5509
5510 return 0;
5511 }
5512
static struct ieee80211_node *
iwm_node_alloc(struct ieee80211_node_table *nt)
{
	/*
	 * net80211 node allocation hook: allocate the driver's enlarged
	 * node structure (zeroed) so per-node driver state fits.  May
	 * return NULL under memory pressure (M_NOWAIT).
	 */
	return malloc(sizeof(struct iwm_node), M_80211_NODE, M_NOWAIT | M_ZERO);
}
5518
static void
iwm_calib_timeout(void *arg)
{
	/*
	 * Periodic (500 ms) callout: let AMRR adjust the Tx rate for the
	 * BSS node when no fixed rate/MCS is configured, then reschedule.
	 */
	struct iwm_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
#ifndef IEEE80211_NO_HT
	struct ieee80211_node *ni = &in->in_ni;
	int otxrate;	/* Tx rate (or MCS) before AMRR ran */
#endif
	int s;

	s = splnet();
	if ((ic->ic_fixed_rate == -1
#ifndef IEEE80211_NO_HT
	    || ic->ic_fixed_mcs == -1
#endif
	    ) &&
	    ic->ic_opmode == IEEE80211_M_STA && ic->ic_bss) {
#ifndef IEEE80211_NO_HT
		/* Remember the current rate so we can detect a change. */
		if (ni->ni_flags & IEEE80211_NODE_HT)
			otxrate = ni->ni_txmcs;
		else
			otxrate = ni->ni_txrate;
#endif
		ieee80211_amrr_choose(&sc->sc_amrr, &in->in_ni, &in->in_amn);

#ifndef IEEE80211_NO_HT
		/*
		 * If AMRR has chosen a new TX rate we must update
		 * the firwmare's LQ rate table from process context.
		 */
		if ((ni->ni_flags & IEEE80211_NODE_HT) &&
		    otxrate != ni->ni_txmcs)
			softint_schedule(sc->setrates_task);
		else if (otxrate != ni->ni_txrate)
			softint_schedule(sc->setrates_task);
#endif
	}
	splx(s);

	/* Re-arm for the next calibration pass. */
	callout_schedule(&sc->sc_calib_to, mstohz(500));
}
5562
#ifndef IEEE80211_NO_HT
static void
iwm_setrates_task(void *arg)
{
	/*
	 * Softint handler: push the LQ rate table to the firmware from
	 * process context after AMRR picked a new Tx rate (the LQ
	 * command cannot be sent from the callout at IPL).
	 */
	struct iwm_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (struct iwm_node *)ic->ic_bss;

	/* Update rates table based on new TX rate determined by AMRR. */
	iwm_setrates(in);
}
5574
static int
iwm_setrates(struct iwm_node *in)
{
	/*
	 * Build the firmware link-quality (LQ) rate table for this node
	 * and send it with the IWM_LQ_CMD.  The table lists candidate
	 * rates in descending order starting from the node's current
	 * Tx rate; unused slots are padded with the lowest rate chosen.
	 */
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211com *ic = ni->ni_ic;
	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
	struct iwm_lq_cmd *lq = &in->in_lq;
	struct ieee80211_rateset *rs = &ni->ni_rates;
	int i, j, ridx, ridx_min, tab = 0;
#ifndef IEEE80211_NO_HT
	int sgi_ok;	/* nonzero if short guard interval may be used */
#endif
	struct iwm_host_cmd cmd = {
		.id = IWM_LQ_CMD,
		.len = { sizeof(in->in_lq), },
	};

	memset(lq, 0, sizeof(*lq));
	lq->sta_id = IWM_STATION_ID;

	if (ic->ic_flags & IEEE80211_F_USEPROT)
		lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;

#ifndef IEEE80211_NO_HT
	/* SGI is usable only if both we and the peer support it at 20MHz. */
	sgi_ok = ((ni->ni_flags & IEEE80211_NODE_HT) &&
	    (ni->ni_htcaps & IEEE80211_HTCAP_SGI20));
#endif


	/*
	 * Fill the LQ rate selection table with legacy and/or HT rates
	 * in descending order, i.e. with the node's current TX rate first.
	 * In cases where throughput of an HT rate corresponds to a legacy
	 * rate it makes no sense to add both. We rely on the fact that
	 * iwm_rates is laid out such that equivalent HT/legacy rates share
	 * the same IWM_RATE_*_INDEX value. Also, rates not applicable to
	 * legacy/HT are assumed to be marked with an 'invalid' PLCP value.
	 */
	j = 0;
	/* Never fall back to CCK rates on 5GHz channels. */
	ridx_min = (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) ?
	    IWM_RIDX_OFDM : IWM_RIDX_CCK;
	for (ridx = IWM_RIDX_MAX; ridx >= ridx_min; ridx--) {
		if (j >= __arraycount(lq->rs_table))
			break;
		tab = 0;
#ifndef IEEE80211_NO_HT
		/* Prefer an HT MCS entry when the peer supports it. */
		if ((ni->ni_flags & IEEE80211_NODE_HT) &&
		    iwm_rates[ridx].ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP) {
			for (i = ni->ni_txmcs; i >= 0; i--) {
				if (isclr(ni->ni_rxmcs, i))
					continue;
				if (ridx == iwm_mcs2ridx[i]) {
					tab = iwm_rates[ridx].ht_plcp;
					tab |= IWM_RATE_MCS_HT_MSK;
					if (sgi_ok)
						tab |= IWM_RATE_MCS_SGI_MSK;
					break;
				}
			}
		}
#endif
		/* Otherwise fall back to a matching legacy rate. */
		if (tab == 0 && iwm_rates[ridx].plcp != IWM_RATE_INVM_PLCP) {
			for (i = ni->ni_txrate; i >= 0; i--) {
				if (iwm_rates[ridx].rate == (rs->rs_rates[i] &
				    IEEE80211_RATE_VAL)) {
					tab = iwm_rates[ridx].plcp;
					break;
				}
			}
		}

		if (tab == 0)
			continue;

		/* Select antenna A and mark CCK entries for the firmware. */
		tab |= 1 << IWM_RATE_MCS_ANT_POS;
		if (IWM_RIDX_IS_CCK(ridx))
			tab |= IWM_RATE_MCS_CCK_MSK;
		DPRINTFN(2, ("station rate %d %x\n", i, tab));
		lq->rs_table[j++] = htole32(tab);
	}

	/* Fill the rest with the lowest possible rate */
	i = j > 0 ? j - 1 : 0;
	while (j < __arraycount(lq->rs_table))
		lq->rs_table[j++] = lq->rs_table[i];

	lq->single_stream_ant_msk = IWM_ANT_A;
	lq->dual_stream_ant_msk = IWM_ANT_AB;

	lq->agg_time_limit = htole16(4000); /* 4ms */
	lq->agg_disable_start_th = 3;
#ifdef notyet
	lq->agg_frame_cnt_limit = 0x3f;
#else
	lq->agg_frame_cnt_limit = 1; /* tx agg disabled */
#endif

	cmd.data[0] = &in->in_lq;
	return iwm_send_cmd(sc, &cmd);
}
#endif
5676
static int
iwm_media_change(struct ifnet *ifp)
{
	/*
	 * ifmedia change handler: record any user-selected fixed
	 * rate/MCS as a hardware rate index, then restart the interface
	 * if it is currently up so the new setting takes effect.
	 */
	struct iwm_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	uint8_t rate, ridx;
	int err;

	err = ieee80211_media_change(ifp);
	if (err != ENETRESET)
		return err;

	/*
	 * NOTE: the #ifndef below deliberately pairs the 'else' with
	 * the following 'if' statement -- do not reformat.
	 */
#ifndef IEEE80211_NO_HT
	if (ic->ic_fixed_mcs != -1)
		sc->sc_fixed_ridx = iwm_mcs2ridx[ic->ic_fixed_mcs];
	else
#endif
	if (ic->ic_fixed_rate != -1) {
		rate = ic->ic_sup_rates[ic->ic_curmode].
		    rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL;
		/* Map 802.11 rate to HW rate index. */
		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
			if (iwm_rates[ridx].rate == rate)
				break;
		/*
		 * NOTE(review): if no entry matches, ridx ends up as
		 * IWM_RIDX_MAX + 1 here -- presumably every supported
		 * rate has a table entry; verify against iwm_rates[].
		 */
		sc->sc_fixed_ridx = ridx;
	}

	/* Restart the interface when it is running (err stays ENETRESET
	 * otherwise, telling the caller a reset is still pending). */
	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
	    (IFF_UP | IFF_RUNNING)) {
		iwm_stop(ifp, 0);
		err = iwm_init(ifp);
	}
	return err;
}
5711
5712 static void
5713 iwm_newstate_cb(struct work *wk, void *v)
5714 {
5715 struct iwm_softc *sc = v;
5716 struct ieee80211com *ic = &sc->sc_ic;
5717 struct iwm_newstate_state *iwmns = (struct iwm_newstate_state *)wk;
5718 enum ieee80211_state nstate = iwmns->ns_nstate;
5719 enum ieee80211_state ostate = ic->ic_state;
5720 int generation = iwmns->ns_generation;
5721 struct iwm_node *in;
5722 int arg = iwmns->ns_arg;
5723 int err;
5724
5725 kmem_free(iwmns, sizeof(*iwmns));
5726
5727 DPRINTF(("Prepare to switch state %d->%d\n", ostate, nstate));
5728 if (sc->sc_generation != generation) {
5729 DPRINTF(("newstate_cb: someone pulled the plug meanwhile\n"));
5730 if (nstate == IEEE80211_S_INIT) {
5731 DPRINTF(("newstate_cb: nstate == IEEE80211_S_INIT: calling sc_newstate()\n"));
5732 sc->sc_newstate(ic, nstate, arg);
5733 }
5734 return;
5735 }
5736
5737 DPRINTF(("switching state %s->%s\n", ieee80211_state_name[ostate],
5738 ieee80211_state_name[nstate]));
5739
5740 if (ostate == IEEE80211_S_SCAN && nstate != ostate)
5741 iwm_led_blink_stop(sc);
5742
5743 if (ostate == IEEE80211_S_RUN && nstate != ostate)
5744 iwm_disable_beacon_filter(sc);
5745
5746 /* Reset the device if moving out of AUTH, ASSOC, or RUN. */
5747 /* XXX Is there a way to switch states without a full reset? */
5748 if (ostate > IEEE80211_S_SCAN && nstate < ostate) {
5749 iwm_stop_device(sc);
5750 iwm_init_hw(sc);
5751
5752 /*
5753 * Upon receiving a deauth frame from AP the net80211 stack
5754 * puts the driver into AUTH state. This will fail with this
5755 * driver so bring the FSM from RUN to SCAN in this case.
5756 */
5757 if (nstate == IEEE80211_S_SCAN ||
5758 nstate == IEEE80211_S_AUTH ||
5759 nstate == IEEE80211_S_ASSOC) {
5760 DPRINTF(("Force transition to INIT; MGT=%d\n", arg));
5761 /* Always pass arg as -1 since we can't Tx right now. */
5762 sc->sc_newstate(ic, IEEE80211_S_INIT, -1);
5763 DPRINTF(("Going INIT->SCAN\n"));
5764 nstate = IEEE80211_S_SCAN;
5765 }
5766 }
5767
5768 switch (nstate) {
5769 case IEEE80211_S_INIT:
5770 break;
5771
5772 case IEEE80211_S_SCAN:
5773 if (ostate == nstate &&
5774 ISSET(sc->sc_flags, IWM_FLAG_SCANNING))
5775 return;
5776 if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
5777 err = iwm_umac_scan(sc);
5778 else
5779 err = iwm_lmac_scan(sc);
5780 if (err) {
5781 DPRINTF(("%s: could not initiate scan\n", DEVNAME(sc)));
5782 return;
5783 }
5784 SET(sc->sc_flags, IWM_FLAG_SCANNING);
5785 ic->ic_state = nstate;
5786 iwm_led_blink_start(sc);
5787 return;
5788
5789 case IEEE80211_S_AUTH:
5790 err = iwm_auth(sc);
5791 if (err) {
5792 DPRINTF(("%s: could not move to auth state: %d\n",
5793 DEVNAME(sc), err));
5794 return;
5795 }
5796 break;
5797
5798 case IEEE80211_S_ASSOC:
5799 err = iwm_assoc(sc);
5800 if (err) {
5801 DPRINTF(("%s: failed to associate: %d\n", DEVNAME(sc),
5802 err));
5803 return;
5804 }
5805 break;
5806
5807 case IEEE80211_S_RUN:
5808 in = (struct iwm_node *)ic->ic_bss;
5809
5810 /* We have now been assigned an associd by the AP. */
5811 err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 1);
5812 if (err) {
5813 aprint_error_dev(sc->sc_dev, "failed to update MAC\n");
5814 return;
5815 }
5816
5817 err = iwm_power_update_device(sc);
5818 if (err) {
5819 aprint_error_dev(sc->sc_dev,
5820 "could send power command (error %d)\n", err);
5821 return;
5822 }
5823 #ifdef notyet
5824 /*
5825 * Disabled for now. Default beacon filter settings
5826 * prevent net80211 from getting ERP and HT protection
5827 * updates from beacons.
5828 */
5829 err = iwm_enable_beacon_filter(sc, in);
5830 if (err) {
5831 aprint_error_dev(sc->sc_dev,
5832 "could not enable beacon filter\n");
5833 return;
5834 }
5835 #endif
5836 err = iwm_power_mac_update_mode(sc, in);
5837 if (err) {
5838 aprint_error_dev(sc->sc_dev,
5839 "could not update MAC power (error %d)\n", err);
5840 return;
5841 }
5842
5843 err = iwm_update_quotas(sc, in);
5844 if (err) {
5845 aprint_error_dev(sc->sc_dev,
5846 "could not update quotas (error %d)\n", err);
5847 return;
5848 }
5849
5850 ieee80211_amrr_node_init(&sc->sc_amrr, &in->in_amn);
5851
5852 /* Start at lowest available bit-rate, AMRR will raise. */
5853 in->in_ni.ni_txrate = 0;
5854 #ifndef IEEE80211_NO_HT
5855 in->in_ni.ni_txmcs = 0;
5856 iwm_setrates(in);
5857 #endif
5858
5859 callout_schedule(&sc->sc_calib_to, mstohz(500));
5860 iwm_led_enable(sc);
5861 break;
5862
5863 default:
5864 break;
5865 }
5866
5867 sc->sc_newstate(ic, nstate, arg);
5868 }
5869
5870 static int
5871 iwm_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
5872 {
5873 struct iwm_newstate_state *iwmns;
5874 struct ifnet *ifp = IC2IFP(ic);
5875 struct iwm_softc *sc = ifp->if_softc;
5876
5877 callout_stop(&sc->sc_calib_to);
5878
5879 iwmns = kmem_intr_alloc(sizeof(*iwmns), KM_NOSLEEP);
5880 if (!iwmns) {
5881 DPRINTF(("%s: allocating state cb mem failed\n", DEVNAME(sc)));
5882 return ENOMEM;
5883 }
5884
5885 iwmns->ns_nstate = nstate;
5886 iwmns->ns_arg = arg;
5887 iwmns->ns_generation = sc->sc_generation;
5888
5889 workqueue_enqueue(sc->sc_nswq, &iwmns->ns_wk, NULL);
5890
5891 return 0;
5892 }
5893
5894 static void
5895 iwm_endscan_cb(struct work *work __unused, void *arg)
5896 {
5897 struct iwm_softc *sc = arg;
5898 struct ieee80211com *ic = &sc->sc_ic;
5899
5900 DPRINTF(("scan ended\n"));
5901
5902 CLR(sc->sc_flags, IWM_FLAG_SCANNING);
5903 ieee80211_end_scan(ic);
5904 }
5905
5906 /*
5907 * Aging and idle timeouts for the different possible scenarios
5908 * in default configuration
5909 */
5910 static const uint32_t
5911 iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
5912 {
5913 htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
5914 htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
5915 },
5916 {
5917 htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF),
5918 htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)
5919 },
5920 {
5921 htole32(IWM_SF_MCAST_AGING_TIMER_DEF),
5922 htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)
5923 },
5924 {
5925 htole32(IWM_SF_BA_AGING_TIMER_DEF),
5926 htole32(IWM_SF_BA_IDLE_TIMER_DEF)
5927 },
5928 {
5929 htole32(IWM_SF_TX_RE_AGING_TIMER_DEF),
5930 htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)
5931 },
5932 };
5933
5934 /*
5935 * Aging and idle timeouts for the different possible scenarios
5936 * in single BSS MAC configuration.
5937 */
5938 static const uint32_t
5939 iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
5940 {
5941 htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER),
5942 htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)
5943 },
5944 {
5945 htole32(IWM_SF_AGG_UNICAST_AGING_TIMER),
5946 htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)
5947 },
5948 {
5949 htole32(IWM_SF_MCAST_AGING_TIMER),
5950 htole32(IWM_SF_MCAST_IDLE_TIMER)
5951 },
5952 {
5953 htole32(IWM_SF_BA_AGING_TIMER),
5954 htole32(IWM_SF_BA_IDLE_TIMER)
5955 },
5956 {
5957 htole32(IWM_SF_TX_RE_AGING_TIMER),
5958 htole32(IWM_SF_TX_RE_IDLE_TIMER)
5959 },
5960 };
5961
static void
iwm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
    struct ieee80211_node *ni)
{
	/*
	 * Populate a smart-FIFO configuration command: pick watermarks
	 * based on the AP's antenna capabilities (when associating) and
	 * select the matching aging/idle timeout table.  'ni' is NULL
	 * when unassociated.
	 */
	int i, j, watermark;

	sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN);

	/*
	 * If we are in association flow - check antenna configuration
	 * capabilities of the AP station, and choose the watermark accordingly.
	 */
	if (ni) {
#ifndef IEEE80211_NO_HT
		if (ni->ni_flags & IEEE80211_NODE_HT) {
#ifdef notyet
			if (ni->ni_rxmcs[2] != 0)
				watermark = IWM_SF_W_MARK_MIMO3;
			else if (ni->ni_rxmcs[1] != 0)
				watermark = IWM_SF_W_MARK_MIMO2;
			else
#endif
				watermark = IWM_SF_W_MARK_SISO;
		} else
#endif
			watermark = IWM_SF_W_MARK_LEGACY;
	/* default watermark value for unassociated mode. */
	} else {
		watermark = IWM_SF_W_MARK_MIMO2;
	}
	sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark);

	/* Long-delay timeouts are the same for every scenario. */
	for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) {
		for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) {
			sf_cmd->long_delay_timeouts[i][j] =
			    htole32(IWM_SF_LONG_DELAY_AGING_TIMER);
		}
	}

	/* Full-on timeouts depend on whether we are associated. */
	if (ni) {
		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
		    sizeof(iwm_sf_full_timeout));
	} else {
		memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
		    sizeof(iwm_sf_full_timeout_def));
	}
}
6009
static int
iwm_sf_config(struct iwm_softc *sc, int new_state)
{
	/*
	 * Send a smart-FIFO configuration command for the requested
	 * state, choosing watermarks/timeouts for either the
	 * unassociated (UNINIT/INIT_OFF) or associated (FULL_ON) case.
	 *
	 * NOTE(review): sf_cmd.state is initialized to IWM_SF_FULL_ON
	 * regardless of new_state; only the timeout tables vary.
	 * Presumably intentional for this firmware API revision --
	 * confirm before passing states other than FULL_ON.
	 */
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_sf_cfg_cmd sf_cmd = {
		.state = htole32(IWM_SF_FULL_ON),
	};

	/* Family 8000 firmware wants the dummy-notification bit cleared. */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
		sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF);

	switch (new_state) {
	case IWM_SF_UNINIT:
	case IWM_SF_INIT_OFF:
		iwm_fill_sf_command(sc, &sf_cmd, NULL);
		break;
	case IWM_SF_FULL_ON:
		iwm_fill_sf_command(sc, &sf_cmd, ic->ic_bss);
		break;
	default:
		return EINVAL;
	}

	return iwm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
	    sizeof(sf_cmd), &sf_cmd);
}
6036
6037 static int
6038 iwm_send_bt_init_conf(struct iwm_softc *sc)
6039 {
6040 struct iwm_bt_coex_cmd bt_cmd;
6041
6042 bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
6043 bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
6044
6045 return iwm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd), &bt_cmd);
6046 }
6047
6048 static int
6049 iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
6050 {
6051 struct iwm_mcc_update_cmd mcc_cmd;
6052 struct iwm_host_cmd hcmd = {
6053 .id = IWM_MCC_UPDATE_CMD,
6054 .flags = IWM_CMD_WANT_SKB,
6055 .data = { &mcc_cmd },
6056 };
6057 int resp_v2 = isset(sc->sc_enabled_capa,
6058 IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
6059 int err;
6060
6061 memset(&mcc_cmd, 0, sizeof(mcc_cmd));
6062 mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
6063 if ((sc->sc_ucode_api & IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
6064 isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC))
6065 mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
6066 else
6067 mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
6068
6069 if (resp_v2)
6070 hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
6071 else
6072 hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
6073
6074 err = iwm_send_cmd(sc, &hcmd);
6075 if (err)
6076 return err;
6077
6078 iwm_free_resp(sc, &hcmd);
6079
6080 return 0;
6081 }
6082
6083 static void
6084 iwm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
6085 {
6086 struct iwm_host_cmd cmd = {
6087 .id = IWM_REPLY_THERMAL_MNG_BACKOFF,
6088 .len = { sizeof(uint32_t), },
6089 .data = { &backoff, },
6090 };
6091
6092 iwm_send_cmd(sc, &cmd);
6093 }
6094
6095 static int
6096 iwm_init_hw(struct iwm_softc *sc)
6097 {
6098 struct ieee80211com *ic = &sc->sc_ic;
6099 int err, i, ac;
6100
6101 err = iwm_preinit(sc);
6102 if (err)
6103 return err;
6104
6105 err = iwm_start_hw(sc);
6106 if (err) {
6107 aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
6108 return err;
6109 }
6110
6111 err = iwm_run_init_mvm_ucode(sc, 0);
6112 if (err)
6113 return err;
6114
6115 /* Should stop and start HW since INIT image just loaded. */
6116 iwm_stop_device(sc);
6117 err = iwm_start_hw(sc);
6118 if (err) {
6119 aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
6120 return err;
6121 }
6122
6123 /* Restart, this time with the regular firmware */
6124 err = iwm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_REGULAR);
6125 if (err) {
6126 aprint_error_dev(sc->sc_dev, "could not load firmware\n");
6127 goto err;
6128 }
6129
6130 err = iwm_send_bt_init_conf(sc);
6131 if (err) {
6132 aprint_error_dev(sc->sc_dev,
6133 "could not init bt coex (error %d)\n", err);
6134 goto err;
6135 }
6136
6137 err = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc));
6138 if (err) {
6139 aprint_error_dev(sc->sc_dev,
6140 "could not init tx ant config (error %d)\n", err);
6141 goto err;
6142 }
6143
6144 /* Send phy db control command and then phy db calibration*/
6145 err = iwm_send_phy_db_data(sc);
6146 if (err) {
6147 aprint_error_dev(sc->sc_dev,
6148 "could not init phy db (error %d)\n", err);
6149 goto err;
6150 }
6151
6152 err = iwm_send_phy_cfg_cmd(sc);
6153 if (err) {
6154 aprint_error_dev(sc->sc_dev,
6155 "could not send phy config (error %d)\n", err);
6156 goto err;
6157 }
6158
6159 /* Add auxiliary station for scanning */
6160 err = iwm_add_aux_sta(sc);
6161 if (err) {
6162 aprint_error_dev(sc->sc_dev,
6163 "could not add aux station (error %d)\n", err);
6164 goto err;
6165 }
6166
6167 for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
6168 /*
6169 * The channel used here isn't relevant as it's
6170 * going to be overwritten in the other flows.
6171 * For now use the first channel we have.
6172 */
6173 sc->sc_phyctxt[i].channel = &ic->ic_channels[1];
6174 err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[i], 1, 1,
6175 IWM_FW_CTXT_ACTION_ADD, 0);
6176 if (err) {
6177 aprint_error_dev(sc->sc_dev,
6178 "could not add phy context %d (error %d)\n",
6179 i, err);
6180 goto err;
6181 }
6182 }
6183
6184 /* Initialize tx backoffs to the minimum. */
6185 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
6186 iwm_tt_tx_backoff(sc, 0);
6187
6188 err = iwm_power_update_device(sc);
6189 if (err) {
6190 aprint_error_dev(sc->sc_dev,
6191 "could send power command (error %d)\n", err);
6192 goto err;
6193 }
6194
6195 if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_SUPPORT)) {
6196 err = iwm_send_update_mcc_cmd(sc, "ZZ");
6197 if (err) {
6198 aprint_error_dev(sc->sc_dev,
6199 "could not init LAR (error %d)\n", err);
6200 goto err;
6201 }
6202 }
6203
6204 if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
6205 err = iwm_config_umac_scan(sc);
6206 if (err) {
6207 aprint_error_dev(sc->sc_dev,
6208 "could not configure scan (error %d)\n", err);
6209 goto err;
6210 }
6211 }
6212
6213 for (ac = 0; ac < WME_NUM_AC; ac++) {
6214 err = iwm_enable_txq(sc, IWM_STATION_ID, ac,
6215 iwm_ac_to_tx_fifo[ac]);
6216 if (err) {
6217 aprint_error_dev(sc->sc_dev,
6218 "could not enable Tx queue %d (error %d)\n",
6219 i, err);
6220 goto err;
6221 }
6222 }
6223
6224 err = iwm_disable_beacon_filter(sc);
6225 if (err) {
6226 aprint_error_dev(sc->sc_dev,
6227 "could not disable beacon filter (error %d)\n", err);
6228 goto err;
6229 }
6230
6231 return 0;
6232
6233 err:
6234 iwm_stop_device(sc);
6235 return err;
6236 }
6237
6238 /* Allow multicast from our BSSID. */
6239 static int
6240 iwm_allow_mcast(struct iwm_softc *sc)
6241 {
6242 struct ieee80211com *ic = &sc->sc_ic;
6243 struct ieee80211_node *ni = ic->ic_bss;
6244 struct iwm_mcast_filter_cmd *cmd;
6245 size_t size;
6246 int err;
6247
6248 size = roundup(sizeof(*cmd), 4);
6249 cmd = kmem_intr_zalloc(size, KM_NOSLEEP);
6250 if (cmd == NULL)
6251 return ENOMEM;
6252 cmd->filter_own = 1;
6253 cmd->port_id = 0;
6254 cmd->count = 0;
6255 cmd->pass_all = 1;
6256 IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
6257
6258 err = iwm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD, 0, size, cmd);
6259 kmem_intr_free(cmd, size);
6260 return err;
6261 }
6262
6263 static int
6264 iwm_init(struct ifnet *ifp)
6265 {
6266 struct iwm_softc *sc = ifp->if_softc;
6267 int err;
6268
6269 if (ISSET(sc->sc_flags, IWM_FLAG_HW_INITED))
6270 return 0;
6271
6272 sc->sc_generation++;
6273 sc->sc_flags &= ~IWM_FLAG_STOPPED;
6274
6275 err = iwm_init_hw(sc);
6276 if (err) {
6277 iwm_stop(ifp, 1);
6278 return err;
6279 }
6280
6281 ifp->if_flags &= ~IFF_OACTIVE;
6282 ifp->if_flags |= IFF_RUNNING;
6283
6284 ieee80211_begin_scan(&sc->sc_ic, 0);
6285 SET(sc->sc_flags, IWM_FLAG_HW_INITED);
6286
6287 return 0;
6288 }
6289
static void
iwm_start(struct ifnet *ifp)
{
	/*
	 * ifnet start routine: drain the management queue and, when in
	 * RUN state, the data send queue, encapsulating and handing
	 * frames to iwm_tx() until the Tx rings fill up.
	 */
	struct iwm_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni;
	struct ether_header *eh;
	struct mbuf *m;
	int ac;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		/* why isn't this done per-queue? */
		if (sc->qfullmsk != 0) {
			/* Some Tx ring is full; pause output until it drains. */
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* need to send management frames even if we're not RUNning */
		IF_DEQUEUE(&ic->ic_mgtq, m);
		if (m) {
			/* Destination node was stashed in the mbuf context. */
			ni = M_GETCTX(m, struct ieee80211_node *);
			ac = WME_AC_BE;
			goto sendit;
		}
		if (ic->ic_state != IEEE80211_S_RUN) {
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (!m)
			break;
		/* Need a contiguous Ethernet header for classification. */
		if (m->m_len < sizeof (*eh) &&
		    (m = m_pullup(m, sizeof (*eh))) == NULL) {
			ifp->if_oerrors++;
			continue;
		}
		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp, m);

		eh = mtod(m, struct ether_header *);
		ni = ieee80211_find_txnode(ic, eh->ether_dhost);
		if (ni == NULL) {
			m_freem(m);
			ifp->if_oerrors++;
			continue;
		}
		/* classify mbuf so we can find which tx ring to use */
		if (ieee80211_classify(ic, m, ni) != 0) {
			m_freem(m);
			ieee80211_free_node(ni);
			ifp->if_oerrors++;
			continue;
		}

		/* No QoS encapsulation for EAPOL frames. */
		ac = (eh->ether_type != htons(ETHERTYPE_PAE)) ?
		    M_WME_GETAC(m) : WME_AC_BE;

		if ((m = ieee80211_encap(ic, m, ni)) == NULL) {
			ieee80211_free_node(ni);
			ifp->if_oerrors++;
			continue;
		}

 sendit:
		if (ic->ic_rawbpf != NULL)
			bpf_mtap3(ic->ic_rawbpf, m);
		if (iwm_tx(sc, m, ni, ac) != 0) {
			ieee80211_free_node(ni);
			ifp->if_oerrors++;
			continue;
		}

		/* Arm the Tx watchdog for this frame. */
		if (ifp->if_flags & IFF_UP) {
			sc->sc_tx_timer = 15;
			ifp->if_timer = 1;
		}
	}

	return;
}
6374
6375 static void
6376 iwm_stop(struct ifnet *ifp, int disable)
6377 {
6378 struct iwm_softc *sc = ifp->if_softc;
6379 struct ieee80211com *ic = &sc->sc_ic;
6380 struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
6381
6382 sc->sc_flags &= ~IWM_FLAG_HW_INITED;
6383 sc->sc_flags |= IWM_FLAG_STOPPED;
6384 sc->sc_generation++;
6385 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
6386
6387 if (in)
6388 in->in_phyctxt = NULL;
6389
6390 if (ic->ic_state != IEEE80211_S_INIT)
6391 ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
6392
6393 callout_stop(&sc->sc_calib_to);
6394 iwm_led_blink_stop(sc);
6395 ifp->if_timer = sc->sc_tx_timer = 0;
6396 iwm_stop_device(sc);
6397 }
6398
6399 static void
6400 iwm_watchdog(struct ifnet *ifp)
6401 {
6402 struct iwm_softc *sc = ifp->if_softc;
6403
6404 ifp->if_timer = 0;
6405 if (sc->sc_tx_timer > 0) {
6406 if (--sc->sc_tx_timer == 0) {
6407 aprint_error_dev(sc->sc_dev, "device timeout\n");
6408 #ifdef IWM_DEBUG
6409 iwm_nic_error(sc);
6410 #endif
6411 ifp->if_flags &= ~IFF_UP;
6412 iwm_stop(ifp, 1);
6413 ifp->if_oerrors++;
6414 return;
6415 }
6416 ifp->if_timer = 1;
6417 }
6418
6419 ieee80211_watchdog(&sc->sc_ic);
6420 }
6421
6422 static int
6423 iwm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
6424 {
6425 struct iwm_softc *sc = ifp->if_softc;
6426 struct ieee80211com *ic = &sc->sc_ic;
6427 const struct sockaddr *sa;
6428 int s, err = 0;
6429
6430 s = splnet();
6431
6432 switch (cmd) {
6433 case SIOCSIFADDR:
6434 ifp->if_flags |= IFF_UP;
6435 /* FALLTHROUGH */
6436 case SIOCSIFFLAGS:
6437 err = ifioctl_common(ifp, cmd, data);
6438 if (err)
6439 break;
6440 if (ifp->if_flags & IFF_UP) {
6441 if (!(ifp->if_flags & IFF_RUNNING)) {
6442 err = iwm_init(ifp);
6443 if (err)
6444 ifp->if_flags &= ~IFF_UP;
6445 }
6446 } else {
6447 if (ifp->if_flags & IFF_RUNNING)
6448 iwm_stop(ifp, 1);
6449 }
6450 break;
6451
6452 case SIOCADDMULTI:
6453 case SIOCDELMULTI:
6454 if (!ISSET(sc->sc_flags, IWM_FLAG_ATTACHED)) {
6455 err = ENXIO;
6456 break;
6457 }
6458 sa = ifreq_getaddr(SIOCADDMULTI, (struct ifreq *)data);
6459 err = (cmd == SIOCADDMULTI) ?
6460 ether_addmulti(sa, &sc->sc_ec) :
6461 ether_delmulti(sa, &sc->sc_ec);
6462 if (err == ENETRESET)
6463 err = 0;
6464 break;
6465
6466 default:
6467 if (!ISSET(sc->sc_flags, IWM_FLAG_ATTACHED)) {
6468 err = ether_ioctl(ifp, cmd, data);
6469 break;
6470 }
6471 err = ieee80211_ioctl(ic, cmd, data);
6472 break;
6473 }
6474
6475 if (err == ENETRESET) {
6476 err = 0;
6477 if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
6478 (IFF_UP | IFF_RUNNING)) {
6479 iwm_stop(ifp, 0);
6480 err = iwm_init(ifp);
6481 }
6482 }
6483
6484 splx(s);
6485 return err;
6486 }
6487
6488 /*
6489 * Note: This structure is read from the device with IO accesses,
6490 * and the reading already does the endian conversion. As it is
6491 * read with uint32_t-sized accesses, any members with a different size
6492 * need to be ordered correctly though!
6493 */
struct iwm_error_event_table {
	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
	uint32_t error_id;	/* type of error */
	uint32_t trm_hw_status0;	/* TRM HW status */
	uint32_t trm_hw_status1;	/* TRM HW status */
	uint32_t blink2;	/* branch link */
	uint32_t ilink1;	/* interrupt link */
	uint32_t ilink2;	/* interrupt link */
	uint32_t data1;		/* error-specific data */
	uint32_t data2;		/* error-specific data */
	uint32_t data3;		/* error-specific data */
	uint32_t bcon_time;	/* beacon timer */
	uint32_t tsf_low;	/* network timestamp function timer */
	uint32_t tsf_hi;	/* network timestamp function timer */
	uint32_t gp1;		/* GP1 timer register */
	uint32_t gp2;		/* GP2 timer register */
	uint32_t fw_rev_type;	/* firmware revision type */
	uint32_t major;		/* uCode version major */
	uint32_t minor;		/* uCode version minor */
	uint32_t hw_ver;	/* HW Silicon version */
	uint32_t brd_ver;	/* HW board version */
	uint32_t log_pc;	/* log program counter */
	uint32_t frame_ptr;	/* frame pointer */
	uint32_t stack_ptr;	/* stack pointer */
	uint32_t hcmd;		/* last host command header */
	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
				 * rxtx_flag */
	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
				 * host_flag */
	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
				 * enc_flag */
	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
				 * time_flag */
	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
				 * wico interrupt */
	uint32_t last_cmd_id;	/* last HCMD id handled by the firmware */
	uint32_t wait_event;	/* wait event() caller address */
	uint32_t l2p_control;	/* L2pControlField */
	uint32_t l2p_duration;	/* L2pDurationField */
	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
				 * (LMPM_PMG_SEL) */
	uint32_t u_timestamp;	/* date and time of the firmware
				 * compilation */
	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
} __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
6541
6542 /*
6543 * UMAC error struct - relevant starting from family 8000 chip.
6544 * Note: This structure is read from the device with IO accesses,
6545 * and the reading already does the endian conversion. As it is
6546 * read with u32-sized accesses, any members with a different size
6547 * need to be ordered correctly though!
6548 */
struct iwm_umac_error_event_table {
	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
	uint32_t error_id;	/* type of error */
	uint32_t blink1;	/* branch link */
	uint32_t blink2;	/* branch link */
	uint32_t ilink1;	/* interrupt link */
	uint32_t ilink2;	/* interrupt link */
	uint32_t data1;		/* error-specific data */
	uint32_t data2;		/* error-specific data */
	uint32_t data3;		/* error-specific data */
	uint32_t umac_major;	/* UMAC uCode version major */
	uint32_t umac_minor;	/* UMAC uCode version minor */
	uint32_t frame_pointer;	/* core register 27 */
	uint32_t stack_pointer;	/* core register 28 */
	uint32_t cmd_header;	/* latest host cmd sent to UMAC */
	uint32_t nic_isr_pref;	/* ISR status register */
} __packed;
6566
/* Layout constants for the firmware error log area (see iwm_nic_error()). */
#define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
#define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
6569
#ifdef IWM_DEBUG
/*
 * Mapping from firmware error_id values to human-readable names.
 * The final entry (ADVANCED_SYSASSERT, num 0) is the catch-all used by
 * iwm_desc_lookup() when no other entry matches.
 */
static const struct {
	const char *name;
	uint8_t num;
} advanced_lookup[] = {
	{ "NMI_INTERRUPT_WDG", 0x34 },
	{ "SYSASSERT", 0x35 },
	{ "UCODE_VERSION_MISMATCH", 0x37 },
	{ "BAD_COMMAND", 0x38 },
	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
	{ "FATAL_ERROR", 0x3D },
	{ "NMI_TRM_HW_ERR", 0x46 },
	{ "NMI_INTERRUPT_TRM", 0x4C },
	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
	{ "NMI_INTERRUPT_HOST", 0x66 },
	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
	{ "ADVANCED_SYSASSERT", 0 },
};
6592
6593 static const char *
6594 iwm_desc_lookup(uint32_t num)
6595 {
6596 int i;
6597
6598 for (i = 0; i < __arraycount(advanced_lookup) - 1; i++)
6599 if (advanced_lookup[i].num == num)
6600 return advanced_lookup[i].name;
6601
6602 /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
6603 return advanced_lookup[i].name;
6604 }
6605
6606 /*
6607 * Support for dumping the error log seemed like a good idea ...
6608 * but it's mostly hex junk and the only sensible thing is the
6609 * hw/ucode revision (which we know anyway). Since it's here,
6610 * I'll just leave it in, just in case e.g. the Intel guys want to
6611 * help us decipher some "ADVANCED_SYSASSERT" later.
6612 */
/*
 * Dump the LMAC firmware error log to the console.  The log lives in
 * device memory at the address the firmware reported in its ALIVE
 * response (sc_uc.uc_error_event_table).  Chains to the UMAC dump when
 * the firmware provided a UMAC error table as well.
 */
static void
iwm_nic_error(struct iwm_softc *sc)
{
	struct iwm_error_event_table t;
	uint32_t base;

	aprint_error_dev(sc->sc_dev, "dumping device error log\n");
	base = sc->sc_uc.uc_error_event_table;
	/* Addresses below 0x800000 are not valid device SRAM pointers. */
	if (base < 0x800000) {
		aprint_error_dev(sc->sc_dev,
		    "Invalid error log pointer 0x%08x\n", base);
		return;
	}

	/* iwm_read_mem() counts in 32-bit words, not bytes. */
	if (iwm_read_mem(sc, base, &t, sizeof(t)/sizeof(uint32_t))) {
		aprint_error_dev(sc->sc_dev, "reading errlog failed\n");
		return;
	}

	if (!t.valid) {
		aprint_error_dev(sc->sc_dev, "errlog not found, skipping\n");
		return;
	}

	if (ERROR_START_OFFSET <= t.valid * ERROR_ELEM_SIZE) {
		aprint_error_dev(sc->sc_dev, "Start Error Log Dump:\n");
		aprint_error_dev(sc->sc_dev, "Status: 0x%x, count: %d\n",
		    sc->sc_flags, t.valid);
	}

	aprint_error_dev(sc->sc_dev, "%08X | %-28s\n", t.error_id,
	    iwm_desc_lookup(t.error_id));
	aprint_error_dev(sc->sc_dev, "%08X | trm_hw_status0\n",
	    t.trm_hw_status0);
	aprint_error_dev(sc->sc_dev, "%08X | trm_hw_status1\n",
	    t.trm_hw_status1);
	aprint_error_dev(sc->sc_dev, "%08X | branchlink2\n", t.blink2);
	aprint_error_dev(sc->sc_dev, "%08X | interruptlink1\n", t.ilink1);
	aprint_error_dev(sc->sc_dev, "%08X | interruptlink2\n", t.ilink2);
	aprint_error_dev(sc->sc_dev, "%08X | data1\n", t.data1);
	aprint_error_dev(sc->sc_dev, "%08X | data2\n", t.data2);
	aprint_error_dev(sc->sc_dev, "%08X | data3\n", t.data3);
	aprint_error_dev(sc->sc_dev, "%08X | beacon time\n", t.bcon_time);
	aprint_error_dev(sc->sc_dev, "%08X | tsf low\n", t.tsf_low);
	aprint_error_dev(sc->sc_dev, "%08X | tsf hi\n", t.tsf_hi);
	aprint_error_dev(sc->sc_dev, "%08X | time gp1\n", t.gp1);
	aprint_error_dev(sc->sc_dev, "%08X | time gp2\n", t.gp2);
	aprint_error_dev(sc->sc_dev, "%08X | uCode revision type\n",
	    t.fw_rev_type);
	aprint_error_dev(sc->sc_dev, "%08X | uCode version major\n",
	    t.major);
	aprint_error_dev(sc->sc_dev, "%08X | uCode version minor\n",
	    t.minor);
	aprint_error_dev(sc->sc_dev, "%08X | hw version\n", t.hw_ver);
	aprint_error_dev(sc->sc_dev, "%08X | board version\n", t.brd_ver);
	aprint_error_dev(sc->sc_dev, "%08X | hcmd\n", t.hcmd);
	aprint_error_dev(sc->sc_dev, "%08X | isr0\n", t.isr0);
	aprint_error_dev(sc->sc_dev, "%08X | isr1\n", t.isr1);
	aprint_error_dev(sc->sc_dev, "%08X | isr2\n", t.isr2);
	aprint_error_dev(sc->sc_dev, "%08X | isr3\n", t.isr3);
	aprint_error_dev(sc->sc_dev, "%08X | isr4\n", t.isr4);
	aprint_error_dev(sc->sc_dev, "%08X | last cmd Id\n", t.last_cmd_id);
	aprint_error_dev(sc->sc_dev, "%08X | wait_event\n", t.wait_event);
	aprint_error_dev(sc->sc_dev, "%08X | l2p_control\n", t.l2p_control);
	aprint_error_dev(sc->sc_dev, "%08X | l2p_duration\n", t.l2p_duration);
	aprint_error_dev(sc->sc_dev, "%08X | l2p_mhvalid\n", t.l2p_mhvalid);
	aprint_error_dev(sc->sc_dev, "%08X | l2p_addr_match\n",
	    t.l2p_addr_match);
	aprint_error_dev(sc->sc_dev, "%08X | lmpm_pmg_sel\n", t.lmpm_pmg_sel);
	aprint_error_dev(sc->sc_dev, "%08X | timestamp\n", t.u_timestamp);
	aprint_error_dev(sc->sc_dev, "%08X | flow_handler\n", t.flow_handler);

	if (sc->sc_uc.uc_umac_error_event_table)
		iwm_nic_umac_error(sc);
}
6688
/*
 * Dump the UMAC firmware error log (family 8000 and later; the address
 * comes from the firmware's ALIVE v2/v3 response, error_info_addr).
 */
static void
iwm_nic_umac_error(struct iwm_softc *sc)
{
	struct iwm_umac_error_event_table t;
	uint32_t base;

	base = sc->sc_uc.uc_umac_error_event_table;

	/* Addresses below 0x800000 are not valid device SRAM pointers. */
	if (base < 0x800000) {
		aprint_error_dev(sc->sc_dev,
		    "Invalid error log pointer 0x%08x\n", base);
		return;
	}

	/* iwm_read_mem() counts in 32-bit words, not bytes. */
	if (iwm_read_mem(sc, base, &t, sizeof(t)/sizeof(uint32_t))) {
		aprint_error_dev(sc->sc_dev, "reading errlog failed\n");
		return;
	}

	if (ERROR_START_OFFSET <= t.valid * ERROR_ELEM_SIZE) {
		aprint_error_dev(sc->sc_dev, "Start UMAC Error Log Dump:\n");
		aprint_error_dev(sc->sc_dev, "Status: 0x%x, count: %d\n",
		    sc->sc_flags, t.valid);
	}

	aprint_error_dev(sc->sc_dev, "0x%08X | %s\n", t.error_id,
	    iwm_desc_lookup(t.error_id));
	aprint_error_dev(sc->sc_dev, "0x%08X | umac branchlink1\n", t.blink1);
	aprint_error_dev(sc->sc_dev, "0x%08X | umac branchlink2\n", t.blink2);
	aprint_error_dev(sc->sc_dev, "0x%08X | umac interruptlink1\n",
	    t.ilink1);
	aprint_error_dev(sc->sc_dev, "0x%08X | umac interruptlink2\n",
	    t.ilink2);
	aprint_error_dev(sc->sc_dev, "0x%08X | umac data1\n", t.data1);
	aprint_error_dev(sc->sc_dev, "0x%08X | umac data2\n", t.data2);
	aprint_error_dev(sc->sc_dev, "0x%08X | umac data3\n", t.data3);
	aprint_error_dev(sc->sc_dev, "0x%08X | umac major\n", t.umac_major);
	aprint_error_dev(sc->sc_dev, "0x%08X | umac minor\n", t.umac_minor);
	aprint_error_dev(sc->sc_dev, "0x%08X | frame pointer\n",
	    t.frame_pointer);
	aprint_error_dev(sc->sc_dev, "0x%08X | stack pointer\n",
	    t.stack_pointer);
	aprint_error_dev(sc->sc_dev, "0x%08X | last host cmd\n", t.cmd_header);
	aprint_error_dev(sc->sc_dev, "0x%08X | isr status reg\n",
	    t.nic_isr_pref);
}
6735 #endif
6736
/*
 * Sync the DMA map for a response payload following the RX packet
 * header, then point _var_ at it.  SYNC_RESP_STRUCT syncs sizeof the
 * pointed-to struct; SYNC_RESP_PTR syncs an explicit byte length.
 *
 * Fix: SYNC_RESP_PTR previously passed sizeof(len) — referencing a
 * caller-scope identifier instead of the _len_ macro argument — so it
 * synced sizeof(a variable named "len") bytes, not the requested
 * length.  It now uses (_len_) directly.
 */
#define SYNC_RESP_STRUCT(_var_, _pkt_)					\
do {									\
	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
	    sizeof(*(_var_)), BUS_DMASYNC_POSTREAD);			\
	_var_ = (void *)((_pkt_)+1);					\
} while (/*CONSTCOND*/0)

#define SYNC_RESP_PTR(_ptr_, _len_, _pkt_)				\
do {									\
	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
	    (_len_), BUS_DMASYNC_POSTREAD);				\
	_ptr_ = (void *)((_pkt_)+1);					\
} while (/*CONSTCOND*/0)

/* Advance the RX ring read index, wrapping at the ring size. */
#define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % IWM_RX_RING_COUNT);
6752
/*
 * Process all pending firmware notifications / command responses in
 * the RX ring, dispatching on the (group, command) id.  Called from
 * iwm_intr() when an RX interrupt fires.  Finishes by releasing the
 * MAC access request and updating the hardware write pointer.
 */
static void
iwm_notif_intr(struct iwm_softc *sc)
{
	uint16_t hw;

	bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
	    0, sc->rxq.stat_dma.size, BUS_DMASYNC_POSTREAD);

	/* Index of the last ring entry the hardware has filled. */
	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
	while (sc->rxq.cur != hw) {
		struct iwm_rx_data *data = &sc->rxq.data[sc->rxq.cur];
		struct iwm_rx_packet *pkt;
		struct iwm_cmd_response *cresp;
		int orig_qid, qid, idx, code;

		bus_dmamap_sync(sc->sc_dmat, data->map, 0, sizeof(*pkt),
		    BUS_DMASYNC_POSTREAD);
		pkt = mtod(data->m, struct iwm_rx_packet *);

		/* Bit 0x80 in qid marks firmware-originated notifications. */
		orig_qid = pkt->hdr.qid;
		qid = orig_qid & ~0x80;
		idx = pkt->hdr.idx;

		code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);

		/*
		 * randomly get these from the firmware, no idea why.
		 * they at least seem harmless, so just ignore them for now
		 */
		if (__predict_false((pkt->hdr.code == 0 && qid == 0 && idx == 0)
		    || pkt->len_n_flags == htole32(0x55550000))) {
			ADVANCE_RXQ(sc);
			continue;
		}

		switch (code) {
		case IWM_REPLY_RX_PHY_CMD:
			iwm_rx_rx_phy_cmd(sc, pkt, data);
			break;

		case IWM_REPLY_RX_MPDU_CMD:
			iwm_rx_rx_mpdu(sc, pkt, data);
			break;

		case IWM_TX_CMD:
			iwm_rx_tx_cmd(sc, pkt, data);
			break;

		case IWM_MISSED_BEACONS_NOTIFICATION:
			iwm_rx_missed_beacons_notif(sc, pkt, data);
			break;

		case IWM_MFUART_LOAD_NOTIFICATION:
			break;

		/*
		 * ALIVE: the firmware reports its error/event log
		 * addresses and scheduler base.  The response layout
		 * differs by firmware version; distinguish by length.
		 */
		case IWM_ALIVE: {
			struct iwm_alive_resp_v1 *resp1;
			struct iwm_alive_resp_v2 *resp2;
			struct iwm_alive_resp_v3 *resp3;

			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp1)) {
				SYNC_RESP_STRUCT(resp1, pkt);
				sc->sc_uc.uc_error_event_table
				    = le32toh(resp1->error_event_table_ptr);
				sc->sc_uc.uc_log_event_table
				    = le32toh(resp1->log_event_table_ptr);
				sc->sched_base = le32toh(resp1->scd_base_ptr);
				if (resp1->status == IWM_ALIVE_STATUS_OK)
					sc->sc_uc.uc_ok = 1;
				else
					sc->sc_uc.uc_ok = 0;
			}
			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp2)) {
				SYNC_RESP_STRUCT(resp2, pkt);
				sc->sc_uc.uc_error_event_table
				    = le32toh(resp2->error_event_table_ptr);
				sc->sc_uc.uc_log_event_table
				    = le32toh(resp2->log_event_table_ptr);
				sc->sched_base = le32toh(resp2->scd_base_ptr);
				sc->sc_uc.uc_umac_error_event_table
				    = le32toh(resp2->error_info_addr);
				if (resp2->status == IWM_ALIVE_STATUS_OK)
					sc->sc_uc.uc_ok = 1;
				else
					sc->sc_uc.uc_ok = 0;
			}
			if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp3)) {
				SYNC_RESP_STRUCT(resp3, pkt);
				sc->sc_uc.uc_error_event_table
				    = le32toh(resp3->error_event_table_ptr);
				sc->sc_uc.uc_log_event_table
				    = le32toh(resp3->log_event_table_ptr);
				sc->sched_base = le32toh(resp3->scd_base_ptr);
				sc->sc_uc.uc_umac_error_event_table
				    = le32toh(resp3->error_info_addr);
				if (resp3->status == IWM_ALIVE_STATUS_OK)
					sc->sc_uc.uc_ok = 1;
				else
					sc->sc_uc.uc_ok = 0;
			}

			/* Wake whoever is sleeping in firmware load. */
			sc->sc_uc.uc_intr = 1;
			wakeup(&sc->sc_uc);
			break;
		}

		case IWM_CALIB_RES_NOTIF_PHY_DB: {
			struct iwm_calib_res_notif_phy_db *phy_db_notif;
			SYNC_RESP_STRUCT(phy_db_notif, pkt);
			uint16_t size = le16toh(phy_db_notif->length);
			/* Also sync the variable-length payload. */
			bus_dmamap_sync(sc->sc_dmat, data->map,
			    sizeof(*pkt) + sizeof(*phy_db_notif),
			    size, BUS_DMASYNC_POSTREAD);
			iwm_phy_db_set_section(sc, phy_db_notif, size);
			break;
		}

		case IWM_STATISTICS_NOTIFICATION: {
			struct iwm_notif_statistics *stats;
			SYNC_RESP_STRUCT(stats, pkt);
			memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
			sc->sc_noise = iwm_get_noise(&stats->rx.general);
			break;
		}

		case IWM_NVM_ACCESS_CMD:
		case IWM_MCC_UPDATE_CMD:
			/* Copy the response for the waiting command issuer. */
			if (sc->sc_wantresp == ((qid << 16) | idx)) {
				bus_dmamap_sync(sc->sc_dmat, data->map, 0,
				    sizeof(sc->sc_cmd_resp),
				    BUS_DMASYNC_POSTREAD);
				memcpy(sc->sc_cmd_resp,
				    pkt, sizeof(sc->sc_cmd_resp));
			}
			break;

		case IWM_MCC_CHUB_UPDATE_CMD: {
			struct iwm_mcc_chub_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);

			/* Two-letter country code chosen by the firmware. */
			sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
			sc->sc_fw_mcc[1] = notif->mcc & 0xff;
			sc->sc_fw_mcc[2] = '\0';
			break;
		}

		case IWM_DTS_MEASUREMENT_NOTIFICATION:
			break;

		/* Generic command completions: copy status for the waiter. */
		case IWM_PHY_CONFIGURATION_CMD:
		case IWM_TX_ANT_CONFIGURATION_CMD:
		case IWM_ADD_STA:
		case IWM_MAC_CONTEXT_CMD:
		case IWM_REPLY_SF_CFG_CMD:
		case IWM_POWER_TABLE_CMD:
		case IWM_PHY_CONTEXT_CMD:
		case IWM_BINDING_CONTEXT_CMD:
		case IWM_TIME_EVENT_CMD:
		case IWM_SCAN_REQUEST_CMD:
		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
		case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
		case IWM_SCAN_OFFLOAD_REQUEST_CMD:
		case IWM_REPLY_BEACON_FILTERING_CMD:
		case IWM_MAC_PM_POWER_TABLE:
		case IWM_TIME_QUOTA_CMD:
		case IWM_REMOVE_STA:
		case IWM_TXPATH_FLUSH:
		case IWM_LQ_CMD:
		case IWM_BT_CONFIG:
		case IWM_REPLY_THERMAL_MNG_BACKOFF:
			SYNC_RESP_STRUCT(cresp, pkt);
			if (sc->sc_wantresp == ((qid << 16) | idx)) {
				memcpy(sc->sc_cmd_resp,
				    pkt, sizeof(*pkt) + sizeof(*cresp));
			}
			break;

		/* ignore */
		case 0x6c: /* IWM_PHY_DB_CMD */
			break;

		case IWM_INIT_COMPLETE_NOTIF:
			sc->sc_init_complete = 1;
			wakeup(&sc->sc_init_complete);
			break;

		case IWM_SCAN_OFFLOAD_COMPLETE: {
			struct iwm_periodic_scan_complete *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			break;
		}

		/* Scan finished: hand off to the end-of-scan workqueue. */
		case IWM_SCAN_ITERATION_COMPLETE: {
			struct iwm_lmac_scan_complete_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			workqueue_enqueue(sc->sc_eswq, &sc->sc_eswk, NULL);
			break;
		}

		case IWM_SCAN_COMPLETE_UMAC: {
			struct iwm_umac_scan_complete *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			workqueue_enqueue(sc->sc_eswq, &sc->sc_eswk, NULL);
			break;
		}

		case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
			struct iwm_umac_scan_iter_complete_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			workqueue_enqueue(sc->sc_eswq, &sc->sc_eswk, NULL);
			break;
		}

		case IWM_REPLY_ERROR: {
			struct iwm_error_resp *resp;
			SYNC_RESP_STRUCT(resp, pkt);
			aprint_error_dev(sc->sc_dev,
			    "firmware error 0x%x, cmd 0x%x\n",
			    le32toh(resp->error_type), resp->cmd_id);
			break;
		}

		case IWM_TIME_EVENT_NOTIFICATION: {
			struct iwm_time_event_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);
			break;
		}

		case IWM_MCAST_FILTER_CMD:
			break;

		case IWM_SCD_QUEUE_CFG: {
			struct iwm_scd_txq_cfg_rsp *rsp;
			SYNC_RESP_STRUCT(rsp, pkt);
			break;
		}

		default:
			aprint_error_dev(sc->sc_dev,
			    "unhandled firmware response 0x%x 0x%x/0x%x "
			    "rx ring %d[%d]\n",
			    code, pkt->hdr.code, pkt->len_n_flags, qid, idx);
			break;
		}

		/*
		 * uCode sets bit 0x80 when it originates the notification,
		 * i.e. when the notification is not a direct response to a
		 * command sent by the driver.
		 * For example, uCode issues IWM_REPLY_RX when it sends a
		 * received frame to the driver.
		 */
		if (!(orig_qid & (1 << 7))) {
			iwm_cmd_done(sc, qid, idx);
		}

		ADVANCE_RXQ(sc);
	}

	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/*
	 * Seems like the hardware gets upset unless we align the write by 8??
	 */
	hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
}
7021
7022 static int
7023 iwm_intr(void *arg)
7024 {
7025 struct iwm_softc *sc = arg;
7026 struct ifnet *ifp = IC2IFP(&sc->sc_ic);
7027 int handled = 0;
7028 int r1, r2, rv = 0;
7029 int isperiodic = 0;
7030
7031 IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
7032
7033 if (sc->sc_flags & IWM_FLAG_USE_ICT) {
7034 uint32_t *ict = sc->ict_dma.vaddr;
7035 int tmp;
7036
7037 bus_dmamap_sync(sc->sc_dmat, sc->ict_dma.map,
7038 0, sc->ict_dma.size, BUS_DMASYNC_POSTREAD);
7039 tmp = htole32(ict[sc->ict_cur]);
7040 if (!tmp)
7041 goto out_ena;
7042
7043 /*
7044 * ok, there was something. keep plowing until we have all.
7045 */
7046 r1 = r2 = 0;
7047 while (tmp) {
7048 r1 |= tmp;
7049 ict[sc->ict_cur] = 0; /* Acknowledge. */
7050 bus_dmamap_sync(sc->sc_dmat, sc->ict_dma.map,
7051 &ict[sc->ict_cur] - ict, sizeof(*ict),
7052 BUS_DMASYNC_PREWRITE);
7053 sc->ict_cur = (sc->ict_cur + 1) % IWM_ICT_COUNT;
7054 tmp = htole32(ict[sc->ict_cur]);
7055 }
7056
7057 /* this is where the fun begins. don't ask */
7058 if (r1 == 0xffffffff)
7059 r1 = 0;
7060
7061 /* i am not expected to understand this */
7062 if (r1 & 0xc0000)
7063 r1 |= 0x8000;
7064 r1 = (0xff & r1) | ((0xff00 & r1) << 16);
7065 } else {
7066 r1 = IWM_READ(sc, IWM_CSR_INT);
7067 if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
7068 goto out;
7069 r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
7070 }
7071 if (r1 == 0 && r2 == 0) {
7072 goto out_ena;
7073 }
7074
7075 IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
7076
7077 /* ignored */
7078 handled |= (r1 & (IWM_CSR_INT_BIT_ALIVE /*| IWM_CSR_INT_BIT_SCD*/));
7079
7080 if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
7081 #ifdef IWM_DEBUG
7082 int i;
7083
7084 iwm_nic_error(sc);
7085
7086 /* Dump driver status (TX and RX rings) while we're here. */
7087 DPRINTF(("driver status:\n"));
7088 for (i = 0; i < IWM_MAX_QUEUES; i++) {
7089 struct iwm_tx_ring *ring = &sc->txq[i];
7090 DPRINTF((" tx ring %2d: qid=%-2d cur=%-3d "
7091 "queued=%-3d\n",
7092 i, ring->qid, ring->cur, ring->queued));
7093 }
7094 DPRINTF((" rx ring: cur=%d\n", sc->rxq.cur));
7095 DPRINTF((" 802.11 state %s\n",
7096 ieee80211_state_name[sc->sc_ic.ic_state]));
7097 #endif
7098
7099 aprint_error_dev(sc->sc_dev, "fatal firmware error\n");
7100 ifp->if_flags &= ~IFF_UP;
7101 iwm_stop(ifp, 1);
7102 rv = 1;
7103 goto out;
7104
7105 }
7106
7107 if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
7108 handled |= IWM_CSR_INT_BIT_HW_ERR;
7109 aprint_error_dev(sc->sc_dev,
7110 "hardware error, stopping device\n");
7111 ifp->if_flags &= ~IFF_UP;
7112 iwm_stop(ifp, 1);
7113 rv = 1;
7114 goto out;
7115 }
7116
7117 /* firmware chunk loaded */
7118 if (r1 & IWM_CSR_INT_BIT_FH_TX) {
7119 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
7120 handled |= IWM_CSR_INT_BIT_FH_TX;
7121 sc->sc_fw_chunk_done = 1;
7122 wakeup(&sc->sc_fw);
7123 }
7124
7125 if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
7126 handled |= IWM_CSR_INT_BIT_RF_KILL;
7127 if (iwm_check_rfkill(sc) && (ifp->if_flags & IFF_UP)) {
7128 ifp->if_flags &= ~IFF_UP;
7129 iwm_stop(ifp, 1);
7130 }
7131 }
7132
7133 if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
7134 handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
7135 IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
7136 if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
7137 IWM_WRITE_1(sc,
7138 IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
7139 isperiodic = 1;
7140 }
7141
7142 if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) ||
7143 isperiodic) {
7144 handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
7145 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
7146
7147 iwm_notif_intr(sc);
7148
7149 /* enable periodic interrupt, see above */
7150 if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) &&
7151 !isperiodic)
7152 IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
7153 IWM_CSR_INT_PERIODIC_ENA);
7154 }
7155
7156 rv = 1;
7157
7158 out_ena:
7159 iwm_restore_interrupts(sc);
7160 out:
7161 return rv;
7162 }
7163
7164 /*
7165 * Autoconf glue-sniffing
7166 */
7167
/*
 * PCI product ids this driver attaches to.  The 3165/8260 entries are
 * disabled (#if 0) although iwm_attach() carries firmware selections
 * for them — presumably not yet enabled/tested on NetBSD.
 */
static const pci_product_id_t iwm_devices[] = {
	PCI_PRODUCT_INTEL_WIFI_LINK_7260_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_7260_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_3160_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_3160_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_7265_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_7265_2,
#if 0
	PCI_PRODUCT_INTEL_WIFI_LINK_3165_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_3165_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_8260_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_8260_2,
#endif
};
7182
7183 static int
7184 iwm_match(device_t parent, cfdata_t match __unused, void *aux)
7185 {
7186 struct pci_attach_args *pa = aux;
7187
7188 if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_INTEL)
7189 return 0;
7190
7191 for (size_t i = 0; i < __arraycount(iwm_devices); i++)
7192 if (PCI_PRODUCT(pa->pa_id) == iwm_devices[i])
7193 return 1;
7194
7195 return 0;
7196 }
7197
/*
 * Deferred attachment: start the hardware once (to read the NVM via
 * the init firmware), then attach to the 802.11 layer.  Runs from the
 * config_mountroot hook (iwm_attach_hook) because firmware loading
 * needs the filesystem.  Idempotent via IWM_FLAG_ATTACHED.
 */
static int
iwm_preinit(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	int err;

	if (ISSET(sc->sc_flags, IWM_FLAG_ATTACHED))
		return 0;

	err = iwm_start_hw(sc);
	if (err) {
		aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
		return err;
	}

	/* Run the init firmware just to read NVM/calibration data. */
	err = iwm_run_init_mvm_ucode(sc, 1);
	iwm_stop_device(sc);
	if (err)
		return err;

	sc->sc_flags |= IWM_FLAG_ATTACHED;

	aprint_normal_dev(sc->sc_dev, "hw rev 0x%x, fw ver %s, address %s\n",
	    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK, sc->sc_fwver,
	    ether_sprintf(sc->sc_nvm.hw_addr));

#ifndef IEEE80211_NO_HT
	if (sc->sc_nvm.sku_cap_11n_enable)
		iwm_setup_ht_rates(sc);
#endif

	/* not all hardware can do 5GHz band */
	if (sc->sc_nvm.sku_cap_band_52GHz_enable)
		ic->ic_sup_rates[IEEE80211_MODE_11A] = ieee80211_std_rateset_11a;

	ieee80211_ifattach(ic);

	ic->ic_node_alloc = iwm_node_alloc;

	/* Override 802.11 state transition machine. */
	sc->sc_newstate = ic->ic_newstate;
	ic->ic_newstate = iwm_newstate;
	ieee80211_media_init(ic, iwm_media_change, ieee80211_media_status);
	ieee80211_announce(ic);

	iwm_radiotap_attach(sc);

	return 0;
}
7247
7248 static void
7249 iwm_attach_hook(device_t dev)
7250 {
7251 struct iwm_softc *sc = device_private(dev);
7252
7253 iwm_preinit(sc);
7254 }
7255
7256 static void
7257 iwm_attach(device_t parent, device_t self, void *aux)
7258 {
7259 struct iwm_softc *sc = device_private(self);
7260 struct pci_attach_args *pa = aux;
7261 struct ieee80211com *ic = &sc->sc_ic;
7262 struct ifnet *ifp = &sc->sc_ec.ec_if;
7263 pcireg_t reg, memtype;
7264 char intrbuf[PCI_INTRSTR_LEN];
7265 const char *intrstr;
7266 int err;
7267 int txq_i;
7268 const struct sysctlnode *node;
7269
7270 sc->sc_dev = self;
7271 sc->sc_pct = pa->pa_pc;
7272 sc->sc_pcitag = pa->pa_tag;
7273 sc->sc_dmat = pa->pa_dmat;
7274 sc->sc_pciid = pa->pa_id;
7275
7276 pci_aprint_devinfo(pa, NULL);
7277
7278 if (workqueue_create(&sc->sc_eswq, "iwmes",
7279 iwm_endscan_cb, sc, PRI_NONE, IPL_NET, 0))
7280 panic("%s: could not create workqueue: scan",
7281 device_xname(self));
7282 if (workqueue_create(&sc->sc_nswq, "iwmns",
7283 iwm_newstate_cb, sc, PRI_NONE, IPL_NET, 0))
7284 panic("%s: could not create workqueue: newstate",
7285 device_xname(self));
7286
7287 /*
7288 * Get the offset of the PCI Express Capability Structure in PCI
7289 * Configuration Space.
7290 */
7291 err = pci_get_capability(sc->sc_pct, sc->sc_pcitag,
7292 PCI_CAP_PCIEXPRESS, &sc->sc_cap_off, NULL);
7293 if (err == 0) {
7294 aprint_error_dev(self,
7295 "PCIe capability structure not found!\n");
7296 return;
7297 }
7298
7299 /* Clear device-specific "PCI retry timeout" register (41h). */
7300 reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
7301 pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
7302
7303 /* Enable bus-mastering */
7304 reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
7305 reg |= PCI_COMMAND_MASTER_ENABLE;
7306 pci_conf_write(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, reg);
7307
7308 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START);
7309 err = pci_mapreg_map(pa, PCI_MAPREG_START, memtype, 0,
7310 &sc->sc_st, &sc->sc_sh, NULL, &sc->sc_sz);
7311 if (err) {
7312 aprint_error_dev(self, "can't map mem space\n");
7313 return;
7314 }
7315
7316 /* Install interrupt handler. */
7317 err = pci_intr_alloc(pa, &sc->sc_pihp, NULL, 0);
7318 if (err) {
7319 aprint_error_dev(self, "can't allocate interrupt\n");
7320 return;
7321 }
7322 if (pci_intr_type(sc->sc_pct, sc->sc_pihp[0]) == PCI_INTR_TYPE_INTX) {
7323 reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
7324 PCI_COMMAND_STATUS_REG);
7325 if (ISSET(reg, PCI_COMMAND_INTERRUPT_DISABLE)) {
7326 CLR(reg, PCI_COMMAND_INTERRUPT_DISABLE);
7327 pci_conf_write(sc->sc_pct, sc->sc_pcitag,
7328 PCI_COMMAND_STATUS_REG, reg);
7329 }
7330 }
7331 intrstr = pci_intr_string(sc->sc_pct, sc->sc_pihp[0], intrbuf,
7332 sizeof(intrbuf));
7333 sc->sc_ih = pci_intr_establish(sc->sc_pct, sc->sc_pihp[0], IPL_NET,
7334 iwm_intr, sc);
7335 if (sc->sc_ih == NULL) {
7336 aprint_error_dev(self, "can't establish interrupt");
7337 if (intrstr != NULL)
7338 aprint_error(" at %s", intrstr);
7339 aprint_error("\n");
7340 return;
7341 }
7342 aprint_normal_dev(self, "interrupting at %s\n", intrstr);
7343
7344 sc->sc_wantresp = IWM_CMD_RESP_IDLE;
7345
7346 sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
7347 switch (PCI_PRODUCT(sc->sc_pciid)) {
7348 case PCI_PRODUCT_INTEL_WIFI_LINK_3160_1:
7349 case PCI_PRODUCT_INTEL_WIFI_LINK_3160_2:
7350 sc->sc_fwname = "iwlwifi-3160-16.ucode";
7351 sc->host_interrupt_operation_mode = 1;
7352 sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
7353 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
7354 break;
7355 case PCI_PRODUCT_INTEL_WIFI_LINK_3165_1:
7356 case PCI_PRODUCT_INTEL_WIFI_LINK_3165_2:
7357 sc->sc_fwname = "iwlwifi-7265D-16.ucode";
7358 sc->host_interrupt_operation_mode = 0;
7359 sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
7360 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
7361 break;
7362 case PCI_PRODUCT_INTEL_WIFI_LINK_7260_1:
7363 case PCI_PRODUCT_INTEL_WIFI_LINK_7260_2:
7364 sc->sc_fwname = "iwlwifi-7260-16.ucode";
7365 sc->host_interrupt_operation_mode = 1;
7366 sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
7367 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
7368 break;
7369 case PCI_PRODUCT_INTEL_WIFI_LINK_7265_1:
7370 case PCI_PRODUCT_INTEL_WIFI_LINK_7265_2:
7371 sc->sc_fwname = (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) ==
7372 IWM_CSR_HW_REV_TYPE_7265D ?
7373 "iwlwifi-7265D-16.ucode": "iwlwifi-7265-16.ucode";
7374 sc->host_interrupt_operation_mode = 0;
7375 sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
7376 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
7377 break;
7378 case PCI_PRODUCT_INTEL_WIFI_LINK_8260_1:
7379 case PCI_PRODUCT_INTEL_WIFI_LINK_8260_2:
7380 sc->sc_fwname = "iwlwifi-8000C-16.ucode";
7381 sc->host_interrupt_operation_mode = 0;
7382 sc->sc_device_family = IWM_DEVICE_FAMILY_8000;
7383 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
7384 break;
7385 default:
7386 aprint_error_dev(self, "unknown product %#x",
7387 PCI_PRODUCT(sc->sc_pciid));
7388 return;
7389 }
7390 DPRINTF(("%s: firmware=%s\n", DEVNAME(sc), sc->sc_fwname));
7391
7392 /*
7393 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
7394 * changed, and now the revision step also includes bit 0-1 (no more
7395 * "dash" value). To keep hw_rev backwards compatible - we'll store it
7396 * in the old format.
7397 */
7398
7399 if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
7400 sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
7401 (IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
7402
7403 if (iwm_prepare_card_hw(sc) != 0) {
7404 aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
7405 return;
7406 }
7407
7408 if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
7409 uint32_t hw_step;
7410
7411 /*
7412 * In order to recognize C step the driver should read the
7413 * chip version id located at the AUX bus MISC address.
7414 */
7415 IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
7416 IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
7417 DELAY(2);
7418
7419 err = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
7420 IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
7421 IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
7422 25000);
7423 if (!err) {
7424 aprint_error_dev(sc->sc_dev,
7425 "failed to wake up the nic\n");
7426 return;
7427 }
7428
7429 if (iwm_nic_lock(sc)) {
7430 hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
7431 hw_step |= IWM_ENABLE_WFPM;
7432 iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
7433 hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
7434 hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
7435 if (hw_step == 0x3)
7436 sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
7437 (IWM_SILICON_C_STEP << 2);
7438 iwm_nic_unlock(sc);
7439 } else {
7440 aprint_error_dev(sc->sc_dev,
7441 "failed to lock the nic\n");
7442 return;
7443 }
7444 }
7445
7446 /*
7447 * Allocate DMA memory for firmware transfers.
7448 * Must be aligned on a 16-byte boundary.
7449 */
7450 err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma, sc->sc_fwdmasegsz,
7451 16);
7452 if (err) {
7453 aprint_error_dev(sc->sc_dev,
7454 "could not allocate memory for firmware\n");
7455 return;
7456 }
7457
7458 /* Allocate "Keep Warm" page, used internally by the card. */
7459 err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
7460 if (err) {
7461 aprint_error_dev(sc->sc_dev,
7462 "could not allocate keep warm page\n");
7463 goto fail1;
7464 }
7465
7466 /* Allocate interrupt cause table (ICT).*/
7467 err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma, IWM_ICT_SIZE,
7468 1 << IWM_ICT_PADDR_SHIFT);
7469 if (err) {
7470 aprint_error_dev(sc->sc_dev, "could not allocate ICT table\n");
7471 goto fail2;
7472 }
7473
7474 /* TX scheduler rings must be aligned on a 1KB boundary. */
7475 err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
7476 __arraycount(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
7477 if (err) {
7478 aprint_error_dev(sc->sc_dev,
7479 "could not allocate TX scheduler rings\n");
7480 goto fail3;
7481 }
7482
7483 for (txq_i = 0; txq_i < __arraycount(sc->txq); txq_i++) {
7484 err = iwm_alloc_tx_ring(sc, &sc->txq[txq_i], txq_i);
7485 if (err) {
7486 aprint_error_dev(sc->sc_dev,
7487 "could not allocate TX ring %d\n", txq_i);
7488 goto fail4;
7489 }
7490 }
7491
7492 err = iwm_alloc_rx_ring(sc, &sc->rxq);
7493 if (err) {
7494 aprint_error_dev(sc->sc_dev, "could not allocate RX ring\n");
7495 goto fail4;
7496 }
7497
7498 /* Clear pending interrupts. */
7499 IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
7500
7501 if ((err = sysctl_createv(&sc->sc_clog, 0, NULL, &node,
7502 0, CTLTYPE_NODE, device_xname(sc->sc_dev),
7503 SYSCTL_DESCR("iwm per-controller controls"),
7504 NULL, 0, NULL, 0,
7505 CTL_HW, iwm_sysctl_root_num, CTL_CREATE,
7506 CTL_EOL)) != 0) {
7507 aprint_normal_dev(sc->sc_dev,
7508 "couldn't create iwm per-controller sysctl node\n");
7509 }
7510 if (err == 0) {
7511 int iwm_nodenum = node->sysctl_num;
7512
7513 /* Reload firmware sysctl node */
7514 if ((err = sysctl_createv(&sc->sc_clog, 0, NULL, &node,
7515 CTLFLAG_READWRITE, CTLTYPE_INT, "fw_loaded",
7516 SYSCTL_DESCR("Reload firmware"),
7517 iwm_sysctl_fw_loaded_handler, 0, (void *)sc, 0,
7518 CTL_HW, iwm_sysctl_root_num, iwm_nodenum, CTL_CREATE,
7519 CTL_EOL)) != 0) {
7520 aprint_normal_dev(sc->sc_dev,
7521 "couldn't create load_fw sysctl node\n");
7522 }
7523 }
7524
7525 /*
7526 * Attach interface
7527 */
7528 ic->ic_ifp = ifp;
7529 ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */
7530 ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */
7531 ic->ic_state = IEEE80211_S_INIT;
7532
7533 /* Set device capabilities. */
7534 ic->ic_caps =
7535 IEEE80211_C_WEP | /* WEP */
7536 IEEE80211_C_WPA | /* 802.11i */
7537 #ifdef notyet
7538 IEEE80211_C_SCANALL | /* device scans all channels at once */
7539 IEEE80211_C_SCANALLBAND | /* device scans all bands at once */
7540 #endif
7541 IEEE80211_C_SHSLOT | /* short slot time supported */
7542 IEEE80211_C_SHPREAMBLE; /* short preamble supported */
7543
7544 #ifndef IEEE80211_NO_HT
7545 ic->ic_htcaps = IEEE80211_HTCAP_SGI20;
7546 ic->ic_htxcaps = 0;
7547 ic->ic_txbfcaps = 0;
7548 ic->ic_aselcaps = 0;
7549 ic->ic_ampdu_params = (IEEE80211_AMPDU_PARAM_SS_4 | 0x3 /* 64k */);
7550 #endif
7551
7552 /* all hardware can do 2.4GHz band */
7553 ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b;
7554 ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g;
7555
7556 for (int i = 0; i < __arraycount(sc->sc_phyctxt); i++) {
7557 sc->sc_phyctxt[i].id = i;
7558 }
7559
7560 sc->sc_amrr.amrr_min_success_threshold = 1;
7561 sc->sc_amrr.amrr_max_success_threshold = 15;
7562
7563 /* IBSS channel undefined for now. */
7564 ic->ic_ibss_chan = &ic->ic_channels[1];
7565
7566 #if 0
7567 ic->ic_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
7568 #endif
7569
7570 ifp->if_softc = sc;
7571 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
7572 ifp->if_init = iwm_init;
7573 ifp->if_stop = iwm_stop;
7574 ifp->if_ioctl = iwm_ioctl;
7575 ifp->if_start = iwm_start;
7576 ifp->if_watchdog = iwm_watchdog;
7577 IFQ_SET_READY(&ifp->if_snd);
7578 memcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
7579
7580 if_initialize(ifp);
7581 #if 0
7582 ieee80211_ifattach(ic);
7583 #else
7584 ether_ifattach(ifp, ic->ic_myaddr); /* XXX */
7585 #endif
7586 /* Use common softint-based if_input */
7587 ifp->if_percpuq = if_percpuq_create(ifp);
7588 if_deferred_start_init(ifp, NULL);
7589 if_register(ifp);
7590
7591 callout_init(&sc->sc_calib_to, 0);
7592 callout_setfunc(&sc->sc_calib_to, iwm_calib_timeout, sc);
7593 callout_init(&sc->sc_led_blink_to, 0);
7594 callout_setfunc(&sc->sc_led_blink_to, iwm_led_blink_timeout, sc);
7595 #ifndef IEEE80211_NO_HT
7596 if (workqueue_create(&sc->sc_setratewq, "iwmsr",
7597 iwm_setrates_task, sc, PRI_NONE, IPL_NET, 0))
7598 panic("%s: could not create workqueue: setrates",
7599 device_xname(self));
7600 if (workqueue_create(&sc->sc_bawq, "iwmba",
7601 iwm_ba_task, sc, PRI_NONE, IPL_NET, 0))
7602 panic("%s: could not create workqueue: blockack",
7603 device_xname(self));
7604 if (workqueue_create(&sc->sc_htprowq, "iwmhtpro",
7605 iwm_htprot_task, sc, PRI_NONE, IPL_NET, 0))
7606 panic("%s: could not create workqueue: htprot",
7607 device_xname(self));
7608 #endif
7609
7610 if (pmf_device_register(self, NULL, NULL))
7611 pmf_class_network_register(self, ifp);
7612 else
7613 aprint_error_dev(self, "couldn't establish power handler\n");
7614
7615 /*
7616 * We can't do normal attach before the file system is mounted
7617 * because we cannot read the MAC address without loading the
7618 * firmware from disk. So we postpone until mountroot is done.
7619 * Notably, this will require a full driver unload/load cycle
7620 * (or reboot) in case the firmware is not present when the
7621 * hook runs.
7622 */
7623 config_mountroot(self, iwm_attach_hook);
7624
7625 return;
7626
7627 fail4: while (--txq_i >= 0)
7628 iwm_free_tx_ring(sc, &sc->txq[txq_i]);
7629 iwm_free_rx_ring(sc, &sc->rxq);
7630 iwm_dma_contig_free(&sc->sched_dma);
7631 fail3: if (sc->ict_dma.vaddr != NULL)
7632 iwm_dma_contig_free(&sc->ict_dma);
7633 fail2: iwm_dma_contig_free(&sc->kw_dma);
7634 fail1: iwm_dma_contig_free(&sc->fw_dma);
7635 }
7636
7637 void
7638 iwm_radiotap_attach(struct iwm_softc *sc)
7639 {
7640 struct ifnet *ifp = sc->sc_ic.ic_ifp;
7641
7642 bpf_attach2(ifp, DLT_IEEE802_11_RADIO,
7643 sizeof (struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN,
7644 &sc->sc_drvbpf);
7645
7646 sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
7647 sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
7648 sc->sc_rxtap.wr_ihdr.it_present = htole32(IWM_RX_RADIOTAP_PRESENT);
7649
7650 sc->sc_txtap_len = sizeof sc->sc_txtapu;
7651 sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
7652 sc->sc_txtap.wt_ihdr.it_present = htole32(IWM_TX_RADIOTAP_PRESENT);
7653 }
7654
#if 0	/* suspend/resume and deactivation support not yet wired up */
/*
 * Stop the interface and, if it is administratively up, bring it back
 * up again.  Serialized against concurrent configuration via the
 * ioctl lock.
 */
static void
iwm_init_task(void *arg1)
{
	struct iwm_softc *sc = arg1;
	struct ifnet *ifp = &sc->sc_ic.ic_if;
	int s;

	rw_enter_write(&sc->ioctl_rwl);
	s = splnet();

	iwm_stop(ifp, 0);
	/* Reinitialize only when the interface is up but not running. */
	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP)
		iwm_init(ifp);

	splx(s);
	rw_exit(&sc->ioctl_rwl);
}

/*
 * Resume handler: re-clear the device-specific "PCI retry timeout"
 * register (firmware/BIOS may have reprogrammed it while asleep) and
 * restart the interface.
 */
static void
iwm_wakeup(struct iwm_softc *sc)
{
	pcireg_t reg;

	/* Clear device-specific "PCI retry timeout" register (41h). */
	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);

	iwm_init_task(sc);
}

/*
 * autoconf(9) activation hook.  Only DVACT_DEACTIVATE is handled: it
 * stops the interface if it is currently running.
 */
static int
iwm_activate(device_t self, enum devact act)
{
	struct iwm_softc *sc = device_private(self);
	struct ifnet *ifp = IC2IFP(&sc->sc_ic);

	switch (act) {
	case DVACT_DEACTIVATE:
		if (ifp->if_flags & IFF_RUNNING)
			iwm_stop(ifp, 0);
		return 0;
	default:
		return EOPNOTSUPP;
	}
}
#endif
7702
/*
 * Hook the driver into autoconf(9): match and attach entry points
 * only; no detach or activate handlers are provided.
 */
CFATTACH_DECL_NEW(iwm, sizeof(struct iwm_softc), iwm_match, iwm_attach,
    NULL, NULL);
7705
7706 static int
7707 iwm_sysctl_fw_loaded_handler(SYSCTLFN_ARGS)
7708 {
7709 struct sysctlnode node;
7710 struct iwm_softc *sc;
7711 int err, t;
7712
7713 node = *rnode;
7714 sc = node.sysctl_data;
7715 t = ISSET(sc->sc_flags, IWM_FLAG_FW_LOADED) ? 1 : 0;
7716 node.sysctl_data = &t;
7717 err = sysctl_lookup(SYSCTLFN_CALL(&node));
7718 if (err || newp == NULL)
7719 return err;
7720
7721 if (t == 0)
7722 CLR(sc->sc_flags, IWM_FLAG_FW_LOADED);
7723 return 0;
7724 }
7725
7726 SYSCTL_SETUP(sysctl_iwm, "sysctl iwm(4) subtree setup")
7727 {
7728 const struct sysctlnode *rnode;
7729 #ifdef IWM_DEBUG
7730 const struct sysctlnode *cnode;
7731 #endif /* IWM_DEBUG */
7732 int rc;
7733
7734 if ((rc = sysctl_createv(clog, 0, NULL, &rnode,
7735 CTLFLAG_PERMANENT, CTLTYPE_NODE, "iwm",
7736 SYSCTL_DESCR("iwm global controls"),
7737 NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
7738 goto err;
7739
7740 iwm_sysctl_root_num = rnode->sysctl_num;
7741
7742 #ifdef IWM_DEBUG
7743 /* control debugging printfs */
7744 if ((rc = sysctl_createv(clog, 0, &rnode, &cnode,
7745 CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT,
7746 "debug", SYSCTL_DESCR("Enable debugging output"),
7747 NULL, 0, &iwm_debug, 0, CTL_CREATE, CTL_EOL)) != 0)
7748 goto err;
7749 #endif /* IWM_DEBUG */
7750
7751 return;
7752
7753 err:
7754 aprint_error("%s: sysctl_createv failed (rc = %d)\n", __func__, rc);
7755 }
7756