/*	$NetBSD: if_iwm.c,v 1.64 2017/01/17 08:47:32 nonaka Exp $	*/
/*	OpenBSD: if_iwm.c,v 1.148 2016/11/19 21:07:08 stsp Exp	*/
#define IEEE80211_NO_HT
/*
 * Copyright (c) 2014, 2016 genua gmbh <info@genua.de>
 *   Author: Stefan Sperling <stsp@openbsd.org>
 * Copyright (c) 2014 Fixup Software Ltd.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*-
 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
 * which were used as the reference documentation for this implementation.
 *
 ***********************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_iwm.c,v 1.64 2017/01/17 08:47:32 nonaka Exp $");

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <sys/cpu.h>
#include <sys/bus.h>
#include <sys/workqueue.h>
#include <machine/endian.h>
#include <machine/intr.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>
#include <dev/firmload.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <netinet/in.h>
#include <netinet/ip.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_amrr.h>
#include <net80211/ieee80211_radiotap.h>

#define DEVNAME(_s)	device_xname((_s)->sc_dev)
#define IC2IFP(_ic_)	((_ic_)->ic_ifp)

#define le16_to_cpup(_a_)	(le16toh(*(const uint16_t *)(_a_)))
#define le32_to_cpup(_a_)	(le32toh(*(const uint32_t *)(_a_)))

#ifdef IWM_DEBUG
#define DPRINTF(x)	do { if (iwm_debug > 0) printf x; } while (0)
#define DPRINTFN(n, x)	do { if (iwm_debug >= (n)) printf x; } while (0)
int iwm_debug = 0;
#else
#define DPRINTF(x)	do { ; } while (0)
#define DPRINTFN(n, x)	do { ; } while (0)
#endif

#include <dev/pci/if_iwmreg.h>
#include <dev/pci/if_iwmvar.h>

static const uint8_t iwm_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};

static const uint8_t iwm_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};

#define IWM_NUM_2GHZ_CHANNELS	14

static const struct iwm_rate {
	uint8_t rate;
	uint8_t plcp;
	uint8_t ht_plcp;
} iwm_rates[] = {
		/* Legacy */		/* HT */
	{   2,	IWM_RATE_1M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{   4,	IWM_RATE_2M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{  11,	IWM_RATE_5M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{  22,	IWM_RATE_11M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{  12,	IWM_RATE_6M_PLCP,	IWM_RATE_HT_SISO_MCS_0_PLCP },
	{  18,	IWM_RATE_9M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{  24,	IWM_RATE_12M_PLCP,	IWM_RATE_HT_SISO_MCS_1_PLCP },
	{  36,	IWM_RATE_18M_PLCP,	IWM_RATE_HT_SISO_MCS_2_PLCP },
	{  48,	IWM_RATE_24M_PLCP,	IWM_RATE_HT_SISO_MCS_3_PLCP },
	{  72,	IWM_RATE_36M_PLCP,	IWM_RATE_HT_SISO_MCS_4_PLCP },
	{  96,	IWM_RATE_48M_PLCP,	IWM_RATE_HT_SISO_MCS_5_PLCP },
	{ 108,	IWM_RATE_54M_PLCP,	IWM_RATE_HT_SISO_MCS_6_PLCP },
	{ 128,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_SISO_MCS_7_PLCP },
};
#define IWM_RIDX_CCK	0
#define IWM_RIDX_OFDM	4
#define IWM_RIDX_MAX	(__arraycount(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_)	((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_)	((_i_) >= IWM_RIDX_OFDM)
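
/*
 * Illustrative sketch (added commentary, not part of the driver): how
 * the table above is typically consumed.  Net80211 rates come in units
 * of 500 kbps and match the 'rate' field, so a lookup helper can walk
 * iwm_rates[] and classify the result with the IWM_RIDX_* macros.  The
 * helper name iwm_rate2ridx() is hypothetical.
 */
#if 0
static int
iwm_rate2ridx(uint8_t rate)	/* rate in 500 kbps units, e.g. 12 = 6Mb/s */
{
	int ridx;

	for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++) {
		if (iwm_rates[ridx].rate == rate)
			return ridx;	/* IWM_RIDX_IS_CCK(ridx) for 1-11Mb/s */
	}
	return -1;			/* not a supported legacy rate */
}
#endif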

#ifndef IEEE80211_NO_HT
/* Convert an MCS index into an iwm_rates[] index. */
static const int iwm_mcs2ridx[] = {
	IWM_RATE_MCS_0_INDEX,
	IWM_RATE_MCS_1_INDEX,
	IWM_RATE_MCS_2_INDEX,
	IWM_RATE_MCS_3_INDEX,
	IWM_RATE_MCS_4_INDEX,
	IWM_RATE_MCS_5_INDEX,
	IWM_RATE_MCS_6_INDEX,
	IWM_RATE_MCS_7_INDEX,
};
#endif

struct iwm_nvm_section {
	uint16_t length;
	uint8_t *data;
};

struct iwm_newstate_state {
	struct work ns_wk;
	enum ieee80211_state ns_nstate;
	int ns_arg;
	int ns_generation;
};

static int	iwm_store_cscheme(struct iwm_softc *, uint8_t *, size_t);
static int	iwm_firmware_store_section(struct iwm_softc *,
		    enum iwm_ucode_type, uint8_t *, size_t);
static int	iwm_set_default_calib(struct iwm_softc *, const void *);
static int	iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
static uint32_t iwm_read_prph(struct iwm_softc *, uint32_t);
static void	iwm_write_prph(struct iwm_softc *, uint32_t, uint32_t);
#ifdef IWM_DEBUG
static int	iwm_read_mem(struct iwm_softc *, uint32_t, void *, int);
#endif
static int	iwm_write_mem(struct iwm_softc *, uint32_t, const void *, int);
static int	iwm_write_mem32(struct iwm_softc *, uint32_t, uint32_t);
static int	iwm_poll_bit(struct iwm_softc *, int, uint32_t, uint32_t, int);
static int	iwm_nic_lock(struct iwm_softc *);
static void	iwm_nic_unlock(struct iwm_softc *);
static void	iwm_set_bits_mask_prph(struct iwm_softc *, uint32_t, uint32_t,
		    uint32_t);
static void	iwm_set_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
static void	iwm_clear_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
static int	iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *,
		    bus_size_t, bus_size_t);
static void	iwm_dma_contig_free(struct iwm_dma_info *);
static int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static void	iwm_disable_rx_dma(struct iwm_softc *);
static void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
		    int);
static void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
static void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
static void	iwm_enable_rfkill_int(struct iwm_softc *);
static int	iwm_check_rfkill(struct iwm_softc *);
static void	iwm_enable_interrupts(struct iwm_softc *);
static void	iwm_restore_interrupts(struct iwm_softc *);
static void	iwm_disable_interrupts(struct iwm_softc *);
static void	iwm_ict_reset(struct iwm_softc *);
static int	iwm_set_hw_ready(struct iwm_softc *);
static int	iwm_prepare_card_hw(struct iwm_softc *);
static void	iwm_apm_config(struct iwm_softc *);
static int	iwm_apm_init(struct iwm_softc *);
static void	iwm_apm_stop(struct iwm_softc *);
static int	iwm_allow_mcast(struct iwm_softc *);
static int	iwm_start_hw(struct iwm_softc *);
static void	iwm_stop_device(struct iwm_softc *);
static void	iwm_nic_config(struct iwm_softc *);
static int	iwm_nic_rx_init(struct iwm_softc *);
static int	iwm_nic_tx_init(struct iwm_softc *);
static int	iwm_nic_init(struct iwm_softc *);
static int	iwm_enable_txq(struct iwm_softc *, int, int, int);
static int	iwm_post_alive(struct iwm_softc *);
static struct iwm_phy_db_entry *
		iwm_phy_db_get_section(struct iwm_softc *,
		    enum iwm_phy_db_section_type, uint16_t);
static int	iwm_phy_db_set_section(struct iwm_softc *,
		    struct iwm_calib_res_notif_phy_db *, uint16_t);
static int	iwm_is_valid_channel(uint16_t);
static uint8_t	iwm_ch_id_to_ch_index(uint16_t);
static uint16_t iwm_channel_id_to_papd(uint16_t);
static uint16_t iwm_channel_id_to_txp(struct iwm_softc *, uint16_t);
static int	iwm_phy_db_get_section_data(struct iwm_softc *, uint32_t,
		    uint8_t **, uint16_t *, uint16_t);
static int	iwm_send_phy_db_cmd(struct iwm_softc *, uint16_t, uint16_t,
		    void *);
static int	iwm_phy_db_send_all_channel_groups(struct iwm_softc *,
		    enum iwm_phy_db_section_type, uint8_t);
static int	iwm_send_phy_db_data(struct iwm_softc *);
static void	iwm_te_v2_to_v1(const struct iwm_time_event_cmd_v2 *,
		    struct iwm_time_event_cmd_v1 *);
static int	iwm_send_time_event_cmd(struct iwm_softc *,
		    const struct iwm_time_event_cmd_v2 *);
static void	iwm_protect_session(struct iwm_softc *, struct iwm_node *,
		    uint32_t, uint32_t);
static int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
		    uint16_t, uint8_t *, uint16_t *);
static int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
		    uint16_t *, size_t);
static void	iwm_init_channel_map(struct iwm_softc *, const uint16_t * const,
		    const uint8_t *, size_t);
#ifndef IEEE80211_NO_HT
static void	iwm_setup_ht_rates(struct iwm_softc *);
static void	iwm_htprot_task(void *);
static void	iwm_update_htprot(struct ieee80211com *,
		    struct ieee80211_node *);
static int	iwm_ampdu_rx_start(struct ieee80211com *,
		    struct ieee80211_node *, uint8_t);
static void	iwm_ampdu_rx_stop(struct ieee80211com *,
		    struct ieee80211_node *, uint8_t);
static void	iwm_sta_rx_agg(struct iwm_softc *, struct ieee80211_node *,
		    uint8_t, uint16_t, int);
#ifdef notyet
static int	iwm_ampdu_tx_start(struct ieee80211com *,
		    struct ieee80211_node *, uint8_t);
static void	iwm_ampdu_tx_stop(struct ieee80211com *,
		    struct ieee80211_node *, uint8_t);
#endif
static void	iwm_ba_task(void *);
#endif

static int	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
		    const uint16_t *, const uint16_t *, const uint16_t *,
		    const uint16_t *, const uint16_t *);
static void	iwm_set_hw_address_8000(struct iwm_softc *,
		    struct iwm_nvm_data *, const uint16_t *, const uint16_t *);
static int	iwm_parse_nvm_sections(struct iwm_softc *,
		    struct iwm_nvm_section *);
static int	iwm_nvm_init(struct iwm_softc *);
static int	iwm_firmware_load_sect(struct iwm_softc *, uint32_t,
		    const uint8_t *, uint32_t);
static int	iwm_firmware_load_chunk(struct iwm_softc *, uint32_t,
		    const uint8_t *, uint32_t);
static int	iwm_load_firmware_7000(struct iwm_softc *, enum iwm_ucode_type);
static int	iwm_load_cpu_sections_8000(struct iwm_softc *,
		    struct iwm_fw_sects *, int, int *);
static int	iwm_load_firmware_8000(struct iwm_softc *, enum iwm_ucode_type);
static int	iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
static int	iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
static int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
static int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
static int	iwm_load_ucode_wait_alive(struct iwm_softc *,
		    enum iwm_ucode_type);
static int	iwm_run_init_mvm_ucode(struct iwm_softc *, int);
static int	iwm_rx_addbuf(struct iwm_softc *, int, int);
static int	iwm_calc_rssi(struct iwm_softc *, struct iwm_rx_phy_info *);
static int	iwm_get_signal_strength(struct iwm_softc *,
		    struct iwm_rx_phy_info *);
static void	iwm_rx_rx_phy_cmd(struct iwm_softc *,
		    struct iwm_rx_packet *, struct iwm_rx_data *);
static int	iwm_get_noise(const struct iwm_statistics_rx_non_phy *);
static void	iwm_rx_rx_mpdu(struct iwm_softc *, struct iwm_rx_packet *,
		    struct iwm_rx_data *);
static void	iwm_rx_tx_cmd_single(struct iwm_softc *,
		    struct iwm_rx_packet *, struct iwm_node *);
static void	iwm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
		    struct iwm_rx_data *);
static int	iwm_binding_cmd(struct iwm_softc *, struct iwm_node *,
		    uint32_t);
#if 0
static int	iwm_binding_update(struct iwm_softc *, struct iwm_node *, int);
static int	iwm_binding_add_vif(struct iwm_softc *, struct iwm_node *);
#endif
static void	iwm_phy_ctxt_cmd_hdr(struct iwm_softc *, struct iwm_phy_ctxt *,
		    struct iwm_phy_context_cmd *, uint32_t, uint32_t);
static void	iwm_phy_ctxt_cmd_data(struct iwm_softc *,
		    struct iwm_phy_context_cmd *, struct ieee80211_channel *,
		    uint8_t, uint8_t);
static int	iwm_phy_ctxt_cmd(struct iwm_softc *, struct iwm_phy_ctxt *,
		    uint8_t, uint8_t, uint32_t, uint32_t);
static int	iwm_send_cmd(struct iwm_softc *, struct iwm_host_cmd *);
static int	iwm_send_cmd_pdu(struct iwm_softc *, uint32_t, uint32_t,
		    uint16_t, const void *);
static int	iwm_send_cmd_status(struct iwm_softc *, struct iwm_host_cmd *,
		    uint32_t *);
static int	iwm_send_cmd_pdu_status(struct iwm_softc *, uint32_t, uint16_t,
		    const void *, uint32_t *);
static void	iwm_free_resp(struct iwm_softc *, struct iwm_host_cmd *);
static void	iwm_cmd_done(struct iwm_softc *, int qid, int idx);
#if 0
static void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
		    uint16_t);
#endif
static const struct iwm_rate *
		iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
		    struct ieee80211_frame *, struct iwm_tx_cmd *);
static int	iwm_tx(struct iwm_softc *, struct mbuf *,
		    struct ieee80211_node *, int);
static void	iwm_led_enable(struct iwm_softc *);
static void	iwm_led_disable(struct iwm_softc *);
static int	iwm_led_is_enabled(struct iwm_softc *);
static void	iwm_led_blink_timeout(void *);
static void	iwm_led_blink_start(struct iwm_softc *);
static void	iwm_led_blink_stop(struct iwm_softc *);
static int	iwm_beacon_filter_send_cmd(struct iwm_softc *,
		    struct iwm_beacon_filter_cmd *);
static void	iwm_beacon_filter_set_cqm_params(struct iwm_softc *,
		    struct iwm_node *, struct iwm_beacon_filter_cmd *);
static int	iwm_update_beacon_abort(struct iwm_softc *, struct iwm_node *,
		    int);
static void	iwm_power_build_cmd(struct iwm_softc *, struct iwm_node *,
		    struct iwm_mac_power_cmd *);
static int	iwm_power_mac_update_mode(struct iwm_softc *,
		    struct iwm_node *);
static int	iwm_power_update_device(struct iwm_softc *);
#ifdef notyet
static int	iwm_enable_beacon_filter(struct iwm_softc *, struct iwm_node *);
#endif
static int	iwm_disable_beacon_filter(struct iwm_softc *);
static int	iwm_add_sta_cmd(struct iwm_softc *, struct iwm_node *, int);
static int	iwm_add_aux_sta(struct iwm_softc *);
static uint16_t iwm_scan_rx_chain(struct iwm_softc *);
static uint32_t iwm_scan_rate_n_flags(struct iwm_softc *, int, int);
#ifdef notyet
static uint16_t iwm_get_active_dwell(struct iwm_softc *, int, int);
static uint16_t iwm_get_passive_dwell(struct iwm_softc *, int);
#endif
static uint8_t	iwm_lmac_scan_fill_channels(struct iwm_softc *,
		    struct iwm_scan_channel_cfg_lmac *, int);
static int	iwm_fill_probe_req(struct iwm_softc *,
		    struct iwm_scan_probe_req *);
static int	iwm_lmac_scan(struct iwm_softc *);
static int	iwm_config_umac_scan(struct iwm_softc *);
static int	iwm_umac_scan(struct iwm_softc *);
static uint8_t	iwm_ridx2rate(struct ieee80211_rateset *, int);
static void	iwm_ack_rates(struct iwm_softc *, struct iwm_node *, int *,
		    int *);
static void	iwm_mac_ctxt_cmd_common(struct iwm_softc *, struct iwm_node *,
		    struct iwm_mac_ctx_cmd *, uint32_t, int);
static void	iwm_mac_ctxt_cmd_fill_sta(struct iwm_softc *, struct iwm_node *,
		    struct iwm_mac_data_sta *, int);
static int	iwm_mac_ctxt_cmd(struct iwm_softc *, struct iwm_node *,
		    uint32_t, int);
static int	iwm_update_quotas(struct iwm_softc *, struct iwm_node *);
static int	iwm_auth(struct iwm_softc *);
static int	iwm_assoc(struct iwm_softc *);
static void	iwm_calib_timeout(void *);
#ifndef IEEE80211_NO_HT
static void	iwm_setrates_task(void *);
static int	iwm_setrates(struct iwm_node *);
#endif
static int	iwm_media_change(struct ifnet *);
static void	iwm_newstate_cb(struct work *, void *);
static int	iwm_newstate(struct ieee80211com *, enum ieee80211_state, int);
static void	iwm_endscan(struct iwm_softc *);
static void	iwm_fill_sf_command(struct iwm_softc *, struct iwm_sf_cfg_cmd *,
		    struct ieee80211_node *);
static int	iwm_sf_config(struct iwm_softc *, int);
static int	iwm_send_bt_init_conf(struct iwm_softc *);
static int	iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
static void	iwm_tt_tx_backoff(struct iwm_softc *, uint32_t);
static int	iwm_init_hw(struct iwm_softc *);
static int	iwm_init(struct ifnet *);
static void	iwm_start(struct ifnet *);
static void	iwm_stop(struct ifnet *, int);
static void	iwm_watchdog(struct ifnet *);
static int	iwm_ioctl(struct ifnet *, u_long, void *);
#ifdef IWM_DEBUG
static const char *iwm_desc_lookup(uint32_t);
static void	iwm_nic_error(struct iwm_softc *);
static void	iwm_nic_umac_error(struct iwm_softc *);
#endif
static void	iwm_notif_intr(struct iwm_softc *);
static int	iwm_intr(void *);
static void	iwm_softintr(void *);
static int	iwm_preinit(struct iwm_softc *);
static void	iwm_attach_hook(device_t);
static void	iwm_attach(device_t, device_t, void *);
#if 0
static void	iwm_init_task(void *);
static int	iwm_activate(device_t, enum devact);
static void	iwm_wakeup(struct iwm_softc *);
#endif
static void	iwm_radiotap_attach(struct iwm_softc *);
static int	iwm_sysctl_fw_loaded_handler(SYSCTLFN_PROTO);

static int iwm_sysctl_root_num;
static int iwm_lar_disable;

#ifndef IWM_DEFAULT_MCC
#define IWM_DEFAULT_MCC "ZZ"
#endif
static char iwm_default_mcc[3] = IWM_DEFAULT_MCC;

static int
iwm_firmload(struct iwm_softc *sc)
{
	struct iwm_fw_info *fw = &sc->sc_fw;
	firmware_handle_t fwh;
	int err;

	if (ISSET(sc->sc_flags, IWM_FLAG_FW_LOADED))
		return 0;

	/* Open firmware image. */
	err = firmware_open("if_iwm", sc->sc_fwname, &fwh);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not get firmware handle %s\n", sc->sc_fwname);
		return err;
	}

	if (fw->fw_rawdata != NULL && fw->fw_rawsize > 0) {
		kmem_free(fw->fw_rawdata, fw->fw_rawsize);
		fw->fw_rawdata = NULL;
	}

	fw->fw_rawsize = firmware_get_size(fwh);
	/*
	 * Well, this is how the Linux driver checks it ....
	 */
	if (fw->fw_rawsize < sizeof(uint32_t)) {
		aprint_error_dev(sc->sc_dev,
		    "firmware too short: %zd bytes\n", fw->fw_rawsize);
		err = EINVAL;
		goto out;
	}

	/* Read the firmware. */
	fw->fw_rawdata = kmem_alloc(fw->fw_rawsize, KM_SLEEP);
	if (fw->fw_rawdata == NULL) {
		aprint_error_dev(sc->sc_dev,
		    "not enough memory to stock firmware %s\n", sc->sc_fwname);
		err = ENOMEM;
		goto out;
	}
	err = firmware_read(fwh, 0, fw->fw_rawdata, fw->fw_rawsize);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not read firmware %s\n", sc->sc_fwname);
		goto out;
	}

	SET(sc->sc_flags, IWM_FLAG_FW_LOADED);
 out:
	/* caller will release memory, if necessary */

	firmware_close(fwh);
	return err;
}

/*
 * just maintaining status quo.
 */
static void
iwm_fix_channel(struct iwm_softc *sc, struct mbuf *m)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_frame *wh;
	uint8_t subtype;

	wh = mtod(m, struct ieee80211_frame *);

	if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_MGT)
		return;

	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

	if (subtype != IEEE80211_FC0_SUBTYPE_BEACON &&
	    subtype != IEEE80211_FC0_SUBTYPE_PROBE_RESP)
		return;

	int chan = le32toh(sc->sc_last_phy_info.channel);
	if (chan < __arraycount(ic->ic_channels))
		ic->ic_curchan = &ic->ic_channels[chan];
}

static int
iwm_store_cscheme(struct iwm_softc *sc, uint8_t *data, size_t dlen)
{
	struct iwm_fw_cscheme_list *l = (struct iwm_fw_cscheme_list *)data;

	if (dlen < sizeof(*l) ||
	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
		return EINVAL;

	/* we don't actually store anything for now, always use s/w crypto */

	return 0;
}

static int
iwm_firmware_store_section(struct iwm_softc *sc, enum iwm_ucode_type type,
    uint8_t *data, size_t dlen)
{
	struct iwm_fw_sects *fws;
	struct iwm_fw_onesect *fwone;

	if (type >= IWM_UCODE_TYPE_MAX)
		return EINVAL;
	if (dlen < sizeof(uint32_t))
		return EINVAL;

	fws = &sc->sc_fw.fw_sects[type];
	if (fws->fw_count >= IWM_UCODE_SECT_MAX)
		return EINVAL;

	fwone = &fws->fw_sect[fws->fw_count];

	/* first 32bit are device load offset */
	memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));

	/* rest is data */
	fwone->fws_data = data + sizeof(uint32_t);
	fwone->fws_len = dlen - sizeof(uint32_t);

	/* for freeing the buffer during driver unload */
	fwone->fws_alloc = data;
	fwone->fws_allocsize = dlen;

	fws->fw_count++;
	fws->fw_totlen += fwone->fws_len;

	return 0;
}
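
/*
 * For reference (added commentary, not from the original sources): each
 * section blob handed to iwm_firmware_store_section() above is laid out
 * as a 32-bit little-endian device load offset followed by the image
 * data itself:
 *
 *	byte 0..3	device load offset	(-> fws_devoff)
 *	byte 4..dlen-1	section image data	(-> fws_data, fws_len)
 */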

struct iwm_tlv_calib_data {
	uint32_t ucode_type;
	struct iwm_tlv_calib_ctrl calib;
} __packed;

static int
iwm_set_default_calib(struct iwm_softc *sc, const void *data)
{
	const struct iwm_tlv_calib_data *def_calib = data;
	uint32_t ucode_type = le32toh(def_calib->ucode_type);

	if (ucode_type >= IWM_UCODE_TYPE_MAX) {
		DPRINTF(("%s: Wrong ucode_type %u for default calibration.\n",
		    DEVNAME(sc), ucode_type));
		return EINVAL;
	}

	sc->sc_default_calib[ucode_type].flow_trigger =
	    def_calib->calib.flow_trigger;
	sc->sc_default_calib[ucode_type].event_trigger =
	    def_calib->calib.event_trigger;

	return 0;
}

static int
iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
{
	struct iwm_fw_info *fw = &sc->sc_fw;
	struct iwm_tlv_ucode_header *uhdr;
	struct iwm_ucode_tlv tlv;
	enum iwm_ucode_tlv_type tlv_type;
	uint8_t *data;
	int err, status;
	size_t len;

	if (ucode_type != IWM_UCODE_TYPE_INIT &&
	    fw->fw_status == IWM_FW_STATUS_DONE)
		return 0;

	if (fw->fw_status == IWM_FW_STATUS_NONE) {
		fw->fw_status = IWM_FW_STATUS_INPROGRESS;
	} else {
		while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
			tsleep(&sc->sc_fw, 0, "iwmfwp", 0);
	}
	status = fw->fw_status;

	if (status == IWM_FW_STATUS_DONE)
		return 0;

	err = iwm_firmload(sc);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not read firmware %s (error %d)\n",
		    sc->sc_fwname, err);
		goto out;
	}

	sc->sc_capaflags = 0;
	sc->sc_capa_n_scan_channels = IWM_MAX_NUM_SCAN_CHANNELS;
	memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
	memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));

	uhdr = (void *)fw->fw_rawdata;
	if (*(uint32_t *)fw->fw_rawdata != 0
	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
		aprint_error_dev(sc->sc_dev, "invalid firmware %s\n",
		    sc->sc_fwname);
		err = EINVAL;
		goto out;
	}

	snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%d.%d (API ver %d)",
	    IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
	    IWM_UCODE_MINOR(le32toh(uhdr->ver)),
	    IWM_UCODE_API(le32toh(uhdr->ver)));
	data = uhdr->data;
	len = fw->fw_rawsize - sizeof(*uhdr);

	while (len >= sizeof(tlv)) {
		size_t tlv_len;
		void *tlv_data;

		memcpy(&tlv, data, sizeof(tlv));
		tlv_len = le32toh(tlv.length);
		tlv_type = le32toh(tlv.type);

		len -= sizeof(tlv);
		data += sizeof(tlv);
		tlv_data = data;

		if (len < tlv_len) {
			aprint_error_dev(sc->sc_dev,
			    "firmware too short: %zu bytes\n", len);
			err = EINVAL;
			goto parse_out;
		}

		switch (tlv_type) {
		case IWM_UCODE_TLV_PROBE_MAX_LEN:
			if (tlv_len < sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capa_max_probe_len
			    = le32toh(*(uint32_t *)tlv_data);
			/* limit it to something sensible */
			if (sc->sc_capa_max_probe_len >
			    IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
				err = EINVAL;
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_PAN:
			if (tlv_len) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
			break;
		case IWM_UCODE_TLV_FLAGS:
			if (tlv_len < sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			/*
			 * Apparently there can be many flags, but Linux driver
			 * parses only the first one, and so do we.
			 *
			 * XXX: why does this override IWM_UCODE_TLV_PAN?
			 * Intentional or a bug?  Observations from
			 * current firmware file:
			 *  1) TLV_PAN is parsed first
			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
			 * ==> this resets TLV_PAN to itself... hnnnk
			 */
			sc->sc_capaflags = le32toh(*(uint32_t *)tlv_data);
			break;
		case IWM_UCODE_TLV_CSCHEME:
			err = iwm_store_cscheme(sc, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_NUM_OF_CPU: {
			uint32_t num_cpu;
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			num_cpu = le32toh(*(uint32_t *)tlv_data);
			if (num_cpu < 1 || num_cpu > 2) {
				err = EINVAL;
				goto parse_out;
			}
			break;
		}
		case IWM_UCODE_TLV_SEC_RT:
			err = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_SEC_INIT:
			err = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_INIT, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_SEC_WOWLAN:
			err = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_WOW, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_DEF_CALIB:
			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
				err = EINVAL;
				goto parse_out;
			}
			err = iwm_set_default_calib(sc, tlv_data);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_PHY_SKU:
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_fw_phy_config = le32toh(*(uint32_t *)tlv_data);
			break;

		case IWM_UCODE_TLV_API_CHANGES_SET: {
			struct iwm_ucode_api *api;
			if (tlv_len != sizeof(*api)) {
				err = EINVAL;
				goto parse_out;
			}
			api = (struct iwm_ucode_api *)tlv_data;
			/* Flags may exceed 32 bits in future firmware. */
			if (le32toh(api->api_index) > 0) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_ucode_api = le32toh(api->api_flags);
			break;
		}

		case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
			struct iwm_ucode_capa *capa;
			int idx, i;
			if (tlv_len != sizeof(*capa)) {
				err = EINVAL;
				goto parse_out;
			}
			capa = (struct iwm_ucode_capa *)tlv_data;
			idx = le32toh(capa->api_index);
			if (idx >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
				err = EINVAL;
				goto parse_out;
			}
			for (i = 0; i < 32; i++) {
				if (!ISSET(le32toh(capa->api_capa), __BIT(i)))
					continue;
				setbit(sc->sc_enabled_capa, i + (32 * idx));
			}
			break;
		}

		case IWM_UCODE_TLV_FW_UNDOCUMENTED1:
		case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
		case IWM_UCODE_TLV_FW_GSCAN_CAPA:
			/* ignore, not used by current driver */
			break;

		case IWM_UCODE_TLV_SEC_RT_USNIFFER:
			err = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
			    tlv_len);
			if (err)
				goto parse_out;
			break;

		case IWM_UCODE_TLV_N_SCAN_CHANNELS:
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capa_n_scan_channels =
			    le32toh(*(uint32_t *)tlv_data);
			break;

		case IWM_UCODE_TLV_FW_VERSION:
			if (tlv_len != sizeof(uint32_t) * 3) {
				err = EINVAL;
				goto parse_out;
			}
			snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
			    "%d.%d.%d",
			    le32toh(((uint32_t *)tlv_data)[0]),
			    le32toh(((uint32_t *)tlv_data)[1]),
			    le32toh(((uint32_t *)tlv_data)[2]));
			break;

		default:
			DPRINTF(("%s: unknown firmware section %d, abort\n",
			    DEVNAME(sc), tlv_type));
			err = EINVAL;
			goto parse_out;
		}

		len -= roundup(tlv_len, 4);
		data += roundup(tlv_len, 4);
	}

	KASSERT(err == 0);

 parse_out:
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "firmware parse error, section type %d\n", tlv_type);
	}

	if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
		aprint_error_dev(sc->sc_dev,
		    "device uses unsupported power ops\n");
		err = ENOTSUP;
	}

 out:
	if (err)
		fw->fw_status = IWM_FW_STATUS_NONE;
	else
		fw->fw_status = IWM_FW_STATUS_DONE;
	wakeup(&sc->sc_fw);

	if (err && fw->fw_rawdata != NULL) {
		kmem_free(fw->fw_rawdata, fw->fw_rawsize);
		fw->fw_rawdata = NULL;
		CLR(sc->sc_flags, IWM_FLAG_FW_LOADED);
		/* don't touch fw->fw_status */
		memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
	}
	return err;
}
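
/*
 * Added commentary (a sketch of the container format as parsed above,
 * not authoritative documentation): a TLV firmware image begins with a
 * zero word and an iwm_tlv_ucode_header whose magic must match
 * IWM_TLV_UCODE_MAGIC, followed by a sequence of records:
 *
 *	struct iwm_ucode_tlv { type, length }	(little endian)
 *	'length' bytes of payload
 *	padding up to the next 4-byte boundary
 *
 * iwm_read_firmware() walks these records until fewer than
 * sizeof(struct iwm_ucode_tlv) bytes remain.
 */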

static uint32_t
iwm_read_prph(struct iwm_softc *sc, uint32_t addr)
{
	IWM_WRITE(sc,
	    IWM_HBUS_TARG_PRPH_RADDR, ((addr & 0x000fffff) | (3 << 24)));
	IWM_BARRIER_READ_WRITE(sc);
	return IWM_READ(sc, IWM_HBUS_TARG_PRPH_RDAT);
}

static void
iwm_write_prph(struct iwm_softc *sc, uint32_t addr, uint32_t val)
{
	IWM_WRITE(sc,
	    IWM_HBUS_TARG_PRPH_WADDR, ((addr & 0x000fffff) | (3 << 24)));
	IWM_BARRIER_WRITE(sc);
	IWM_WRITE(sc, IWM_HBUS_TARG_PRPH_WDAT, val);
}

#ifdef IWM_DEBUG
static int
iwm_read_mem(struct iwm_softc *sc, uint32_t addr, void *buf, int dwords)
{
	int offs;
	uint32_t *vals = buf;

	if (iwm_nic_lock(sc)) {
		IWM_WRITE(sc, IWM_HBUS_TARG_MEM_RADDR, addr);
		for (offs = 0; offs < dwords; offs++)
			vals[offs] = IWM_READ(sc, IWM_HBUS_TARG_MEM_RDAT);
		iwm_nic_unlock(sc);
		return 0;
	}
	return EBUSY;
}
#endif

static int
iwm_write_mem(struct iwm_softc *sc, uint32_t addr, const void *buf, int dwords)
{
	int offs;
	const uint32_t *vals = buf;

	if (iwm_nic_lock(sc)) {
		IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WADDR, addr);
		/* WADDR auto-increments */
		for (offs = 0; offs < dwords; offs++) {
			uint32_t val = vals ? vals[offs] : 0;
			IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WDAT, val);
		}
		iwm_nic_unlock(sc);
		return 0;
	}
	return EBUSY;
}

static int
iwm_write_mem32(struct iwm_softc *sc, uint32_t addr, uint32_t val)
{
	return iwm_write_mem(sc, addr, &val, 1);
}

static int
iwm_poll_bit(struct iwm_softc *sc, int reg, uint32_t bits, uint32_t mask,
    int timo)
{
	for (;;) {
		if ((IWM_READ(sc, reg) & mask) == (bits & mask)) {
			return 1;
		}
		if (timo < 10) {
			return 0;
		}
		timo -= 10;
		DELAY(10);
	}
}
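
/*
 * Illustrative usage sketch (hypothetical function, mirroring what
 * iwm_apm_init() below does): iwm_poll_bit() samples 'reg' every 10
 * microseconds until the masked value matches 'bits' and returns 1, or
 * returns 0 once the timeout (also in microseconds) is exhausted.
 */
#if 0
static void
iwm_poll_bit_example(struct iwm_softc *sc)
{
	/* Wait up to 25ms (25000us) for the MAC clock to become ready. */
	if (!iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000))
		aprint_error_dev(sc->sc_dev, "clock not ready after 25ms\n");
}
#endif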

static int
iwm_nic_lock(struct iwm_softc *sc)
{
	int rv = 0;

	if (sc->sc_cmd_hold_nic_awake)
		return 1;

	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
		DELAY(2);

	if (iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
	    | IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP, 15000)) {
		rv = 1;
	} else {
		aprint_error_dev(sc->sc_dev, "resetting device via NMI\n");
		IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_FORCE_NMI);
	}

	return rv;
}

static void
iwm_nic_unlock(struct iwm_softc *sc)
{

	if (sc->sc_cmd_hold_nic_awake)
		return;

	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}

static void
iwm_set_bits_mask_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits,
    uint32_t mask)
{
	uint32_t val;

	/* XXX: no error path? */
	if (iwm_nic_lock(sc)) {
		val = iwm_read_prph(sc, reg) & mask;
		val |= bits;
		iwm_write_prph(sc, reg, val);
		iwm_nic_unlock(sc);
	}
}

static void
iwm_set_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
{
	iwm_set_bits_mask_prph(sc, reg, bits, ~0);
}

static void
iwm_clear_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
{
	iwm_set_bits_mask_prph(sc, reg, 0, ~bits);
}

static int
iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
    bus_size_t size, bus_size_t alignment)
{
	int nsegs, err;
	void *va;

	dma->tag = tag;
	dma->size = size;

	err = bus_dmamap_create(tag, size, 1, size, 0, BUS_DMA_NOWAIT,
	    &dma->map);
	if (err)
		goto fail;

	err = bus_dmamem_alloc(tag, size, alignment, 0, &dma->seg, 1, &nsegs,
	    BUS_DMA_NOWAIT);
	if (err)
		goto fail;

	err = bus_dmamem_map(tag, &dma->seg, 1, size, &va, BUS_DMA_NOWAIT);
	if (err)
		goto fail;
	dma->vaddr = va;

	err = bus_dmamap_load(tag, dma->map, dma->vaddr, size, NULL,
	    BUS_DMA_NOWAIT);
	if (err)
		goto fail;

	memset(dma->vaddr, 0, size);
	bus_dmamap_sync(tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE);
	dma->paddr = dma->map->dm_segs[0].ds_addr;

	return 0;

 fail:	iwm_dma_contig_free(dma);
	return err;
}
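
/*
 * Illustrative lifecycle sketch (hypothetical function; compare the
 * real callers such as iwm_alloc_rx_ring() below): allocate a zeroed,
 * contiguous DMA buffer with the required alignment, use dma.vaddr and
 * dma.paddr, then release everything with iwm_dma_contig_free(), which
 * is safe to call on a partially set up iwm_dma_info.
 */
#if 0
static void
iwm_dma_example(struct iwm_softc *sc)
{
	struct iwm_dma_info dma;

	if (iwm_dma_contig_alloc(sc->sc_dmat, &dma, 4096, 256) == 0) {
		/* ... program dma.paddr into the device, use dma.vaddr ... */
		iwm_dma_contig_free(&dma);
	}
}
#endif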

static void
iwm_dma_contig_free(struct iwm_dma_info *dma)
{
	if (dma->map != NULL) {
		if (dma->vaddr != NULL) {
			bus_dmamap_sync(dma->tag, dma->map, 0, dma->size,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(dma->tag, dma->map);
			bus_dmamem_unmap(dma->tag, dma->vaddr, dma->size);
			bus_dmamem_free(dma->tag, &dma->seg, 1);
			dma->vaddr = NULL;
		}
		bus_dmamap_destroy(dma->tag, dma->map);
		dma->map = NULL;
	}
}

static int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	bus_size_t size;
	int i, err;

	ring->cur = 0;

	/* Allocate RX descriptors (256-byte aligned). */
	size = IWM_RX_RING_COUNT * sizeof(uint32_t);
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate RX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/* Allocate RX status area (16-byte aligned). */
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
	    sizeof(*ring->stat), 16);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate RX status DMA memory\n");
		goto fail;
	}
	ring->stat = ring->stat_dma.vaddr;

	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];

		memset(data, 0, sizeof(*data));
		err = bus_dmamap_create(sc->sc_dmat, IWM_RBUF_SIZE, 1,
		    IWM_RBUF_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &data->map);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could not create RX buf DMA map\n");
			goto fail;
		}

		err = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i);
		if (err)
			goto fail;
	}
	return 0;

 fail:	iwm_free_rx_ring(sc, ring);
	return err;
}

static void
iwm_disable_rx_dma(struct iwm_softc *sc)
{
	int ntries;

	if (iwm_nic_lock(sc)) {
		IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
		for (ntries = 0; ntries < 1000; ntries++) {
			if (IWM_READ(sc, IWM_FH_MEM_RSSR_RX_STATUS_REG) &
			    IWM_FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE)
				break;
			DELAY(10);
		}
		iwm_nic_unlock(sc);
	}
}
static void
iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	ring->cur = 0;
	memset(ring->stat, 0, sizeof(*ring->stat));
	bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
	    ring->stat_dma.size, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

static void
iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	int i;

	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->stat_dma);

	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(sc->sc_dmat, data->map);
			data->map = NULL;
		}
	}
}

static int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	int i, err, nsegs;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate TX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/*
	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
	 * to allocate commands space for other rings.
	 */
	if (qid > IWM_CMD_QUEUE)
		return 0;

	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
	err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate TX cmd DMA memory\n");
		goto fail;
	}
	ring->cmd = ring->cmd_dma.vaddr;

	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];
		size_t mapsize;

		data->cmd_paddr = paddr;
		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
		    + offsetof(struct iwm_tx_cmd, scratch);
		paddr += sizeof(struct iwm_device_cmd);

		/* FW commands may require more mapped space than packets. */
		if (qid == IWM_CMD_QUEUE) {
			mapsize = IWM_RBUF_SIZE;
			nsegs = 1;
		} else {
			mapsize = MCLBYTES;
			nsegs = IWM_NUM_OF_TBS - 2;
		}
		err = bus_dmamap_create(sc->sc_dmat, mapsize, nsegs, mapsize,
		    0, BUS_DMA_NOWAIT, &data->map);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could not create TX buf DMA map\n");
			goto fail;
		}
	}
	KASSERT(paddr == ring->cmd_dma.paddr + size);
	return 0;

 fail:	iwm_free_tx_ring(sc, ring);
	return err;
}

static void
iwm_clear_cmd_in_flight(struct iwm_softc *sc)
{

	if (!sc->apmg_wake_up_wa)
		return;

	if (!sc->sc_cmd_hold_nic_awake) {
		aprint_error_dev(sc->sc_dev,
		    "cmd_hold_nic_awake not set\n");
		return;
	}

	sc->sc_cmd_hold_nic_awake = 0;
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}

static int
iwm_set_cmd_in_flight(struct iwm_softc *sc)
{
	int ret;

	/*
	 * wake up the NIC to make sure that the firmware will see the host
	 * command - we will let the NIC sleep once all the host commands
	 * returned. This needs to be done only on NICs that have
	 * apmg_wake_up_wa set.
	 */
	if (sc->apmg_wake_up_wa && !sc->sc_cmd_hold_nic_awake) {

		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
		    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

		ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
		    IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
		    (IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
		     IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP),
		    15000);
		if (ret == 0) {
			IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
			    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			aprint_error_dev(sc->sc_dev,
			    "failed to wake NIC for hcmd\n");
			return EIO;
		}
		sc->sc_cmd_hold_nic_awake = 1;
	}

	return 0;
}

static void
iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	/* Clear TX descriptors. */
	memset(ring->desc, 0, ring->desc_dma.size);
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0,
	    ring->desc_dma.size, BUS_DMASYNC_PREWRITE);
	sc->qfullmsk &= ~(1 << ring->qid);
	ring->queued = 0;
	ring->cur = 0;

	if (ring->qid == IWM_CMD_QUEUE && sc->sc_cmd_hold_nic_awake)
		iwm_clear_cmd_in_flight(sc);
}

static void
iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->cmd_dma);

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
		if (data->map != NULL) {
			bus_dmamap_destroy(sc->sc_dmat, data->map);
			data->map = NULL;
		}
	}
}

static void
iwm_enable_rfkill_int(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INT_BIT_RF_KILL;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}

static int
iwm_check_rfkill(struct iwm_softc *sc)
{
	uint32_t v;
	int s;
	int rv;

	s = splnet();

	/*
	 * "documentation" is not really helpful here:
	 *  27:	HW_RF_KILL_SW
	 *	Indicates state of (platform's) hardware RF-Kill switch
	 *
	 * But apparently when it's off, it's on ...
	 */
	v = IWM_READ(sc, IWM_CSR_GP_CNTRL);
	rv = (v & IWM_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) == 0;
	if (rv) {
		sc->sc_flags |= IWM_FLAG_RFKILL;
	} else {
		sc->sc_flags &= ~IWM_FLAG_RFKILL;
	}

	splx(s);
	return rv;
}

static void
iwm_enable_interrupts(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}

static void
iwm_restore_interrupts(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}

static void
iwm_disable_interrupts(struct iwm_softc *sc)
{
	int s = splnet();

	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	/* acknowledge all interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);

	splx(s);
}

static void
iwm_ict_reset(struct iwm_softc *sc)
{
	iwm_disable_interrupts(sc);

	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
	bus_dmamap_sync(sc->sc_dmat, sc->ict_dma.map, 0, IWM_ICT_SIZE,
	    BUS_DMASYNC_PREWRITE);
	sc->ict_cur = 0;

	/* Set physical address of ICT (4KB aligned). */
	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
	    IWM_CSR_DRAM_INT_TBL_ENABLE
	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWM_FLAG_USE_ICT;

	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);
}

#define IWM_HW_READY_TIMEOUT 50
static int
iwm_set_hw_ready(struct iwm_softc *sc)
{
	int ready;

	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	ready = iwm_poll_bit(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
	    IWM_HW_READY_TIMEOUT);
	if (ready)
		IWM_SETBITS(sc, IWM_CSR_MBOX_SET_REG,
		    IWM_CSR_MBOX_SET_REG_OS_ALIVE);

	return ready;
}
#undef IWM_HW_READY_TIMEOUT

static int
iwm_prepare_card_hw(struct iwm_softc *sc)
{
	int t = 0;

	if (iwm_set_hw_ready(sc))
		return 0;

	DELAY(100);

	/* If HW is not ready, prepare the conditions to check again */
	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_PREPARE);

	do {
		if (iwm_set_hw_ready(sc))
			return 0;
		DELAY(200);
		t += 200;
	} while (t < 150000);

	return ETIMEDOUT;
}

static void
iwm_apm_config(struct iwm_softc *sc)
{
	pcireg_t reg;

	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
	    sc->sc_cap_off + PCIE_LCSR);
	if (reg & PCIE_LCSR_ASPM_L1) {
		/* Um the Linux driver prints "Disabling L0S for this one ... */
		IWM_SETBITS(sc, IWM_CSR_GIO_REG,
		    IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
	} else {
		/* ... and "Enabling" here */
		IWM_CLRBITS(sc, IWM_CSR_GIO_REG,
		    IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
	}
}
1559
1560 /*
1561 * Start up NIC's basic functionality after it has been reset
1562 * e.g. after platform boot or shutdown.
1563 * NOTE: This does not load uCode nor start the embedded processor
1564 */
1565 static int
1566 iwm_apm_init(struct iwm_softc *sc)
1567 {
1568 int err = 0;
1569
1570 /* Disable L0S exit timer (platform NMI workaround) */
1571 if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000) {
1572 IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
1573 IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
1574 }
1575
1576 /*
1577 * Disable L0s without affecting L1;
1578 * don't wait for ICH L0s (ICH bug W/A)
1579 */
1580 IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
1581 IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
1582
1583 /* Set FH wait threshold to maximum (HW error during stress W/A) */
1584 IWM_SETBITS(sc, IWM_CSR_DBG_HPET_MEM_REG, IWM_CSR_DBG_HPET_MEM_REG_VAL);
1585
1586 /*
1587 * Enable HAP INTA (interrupt from management bus) to
1588 * wake device's PCI Express link L1a -> L0s
1589 */
1590 IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1591 IWM_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
1592
1593 iwm_apm_config(sc);
1594
1595 #if 0 /* not for 7k/8k */
1596 /* Configure analog phase-lock-loop before activating to D0A */
1597 if (trans->cfg->base_params->pll_cfg_val)
1598 IWM_SETBITS(trans, IWM_CSR_ANA_PLL_CFG,
1599 trans->cfg->base_params->pll_cfg_val);
1600 #endif
1601
1602 /*
1603 * Set "initialization complete" bit to move adapter from
1604 * D0U* --> D0A* (powered-up active) state.
1605 */
1606 IWM_SETBITS(sc, IWM_CSR_GP_CNTRL, IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1607
1608 /*
1609 * Wait for clock stabilization; once stabilized, access to
1610 * device-internal resources is supported, e.g. iwm_write_prph()
1611 * and accesses to uCode SRAM.
1612 */
1613 if (!iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
1614 IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
1615 IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
1616 aprint_error_dev(sc->sc_dev,
1617 "timeout waiting for clock stabilization\n");
1618 err = ETIMEDOUT;
1619 goto out;
1620 }
1621
1622 if (sc->host_interrupt_operation_mode) {
1623 /*
1624 * This is a bit of an abuse - This is needed for 7260 / 3160
1625 * only check host_interrupt_operation_mode even if this is
1626 * not related to host_interrupt_operation_mode.
1627 *
1628 * Enable the oscillator to count wake up time for L1 exit. This
1629 * consumes slightly more power (100uA) - but allows to be sure
1630 * that we wake up from L1 on time.
1631 *
1632 * This looks weird: read twice the same register, discard the
1633 * value, set a bit, and yet again, read that same register
1634 * just to discard the value. But that's the way the hardware
1635 * seems to like it.
1636 */
1637 iwm_read_prph(sc, IWM_OSC_CLK);
1638 iwm_read_prph(sc, IWM_OSC_CLK);
1639 iwm_set_bits_prph(sc, IWM_OSC_CLK, IWM_OSC_CLK_FORCE_CONTROL);
1640 iwm_read_prph(sc, IWM_OSC_CLK);
1641 iwm_read_prph(sc, IWM_OSC_CLK);
1642 }
1643
1644 /*
1645 * Enable DMA clock and wait for it to stabilize.
1646 *
1647 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
1648 * do not disable clocks. This preserves any hardware bits already
1649 * set by default in "CLK_CTRL_REG" after reset.
1650 */
1651 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
1652 iwm_write_prph(sc, IWM_APMG_CLK_EN_REG,
1653 IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1654 DELAY(20);
1655
1656 /* Disable L1-Active */
1657 iwm_set_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1658 IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1659
1660 /* Clear the interrupt in APMG if the NIC is in RFKILL */
1661 iwm_write_prph(sc, IWM_APMG_RTC_INT_STT_REG,
1662 IWM_APMG_RTC_INT_STT_RFKILL);
1663 }
1664 out:
1665 if (err)
1666 aprint_error_dev(sc->sc_dev, "apm init error %d\n", err);
1667 return err;
1668 }
1669
1670 static void
1671 iwm_apm_stop(struct iwm_softc *sc)
1672 {
1673 /* stop device's busmaster DMA activity */
1674 IWM_SETBITS(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_STOP_MASTER);
1675
1676 if (!iwm_poll_bit(sc, IWM_CSR_RESET,
1677 IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED,
1678 IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100))
1679 aprint_error_dev(sc->sc_dev, "timeout waiting for master\n");
1680 DPRINTF(("iwm apm stop\n"));
1681 }
1682
1683 static int
1684 iwm_start_hw(struct iwm_softc *sc)
1685 {
1686 int err;
1687
1688 err = iwm_prepare_card_hw(sc);
1689 if (err)
1690 return err;
1691
1692 /* Reset the entire device */
1693 IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
1694 DELAY(10);
1695
1696 err = iwm_apm_init(sc);
1697 if (err)
1698 return err;
1699
1700 iwm_enable_rfkill_int(sc);
1701 iwm_check_rfkill(sc);
1702
1703 return 0;
1704 }
1705
1706 static void
1707 iwm_stop_device(struct iwm_softc *sc)
1708 {
1709 int chnl, ntries;
1710 int qid;
1711
1712 iwm_disable_interrupts(sc);
1713 sc->sc_flags &= ~IWM_FLAG_USE_ICT;
1714
1715 /* Deactivate TX scheduler. */
1716 iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1717
1718 /* Stop all DMA channels. */
1719 if (iwm_nic_lock(sc)) {
1720 for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1721 IWM_WRITE(sc,
1722 IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
1723 for (ntries = 0; ntries < 200; ntries++) {
1724 uint32_t r;
1725
1726 r = IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG);
1727 if (r & IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(
1728 chnl))
1729 break;
1730 DELAY(20);
1731 }
1732 }
1733 iwm_nic_unlock(sc);
1734 }
1735 iwm_disable_rx_dma(sc);
1736
1737 iwm_reset_rx_ring(sc, &sc->rxq);
1738
1739 for (qid = 0; qid < __arraycount(sc->txq); qid++)
1740 iwm_reset_tx_ring(sc, &sc->txq[qid]);
1741
1742 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
1743 /* Power-down device's busmaster DMA clocks */
1744 if (iwm_nic_lock(sc)) {
1745 iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
1746 IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1747 DELAY(5);
1748 iwm_nic_unlock(sc);
1749 }
1750 }
1751
1752 /* Make sure (redundant) we've released our request to stay awake */
1753 IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1754 IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1755
1756 /* Stop the device, and put it in low power state */
1757 iwm_apm_stop(sc);
1758
1759 /*
1760 * Upon stop, the APM issues an interrupt if HW RF kill is set.
1761 * Clean again the interrupt here
1762 */
1763 iwm_disable_interrupts(sc);
1764
1765 /* Reset the on-board processor. */
1766 IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
1767
1768 /* Even though we stop the HW we still want the RF kill interrupt. */
1769 iwm_enable_rfkill_int(sc);
1770 iwm_check_rfkill(sc);
1771 }
1772
1773 static void
1774 iwm_nic_config(struct iwm_softc *sc)
1775 {
1776 uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
1777 uint32_t reg_val = 0;
1778
1779 radio_cfg_type = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
1780 IWM_FW_PHY_CFG_RADIO_TYPE_POS;
1781 radio_cfg_step = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
1782 IWM_FW_PHY_CFG_RADIO_STEP_POS;
1783 radio_cfg_dash = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
1784 IWM_FW_PHY_CFG_RADIO_DASH_POS;
1785
1786 reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
1787 IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
1788 reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
1789 IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
1790
1791 /* radio configuration */
1792 reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
1793 reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
1794 reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
1795
1796 IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);
1797
1798 DPRINTF(("Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
1799 radio_cfg_step, radio_cfg_dash));
1800
1801 /*
1802 * W/A: the NIC gets stuck in a reset state after an early PCIe power off
1803 * (PCIe power is lost before PERST# is asserted), causing the ME FW
1804 * to lose ownership and fail to regain it.
1805 */
1806 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
1807 iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
1808 IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
1809 ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
1810 }
1811 }
1812
1813 static int
1814 iwm_nic_rx_init(struct iwm_softc *sc)
1815 {
1816 if (!iwm_nic_lock(sc))
1817 return EBUSY;
1818
1819 memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1820 bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
1821 0, sc->rxq.stat_dma.size,
1822 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1823
1824 iwm_disable_rx_dma(sc);
1825 IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
1826 IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
1827 IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
1828 IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
1829
1830 /* Set physical address of RX ring (256-byte aligned). */
1831 IWM_WRITE(sc,
1832 IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);
1833
1834 /* Set physical address of RX status (16-byte aligned). */
1835 IWM_WRITE(sc,
1836 IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);
1837
1838 /* Enable RX. */
1839 IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
1840 IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
1841 IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY | /* HW bug */
1842 IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
1843 IWM_FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
1844 IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
1845 (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
1846 IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);
1847
1848 IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
1849
1850 /* W/A for interrupt coalescing bug in 7260 and 3160 */
1851 if (sc->host_interrupt_operation_mode)
1852 IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);
1853
1854 /*
1855 * This value should initially be 0 (before preparing any RBs),
1856 * and should be 8 after preparing the first 8 RBs (for example).
1857 */
1858 IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);
1859
1860 iwm_nic_unlock(sc);
1861
1862 return 0;
1863 }
1864
1865 static int
1866 iwm_nic_tx_init(struct iwm_softc *sc)
1867 {
1868 int qid;
1869
1870 if (!iwm_nic_lock(sc))
1871 return EBUSY;
1872
1873 /* Deactivate TX scheduler. */
1874 iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1875
1876 /* Set physical address of "keep warm" page (16-byte aligned). */
1877 IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);
1878
1879 for (qid = 0; qid < __arraycount(sc->txq); qid++) {
1880 struct iwm_tx_ring *txq = &sc->txq[qid];
1881
1882 /* Set physical address of TX ring (256-byte aligned). */
1883 IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
1884 txq->desc_dma.paddr >> 8);
1885 DPRINTF(("loading ring %d descriptors (%p) at %"PRIxMAX"\n",
1886 qid, txq->desc, (uintmax_t)(txq->desc_dma.paddr >> 8)));
1887 }
1888
1889 iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);
1890
1891 iwm_nic_unlock(sc);
1892
1893 return 0;
1894 }
1895
1896 static int
1897 iwm_nic_init(struct iwm_softc *sc)
1898 {
1899 int err;
1900
1901 iwm_apm_init(sc);
1902 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
1903 iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
1904 IWM_APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
1905 ~IWM_APMG_PS_CTRL_MSK_PWR_SRC);
1906 }
1907
1908 iwm_nic_config(sc);
1909
1910 err = iwm_nic_rx_init(sc);
1911 if (err)
1912 return err;
1913
1914 err = iwm_nic_tx_init(sc);
1915 if (err)
1916 return err;
1917
1918 DPRINTF(("shadow registers enabled\n"));
1919 IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
1920
1921 return 0;
1922 }
1923
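/*
 * TX FIFO used for each EDCA access category. Note the ordering:
 * VO comes first, matching the Linux driver's AC numbering, which
 * differs from net80211's WME_AC_* order (where BE is 0).
 */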
1924 static const uint8_t iwm_ac_to_tx_fifo[] = {
1925 IWM_TX_FIFO_VO,
1926 IWM_TX_FIFO_VI,
1927 IWM_TX_FIFO_BE,
1928 IWM_TX_FIFO_BK,
1929 };
1930
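/*
 * Activate a TX queue in the scheduler. The command queue is set up
 * directly through scheduler (SCD) registers and SRAM context; all
 * other queues are configured by the firmware via an
 * IWM_SCD_QUEUE_CFG host command.
 */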
1931 static int
1932 iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
1933 {
1934 if (!iwm_nic_lock(sc)) {
1935 DPRINTF(("%s: cannot enable txq %d\n", DEVNAME(sc), qid));
1936 return EBUSY;
1937 }
1938
1939 IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
1940
1941 if (qid == IWM_CMD_QUEUE) {
1942 iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1943 (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
1944 | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
1945
1946 iwm_nic_unlock(sc);
1947
1948 iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));
1949
1950 if (!iwm_nic_lock(sc))
1951 return EBUSY;
1952 iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
1953 iwm_nic_unlock(sc);
1954
1955 iwm_write_mem32(sc,
1956 sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
1957
1958 /* Set scheduler window size and frame limit. */
1959 iwm_write_mem32(sc,
1960 sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
1961 sizeof(uint32_t),
1962 ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
1963 IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
1964 ((IWM_FRAME_LIMIT
1965 << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
1966 IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
1967
1968 if (!iwm_nic_lock(sc))
1969 return EBUSY;
1970 iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
1971 (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
1972 (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
1973 (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
1974 IWM_SCD_QUEUE_STTS_REG_MSK);
1975 } else {
1976 struct iwm_scd_txq_cfg_cmd cmd;
1977 int err;
1978
1979 iwm_nic_unlock(sc);
1980
1981 memset(&cmd, 0, sizeof(cmd));
1982 cmd.scd_queue = qid;
1983 cmd.enable = 1;
1984 cmd.sta_id = sta_id;
1985 cmd.tx_fifo = fifo;
1986 cmd.aggregate = 0;
1987 cmd.window = IWM_FRAME_LIMIT;
1988
1989 err = iwm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, 0, sizeof(cmd),
1990 &cmd);
1991 if (err)
1992 return err;
1993
1994 if (!iwm_nic_lock(sc))
1995 return EBUSY;
1996 }
1997
1998 iwm_write_prph(sc, IWM_SCD_EN_CTRL,
1999 iwm_read_prph(sc, IWM_SCD_EN_CTRL) | (1 << qid));
2000
2001 iwm_nic_unlock(sc);
2002
2003 DPRINTF(("enabled txq %d FIFO %d\n", qid, fifo));
2004
2005 return 0;
2006 }
2007
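/*
 * Finish hardware setup after the firmware has sent its "alive"
 * notification: reset the ICT table, clear the scheduler's context
 * area in SRAM, point the scheduler at its DRAM rings, and enable
 * the command queue and the TX DMA channels.
 */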
2008 static int
2009 iwm_post_alive(struct iwm_softc *sc)
2010 {
2011 int nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
2012 IWM_SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(uint32_t);
2013 int err, chnl;
2014 uint32_t base;
2015
2016 if (!iwm_nic_lock(sc))
2017 return EBUSY;
2018
2019 base = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
2020 if (sc->sched_base != base) {
2021 DPRINTF(("%s: sched addr mismatch: 0x%08x != 0x%08x\n",
2022 DEVNAME(sc), sc->sched_base, base));
2023 sc->sched_base = base;
2024 }
2025
2026 iwm_nic_unlock(sc);
2027
2028 iwm_ict_reset(sc);
2029
2030 /* Clear TX scheduler state in SRAM. */
2031 err = iwm_write_mem(sc,
2032 sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND, NULL, nwords);
2033 if (err)
2034 return err;
2035
2036 if (!iwm_nic_lock(sc))
2037 return EBUSY;
2038
2039 /* Set physical address of TX scheduler rings (1KB aligned). */
2040 iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);
2041
2042 iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);
2043
2044 iwm_nic_unlock(sc);
2045
2046 /* enable command channel */
2047 err = iwm_enable_txq(sc, 0 /* unused */, IWM_CMD_QUEUE, 7);
2048 if (err)
2049 return err;
2050
2051 if (!iwm_nic_lock(sc))
2052 return EBUSY;
2053
2054 /* Activate TX scheduler. */
2055 iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
2056
2057 /* Enable DMA channels. */
2058 for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
2059 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
2060 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
2061 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
2062 }
2063
2064 IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
2065 IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
2066
2067 /* Enable L1-Active */
2068 if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000) {
2069 iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
2070 IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
2071 }
2072
2073 iwm_nic_unlock(sc);
2074
2075 return 0;
2076 }
2077
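/*
 * PHY database: calibration results produced by the init firmware
 * image are cached here so they can be replayed to the runtime
 * firmware image later (see iwm_send_phy_db_data()).
 */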
2078 static struct iwm_phy_db_entry *
2079 iwm_phy_db_get_section(struct iwm_softc *sc, enum iwm_phy_db_section_type type,
2080 uint16_t chg_id)
2081 {
2082 struct iwm_phy_db *phy_db = &sc->sc_phy_db;
2083
2084 if (type >= IWM_PHY_DB_MAX)
2085 return NULL;
2086
2087 switch (type) {
2088 case IWM_PHY_DB_CFG:
2089 return &phy_db->cfg;
2090 case IWM_PHY_DB_CALIB_NCH:
2091 return &phy_db->calib_nch;
2092 case IWM_PHY_DB_CALIB_CHG_PAPD:
2093 if (chg_id >= IWM_NUM_PAPD_CH_GROUPS)
2094 return NULL;
2095 return &phy_db->calib_ch_group_papd[chg_id];
2096 case IWM_PHY_DB_CALIB_CHG_TXP:
2097 if (chg_id >= IWM_NUM_TXP_CH_GROUPS)
2098 return NULL;
2099 return &phy_db->calib_ch_group_txp[chg_id];
2100 default:
2101 return NULL;
2102 }
2103 return NULL;
2104 }
2105
2106 static int
2107 iwm_phy_db_set_section(struct iwm_softc *sc,
2108 struct iwm_calib_res_notif_phy_db *phy_db_notif, uint16_t size)
2109 {
2110 struct iwm_phy_db_entry *entry;
2111 enum iwm_phy_db_section_type type = le16toh(phy_db_notif->type);
2112 uint16_t chg_id = 0;
2113
2114 if (type == IWM_PHY_DB_CALIB_CHG_PAPD ||
2115 type == IWM_PHY_DB_CALIB_CHG_TXP)
2116 chg_id = le16toh(*(uint16_t *)phy_db_notif->data);
2117
2118 entry = iwm_phy_db_get_section(sc, type, chg_id);
2119 if (!entry)
2120 return EINVAL;
2121
2122 if (entry->data)
2123 kmem_intr_free(entry->data, entry->size);
2124 entry->data = kmem_intr_alloc(size, KM_NOSLEEP);
2125 if (!entry->data) {
2126 entry->size = 0;
2127 return ENOMEM;
2128 }
2129 memcpy(entry->data, phy_db_notif->data, size);
2130 entry->size = size;
2131
2132 DPRINTFN(10, ("%s(%d): [PHYDB] SET: Type %d, Size: %d, data: %p\n",
2133 __func__, __LINE__, type, size, entry->data));
2134
2135 return 0;
2136 }
2137
2138 static int
2139 iwm_is_valid_channel(uint16_t ch_id)
2140 {
2141 if (ch_id <= 14 ||
2142 (36 <= ch_id && ch_id <= 64 && ch_id % 4 == 0) ||
2143 (100 <= ch_id && ch_id <= 140 && ch_id % 4 == 0) ||
2144 (145 <= ch_id && ch_id <= 165 && ch_id % 4 == 1))
2145 return 1;
2146 return 0;
2147 }
2148
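/*
 * Map a channel number to a flat index over all channels accepted by
 * iwm_is_valid_channel(): 1-14 -> 0-13, 36-64 (step 4) -> 14-21,
 * 100-140 (step 4) -> 22-32, 145-165 (step 4) -> 33-38.
 */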
2149 static uint8_t
2150 iwm_ch_id_to_ch_index(uint16_t ch_id)
2151 {
2152 if (!iwm_is_valid_channel(ch_id))
2153 return 0xff;
2154
2155 if (ch_id <= 14)
2156 return ch_id - 1;
2157 if (ch_id <= 64)
2158 return (ch_id + 20) / 4;
2159 if (ch_id <= 140)
2160 return (ch_id - 12) / 4;
2161 return (ch_id - 13) / 4;
2162 }
2163
2164
2165 static uint16_t
2166 iwm_channel_id_to_papd(uint16_t ch_id)
2167 {
2168 if (!iwm_is_valid_channel(ch_id))
2169 return 0xff;
2170
2171 if (1 <= ch_id && ch_id <= 14)
2172 return 0;
2173 if (36 <= ch_id && ch_id <= 64)
2174 return 1;
2175 if (100 <= ch_id && ch_id <= 140)
2176 return 2;
2177 return 3;
2178 }
2179
2180 static uint16_t
2181 iwm_channel_id_to_txp(struct iwm_softc *sc, uint16_t ch_id)
2182 {
2183 struct iwm_phy_db *phy_db = &sc->sc_phy_db;
2184 struct iwm_phy_db_chg_txp *txp_chg;
2185 int i;
2186 uint8_t ch_index = iwm_ch_id_to_ch_index(ch_id);
2187
2188 if (ch_index == 0xff)
2189 return 0xff;
2190
2191 for (i = 0; i < IWM_NUM_TXP_CH_GROUPS; i++) {
2192 txp_chg = (void *)phy_db->calib_ch_group_txp[i].data;
2193 if (!txp_chg)
2194 return 0xff;
2195 /*
2196 * Find the first channel group whose maximum channel
2197 * index is not lower than the requested channel index.
2198 */
2199 if (le16toh(txp_chg->max_channel_idx) >= ch_index)
2200 return i;
2201 }
2202 return 0xff;
2203 }
2204
2205 static int
2206 iwm_phy_db_get_section_data(struct iwm_softc *sc, uint32_t type, uint8_t **data,
2207 uint16_t *size, uint16_t ch_id)
2208 {
2209 struct iwm_phy_db_entry *entry;
2210 uint16_t ch_group_id = 0;
2211
2212 if (type == IWM_PHY_DB_CALIB_CHG_PAPD)
2213 ch_group_id = iwm_channel_id_to_papd(ch_id);
2214 else if (type == IWM_PHY_DB_CALIB_CHG_TXP)
2215 ch_group_id = iwm_channel_id_to_txp(sc, ch_id);
2216
2217 entry = iwm_phy_db_get_section(sc, type, ch_group_id);
2218 if (!entry)
2219 return EINVAL;
2220
2221 *data = entry->data;
2222 *size = entry->size;
2223
2224 DPRINTFN(10, ("%s(%d): [PHYDB] GET: Type %d , Size: %d\n",
2225 __func__, __LINE__, type, *size));
2226
2227 return 0;
2228 }
2229
2230 static int
2231 iwm_send_phy_db_cmd(struct iwm_softc *sc, uint16_t type, uint16_t length,
2232 void *data)
2233 {
2234 struct iwm_phy_db_cmd phy_db_cmd;
2235 struct iwm_host_cmd cmd = {
2236 .id = IWM_PHY_DB_CMD,
2237 .flags = IWM_CMD_ASYNC,
2238 };
2239
2240 DPRINTFN(10, ("Sending PHY-DB hcmd of type %d, of length %d\n",
2241 type, length));
2242
2243 phy_db_cmd.type = htole16(type);
2244 phy_db_cmd.length = htole16(length);
2245
2246 cmd.data[0] = &phy_db_cmd;
2247 cmd.len[0] = sizeof(struct iwm_phy_db_cmd);
2248 cmd.data[1] = data;
2249 cmd.len[1] = length;
2250
2251 return iwm_send_cmd(sc, &cmd);
2252 }
2253
2254 static int
2255 iwm_phy_db_send_all_channel_groups(struct iwm_softc *sc,
2256 enum iwm_phy_db_section_type type, uint8_t max_ch_groups)
2257 {
2258 uint16_t i;
2259 int err;
2260 struct iwm_phy_db_entry *entry;
2261
2262 /* Send all the channel-specific groups to operational fw */
2263 for (i = 0; i < max_ch_groups; i++) {
2264 entry = iwm_phy_db_get_section(sc, type, i);
2265 if (!entry)
2266 return EINVAL;
2267
2268 if (!entry->size)
2269 continue;
2270
2271 err = iwm_send_phy_db_cmd(sc, type, entry->size, entry->data);
2272 if (err) {
2273 DPRINTF(("%s: Can't SEND phy_db section %d (%d), "
2274 "err %d\n", DEVNAME(sc), type, i, err));
2275 return err;
2276 }
2277
2278 DPRINTFN(10, ("%s: Sent PHY_DB HCMD, type = %d num = %d\n",
2279 DEVNAME(sc), type, i));
2280
2281 DELAY(1000);
2282 }
2283
2284 return 0;
2285 }
2286
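/*
 * Replay the calibration results collected from the init firmware
 * (config section, non-channel-specific results, and the per-group
 * PAPD and TX power tables) to the runtime firmware.
 */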
2287 static int
2288 iwm_send_phy_db_data(struct iwm_softc *sc)
2289 {
2290 uint8_t *data = NULL;
2291 uint16_t size = 0;
2292 int err;
2293
2294 err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CFG, &data, &size, 0);
2295 if (err)
2296 return err;
2297
2298 err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CFG, size, data);
2299 if (err)
2300 return err;
2301
2302 err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CALIB_NCH,
2303 &data, &size, 0);
2304 if (err)
2305 return err;
2306
2307 err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CALIB_NCH, size, data);
2308 if (err)
2309 return err;
2310
2311 err = iwm_phy_db_send_all_channel_groups(sc,
2312 IWM_PHY_DB_CALIB_CHG_PAPD, IWM_NUM_PAPD_CH_GROUPS);
2313 if (err)
2314 return err;
2315
2316 err = iwm_phy_db_send_all_channel_groups(sc,
2317 IWM_PHY_DB_CALIB_CHG_TXP, IWM_NUM_TXP_CH_GROUPS);
2318 if (err)
2319 return err;
2320
2321 return 0;
2322 }
2323
2324 /*
2325 * For the high priority TE use a time event type that has similar priority to
2326 * the FW's action scan priority.
2327 */
2328 #define IWM_ROC_TE_TYPE_NORMAL IWM_TE_P2P_DEVICE_DISCOVERABLE
2329 #define IWM_ROC_TE_TYPE_MGMT_TX IWM_TE_P2P_CLIENT_ASSOC
2330
2331 /* used to convert from time event API v2 to v1 */
2332 #define IWM_TE_V2_DEP_POLICY_MSK (IWM_TE_V2_DEP_OTHER | IWM_TE_V2_DEP_TSF |\
2333 IWM_TE_V2_EVENT_SOCIOPATHIC)
2334 static inline uint16_t
2335 iwm_te_v2_get_notify(uint16_t policy)
2336 {
2337 return le16toh(policy) & IWM_TE_V2_NOTIF_MSK;
2338 }
2339
2340 static inline uint16_t
2341 iwm_te_v2_get_dep_policy(uint16_t policy)
2342 {
2343 return (le16toh(policy) & IWM_TE_V2_DEP_POLICY_MSK) >>
2344 IWM_TE_V2_PLACEMENT_POS;
2345 }
2346
2347 static inline uint16_t
2348 iwm_te_v2_get_absence(uint16_t policy)
2349 {
2350 return (le16toh(policy) & IWM_TE_V2_ABSENCE) >> IWM_TE_V2_ABSENCE_POS;
2351 }
2352
2353 static void
2354 iwm_te_v2_to_v1(const struct iwm_time_event_cmd_v2 *cmd_v2,
2355 struct iwm_time_event_cmd_v1 *cmd_v1)
2356 {
2357 cmd_v1->id_and_color = cmd_v2->id_and_color;
2358 cmd_v1->action = cmd_v2->action;
2359 cmd_v1->id = cmd_v2->id;
2360 cmd_v1->apply_time = cmd_v2->apply_time;
2361 cmd_v1->max_delay = cmd_v2->max_delay;
2362 cmd_v1->depends_on = cmd_v2->depends_on;
2363 cmd_v1->interval = cmd_v2->interval;
2364 cmd_v1->duration = cmd_v2->duration;
2365 if (cmd_v2->repeat == IWM_TE_V2_REPEAT_ENDLESS)
2366 cmd_v1->repeat = htole32(IWM_TE_V1_REPEAT_ENDLESS);
2367 else
2368 cmd_v1->repeat = htole32(cmd_v2->repeat);
2369 cmd_v1->max_frags = htole32(cmd_v2->max_frags);
2370 cmd_v1->interval_reciprocal = 0; /* unused */
2371
2372 cmd_v1->dep_policy = htole32(iwm_te_v2_get_dep_policy(cmd_v2->policy));
2373 cmd_v1->is_present = htole32(!iwm_te_v2_get_absence(cmd_v2->policy));
2374 cmd_v1->notify = htole32(iwm_te_v2_get_notify(cmd_v2->policy));
2375 }
2376
2377 static int
2378 iwm_send_time_event_cmd(struct iwm_softc *sc,
2379 const struct iwm_time_event_cmd_v2 *cmd)
2380 {
2381 struct iwm_time_event_cmd_v1 cmd_v1;
2382
2383 if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_TIME_EVENT_API_V2)
2384 return iwm_send_cmd_pdu(sc, IWM_TIME_EVENT_CMD, 0, sizeof(*cmd),
2385 cmd);
2386
2387 iwm_te_v2_to_v1(cmd, &cmd_v1);
2388 return iwm_send_cmd_pdu(sc, IWM_TIME_EVENT_CMD, 0, sizeof(cmd_v1),
2389 &cmd_v1);
2390 }
2391
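/*
 * Schedule a session-protection time event so the firmware keeps us
 * on the channel while association with the AP completes, instead of
 * pre-empting us, e.g. for a scan. The duration and max_delay values
 * are presumably in time units (TU), as in the Linux iwlwifi driver.
 */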
2392 static void
2393 iwm_protect_session(struct iwm_softc *sc, struct iwm_node *in,
2394 uint32_t duration, uint32_t max_delay)
2395 {
2396 struct iwm_time_event_cmd_v2 time_cmd;
2397
2398 memset(&time_cmd, 0, sizeof(time_cmd));
2399
2400 time_cmd.action = htole32(IWM_FW_CTXT_ACTION_ADD);
2401 time_cmd.id_and_color =
2402 htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
2403 time_cmd.id = htole32(IWM_TE_BSS_STA_AGGRESSIVE_ASSOC);
2404
2405 time_cmd.apply_time = htole32(0);
2406
2407 time_cmd.max_frags = IWM_TE_V2_FRAG_NONE;
2408 time_cmd.max_delay = htole32(max_delay);
2409 /* TODO: why do we need to set the interval to the beacon interval if this event is not periodic? */
2410 time_cmd.interval = htole32(1);
2411 time_cmd.duration = htole32(duration);
2412 time_cmd.repeat = 1;
2413 time_cmd.policy
2414 = htole16(IWM_TE_V2_NOTIF_HOST_EVENT_START |
2415 IWM_TE_V2_NOTIF_HOST_EVENT_END |
2416 IWM_T2_V2_START_IMMEDIATELY);
2417
2418 iwm_send_time_event_cmd(sc, &time_cmd);
2419 }
2420
2421 /*
2422 * NVM read access and content parsing. We do not support
2423 * external NVM or writing NVM.
2424 */
2425
2426 /* list of NVM sections we are allowed/need to read */
2427 static const int iwm_nvm_to_read[] = {
2428 IWM_NVM_SECTION_TYPE_HW,
2429 IWM_NVM_SECTION_TYPE_SW,
2430 IWM_NVM_SECTION_TYPE_REGULATORY,
2431 IWM_NVM_SECTION_TYPE_CALIBRATION,
2432 IWM_NVM_SECTION_TYPE_PRODUCTION,
2433 IWM_NVM_SECTION_TYPE_HW_8000,
2434 IWM_NVM_SECTION_TYPE_MAC_OVERRIDE,
2435 IWM_NVM_SECTION_TYPE_PHY_SKU,
2436 };
2437
2438 /* Default NVM size to read */
2439 #define IWM_NVM_DEFAULT_CHUNK_SIZE (2*1024)
2440 #define IWM_MAX_NVM_SECTION_SIZE_7000 (16 * 512 * sizeof(uint16_t)) /*16 KB*/
2441 #define IWM_MAX_NVM_SECTION_SIZE_8000 (32 * 512 * sizeof(uint16_t)) /*32 KB*/
2442
2443 #define IWM_NVM_WRITE_OPCODE 1
2444 #define IWM_NVM_READ_OPCODE 0
2445
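/*
 * Read one chunk of an NVM section with the IWM_NVM_ACCESS_CMD host
 * command. The firmware replies with the data, the offset it read
 * from, and the number of bytes actually returned, which may be less
 * than requested at the end of a section.
 */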
2446 static int
2447 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section, uint16_t offset,
2448 uint16_t length, uint8_t *data, uint16_t *len)
2449 {
2451 struct iwm_nvm_access_cmd nvm_access_cmd = {
2452 .offset = htole16(offset),
2453 .length = htole16(length),
2454 .type = htole16(section),
2455 .op_code = IWM_NVM_READ_OPCODE,
2456 };
2457 struct iwm_nvm_access_resp *nvm_resp;
2458 struct iwm_rx_packet *pkt;
2459 struct iwm_host_cmd cmd = {
2460 .id = IWM_NVM_ACCESS_CMD,
2461 .flags = (IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL),
2462 .data = { &nvm_access_cmd, },
2463 };
2464 int err, offset_read;
2465 size_t bytes_read;
2466 uint8_t *resp_data;
2467
2468 cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
2469
2470 err = iwm_send_cmd(sc, &cmd);
2471 if (err) {
2472 DPRINTF(("%s: Could not send NVM_ACCESS command (error=%d)\n",
2473 DEVNAME(sc), err));
2474 return err;
2475 }
2476
2477 pkt = cmd.resp_pkt;
2478 if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
2479 err = EIO;
2480 goto exit;
2481 }
2482
2483 /* Extract NVM response */
2484 nvm_resp = (void *)pkt->data;
2485
2486 err = le16toh(nvm_resp->status);
2487 bytes_read = le16toh(nvm_resp->length);
2488 offset_read = le16toh(nvm_resp->offset);
2489 resp_data = nvm_resp->data;
2490 if (err) {
2491 err = EINVAL;
2492 goto exit;
2493 }
2494
2495 if (offset_read != offset) {
2496 err = EINVAL;
2497 goto exit;
2498 }
2499 if (bytes_read > length) {
2500 err = EINVAL;
2501 goto exit;
2502 }
2503
2504 memcpy(data + offset, resp_data, bytes_read);
2505 *len = bytes_read;
2506
2507 exit:
2508 iwm_free_resp(sc, &cmd);
2509 return err;
2510 }
2511
2512 /*
2513 * Reads an NVM section completely.
2514 * NICs prior to the 7000 family don't have a real NVM, but just read
2515 * section 0, which is the EEPROM. Because EEPROM reads are not bounded
2516 * by the uCode, we need to manually check in this case that we don't
2517 * overflow and try to read more than the EEPROM size.
2518 */
2519 static int
2520 iwm_nvm_read_section(struct iwm_softc *sc, uint16_t section, uint8_t *data,
2521 uint16_t *len, size_t max_len)
2522 {
2523 uint16_t chunklen, seglen;
2524 int err;
2525
2526 chunklen = seglen = IWM_NVM_DEFAULT_CHUNK_SIZE;
2527 *len = 0;
2528
2529 /* Read NVM chunks until exhausted (reading less than requested) */
2530 while (seglen == chunklen && *len < max_len) {
2531 err = iwm_nvm_read_chunk(sc, section, *len, chunklen, data,
2532 &seglen);
2533 if (err) {
2534 DPRINTF(("%s: Cannot read NVM from section %d "
2535 "offset %d, length %d\n",
2536 DEVNAME(sc), section, *len, chunklen));
2537 return err;
2538 }
2539 *len += seglen;
2540 }
2541
2542 DPRINTFN(4, ("NVM section %d read completed\n", section));
2543 return 0;
2544 }
2545
2546 static uint8_t
2547 iwm_fw_valid_tx_ant(struct iwm_softc *sc)
2548 {
2549 uint8_t tx_ant;
2550
2551 tx_ant = ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_TX_CHAIN)
2552 >> IWM_FW_PHY_CFG_TX_CHAIN_POS);
2553
2554 if (sc->sc_nvm.valid_tx_ant)
2555 tx_ant &= sc->sc_nvm.valid_tx_ant;
2556
2557 return tx_ant;
2558 }
2559
2560 static uint8_t
2561 iwm_fw_valid_rx_ant(struct iwm_softc *sc)
2562 {
2563 uint8_t rx_ant;
2564
2565 rx_ant = ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RX_CHAIN)
2566 >> IWM_FW_PHY_CFG_RX_CHAIN_POS);
2567
2568 if (sc->sc_nvm.valid_rx_ant)
2569 rx_ant &= sc->sc_nvm.valid_rx_ant;
2570
2571 return rx_ant;
2572 }
2573
2574 static void
2575 iwm_init_channel_map(struct iwm_softc *sc, const uint16_t * const nvm_ch_flags,
2576 const uint8_t *nvm_channels, size_t nchan)
2577 {
2578 struct ieee80211com *ic = &sc->sc_ic;
2579 struct iwm_nvm_data *data = &sc->sc_nvm;
2580 int ch_idx;
2581 struct ieee80211_channel *channel;
2582 uint16_t ch_flags;
2583 int is_5ghz;
2584 int flags, hw_value;
2585
2586 for (ch_idx = 0; ch_idx < nchan; ch_idx++) {
2587 ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
2588 aprint_debug_dev(sc->sc_dev,
2589 "Ch. %d: %svalid %cibss %s %cradar %cdfs"
2590 " %cwide %c40MHz %c80MHz %c160MHz\n",
2591 nvm_channels[ch_idx],
2592 ch_flags & IWM_NVM_CHANNEL_VALID ? "" : "in",
2593 ch_flags & IWM_NVM_CHANNEL_IBSS ? '+' : '-',
2594 ch_flags & IWM_NVM_CHANNEL_ACTIVE ? "active" : "passive",
2595 ch_flags & IWM_NVM_CHANNEL_RADAR ? '+' : '-',
2596 ch_flags & IWM_NVM_CHANNEL_DFS ? '+' : '-',
2597 ch_flags & IWM_NVM_CHANNEL_WIDE ? '+' : '-',
2598 ch_flags & IWM_NVM_CHANNEL_40MHZ ? '+' : '-',
2599 ch_flags & IWM_NVM_CHANNEL_80MHZ ? '+' : '-',
2600 ch_flags & IWM_NVM_CHANNEL_160MHZ ? '+' : '-');
2601
2602 if (ch_idx >= IWM_NUM_2GHZ_CHANNELS &&
2603 !data->sku_cap_band_52GHz_enable)
2604 ch_flags &= ~IWM_NVM_CHANNEL_VALID;
2605
2606 if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
2607 DPRINTF(("Ch. %d Flags %x [%sGHz] - No traffic\n",
2608 nvm_channels[ch_idx], ch_flags,
2609 (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ? "5" : "2.4"));
2610 continue;
2611 }
2612
2613 hw_value = nvm_channels[ch_idx];
2614 channel = &ic->ic_channels[hw_value];
2615
2616 is_5ghz = ch_idx >= IWM_NUM_2GHZ_CHANNELS;
2617 if (!is_5ghz) {
2618 flags = IEEE80211_CHAN_2GHZ;
2619 channel->ic_flags
2620 = IEEE80211_CHAN_CCK
2621 | IEEE80211_CHAN_OFDM
2622 | IEEE80211_CHAN_DYN
2623 | IEEE80211_CHAN_2GHZ;
2624 } else {
2625 flags = IEEE80211_CHAN_5GHZ;
2626 channel->ic_flags =
2627 IEEE80211_CHAN_A;
2628 }
2629 channel->ic_freq = ieee80211_ieee2mhz(hw_value, flags);
2630
2631 if (!(ch_flags & IWM_NVM_CHANNEL_ACTIVE))
2632 channel->ic_flags |= IEEE80211_CHAN_PASSIVE;
2633
2634 #ifndef IEEE80211_NO_HT
2635 if (data->sku_cap_11n_enable)
2636 channel->ic_flags |= IEEE80211_CHAN_HT;
2637 #endif
2638 }
2639 }
2640
2641 #ifndef IEEE80211_NO_HT
2642 static void
2643 iwm_setup_ht_rates(struct iwm_softc *sc)
2644 {
2645 struct ieee80211com *ic = &sc->sc_ic;
2646
2647 /* TX is supported with the same MCS as RX. */
2648 ic->ic_tx_mcs_set = IEEE80211_TX_MCS_SET_DEFINED;
2649
2650 ic->ic_sup_mcs[0] = 0xff; /* MCS 0-7 */
2651
2652 #ifdef notyet
2653 if (sc->sc_nvm.sku_cap_mimo_disable)
2654 return;
2655
2656 if (iwm_fw_valid_rx_ant(sc) > 1)
2657 ic->ic_sup_mcs[1] = 0xff; /* MCS 8-15 */
2658 if (iwm_fw_valid_rx_ant(sc) > 2)
2659 ic->ic_sup_mcs[2] = 0xff; /* MCS 16-23 */
2660 #endif
2661 }
2662
2663 #define IWM_MAX_RX_BA_SESSIONS 16
2664
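/*
 * Ask the firmware to start or stop an RX block ack session for the
 * given TID by modifying our station entry with an ADD_STA command.
 * The pending net80211 ADDBA request is then accepted or refused
 * according to the firmware's answer.
 */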
2665 static void
2666 iwm_sta_rx_agg(struct iwm_softc *sc, struct ieee80211_node *ni, uint8_t tid,
2667 uint16_t ssn, int start)
2668 {
2669 struct ieee80211com *ic = &sc->sc_ic;
2670 struct iwm_add_sta_cmd_v7 cmd;
2671 struct iwm_node *in = (struct iwm_node *)ni;
2672 int err, s;
2673 uint32_t status;
2674
2675 if (start && sc->sc_rx_ba_sessions >= IWM_MAX_RX_BA_SESSIONS) {
2676 ieee80211_addba_req_refuse(ic, ni, tid);
2677 return;
2678 }
2679
2680 memset(&cmd, 0, sizeof(cmd));
2681
2682 cmd.sta_id = IWM_STATION_ID;
2683 cmd.mac_id_n_color
2684 = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
2685 cmd.add_modify = IWM_STA_MODE_MODIFY;
2686
2687 if (start) {
2688 cmd.add_immediate_ba_tid = (uint8_t)tid;
2689 cmd.add_immediate_ba_ssn = ssn;
2690 } else {
2691 cmd.remove_immediate_ba_tid = (uint8_t)tid;
2692 }
2693 cmd.modify_mask = start ? IWM_STA_MODIFY_ADD_BA_TID :
2694 IWM_STA_MODIFY_REMOVE_BA_TID;
2695
2696 status = IWM_ADD_STA_SUCCESS;
2697 err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(cmd), &cmd,
2698 &status);
2699
2700 s = splnet();
2701 if (err == 0 && status == IWM_ADD_STA_SUCCESS) {
2702 if (start) {
2703 sc->sc_rx_ba_sessions++;
2704 ieee80211_addba_req_accept(ic, ni, tid);
2705 } else if (sc->sc_rx_ba_sessions > 0)
2706 sc->sc_rx_ba_sessions--;
2707 } else if (start)
2708 ieee80211_addba_req_refuse(ic, ni, tid);
2709
2710 splx(s);
2711 }
2712
2713 static void
2714 iwm_htprot_task(void *arg)
2715 {
2716 struct iwm_softc *sc = arg;
2717 struct ieee80211com *ic = &sc->sc_ic;
2718 struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
2719 int err;
2720
2721 /* This call updates HT protection based on in->in_ni.ni_htop1. */
2722 err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 1);
2723 if (err)
2724 aprint_error_dev(sc->sc_dev,
2725 "could not change HT protection: error %d\n", err);
2726 }
2727
2728 /*
2729 * This function is called by upper layer when HT protection settings in
2730 * beacons have changed.
2731 */
2732 static void
2733 iwm_update_htprot(struct ieee80211com *ic, struct ieee80211_node *ni)
2734 {
2735 struct iwm_softc *sc = ic->ic_softc;
2736
2737 /* assumes that ni == ic->ic_bss */
2738 task_add(systq, &sc->htprot_task);
2739 }
2740
2741 static void
2742 iwm_ba_task(void *arg)
2743 {
2744 struct iwm_softc *sc = arg;
2745 struct ieee80211com *ic = &sc->sc_ic;
2746 struct ieee80211_node *ni = ic->ic_bss;
2747
2748 if (sc->ba_start)
2749 iwm_sta_rx_agg(sc, ni, sc->ba_tid, sc->ba_ssn, 1);
2750 else
2751 iwm_sta_rx_agg(sc, ni, sc->ba_tid, 0, 0);
2752 }
2753
2754 /*
2755 * This function is called by upper layer when an ADDBA request is received
2756 * from another STA and before the ADDBA response is sent.
2757 */
2758 static int
2759 iwm_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
2760 uint8_t tid)
2761 {
2762 struct ieee80211_rx_ba *ba = &ni->ni_rx_ba[tid];
2763 struct iwm_softc *sc = IC2IFP(ic)->if_softc;
2764
2765 if (sc->sc_rx_ba_sessions >= IWM_MAX_RX_BA_SESSIONS)
2766 return ENOSPC;
2767
2768 sc->ba_start = 1;
2769 sc->ba_tid = tid;
2770 sc->ba_ssn = htole16(ba->ba_winstart);
2771 task_add(systq, &sc->ba_task);
2772
2773 return EBUSY;
2774 }
2775
2776 /*
2777 * This function is called by upper layer on teardown of an HT-immediate
2778 * Block Ack agreement (eg. upon receipt of a DELBA frame).
2779 */
2780 static void
2781 iwm_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
2782 uint8_t tid)
2783 {
2784 struct iwm_softc *sc = IC2IFP(ic)->if_softc;
2785
2786 sc->ba_start = 0;
2787 sc->ba_tid = tid;
2788 task_add(systq, &sc->ba_task);
2789 }
2790 #endif
2791
2792 static void
2793 iwm_set_hw_address_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
2794 const uint16_t *mac_override, const uint16_t *nvm_hw)
2795 {
2796 static const uint8_t reserved_mac[ETHER_ADDR_LEN] = {
2797 0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
2798 };
2799 static const uint8_t etheranyaddr[ETHER_ADDR_LEN] = {
2800 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
2801 };
2802 const uint8_t *hw_addr;
2803
2804 if (mac_override) {
2805 hw_addr = (const uint8_t *)(mac_override +
2806 IWM_MAC_ADDRESS_OVERRIDE_8000);
2807
2808 /*
2809 * Store the MAC address from the MAO section.
2810 * No byte swapping is required in the MAO section.
2811 */
2812 memcpy(data->hw_addr, hw_addr, ETHER_ADDR_LEN);
2813
2814 /*
2815 * Force the use of the OTP MAC address in case of reserved MAC
2816 * address in the NVM, or if address is given but invalid.
2817 */
2818 if (memcmp(reserved_mac, hw_addr, ETHER_ADDR_LEN) != 0 &&
2819 (memcmp(etherbroadcastaddr, data->hw_addr,
2820 sizeof(etherbroadcastaddr)) != 0) &&
2821 (memcmp(etheranyaddr, data->hw_addr,
2822 sizeof(etheranyaddr)) != 0) &&
2823 !ETHER_IS_MULTICAST(data->hw_addr))
2824 return;
2825 }
2826
2827 if (nvm_hw) {
2828 /* Read the mac address from WFMP registers. */
2829 uint32_t mac_addr0 =
2830 htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
2831 uint32_t mac_addr1 =
2832 htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));
2833
2834 hw_addr = (const uint8_t *)&mac_addr0;
2835 data->hw_addr[0] = hw_addr[3];
2836 data->hw_addr[1] = hw_addr[2];
2837 data->hw_addr[2] = hw_addr[1];
2838 data->hw_addr[3] = hw_addr[0];
2839
2840 hw_addr = (const uint8_t *)&mac_addr1;
2841 data->hw_addr[4] = hw_addr[1];
2842 data->hw_addr[5] = hw_addr[0];
2843
2844 return;
2845 }
2846
2847 aprint_error_dev(sc->sc_dev, "mac address not found\n");
2848 memset(data->hw_addr, 0, sizeof(data->hw_addr));
2849 }
2850
2851 static int
2852 iwm_parse_nvm_data(struct iwm_softc *sc, const uint16_t *nvm_hw,
2853 const uint16_t *nvm_sw, const uint16_t *nvm_calib,
2854 const uint16_t *mac_override, const uint16_t *phy_sku,
2855 const uint16_t *regulatory)
2856 {
2857 struct iwm_nvm_data *data = &sc->sc_nvm;
2858 uint8_t hw_addr[ETHER_ADDR_LEN];
2859 uint32_t sku;
2860
2861 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
2862 uint16_t radio_cfg = le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
2863 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
2864 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
2865 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
2866 data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
2867
2868 data->nvm_version = le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
2869 sku = le16_to_cpup(nvm_sw + IWM_SKU);
2870 } else {
2871 uint32_t radio_cfg = le32_to_cpup(phy_sku + IWM_RADIO_CFG_8000);
2872 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
2873 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
2874 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
2875 data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK_8000(radio_cfg);
2876 data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
2877 data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
2878
2879 data->nvm_version = le32_to_cpup(nvm_sw + IWM_NVM_VERSION_8000);
2880 sku = le32_to_cpup(phy_sku + IWM_SKU_8000);
2881 }
2882
2883 data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
2884 data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
2885 data->sku_cap_11n_enable = sku & IWM_NVM_SKU_CAP_11N_ENABLE;
2886 data->sku_cap_mimo_disable = sku & IWM_NVM_SKU_CAP_MIMO_DISABLE;
2887
2888 data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2889
2890 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
2891 memcpy(hw_addr, nvm_hw + IWM_HW_ADDR, ETHER_ADDR_LEN);
2892 data->hw_addr[0] = hw_addr[1];
2893 data->hw_addr[1] = hw_addr[0];
2894 data->hw_addr[2] = hw_addr[3];
2895 data->hw_addr[3] = hw_addr[2];
2896 data->hw_addr[4] = hw_addr[5];
2897 data->hw_addr[5] = hw_addr[4];
2898 } else
2899 iwm_set_hw_address_8000(sc, data, mac_override, nvm_hw);
2900
2901 if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
2902 uint16_t lar_offset, lar_config;
2903 lar_offset = data->nvm_version < 0xE39 ?
2904 IWM_NVM_LAR_OFFSET_8000_OLD : IWM_NVM_LAR_OFFSET_8000;
2905 lar_config = le16_to_cpup(regulatory + lar_offset);
2906 data->lar_enabled = !!(lar_config & IWM_NVM_LAR_ENABLED_8000);
2907 }
2908
2909 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
2910 iwm_init_channel_map(sc, &nvm_sw[IWM_NVM_CHANNELS],
2911 iwm_nvm_channels, __arraycount(iwm_nvm_channels));
2912 else
2913 iwm_init_channel_map(sc, &regulatory[IWM_NVM_CHANNELS_8000],
2914 iwm_nvm_channels_8000, __arraycount(iwm_nvm_channels_8000));
2915
2916 data->calib_version = 255; /* TODO:
2917 this value will prevent some checks from
2918 failing; we need to check whether this
2919 field is still needed, and if so,
2920 where it lives in the NVM */
2921
2922 return 0;
2923 }
2924
2925 static int
2926 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
2927 {
2928 const uint16_t *hw, *sw, *calib, *mac_override = NULL, *phy_sku = NULL;
2929 const uint16_t *regulatory = NULL;
2930
2931 /* Checking for required sections */
2932 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
2933 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2934 !sections[IWM_NVM_SECTION_TYPE_HW].data) {
2935 return ENOENT;
2936 }
2937
2938 hw = (const uint16_t *) sections[IWM_NVM_SECTION_TYPE_HW].data;
2939 } else if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
2940 /* SW and REGULATORY sections are mandatory */
2941 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2942 !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
2943 return ENOENT;
2944 }
2945 /* MAC_OVERRIDE or at least HW section must exist */
2946 if (!sections[IWM_NVM_SECTION_TYPE_HW_8000].data &&
2947 !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
2948 return ENOENT;
2949 }
2950
2951 /* PHY_SKU section is mandatory in B0 */
2952 if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
2953 return ENOENT;
2954 }
2955
2956 regulatory = (const uint16_t *)
2957 sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
2958 hw = (const uint16_t *)
2959 sections[IWM_NVM_SECTION_TYPE_HW_8000].data;
2960 mac_override =
2961 (const uint16_t *)
2962 sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
2963 phy_sku = (const uint16_t *)
2964 sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
2965 } else {
2966 panic("unknown device family %d", sc->sc_device_family);
2967 }
2968
2969 sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
2970 calib = (const uint16_t *)
2971 sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
2972
2973 return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
2974 phy_sku, regulatory);
2975 }
2976
2977 static int
2978 iwm_nvm_init(struct iwm_softc *sc)
2979 {
2980 struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
2981 int i, section, err;
2982 uint16_t len;
2983 uint8_t *buf;
2984 const size_t bufsz = (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) ?
2985 IWM_MAX_NVM_SECTION_SIZE_8000 : IWM_MAX_NVM_SECTION_SIZE_7000;
2986
2987 /* Read From FW NVM */
2988 DPRINTF(("Read NVM\n"));
2989
2990 memset(nvm_sections, 0, sizeof(nvm_sections));
2991
2992 buf = kmem_alloc(bufsz, KM_SLEEP);
2993 if (buf == NULL)
2994 return ENOMEM;
2995
2996 for (i = 0; i < __arraycount(iwm_nvm_to_read); i++) {
2997 section = iwm_nvm_to_read[i];
2998 KASSERT(section <= IWM_NVM_NUM_OF_SECTIONS);
2999
3000 err = iwm_nvm_read_section(sc, section, buf, &len, bufsz);
3001 if (err) {
3002 err = 0;
3003 continue;
3004 }
3005 nvm_sections[section].data = kmem_alloc(len, KM_SLEEP);
3006 if (nvm_sections[section].data == NULL) {
3007 err = ENOMEM;
3008 break;
3009 }
3010 memcpy(nvm_sections[section].data, buf, len);
3011 nvm_sections[section].length = len;
3012 }
3013 kmem_free(buf, bufsz);
3014 if (err == 0)
3015 err = iwm_parse_nvm_sections(sc, nvm_sections);
3016
3017 for (i = 0; i < IWM_NVM_NUM_OF_SECTIONS; i++) {
3018 if (nvm_sections[i].data != NULL)
3019 kmem_free(nvm_sections[i].data, nvm_sections[i].length);
3020 }
3021
3022 return err;
3023 }
3024
3025 static int
3026 iwm_firmware_load_sect(struct iwm_softc *sc, uint32_t dst_addr,
3027 const uint8_t *section, uint32_t byte_cnt)
3028 {
3029 int err = EINVAL;
3030 uint32_t chunk_sz, offset;
3031
3032 chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, byte_cnt);
3033
3034 for (offset = 0; offset < byte_cnt; offset += chunk_sz) {
3035 uint32_t addr, len;
3036 const uint8_t *data;
3037 bool is_extended = false;
3038
3039 addr = dst_addr + offset;
3040 len = MIN(chunk_sz, byte_cnt - offset);
3041 data = section + offset;
3042
3043 if (addr >= IWM_FW_MEM_EXTENDED_START &&
3044 addr <= IWM_FW_MEM_EXTENDED_END)
3045 is_extended = true;
3046
3047 if (is_extended)
3048 iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
3049 IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
3050
3051 err = iwm_firmware_load_chunk(sc, addr, data, len);
3052
3053 if (is_extended)
3054 iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
3055 IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
3056
3057 if (err)
3058 break;
3059 }
3060
3061 return err;
3062 }
3063
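/*
 * Load a single firmware chunk into device memory: copy it into the
 * pre-allocated DMA buffer, point the service DMA channel at it,
 * start the transfer to SRAM address dst_addr, and sleep until the
 * "chunk done" interrupt arrives or a 5 second timeout expires.
 */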
3064 static int
3065 iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
3066 const uint8_t *section, uint32_t byte_cnt)
3067 {
3068 struct iwm_dma_info *dma = &sc->fw_dma;
3069 int err;
3070
3071 /* Copy firmware chunk into pre-allocated DMA-safe memory. */
3072 memcpy(dma->vaddr, section, byte_cnt);
3073 bus_dmamap_sync(sc->sc_dmat, dma->map, 0, byte_cnt,
3074 BUS_DMASYNC_PREWRITE);
3075
3076 sc->sc_fw_chunk_done = 0;
3077
3078 if (!iwm_nic_lock(sc))
3079 return EBUSY;
3080
3081 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
3082 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
3083 IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
3084 dst_addr);
3085 IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
3086 dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
3087 IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
3088 (iwm_get_dma_hi_addr(dma->paddr)
3089 << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
3090 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
3091 1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
3092 1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
3093 IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
3094 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
3095 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
3096 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
3097 IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
3098
3099 iwm_nic_unlock(sc);
3100
3101 /* Wait for this segment to load. */
3102 err = 0;
3103 while (!sc->sc_fw_chunk_done) {
3104 err = tsleep(&sc->sc_fw, 0, "iwmfw", mstohz(5000));
3105 if (err)
3106 break;
3107 }
3108 if (!sc->sc_fw_chunk_done) {
3109 DPRINTF(("%s: fw chunk addr 0x%x len %d failed to load\n",
3110 DEVNAME(sc), dst_addr, byte_cnt));
3111 }
3112
3113 return err;
3114 }
3115
3116 static int
3117 iwm_load_firmware_7000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3118 {
3119 struct iwm_fw_sects *fws;
3120 int err, i;
3121 void *data;
3122 uint32_t dlen;
3123 uint32_t offset;
3124
3125 fws = &sc->sc_fw.fw_sects[ucode_type];
3126 for (i = 0; i < fws->fw_count; i++) {
3127 data = fws->fw_sect[i].fws_data;
3128 dlen = fws->fw_sect[i].fws_len;
3129 offset = fws->fw_sect[i].fws_devoff;
3130 if (dlen > sc->sc_fwdmasegsz) {
3131 err = EFBIG;
3132 } else
3133 err = iwm_firmware_load_sect(sc, offset, data, dlen);
3134 if (err) {
3135 DPRINTF(("%s: could not load firmware chunk %u of %u\n",
3136 DEVNAME(sc), i, fws->fw_count));
3137 return err;
3138 }
3139 }
3140
3141 IWM_WRITE(sc, IWM_CSR_RESET, 0);
3142
3143 return 0;
3144 }
3145
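/*
 * Load the firmware sections destined for one of the two CPUs of
 * 8000-family devices. After each section, progress is reported to
 * the uCode through IWM_FH_UCODE_LOAD_STATUS: one bit per loaded
 * section, CPU1 in the low 16 bits and CPU2 in the high 16 bits
 * (via shift_param).
 */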
3146 static int
3147 iwm_load_cpu_sections_8000(struct iwm_softc *sc, struct iwm_fw_sects *fws,
3148 int cpu, int *first_ucode_section)
3149 {
3150 int shift_param;
3151 int i, err = 0, sec_num = 0x1;
3152 uint32_t val, last_read_idx = 0;
3153 void *data;
3154 uint32_t dlen;
3155 uint32_t offset;
3156
3157 if (cpu == 1) {
3158 shift_param = 0;
3159 *first_ucode_section = 0;
3160 } else {
3161 shift_param = 16;
3162 (*first_ucode_section)++;
3163 }
3164
3165 for (i = *first_ucode_section; i < IWM_UCODE_SECT_MAX; i++) {
3166 last_read_idx = i;
3167 data = fws->fw_sect[i].fws_data;
3168 dlen = fws->fw_sect[i].fws_len;
3169 offset = fws->fw_sect[i].fws_devoff;
3170
3171 /*
3172 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
3173 * CPU1 to CPU2.
3174 * PAGING_SEPARATOR_SECTION delimiter - separate between
3175 * CPU2 non paged to CPU2 paging sec.
3176 */
3177 if (!data || offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
3178 offset == IWM_PAGING_SEPARATOR_SECTION)
3179 break;
3180
3181 if (dlen > sc->sc_fwdmasegsz) {
3182 err = EFBIG;
3183 } else
3184 err = iwm_firmware_load_sect(sc, offset, data, dlen);
3185 if (err) {
3186 DPRINTF(("%s: could not load firmware chunk %d "
3187 "(error %d)\n", DEVNAME(sc), i, err));
3188 return err;
3189 }
3190
3191 /* Notify the ucode of the loaded section number and status */
3192 if (iwm_nic_lock(sc)) {
3193 val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
3194 val = val | (sec_num << shift_param);
3195 IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
3196 sec_num = (sec_num << 1) | 0x1;
3197 iwm_nic_unlock(sc);
3198
3199 /*
3200 * The firmware won't load correctly without this delay.
3201 */
3202 DELAY(8000);
3203 }
3204 }
3205
3206 *first_ucode_section = last_read_idx;
3207
3208 if (iwm_nic_lock(sc)) {
3209 if (cpu == 1)
3210 IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
3211 else
3212 IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
3213 iwm_nic_unlock(sc);
3214 }
3215
3216 return 0;
3217 }
3218
3219 static int
3220 iwm_load_firmware_8000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3221 {
3222 struct iwm_fw_sects *fws;
3223 int err = 0;
3224 int first_ucode_section;
3225
3226 fws = &sc->sc_fw.fw_sects[ucode_type];
3227
3228 /* configure the ucode to be ready to get the secured image */
3229 /* release CPU reset */
3230 if (iwm_nic_lock(sc)) {
3231 iwm_write_prph(sc, IWM_RELEASE_CPU_RESET,
3232 IWM_RELEASE_CPU_RESET_BIT);
3233 iwm_nic_unlock(sc);
3234 }
3235
3236 /* load to FW the binary Secured sections of CPU1 */
3237 err = iwm_load_cpu_sections_8000(sc, fws, 1, &first_ucode_section);
3238 if (err)
3239 return err;
3240
3241 /* load to FW the binary sections of CPU2 */
3242 return iwm_load_cpu_sections_8000(sc, fws, 2, &first_ucode_section);
3243 }
3244
3245 static int
3246 iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3247 {
3248 int err, w;
3249
3250 sc->sc_uc.uc_intr = 0;
3251
3252 if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
3253 err = iwm_load_firmware_8000(sc, ucode_type);
3254 else
3255 err = iwm_load_firmware_7000(sc, ucode_type);
3256 if (err)
3257 return err;
3258
3259 /* wait for the firmware to load */
3260 for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++)
3261 err = tsleep(&sc->sc_uc, 0, "iwmuc", mstohz(100));
3262 if (err || !sc->sc_uc.uc_ok) {
3263 aprint_error_dev(sc->sc_dev,
3264 "could not load firmware (error %d, ok %d)\n",
3265 err, sc->sc_uc.uc_ok);
3266 if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
3267 aprint_error_dev(sc->sc_dev, "cpu1 status: 0x%x\n",
3268 iwm_read_prph(sc, IWM_SB_CPU_1_STATUS));
3269 aprint_error_dev(sc->sc_dev, "cpu2 status: 0x%x\n",
3270 iwm_read_prph(sc, IWM_SB_CPU_2_STATUS));
3271 }
3272 }
3273
3274 return err;
3275 }
3276
3277 static int
3278 iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3279 {
3280 int err;
3281
3282 IWM_WRITE(sc, IWM_CSR_INT, ~0);
3283
3284 err = iwm_nic_init(sc);
3285 if (err) {
3286 aprint_error_dev(sc->sc_dev, "Unable to init nic\n");
3287 return err;
3288 }
3289
3290 /* make sure rfkill handshake bits are cleared */
3291 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
3292 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
3293 IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
3294
3295 /* clear (again), then enable host interrupts */
3296 IWM_WRITE(sc, IWM_CSR_INT, ~0);
3297 iwm_enable_interrupts(sc);
3298
3299 /* really make sure rfkill handshake bits are cleared */
3300 /* maybe we should write a few times more? just to make sure */
3301 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
3302 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
3303
3304 return iwm_load_firmware(sc, ucode_type);
3305 }
3306
3307 static int
3308 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
3309 {
3310 struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
3311 .valid = htole32(valid_tx_ant),
3312 };
3313
3314 return iwm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD, 0,
3315 sizeof(tx_ant_cmd), &tx_ant_cmd);
3316 }
3317
3318 static int
3319 iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
3320 {
3321 struct iwm_phy_cfg_cmd phy_cfg_cmd;
3322 enum iwm_ucode_type ucode_type = sc->sc_uc_current;
3323
3324 phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
3325 phy_cfg_cmd.calib_control.event_trigger =
3326 sc->sc_default_calib[ucode_type].event_trigger;
3327 phy_cfg_cmd.calib_control.flow_trigger =
3328 sc->sc_default_calib[ucode_type].flow_trigger;
3329
3330 DPRINTFN(10, ("Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg));
3331 return iwm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, 0,
3332 sizeof(phy_cfg_cmd), &phy_cfg_cmd);
3333 }
3334
3335 static int
3336 iwm_load_ucode_wait_alive(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3337 {
3338 enum iwm_ucode_type old_type = sc->sc_uc_current;
3339 int err;
3340
3341 err = iwm_read_firmware(sc, ucode_type);
3342 if (err)
3343 return err;
3344
3345 sc->sc_uc_current = ucode_type;
3346 err = iwm_start_fw(sc, ucode_type);
3347 if (err) {
3348 sc->sc_uc_current = old_type;
3349 return err;
3350 }
3351
3352 return iwm_post_alive(sc);
3353 }
3354
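/*
 * Run the init firmware image: read the NVM (stopping there if
 * justnvm is set), then send the configuration commands that start
 * the firmware's internal calibrations, and wait for the init
 * complete notification.
 */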
3355 static int
3356 iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
3357 {
3358 int err;
3359
3360 if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
3361 aprint_error_dev(sc->sc_dev,
3362 "radio is disabled by hardware switch\n");
3363 return EPERM;
3364 }
3365
3366 sc->sc_init_complete = 0;
3367 err = iwm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_INIT);
3368 if (err) {
3369 DPRINTF(("%s: failed to load init firmware\n", DEVNAME(sc)));
3370 return err;
3371 }
3372
3373 if (justnvm) {
3374 err = iwm_nvm_init(sc);
3375 if (err) {
3376 aprint_error_dev(sc->sc_dev, "failed to read nvm\n");
3377 return err;
3378 }
3379
3380 memcpy(&sc->sc_ic.ic_myaddr, &sc->sc_nvm.hw_addr,
3381 ETHER_ADDR_LEN);
3382 return 0;
3383 }
3384
3385 err = iwm_send_bt_init_conf(sc);
3386 if (err)
3387 return err;
3388
3389 err = iwm_sf_config(sc, IWM_SF_INIT_OFF);
3390 if (err)
3391 return err;
3392
3393 err = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc));
3394 if (err)
3395 return err;
3396
3397 /*
3398 * Send the PHY configuration command to the init uCode
3399 * to start the 16.0 uCode init image's internal calibrations.
3400 */
3401 err = iwm_send_phy_cfg_cmd(sc);
3402 if (err)
3403 return err;
3404
3405 /*
3406 * Nothing to do but wait for the init complete notification
3407 * from the firmware
3408 */
3409 while (!sc->sc_init_complete) {
3410 err = tsleep(&sc->sc_init_complete, 0, "iwminit", mstohz(2000));
3411 if (err)
3412 break;
3413 }
3414
3415 return err;
3416 }
3417
3418 static int
3419 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
3420 {
3421 struct iwm_rx_ring *ring = &sc->rxq;
3422 struct iwm_rx_data *data = &ring->data[idx];
3423 struct mbuf *m;
3424 int err;
3425 int fatal = 0;
3426
3427 m = m_gethdr(M_DONTWAIT, MT_DATA);
3428 if (m == NULL)
3429 return ENOBUFS;
3430
3431 if (size <= MCLBYTES) {
3432 MCLGET(m, M_DONTWAIT);
3433 } else {
3434 MEXTMALLOC(m, IWM_RBUF_SIZE, M_DONTWAIT);
3435 }
3436 if ((m->m_flags & M_EXT) == 0) {
3437 m_freem(m);
3438 return ENOBUFS;
3439 }
3440
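/*
 * If this slot already holds an mbuf we must unload its DMA map
 * first; from then on, a failure to load the new mbuf would leave
 * the RX descriptor pointing at freed memory, which is why such a
 * failure is treated as fatal below.
 */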
3441 if (data->m != NULL) {
3442 bus_dmamap_unload(sc->sc_dmat, data->map);
3443 fatal = 1;
3444 }
3445
3446 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3447 err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
3448 BUS_DMA_READ|BUS_DMA_NOWAIT);
3449 if (err) {
3450 /* XXX */
3451 if (fatal)
3452 panic("iwm: could not load RX mbuf");
3453 m_freem(m);
3454 return err;
3455 }
3456 data->m = m;
3457 bus_dmamap_sync(sc->sc_dmat, data->map, 0, size, BUS_DMASYNC_PREREAD);
3458
3459 /* Update RX descriptor. */
3460 ring->desc[idx] = htole32(data->map->dm_segs[0].ds_addr >> 8);
3461 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
3462 idx * sizeof(uint32_t), sizeof(uint32_t), BUS_DMASYNC_PREWRITE);
3463
3464 return 0;
3465 }
3466
3467 #define IWM_RSSI_OFFSET 50
3468 static int
3469 iwm_calc_rssi(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3470 {
3471 int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
3472 uint32_t agc_a, agc_b;
3473 uint32_t val;
3474
3475 val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_AGC_IDX]);
3476 agc_a = (val & IWM_OFDM_AGC_A_MSK) >> IWM_OFDM_AGC_A_POS;
3477 agc_b = (val & IWM_OFDM_AGC_B_MSK) >> IWM_OFDM_AGC_B_POS;
3478
3479 val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_RSSI_AB_IDX]);
3480 rssi_a = (val & IWM_OFDM_RSSI_INBAND_A_MSK) >> IWM_OFDM_RSSI_A_POS;
3481 rssi_b = (val & IWM_OFDM_RSSI_INBAND_B_MSK) >> IWM_OFDM_RSSI_B_POS;
3482
3483 /*
3484 * dBm = rssi dB - agc dB - constant.
3485 * Higher AGC (higher radio gain) means lower signal.
3486 */
3487 rssi_a_dbm = rssi_a - IWM_RSSI_OFFSET - agc_a;
3488 rssi_b_dbm = rssi_b - IWM_RSSI_OFFSET - agc_b;
3489 max_rssi_dbm = MAX(rssi_a_dbm, rssi_b_dbm);
3490
3491 DPRINTF(("Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
3492 rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b));
3493
3494 return max_rssi_dbm;
3495 }
3496
3497 /*
3498 * RSSI values are reported by the FW as positive values; negate them
3499 * to obtain their dBm. Account for missing antennas by replacing 0
3500 * values by -256 dBm: practically 0 power and a non-feasible 8-bit value.
3501 */
3502 static int
3503 iwm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3504 {
3505 int energy_a, energy_b, energy_c, max_energy;
3506 uint32_t val;
3507
3508 val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
3509 energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3510 IWM_RX_INFO_ENERGY_ANT_A_POS;
3511 energy_a = energy_a ? -energy_a : -256;
3512 energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3513 IWM_RX_INFO_ENERGY_ANT_B_POS;
3514 energy_b = energy_b ? -energy_b : -256;
3515 energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3516 IWM_RX_INFO_ENERGY_ANT_C_POS;
3517 energy_c = energy_c ? -energy_c : -256;
3518 max_energy = MAX(energy_a, energy_b);
3519 max_energy = MAX(max_energy, energy_c);
3520
3521 DPRINTFN(12, ("energy In A %d B %d C %d, and max %d\n",
3522 energy_a, energy_b, energy_c, max_energy));
3523
3524 return max_energy;
3525 }
3526
3527 static void
3528 iwm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3529 struct iwm_rx_data *data)
3530 {
3531 struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
3532
3533 DPRINTFN(20, ("received PHY stats\n"));
3534 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
3535 sizeof(*phy_info), BUS_DMASYNC_POSTREAD);
3536
3537 memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3538 }
3539
3540 /*
3541 * Retrieve the average noise (in dBm) among receivers.
3542 */
3543 static int
3544 iwm_get_noise(const struct iwm_statistics_rx_non_phy *stats)
3545 {
3546 int i, total, nbant, noise;
3547
3548 total = nbant = noise = 0;
3549 for (i = 0; i < 3; i++) {
3550 noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
3551 if (noise) {
3552 total += noise;
3553 nbant++;
3554 }
3555 }
3556
3557 /* There should be at least one antenna but check anyway. */
3558 return (nbant == 0) ? -127 : (total / nbant) - 107;
3559 }
3560
3561 static void
3562 iwm_rx_rx_mpdu(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3563 struct iwm_rx_data *data)
3564 {
3565 struct ieee80211com *ic = &sc->sc_ic;
3566 struct ieee80211_frame *wh;
3567 struct ieee80211_node *ni;
3568 struct ieee80211_channel *c = NULL;
3569 struct mbuf *m;
3570 struct iwm_rx_phy_info *phy_info;
3571 struct iwm_rx_mpdu_res_start *rx_res;
3572 int device_timestamp;
3573 uint32_t len;
3574 uint32_t rx_pkt_status;
3575 int rssi;
3576 int s;
3577
3578 bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
3579 BUS_DMASYNC_POSTREAD);
3580
3581 phy_info = &sc->sc_last_phy_info;
3582 rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
3583 wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
3584 len = le16toh(rx_res->byte_count);
3585 rx_pkt_status = le32toh(*(uint32_t *)(pkt->data +
3586 sizeof(*rx_res) + len));
3587
3588 m = data->m;
3589 m->m_data = pkt->data + sizeof(*rx_res);
3590 m->m_pkthdr.len = m->m_len = len;
3591
3592 if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
3593 DPRINTF(("dsp size out of range [0,20]: %d\n",
3594 phy_info->cfg_phy_cnt));
3595 return;
3596 }
3597
3598 if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
3599 !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
3600 DPRINTF(("Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status));
3601 return; /* drop */
3602 }
3603
3604 device_timestamp = le32toh(phy_info->system_timestamp);
3605
3606 if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_RX_ENERGY_API) {
3607 rssi = iwm_get_signal_strength(sc, phy_info);
3608 } else {
3609 rssi = iwm_calc_rssi(sc, phy_info);
3610 }
3611 rssi = -rssi;
3612
3613 if (ic->ic_state == IEEE80211_S_SCAN)
3614 iwm_fix_channel(sc, m);
3615
3616 if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0)
3617 return;
3618
3619 m_set_rcvif(m, IC2IFP(ic));
3620
3621 if (le32toh(phy_info->channel) < __arraycount(ic->ic_channels))
3622 c = &ic->ic_channels[le32toh(phy_info->channel)];
3623
3624 s = splnet();
3625
3626 ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3627 if (c)
3628 ni->ni_chan = c;
3629
3630 if (__predict_false(sc->sc_drvbpf != NULL)) {
3631 struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3632
3633 tap->wr_flags = 0;
3634 if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3635 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3636 tap->wr_chan_freq =
3637 htole16(ic->ic_channels[phy_info->channel].ic_freq);
3638 tap->wr_chan_flags =
3639 htole16(ic->ic_channels[phy_info->channel].ic_flags);
3640 tap->wr_dbm_antsignal = (int8_t)rssi;
3641 tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3642 tap->wr_tsft = phy_info->system_timestamp;
3643 if (phy_info->phy_flags &
3644 htole16(IWM_RX_RES_PHY_FLAGS_OFDM_HT)) {
3645 uint8_t mcs = (phy_info->rate_n_flags &
3646 htole32(IWM_RATE_HT_MCS_RATE_CODE_MSK |
3647 IWM_RATE_HT_MCS_NSS_MSK));
3648 tap->wr_rate = (0x80 | mcs);
3649 } else {
3650 uint8_t rate = (phy_info->rate_n_flags &
3651 htole32(IWM_RATE_LEGACY_RATE_MSK));
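/*
 * Legacy rates are reported as PLCP values: CCK in 100 kb/s units
 * (10 = 1 Mb/s ... 110 = 11 Mb/s) and OFDM as 802.11 signal codes.
 * Convert them to radiotap's 500 kb/s rate units.
 */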
3652 switch (rate) {
3653 /* CCK rates. */
3654 case 10: tap->wr_rate = 2; break;
3655 case 20: tap->wr_rate = 4; break;
3656 case 55: tap->wr_rate = 11; break;
3657 case 110: tap->wr_rate = 22; break;
3658 /* OFDM rates. */
3659 case 0xd: tap->wr_rate = 12; break;
3660 case 0xf: tap->wr_rate = 18; break;
3661 case 0x5: tap->wr_rate = 24; break;
3662 case 0x7: tap->wr_rate = 36; break;
3663 case 0x9: tap->wr_rate = 48; break;
3664 case 0xb: tap->wr_rate = 72; break;
3665 case 0x1: tap->wr_rate = 96; break;
3666 case 0x3: tap->wr_rate = 108; break;
3667 /* Unknown rate: should not happen. */
3668 default: tap->wr_rate = 0;
3669 }
3670 }
3671
3672 bpf_mtap2(sc->sc_drvbpf, tap, sc->sc_rxtap_len, m);
3673 }
3674 ieee80211_input(ic, m, ni, rssi, device_timestamp);
3675 ieee80211_free_node(ni);
3676
3677 splx(s);
3678 }
3679
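/*
 * Handle the TX response for a single, non-aggregated frame: feed the
 * retry count into the AMRR rate control state and account the frame
 * as an output packet or an output error.
 */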
3680 static void
3681 iwm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3682 struct iwm_node *in)
3683 {
3684 struct ieee80211com *ic = &sc->sc_ic;
3685 struct ifnet *ifp = IC2IFP(ic);
3686 struct iwm_tx_resp *tx_resp = (void *)pkt->data;
3687 int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3688 int failack = tx_resp->failure_frame;
3689
3690 KASSERT(tx_resp->frame_count == 1);
3691
3692 /* Update rate control statistics. */
3693 in->in_amn.amn_txcnt++;
3694 if (failack > 0) {
3695 in->in_amn.amn_retrycnt++;
3696 }
3697
3698 if (status != IWM_TX_STATUS_SUCCESS &&
3699 status != IWM_TX_STATUS_DIRECT_DONE)
3700 ifp->if_oerrors++;
3701 else
3702 ifp->if_opackets++;
3703 }
3704
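/*
 * Process a TX completion notification: locate the ring slot from the
 * command header, let iwm_rx_tx_cmd_single() account the result, then
 * unload and free the transmitted mbuf and release the node reference.
 * If the ring drains below the low-water mark, clear the queue-full
 * bit and restart the interface send queue.
 */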
3705 static void
3706 iwm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3707 struct iwm_rx_data *data)
3708 {
3709 struct ieee80211com *ic = &sc->sc_ic;
3710 struct ifnet *ifp = IC2IFP(ic);
3711 struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
3712 int idx = cmd_hdr->idx;
3713 int qid = cmd_hdr->qid;
3714 struct iwm_tx_ring *ring = &sc->txq[qid];
3715 struct iwm_tx_data *txd = &ring->data[idx];
3716 struct iwm_node *in = txd->in;
3717 int s;
3718
3719 s = splnet();
3720
3721 if (txd->done) {
3722 DPRINTF(("%s: got tx interrupt that's already been handled!\n",
3723 DEVNAME(sc)));
3724 splx(s);
3725 return;
3726 }
3727
3728 bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
3729 BUS_DMASYNC_POSTREAD);
3730
3731 sc->sc_tx_timer = 0;
3732
3733 iwm_rx_tx_cmd_single(sc, pkt, in);
3734
3735 bus_dmamap_sync(sc->sc_dmat, txd->map, 0, txd->map->dm_mapsize,
3736 BUS_DMASYNC_POSTWRITE);
3737 bus_dmamap_unload(sc->sc_dmat, txd->map);
3738 m_freem(txd->m);
3739
3740 DPRINTFN(8, ("free txd %p, in %p\n", txd, txd->in));
3741 KASSERT(txd->done == 0);
3742 txd->done = 1;
3743 KASSERT(txd->in);
3744
3745 txd->m = NULL;
3746 txd->in = NULL;
3747 ieee80211_free_node(&in->in_ni);
3748
3749 if (--ring->queued < IWM_TX_RING_LOMARK) {
3750 sc->qfullmsk &= ~(1 << qid);
3751 if (sc->qfullmsk == 0 && (ifp->if_flags & IFF_OACTIVE)) {
3752 ifp->if_flags &= ~IFF_OACTIVE;
3753 if_start_lock(ifp);
3754 }
3755 }
3756
3757 splx(s);
3758 }
3759
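/*
 * Bind (or unbind, depending on the action) our MAC context to the
 * PHY context, i.e. the channel configuration, it should use. The
 * firmware replies with a status word; any non-zero status is mapped
 * to EIO.
 */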
3760 static int
3761 iwm_binding_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action)
3762 {
3763 struct iwm_binding_cmd cmd;
3764 struct iwm_phy_ctxt *phyctxt = in->in_phyctxt;
3765 int i, err;
3766 uint32_t status;
3767
3768 memset(&cmd, 0, sizeof(cmd));
3769
3770 cmd.id_and_color
3771 = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
3772 cmd.action = htole32(action);
3773 cmd.phy = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
3774
3775 cmd.macs[0] = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
3776 for (i = 1; i < IWM_MAX_MACS_IN_BINDING; i++)
3777 cmd.macs[i] = htole32(IWM_FW_CTXT_INVALID);
3778
3779 status = 0;
3780 err = iwm_send_cmd_pdu_status(sc, IWM_BINDING_CONTEXT_CMD,
3781 sizeof(cmd), &cmd, &status);
3782 if (err == 0 && status != 0)
3783 err = EIO;
3784
3785 return err;
3786 }
3787
3788 static void
3789 iwm_phy_ctxt_cmd_hdr(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
3790 struct iwm_phy_context_cmd *cmd, uint32_t action, uint32_t apply_time)
3791 {
3792 memset(cmd, 0, sizeof(struct iwm_phy_context_cmd));
3793
3794 cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(ctxt->id,
3795 ctxt->color));
3796 cmd->action = htole32(action);
3797 cmd->apply_time = htole32(apply_time);
3798 }
3799
3800 static void
3801 iwm_phy_ctxt_cmd_data(struct iwm_softc *sc, struct iwm_phy_context_cmd *cmd,
3802 struct ieee80211_channel *chan, uint8_t chains_static,
3803 uint8_t chains_dynamic)
3804 {
3805 struct ieee80211com *ic = &sc->sc_ic;
3806 uint8_t active_cnt, idle_cnt;
3807
3808 cmd->ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
3809 IWM_PHY_BAND_24 : IWM_PHY_BAND_5;
3810
3811 cmd->ci.channel = ieee80211_chan2ieee(ic, chan);
3812 cmd->ci.width = IWM_PHY_VHT_CHANNEL_MODE20;
3813 cmd->ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
3814
3815 /* Set the RX chains. */
3816 idle_cnt = chains_static;
3817 active_cnt = chains_dynamic;
3818
3819 cmd->rxchain_info = htole32(iwm_fw_valid_rx_ant(sc) <<
3820 IWM_PHY_RX_CHAIN_VALID_POS);
3821 cmd->rxchain_info |= htole32(idle_cnt << IWM_PHY_RX_CHAIN_CNT_POS);
3822 cmd->rxchain_info |= htole32(active_cnt <<
3823 IWM_PHY_RX_CHAIN_MIMO_CNT_POS);
3824
3825 cmd->txchain_info = htole32(iwm_fw_valid_tx_ant(sc));
3826 }
3827
3828 static int
3829 iwm_phy_ctxt_cmd(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
3830 uint8_t chains_static, uint8_t chains_dynamic, uint32_t action,
3831 uint32_t apply_time)
3832 {
3833 struct iwm_phy_context_cmd cmd;
3834
3835 iwm_phy_ctxt_cmd_hdr(sc, ctxt, &cmd, action, apply_time);
3836
3837 iwm_phy_ctxt_cmd_data(sc, &cmd, ctxt->channel,
3838 chains_static, chains_dynamic);
3839
3840 return iwm_send_cmd_pdu(sc, IWM_PHY_CONTEXT_CMD, 0,
3841 sizeof(struct iwm_phy_context_cmd), &cmd);
3842 }
3843
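/*
 * Send a command to the firmware. Commands may be issued
 * synchronously or asynchronously (IWM_CMD_ASYNC); a caller that
 * wants to inspect the response (IWM_CMD_WANT_SKB) first gains
 * exclusive use of sc_cmd_resp. Payloads too large for the
 * pre-allocated command buffer are copied into a dedicated mbuf and
 * DMA-mapped separately.
 */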
3844 static int
3845 iwm_send_cmd(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
3846 {
3847 struct iwm_tx_ring *ring = &sc->txq[IWM_CMD_QUEUE];
3848 struct iwm_tfd *desc;
3849 struct iwm_tx_data *txdata;
3850 struct iwm_device_cmd *cmd;
3851 struct mbuf *m;
3852 bus_addr_t paddr;
3853 uint32_t addr_lo;
3854 int err = 0, i, paylen, off, s;
3855 int code;
3856 int async, wantresp;
3857 int group_id;
3858 size_t hdrlen, datasz;
3859 uint8_t *data;
3860
3861 code = hcmd->id;
3862 async = hcmd->flags & IWM_CMD_ASYNC;
3863 wantresp = hcmd->flags & IWM_CMD_WANT_SKB;
3864
3865 for (i = 0, paylen = 0; i < __arraycount(hcmd->len); i++) {
3866 paylen += hcmd->len[i];
3867 }
3868
3869 /* if the command wants an answer, busy sc_cmd_resp */
3870 if (wantresp) {
3871 KASSERT(!async);
3872 while (sc->sc_wantresp != IWM_CMD_RESP_IDLE)
3873 tsleep(&sc->sc_wantresp, 0, "iwmcmdsl", 0);
3874 sc->sc_wantresp = ring->qid << 16 | ring->cur;
3875 }
3876
3877 /*
3878 * Is the hardware still available? (after e.g. above wait).
3879 */
3880 s = splnet();
3881 if (sc->sc_flags & IWM_FLAG_STOPPED) {
3882 err = ENXIO;
3883 goto out;
3884 }
3885
3886 desc = &ring->desc[ring->cur];
3887 txdata = &ring->data[ring->cur];
3888
3889 group_id = iwm_cmd_groupid(code);
3890 if (group_id != 0) {
3891 hdrlen = sizeof(cmd->hdr_wide);
3892 datasz = sizeof(cmd->data_wide);
3893 } else {
3894 hdrlen = sizeof(cmd->hdr);
3895 datasz = sizeof(cmd->data);
3896 }
3897
3898 if (paylen > datasz) {
3899 /* Command is too large to fit in pre-allocated space. */
3900 size_t totlen = hdrlen + paylen;
3901 if (paylen > IWM_MAX_CMD_PAYLOAD_SIZE) {
3902 aprint_error_dev(sc->sc_dev,
3903 "firmware command too long (%zd bytes)\n", totlen);
3904 err = EINVAL;
3905 goto out;
3906 }
3907 m = m_gethdr(M_DONTWAIT, MT_DATA);
3908 if (m == NULL) {
3909 err = ENOMEM;
3910 goto out;
3911 }
3912 MEXTMALLOC(m, IWM_RBUF_SIZE, M_DONTWAIT);
3913 if (!(m->m_flags & M_EXT)) {
3914 aprint_error_dev(sc->sc_dev,
3915 "could not get fw cmd mbuf (%zd bytes)\n", totlen);
3916 m_freem(m);
3917 err = ENOMEM;
3918 goto out;
3919 }
3920 cmd = mtod(m, struct iwm_device_cmd *);
3921 err = bus_dmamap_load(sc->sc_dmat, txdata->map, cmd,
3922 totlen, NULL, BUS_DMA_NOWAIT | BUS_DMA_WRITE);
3923 if (err) {
3924 aprint_error_dev(sc->sc_dev,
3925 "could not load fw cmd mbuf (%zd bytes)\n", totlen);
3926 m_freem(m);
3927 goto out;
3928 }
3929 txdata->m = m;
3930 paddr = txdata->map->dm_segs[0].ds_addr;
3931 } else {
3932 cmd = &ring->cmd[ring->cur];
3933 paddr = txdata->cmd_paddr;
3934 }
3935
3936 if (group_id != 0) {
3937 cmd->hdr_wide.opcode = iwm_cmd_opcode(code);
3938 cmd->hdr_wide.group_id = group_id;
3939 cmd->hdr_wide.qid = ring->qid;
3940 cmd->hdr_wide.idx = ring->cur;
3941 cmd->hdr_wide.length = htole16(paylen);
3942 cmd->hdr_wide.version = iwm_cmd_version(code);
3943 data = cmd->data_wide;
3944 } else {
3945 cmd->hdr.code = code;
3946 cmd->hdr.flags = 0;
3947 cmd->hdr.qid = ring->qid;
3948 cmd->hdr.idx = ring->cur;
3949 data = cmd->data;
3950 }
3951
3952 for (i = 0, off = 0; i < __arraycount(hcmd->data); i++) {
3953 if (hcmd->len[i] == 0)
3954 continue;
3955 memcpy(data + off, hcmd->data[i], hcmd->len[i]);
3956 off += hcmd->len[i];
3957 }
3958 KASSERT(off == paylen);
3959
3960 /* lo field is not aligned */
3961 addr_lo = htole32((uint32_t)paddr);
3962 memcpy(&desc->tbs[0].lo, &addr_lo, sizeof(uint32_t));
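/*
 * hi_n_len packs the upper bits of the 36-bit DMA address into its
 * low nibble and the transfer-buffer byte count into the remaining
 * bits, hence the shift by 4 below.
 */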
3963 desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(paddr)
3964 | ((hdrlen + paylen) << 4));
3965 desc->num_tbs = 1;
3966
3967 DPRINTFN(8, ("iwm_send_cmd 0x%x size=%zu %s\n",
3968 code, hdrlen + paylen, async ? "(async)" : ""));
3969
3970 if (paylen > datasz) {
3971 bus_dmamap_sync(sc->sc_dmat, txdata->map, 0, hdrlen + paylen,
3972 BUS_DMASYNC_PREWRITE);
3973 } else {
3974 bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
3975 (uint8_t *)cmd - (uint8_t *)ring->cmd, hdrlen + paylen,
3976 BUS_DMASYNC_PREWRITE);
3977 }
3978 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
3979 (uint8_t *)desc - (uint8_t *)ring->desc, sizeof(*desc),
3980 BUS_DMASYNC_PREWRITE);
3981
3982 err = iwm_set_cmd_in_flight(sc);
3983 if (err)
3984 goto out;
3985 ring->queued++;
3986
3987 #if 0
3988 iwm_update_sched(sc, ring->qid, ring->cur, 0, 0);
3989 #endif
3990 DPRINTF(("sending command 0x%x qid %d, idx %d\n",
3991 code, ring->qid, ring->cur));
3992
3993 /* Kick command ring. */
3994 ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
3995 IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3996
3997 if (!async) {
3998 int generation = sc->sc_generation;
3999 err = tsleep(desc, PCATCH, "iwmcmd", mstohz(1000));
4000 if (err == 0) {
4001 /* if hardware is no longer up, return error */
4002 if (generation != sc->sc_generation) {
4003 err = ENXIO;
4004 } else {
4005 hcmd->resp_pkt = (void *)sc->sc_cmd_resp;
4006 }
4007 }
4008 }
4009 out:
4010 if (wantresp && err) {
4011 iwm_free_resp(sc, hcmd);
4012 }
4013 splx(s);
4014
4015 return err;
4016 }
4017
4018 static int
4019 iwm_send_cmd_pdu(struct iwm_softc *sc, uint32_t id, uint32_t flags,
4020 uint16_t len, const void *data)
4021 {
4022 struct iwm_host_cmd cmd = {
4023 .id = id,
4024 .len = { len, },
4025 .data = { data, },
4026 .flags = flags,
4027 };
4028
4029 return iwm_send_cmd(sc, &cmd);
4030 }
4031
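/*
 * Synchronous command wrapper which extracts the 32-bit status word
 * from the firmware's response. The response buffer is always
 * released before returning, even on error.
 */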
4032 static int
4033 iwm_send_cmd_status(struct iwm_softc *sc, struct iwm_host_cmd *cmd,
4034 uint32_t *status)
4035 {
4036 struct iwm_rx_packet *pkt;
4037 struct iwm_cmd_response *resp;
4038 int err, resp_len;
4039
4040 KASSERT((cmd->flags & IWM_CMD_WANT_SKB) == 0);
4041 cmd->flags |= IWM_CMD_WANT_SKB;
4042
4043 err = iwm_send_cmd(sc, cmd);
4044 if (err)
4045 return err;
4046 pkt = cmd->resp_pkt;
4047
4048 /* Can happen if RFKILL is asserted */
4049 if (!pkt) {
4050 err = 0;
4051 goto out_free_resp;
4052 }
4053
4054 if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
4055 err = EIO;
4056 goto out_free_resp;
4057 }
4058
4059 resp_len = iwm_rx_packet_payload_len(pkt);
4060 if (resp_len != sizeof(*resp)) {
4061 err = EIO;
4062 goto out_free_resp;
4063 }
4064
4065 resp = (void *)pkt->data;
4066 *status = le32toh(resp->status);
4067 out_free_resp:
4068 iwm_free_resp(sc, cmd);
4069 return err;
4070 }
4071
4072 static int
4073 iwm_send_cmd_pdu_status(struct iwm_softc *sc, uint32_t id, uint16_t len,
4074 const void *data, uint32_t *status)
4075 {
4076 struct iwm_host_cmd cmd = {
4077 .id = id,
4078 .len = { len, },
4079 .data = { data, },
4080 };
4081
4082 return iwm_send_cmd_status(sc, &cmd, status);
4083 }
4084
4085 static void
4086 iwm_free_resp(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
4087 {
4088 KASSERT(sc->sc_wantresp != IWM_CMD_RESP_IDLE);
4089 KASSERT((hcmd->flags & IWM_CMD_WANT_SKB) == IWM_CMD_WANT_SKB);
4090 sc->sc_wantresp = IWM_CMD_RESP_IDLE;
4091 wakeup(&sc->sc_wantresp);
4092 }
4093
4094 static void
4095 iwm_cmd_done(struct iwm_softc *sc, int qid, int idx)
4096 {
4097 struct iwm_tx_ring *ring = &sc->txq[IWM_CMD_QUEUE];
4098 struct iwm_tx_data *data;
4099 int s;
4100
4101 if (qid != IWM_CMD_QUEUE) {
4102 return; /* Not a command ack. */
4103 }
4104
4105 s = splnet();
4106
4107 data = &ring->data[idx];
4108
4109 if (data->m != NULL) {
4110 bus_dmamap_sync(sc->sc_dmat, data->map, 0,
4111 data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
4112 bus_dmamap_unload(sc->sc_dmat, data->map);
4113 m_freem(data->m);
4114 data->m = NULL;
4115 }
4116 wakeup(&ring->desc[idx]);
4117
4118 if (((idx + ring->queued) % IWM_TX_RING_COUNT) != ring->cur) {
4119 aprint_error_dev(sc->sc_dev,
4120 "Some HCMDs skipped?: idx=%d queued=%d cur=%d\n",
4121 idx, ring->queued, ring->cur);
4122 }
4123
4124 KASSERT(ring->queued > 0);
4125 if (--ring->queued == 0)
4126 iwm_clear_cmd_in_flight(sc);
4127
4128 splx(s);
4129 }
4130
4131 #if 0
4132 /*
4133 * necessary only for block ack mode
4134 */
4135 void
4136 iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
4137 uint16_t len)
4138 {
4139 struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
4140 uint16_t w_val;
4141
4142 scd_bc_tbl = sc->sched_dma.vaddr;
4143
4144 len += 8; /* magic numbers came naturally from paris */
4145 if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
4146 len = roundup(len, 4) / 4;
4147
4148 w_val = htole16(sta_id << 12 | len);
4149
4150 /* Update TX scheduler. */
4151 scd_bc_tbl[qid].tfd_offset[idx] = w_val;
4152 bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
4153 (char *)(void *)&scd_bc_tbl[qid].tfd_offset[idx] - (char *)(void *)sc->sched_dma.vaddr,
4154 sizeof(uint16_t), BUS_DMASYNC_PREWRITE);
4155
4156 /* Entries below IWM_TFD_QUEUE_SIZE_BC_DUP are mirrored past the end
 * of the table, apparently so the firmware still reads valid byte
 * counts after the queue index wraps. */
4157 if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
4158 scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
4159 bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
4160 (char *)(void *)&scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] -
4161 (char *)(void *)sc->sched_dma.vaddr,
4162 sizeof(uint16_t), BUS_DMASYNC_PREWRITE);
4163 }
4164 }
4165 #endif
4166
4167 /*
4168 * Fill in various bits for management frames, and leave them
4169 * unfilled for data frames (firmware takes care of that).
4170 * Return the selected TX rate.
4171 */
4172 static const struct iwm_rate *
4173 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
4174 struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
4175 {
4176 struct ieee80211com *ic = &sc->sc_ic;
4177 struct ieee80211_node *ni = &in->in_ni;
4178 const struct iwm_rate *rinfo;
4179 int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4180 int ridx, rate_flags, i, ind;
4181 int nrates = ni->ni_rates.rs_nrates;
4182
4183 tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
4184 tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
4185
4186 if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
4187 type != IEEE80211_FC0_TYPE_DATA) {
4188 /* for non-data, use the lowest supported rate */
4189 ridx = (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) ?
4190 IWM_RIDX_OFDM : IWM_RIDX_CCK;
4191 tx->data_retry_limit = IWM_MGMT_DFAULT_RETRY_LIMIT;
4192 #ifndef IEEE80211_NO_HT
4193 } else if (ic->ic_fixed_mcs != -1) {
4194 ridx = sc->sc_fixed_ridx;
4195 #endif
4196 } else if (ic->ic_fixed_rate != -1) {
4197 ridx = sc->sc_fixed_ridx;
4198 } else {
4199 /* for data frames, use RS table */
4200 tx->initial_rate_index = 0;
4201 tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
4202 DPRINTFN(12, ("start with txrate %d\n",
4203 tx->initial_rate_index));
4204 #ifndef IEEE80211_NO_HT
4205 if (ni->ni_flags & IEEE80211_NODE_HT) {
4206 ridx = iwm_mcs2ridx[ni->ni_txmcs];
4207 return &iwm_rates[ridx];
4208 }
4209 #endif
4210 ridx = (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) ?
4211 IWM_RIDX_OFDM : IWM_RIDX_CCK;
4212 for (i = 0; i < nrates; i++) {
4213 if (iwm_rates[i].rate == (ni->ni_txrate &
4214 IEEE80211_RATE_VAL)) {
4215 ridx = i;
4216 break;
4217 }
4218 }
4219 return &iwm_rates[ridx];
4220 }
4221
4222 rinfo = &iwm_rates[ridx];
4223 for (i = 0, ind = sc->sc_mgmt_last_antenna;
4224 i < IWM_RATE_MCS_ANT_NUM; i++) {
4225 ind = (ind + 1) % IWM_RATE_MCS_ANT_NUM;
4226 if (iwm_fw_valid_tx_ant(sc) & (1 << ind)) {
4227 sc->sc_mgmt_last_antenna = ind;
4228 break;
4229 }
4230 }
4231 rate_flags = (1 << sc->sc_mgmt_last_antenna) << IWM_RATE_MCS_ANT_POS;
4232 if (IWM_RIDX_IS_CCK(ridx))
4233 rate_flags |= IWM_RATE_MCS_CCK_MSK;
4234 #ifndef IEEE80211_NO_HT
4235 if ((ni->ni_flags & IEEE80211_NODE_HT) &&
4236 rinfo->ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP) {
4237 rate_flags |= IWM_RATE_MCS_HT_MSK;
4238 tx->rate_n_flags = htole32(rate_flags | rinfo->ht_plcp);
4239 } else
4240 #endif
4241 tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
4242
4243 return rinfo;
4244 }
4245
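/*
 * Transmit a frame. The TX descriptor is filled with 2 + nsegs
 * transfer buffers: TB0 maps the first TB0_SIZE bytes of the TX
 * command, TB1 the remainder of the command plus the (padded) 802.11
 * header, and the remaining TBs the DMA segments of the payload.
 */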
4246 #define TB0_SIZE 16
4247 static int
4248 iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
4249 {
4250 struct ieee80211com *ic = &sc->sc_ic;
4251 struct iwm_node *in = (struct iwm_node *)ni;
4252 struct iwm_tx_ring *ring;
4253 struct iwm_tx_data *data;
4254 struct iwm_tfd *desc;
4255 struct iwm_device_cmd *cmd;
4256 struct iwm_tx_cmd *tx;
4257 struct ieee80211_frame *wh;
4258 struct ieee80211_key *k = NULL;
4259 struct mbuf *m1;
4260 const struct iwm_rate *rinfo;
4261 uint32_t flags;
4262 u_int hdrlen;
4263 bus_dma_segment_t *seg;
4264 uint8_t tid, type;
4265 int i, totlen, err, pad;
4266
4267 wh = mtod(m, struct ieee80211_frame *);
4268 hdrlen = ieee80211_anyhdrsize(wh);
4269 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4270
4271 tid = 0;
4272
4273 ring = &sc->txq[ac];
4274 desc = &ring->desc[ring->cur];
4275 memset(desc, 0, sizeof(*desc));
4276 data = &ring->data[ring->cur];
4277
4278 cmd = &ring->cmd[ring->cur];
4279 cmd->hdr.code = IWM_TX_CMD;
4280 cmd->hdr.flags = 0;
4281 cmd->hdr.qid = ring->qid;
4282 cmd->hdr.idx = ring->cur;
4283
4284 tx = (void *)cmd->data;
4285 memset(tx, 0, sizeof(*tx));
4286
4287 rinfo = iwm_tx_fill_cmd(sc, in, wh, tx);
4288
4289 if (__predict_false(sc->sc_drvbpf != NULL)) {
4290 struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
4291
4292 tap->wt_flags = 0;
4293 tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
4294 tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
4295 #ifndef IEEE80211_NO_HT
4296 if ((ni->ni_flags & IEEE80211_NODE_HT) &&
4297 !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
4298 type == IEEE80211_FC0_TYPE_DATA &&
4299 rinfo->plcp == IWM_RATE_INVM_PLCP) {
4300 tap->wt_rate = (0x80 | rinfo->ht_plcp);
4301 } else
4302 #endif
4303 tap->wt_rate = rinfo->rate;
4304 tap->wt_hwqueue = ac;
4305 if (wh->i_fc[1] & IEEE80211_FC1_WEP)
4306 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
4307
4308 bpf_mtap2(sc->sc_drvbpf, tap, sc->sc_txtap_len, m);
4309 }
4310
4311 /* Encrypt the frame if need be. */
4312 if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
4313 k = ieee80211_crypto_encap(ic, ni, m);
4314 if (k == NULL) {
4315 m_freem(m);
4316 return ENOBUFS;
4317 }
4318 /* Packet header may have moved, reset our local pointer. */
4319 wh = mtod(m, struct ieee80211_frame *);
4320 }
4321 totlen = m->m_pkthdr.len;
4322
4323 flags = 0;
4324 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
4325 flags |= IWM_TX_CMD_FLG_ACK;
4326 }
4327
4328 if (type == IEEE80211_FC0_TYPE_DATA &&
4329 !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
4330 (totlen + IEEE80211_CRC_LEN > ic->ic_rtsthreshold ||
4331 (ic->ic_flags & IEEE80211_F_USEPROT)))
4332 flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
4333
4334 if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
4335 type != IEEE80211_FC0_TYPE_DATA)
4336 tx->sta_id = IWM_AUX_STA_ID;
4337 else
4338 tx->sta_id = IWM_STATION_ID;
4339
4340 if (type == IEEE80211_FC0_TYPE_MGT) {
4341 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4342
4343 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
4344 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
4345 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
4346 else
4347 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
4348 } else {
4349 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
4350 }
4351
4352 if (hdrlen & 3) {
4353 /* First segment length must be a multiple of 4. */
4354 flags |= IWM_TX_CMD_FLG_MH_PAD;
4355 pad = 4 - (hdrlen & 3);
4356 } else
4357 pad = 0;
4358
4359 tx->driver_txop = 0;
4360 tx->next_frame_len = 0;
4361
4362 tx->len = htole16(totlen);
4363 tx->tid_tspec = tid;
4364 tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
4365
4366 /* Set physical address of "scratch area". */
4367 tx->dram_lsb_ptr = htole32(data->scratch_paddr);
4368 tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
4369
4370 /* Copy 802.11 header in TX command. */
4371 memcpy(tx + 1, wh, hdrlen);
4372
4373 flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;
4374
4375 tx->sec_ctl = 0;
4376 tx->tx_flags |= htole32(flags);
4377
4378 /* Trim 802.11 header. */
4379 m_adj(m, hdrlen);
4380
4381 err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
4382 BUS_DMA_NOWAIT | BUS_DMA_WRITE);
4383 if (err) {
4384 if (err != EFBIG) {
4385 aprint_error_dev(sc->sc_dev,
4386 "can't map mbuf (error %d)\n", err);
4387 m_freem(m);
4388 return err;
4389 }
4390 /* Too many DMA segments, linearize mbuf. */
4391 MGETHDR(m1, M_DONTWAIT, MT_DATA);
4392 if (m1 == NULL) {
4393 m_freem(m);
4394 return ENOBUFS;
4395 }
4396 if (m->m_pkthdr.len > MHLEN) {
4397 MCLGET(m1, M_DONTWAIT);
4398 if (!(m1->m_flags & M_EXT)) {
4399 m_freem(m);
4400 m_freem(m1);
4401 return ENOBUFS;
4402 }
4403 }
4404 m_copydata(m, 0, m->m_pkthdr.len, mtod(m1, void *));
4405 m1->m_pkthdr.len = m1->m_len = m->m_pkthdr.len;
4406 m_freem(m);
4407 m = m1;
4408
4409 err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
4410 BUS_DMA_NOWAIT | BUS_DMA_WRITE);
4411 if (err) {
4412 aprint_error_dev(sc->sc_dev,
4413 "can't map mbuf (error %d)\n", err);
4414 m_freem(m);
4415 return err;
4416 }
4417 }
4418 data->m = m;
4419 data->in = in;
4420 data->done = 0;
4421
4422 DPRINTFN(8, ("sending txd %p, in %p\n", data, data->in));
4423 KASSERT(data->in != NULL);
4424
4425 DPRINTFN(8, ("sending data: qid=%d idx=%d len=%d nsegs=%d type=%d "
4426 "subtype=%x tx_flags=%08x init_rateidx=%08x rate_n_flags=%08x\n",
4427 ring->qid, ring->cur, totlen, data->map->dm_nsegs, type,
4428 (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) >> 4,
4429 le32toh(tx->tx_flags), le32toh(tx->initial_rate_index),
4430 le32toh(tx->rate_n_flags)));
4431
4432 /* Fill TX descriptor. */
4433 desc->num_tbs = 2 + data->map->dm_nsegs;
4434
4435 desc->tbs[0].lo = htole32(data->cmd_paddr);
4436 desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
4437 (TB0_SIZE << 4);
4438 desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
4439 desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
4440 ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
4441 + hdrlen + pad - TB0_SIZE) << 4);
4442
4443 /* Other DMA segments are for data payload. */
4444 seg = data->map->dm_segs;
4445 for (i = 0; i < data->map->dm_nsegs; i++, seg++) {
4446 desc->tbs[i+2].lo = htole32(seg->ds_addr);
4447 desc->tbs[i+2].hi_n_len =
4448 htole16(iwm_get_dma_hi_addr(seg->ds_addr))
4449 | ((seg->ds_len) << 4);
4450 }
4451
4452 bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize,
4453 BUS_DMASYNC_PREWRITE);
4454 bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
4455 (uint8_t *)cmd - (uint8_t *)ring->cmd, sizeof(*cmd),
4456 BUS_DMASYNC_PREWRITE);
4457 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
4458 (uint8_t *)desc - (uint8_t *)ring->desc, sizeof(*desc),
4459 BUS_DMASYNC_PREWRITE);
4460
4461 #if 0
4462 iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id,
4463 le16toh(tx->len));
4464 #endif
4465
4466 /* Kick TX ring. */
4467 ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
4468 IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
4469
4470 /* Mark TX ring as full if we reach a certain threshold. */
4471 if (++ring->queued > IWM_TX_RING_HIMARK) {
4472 sc->qfullmsk |= 1 << ring->qid;
4473 }
4474
4475 return 0;
4476 }
4477
4478 #if 0
4479 /* not necessary? */
4480 static int
4481 iwm_flush_tx_path(struct iwm_softc *sc, int tfd_msk, int sync)
4482 {
4483 struct iwm_tx_path_flush_cmd flush_cmd = {
4484 .queues_ctl = htole32(tfd_msk),
4485 .flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
4486 };
4487 int err;
4488
4489 err = iwm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, sync ? 0 : IWM_CMD_ASYNC,
4490 sizeof(flush_cmd), &flush_cmd);
4491 if (err)
4492 aprint_error_dev(sc->sc_dev, "Flushing tx queue failed: %d\n",
4493 err);
4494 return err;
4495 }
4496 #endif
4497
4498 static void
4499 iwm_led_enable(struct iwm_softc *sc)
4500 {
4501 IWM_WRITE(sc, IWM_CSR_LED_REG, IWM_CSR_LED_REG_TURN_ON);
4502 }
4503
4504 static void
4505 iwm_led_disable(struct iwm_softc *sc)
4506 {
4507 IWM_WRITE(sc, IWM_CSR_LED_REG, IWM_CSR_LED_REG_TURN_OFF);
4508 }
4509
4510 static int
4511 iwm_led_is_enabled(struct iwm_softc *sc)
4512 {
4513 return (IWM_READ(sc, IWM_CSR_LED_REG) == IWM_CSR_LED_REG_TURN_ON);
4514 }
4515
4516 static void
4517 iwm_led_blink_timeout(void *arg)
4518 {
4519 struct iwm_softc *sc = arg;
4520
4521 if (iwm_led_is_enabled(sc))
4522 iwm_led_disable(sc);
4523 else
4524 iwm_led_enable(sc);
4525
4526 callout_schedule(&sc->sc_led_blink_to, mstohz(200));
4527 }
4528
4529 static void
4530 iwm_led_blink_start(struct iwm_softc *sc)
4531 {
4532 callout_schedule(&sc->sc_led_blink_to, mstohz(200));
4533 }
4534
4535 static void
4536 iwm_led_blink_stop(struct iwm_softc *sc)
4537 {
4538 callout_stop(&sc->sc_led_blink_to);
4539 iwm_led_disable(sc);
4540 }
4541
4542 #define IWM_POWER_KEEP_ALIVE_PERIOD_SEC 25
4543
4544 static int
4545 iwm_beacon_filter_send_cmd(struct iwm_softc *sc,
4546 struct iwm_beacon_filter_cmd *cmd)
4547 {
4548 return iwm_send_cmd_pdu(sc, IWM_REPLY_BEACON_FILTERING_CMD,
4549 0, sizeof(struct iwm_beacon_filter_cmd), cmd);
4550 }
4551
4552 static void
4553 iwm_beacon_filter_set_cqm_params(struct iwm_softc *sc, struct iwm_node *in,
4554 struct iwm_beacon_filter_cmd *cmd)
4555 {
4556 cmd->ba_enable_beacon_abort = htole32(sc->sc_bf.ba_enabled);
4557 }
4558
4559 static int
4560 iwm_update_beacon_abort(struct iwm_softc *sc, struct iwm_node *in, int enable)
4561 {
4562 struct iwm_beacon_filter_cmd cmd = {
4563 IWM_BF_CMD_CONFIG_DEFAULTS,
4564 .bf_enable_beacon_filter = htole32(1),
4565 .ba_enable_beacon_abort = htole32(enable),
4566 };
4567
4568 if (!sc->sc_bf.bf_enabled)
4569 return 0;
4570
4571 sc->sc_bf.ba_enabled = enable;
4572 iwm_beacon_filter_set_cqm_params(sc, in, &cmd);
4573 return iwm_beacon_filter_send_cmd(sc, &cmd);
4574 }
4575
4576 static void
4577 iwm_power_build_cmd(struct iwm_softc *sc, struct iwm_node *in,
4578 struct iwm_mac_power_cmd *cmd)
4579 {
4580 struct ieee80211_node *ni = &in->in_ni;
4581 int dtim_period, dtim_msec, keep_alive;
4582
4583 cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
4584 in->in_color));
4585 if (ni->ni_dtim_period)
4586 dtim_period = ni->ni_dtim_period;
4587 else
4588 dtim_period = 1;
4589
4590 /*
4591 * Regardless of power management state the driver must set
4592 * keep alive period. FW will use it for sending keep alive NDPs
4593 * immediately after association. Ensure that the keep-alive period
4594 * is at least 3 * DTIM.
4595 */
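/*
 * Example, treating TUs as roughly milliseconds: with a beacon
 * interval of 100 and a DTIM period of 2, dtim_msec is 200, so
 * keep_alive becomes max(600, 25000) ms and is rounded up to
 * 25 seconds.
 */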
4596 dtim_msec = dtim_period * ni->ni_intval;
4597 keep_alive = MAX(3 * dtim_msec, 1000 * IWM_POWER_KEEP_ALIVE_PERIOD_SEC);
4598 keep_alive = roundup(keep_alive, 1000) / 1000;
4599 cmd->keep_alive_seconds = htole16(keep_alive);
4600
4601 #ifdef notyet
4602 cmd->flags = htole16(IWM_POWER_FLAGS_POWER_SAVE_ENA_MSK);
4603 cmd->rx_data_timeout = IWM_DEFAULT_PS_RX_DATA_TIMEOUT;
4604 cmd->tx_data_timeout = IWM_DEFAULT_PS_TX_DATA_TIMEOUT;
4605 #endif
4606 }
4607
4608 static int
4609 iwm_power_mac_update_mode(struct iwm_softc *sc, struct iwm_node *in)
4610 {
4611 int err;
4612 int ba_enable;
4613 struct iwm_mac_power_cmd cmd;
4614
4615 memset(&cmd, 0, sizeof(cmd));
4616
4617 iwm_power_build_cmd(sc, in, &cmd);
4618
4619 err = iwm_send_cmd_pdu(sc, IWM_MAC_PM_POWER_TABLE, 0,
4620 sizeof(cmd), &cmd);
4621 if (err)
4622 return err;
4623
4624 ba_enable = !!(cmd.flags &
4625 htole16(IWM_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
4626 return iwm_update_beacon_abort(sc, in, ba_enable);
4627 }
4628
4629 static int
4630 iwm_power_update_device(struct iwm_softc *sc)
4631 {
4632 struct iwm_device_power_cmd cmd = {
4633 #ifdef notyet
4634 .flags = htole16(IWM_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK),
4635 #else
4636 .flags = 0,
4637 #endif
4638 };
4639
4640 if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
4641 return 0;
4642
4643 cmd.flags |= htole16(IWM_DEVICE_POWER_FLAGS_CAM_MSK);
4644 DPRINTF(("Sending device power command with flags = 0x%X\n",
4645 cmd.flags));
4646
4647 return iwm_send_cmd_pdu(sc, IWM_POWER_TABLE_CMD, 0, sizeof(cmd), &cmd);
4648 }
4649
4650 #ifdef notyet
4651 static int
4652 iwm_enable_beacon_filter(struct iwm_softc *sc, struct iwm_node *in)
4653 {
4654 struct iwm_beacon_filter_cmd cmd = {
4655 IWM_BF_CMD_CONFIG_DEFAULTS,
4656 .bf_enable_beacon_filter = htole32(1),
4657 };
4658 int err;
4659
4660 iwm_beacon_filter_set_cqm_params(sc, in, &cmd);
4661 err = iwm_beacon_filter_send_cmd(sc, &cmd);
4662
4663 if (err == 0)
4664 sc->sc_bf.bf_enabled = 1;
4665
4666 return err;
4667 }
4668 #endif
4669
4670 static int
4671 iwm_disable_beacon_filter(struct iwm_softc *sc)
4672 {
4673 struct iwm_beacon_filter_cmd cmd;
4674 int err;
4675
4676 memset(&cmd, 0, sizeof(cmd));
4677 if ((sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_BF_UPDATED) == 0)
4678 return 0;
4679
4680 err = iwm_beacon_filter_send_cmd(sc, &cmd);
4681 if (err == 0)
4682 sc->sc_bf.bf_enabled = 0;
4683
4684 return err;
4685 }
4686
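/*
 * Add the BSS station entry to the firmware's station table, or
 * modify an existing entry when update is set. TX starts out
 * disabled on all TIDs, and the mask bits for FAT and MIMO are set
 * while the corresponding flags stay clear, disabling both.
 */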
4687 static int
4688 iwm_add_sta_cmd(struct iwm_softc *sc, struct iwm_node *in, int update)
4689 {
4690 struct iwm_add_sta_cmd_v7 add_sta_cmd;
4691 int err;
4692 uint32_t status;
4693
4694 memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
4695
4696 add_sta_cmd.sta_id = IWM_STATION_ID;
4697 add_sta_cmd.mac_id_n_color
4698 = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
4699 if (!update) {
4700 int ac;
4701 for (ac = 0; ac < WME_NUM_AC; ac++) {
4702 add_sta_cmd.tfd_queue_msk |=
4703 htole32(__BIT(iwm_ac_to_tx_fifo[ac]));
4704 }
4705 IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
4706 }
4707 add_sta_cmd.add_modify = update ? 1 : 0;
4708 add_sta_cmd.station_flags_msk
4709 |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
4710 add_sta_cmd.tid_disable_tx = htole16(0xffff);
4711 if (update)
4712 add_sta_cmd.modify_mask |= (IWM_STA_MODIFY_TID_DISABLE_TX);
4713
4714 #ifndef IEEE80211_NO_HT
4715 if (in->in_ni.ni_flags & IEEE80211_NODE_HT) {
4716 add_sta_cmd.station_flags_msk
4717 |= htole32(IWM_STA_FLG_MAX_AGG_SIZE_MSK |
4718 IWM_STA_FLG_AGG_MPDU_DENS_MSK);
4719
4720 add_sta_cmd.station_flags
4721 |= htole32(IWM_STA_FLG_MAX_AGG_SIZE_64K);
4722 switch (sc->sc_ic.ic_ampdu_params & IEEE80211_AMPDU_PARAM_SS) {
4723 case IEEE80211_AMPDU_PARAM_SS_2:
4724 add_sta_cmd.station_flags
4725 |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_2US);
4726 break;
4727 case IEEE80211_AMPDU_PARAM_SS_4:
4728 add_sta_cmd.station_flags
4729 |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_4US);
4730 break;
4731 case IEEE80211_AMPDU_PARAM_SS_8:
4732 add_sta_cmd.station_flags
4733 |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_8US);
4734 break;
4735 case IEEE80211_AMPDU_PARAM_SS_16:
4736 add_sta_cmd.station_flags
4737 |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_16US);
4738 break;
4739 default:
4740 break;
4741 }
4742 }
4743 #endif
4744
4745 status = IWM_ADD_STA_SUCCESS;
4746 err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(add_sta_cmd),
4747 &add_sta_cmd, &status);
4748 if (err == 0 && status != IWM_ADD_STA_SUCCESS)
4749 err = EIO;
4750
4751 return err;
4752 }
4753
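/*
 * Add the auxiliary station the firmware uses for frames not tied to
 * a real BSS, e.g. probe requests during scans. It gets its own
 * queue on the multicast FIFO and TX disabled on all TIDs.
 */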
4754 static int
4755 iwm_add_aux_sta(struct iwm_softc *sc)
4756 {
4757 struct iwm_add_sta_cmd_v7 cmd;
4758 int err;
4759 uint32_t status;
4760
4761 err = iwm_enable_txq(sc, 0, IWM_AUX_QUEUE, IWM_TX_FIFO_MCAST);
4762 if (err)
4763 return err;
4764
4765 memset(&cmd, 0, sizeof(cmd));
4766 cmd.sta_id = IWM_AUX_STA_ID;
4767 cmd.mac_id_n_color =
4768 htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_MAC_INDEX_AUX, 0));
4769 cmd.tfd_queue_msk = htole32(1 << IWM_AUX_QUEUE);
4770 cmd.tid_disable_tx = htole16(0xffff);
4771
4772 status = IWM_ADD_STA_SUCCESS;
4773 err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(cmd), &cmd,
4774 &status);
4775 if (err == 0 && status != IWM_ADD_STA_SUCCESS)
4776 err = EIO;
4777
4778 return err;
4779 }
4780
4781 #define IWM_PLCP_QUIET_THRESH 1
4782 #define IWM_ACTIVE_QUIET_TIME 10
4783 #define LONG_OUT_TIME_PERIOD 600
4784 #define SHORT_OUT_TIME_PERIOD 200
4785 #define SUSPEND_TIME_PERIOD 100
4786
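/*
 * Build the RX chain selection word used while scanning: mark all
 * valid RX antennas and force their selection for both MIMO and
 * non-MIMO reception.
 */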
4787 static uint16_t
4788 iwm_scan_rx_chain(struct iwm_softc *sc)
4789 {
4790 uint16_t rx_chain;
4791 uint8_t rx_ant;
4792
4793 rx_ant = iwm_fw_valid_rx_ant(sc);
4794 rx_chain = rx_ant << IWM_PHY_RX_CHAIN_VALID_POS;
4795 rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
4796 rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_SEL_POS;
4797 rx_chain |= 0x1 << IWM_PHY_RX_CHAIN_DRIVER_FORCE_POS;
4798 return htole16(rx_chain);
4799 }
4800
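/*
 * Select the TX rate for probe requests: 1 Mbps CCK on 2 GHz unless
 * CCK is disallowed, 6 Mbps OFDM otherwise, with the TX antenna
 * rotated round-robin over the antennas the firmware reports valid.
 */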
4801 static uint32_t
4802 iwm_scan_rate_n_flags(struct iwm_softc *sc, int flags, int no_cck)
4803 {
4804 uint32_t tx_ant;
4805 int i, ind;
4806
4807 for (i = 0, ind = sc->sc_scan_last_antenna;
4808 i < IWM_RATE_MCS_ANT_NUM; i++) {
4809 ind = (ind + 1) % IWM_RATE_MCS_ANT_NUM;
4810 if (iwm_fw_valid_tx_ant(sc) & (1 << ind)) {
4811 sc->sc_scan_last_antenna = ind;
4812 break;
4813 }
4814 }
4815 tx_ant = (1 << sc->sc_scan_last_antenna) << IWM_RATE_MCS_ANT_POS;
4816
4817 if ((flags & IEEE80211_CHAN_2GHZ) && !no_cck)
4818 return htole32(IWM_RATE_1M_PLCP | IWM_RATE_MCS_CCK_MSK |
4819 tx_ant);
4820 else
4821 return htole32(IWM_RATE_6M_PLCP | tx_ant);
4822 }
4823
4824 #ifdef notyet
4825 /*
4826 * If req->n_ssids > 0, it means we should do an active scan.
4827 * In case of active scan w/o directed scan, we receive a zero-length SSID
4828 * just to notify that this scan is active and not passive.
4829 * In order to notify the FW of the number of SSIDs we wish to scan (including
4830 * the zero-length one), we need to set the corresponding bits in chan->type,
4831 * one for each SSID, and set the active bit (first). The first SSID is
4832 * already included in the probe template, so we need to set only
4833 * req->n_ssids - 1 bits in addition to the first bit.
4834 */
4835 static uint16_t
4836 iwm_get_active_dwell(struct iwm_softc *sc, int flags, int n_ssids)
4837 {
4838 if (flags & IEEE80211_CHAN_2GHZ)
4839 return 30 + 3 * (n_ssids + 1);
4840 return 20 + 2 * (n_ssids + 1);
4841 }
4842
4843 static uint16_t
4844 iwm_get_passive_dwell(struct iwm_softc *sc, int flags)
4845 {
4846 return (flags & IEEE80211_CHAN_2GHZ) ? 100 + 20 : 100 + 10;
4847 }
4848 #endif
4849
4850 static uint8_t
4851 iwm_lmac_scan_fill_channels(struct iwm_softc *sc,
4852 struct iwm_scan_channel_cfg_lmac *chan, int n_ssids)
4853 {
4854 struct ieee80211com *ic = &sc->sc_ic;
4855 struct ieee80211_channel *c;
4856 uint8_t nchan;
4857
4858 for (nchan = 0, c = &ic->ic_channels[1];
4859 c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
4860 nchan < sc->sc_capa_n_scan_channels;
4861 c++) {
4862 if (c->ic_flags == 0)
4863 continue;
4864
4865 chan->channel_num = htole16(ieee80211_mhz2ieee(c->ic_freq, 0));
4866 chan->iter_count = htole16(1);
4867 chan->iter_interval = htole32(0);
4868 chan->flags = htole32(IWM_UNIFIED_SCAN_CHANNEL_PARTIAL);
4869 chan->flags |= htole32(IWM_SCAN_CHANNEL_NSSIDS(n_ssids));
4870 if (!IEEE80211_IS_CHAN_PASSIVE(c) && n_ssids != 0)
4871 chan->flags |= htole32(IWM_SCAN_CHANNEL_TYPE_ACTIVE);
4872 chan++;
4873 nchan++;
4874 }
4875
4876 return nchan;
4877 }
4878
4879 static uint8_t
4880 iwm_umac_scan_fill_channels(struct iwm_softc *sc,
4881 struct iwm_scan_channel_cfg_umac *chan, int n_ssids)
4882 {
4883 struct ieee80211com *ic = &sc->sc_ic;
4884 struct ieee80211_channel *c;
4885 uint8_t nchan;
4886
4887 for (nchan = 0, c = &ic->ic_channels[1];
4888 c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
4889 nchan < sc->sc_capa_n_scan_channels;
4890 c++) {
4891 if (c->ic_flags == 0)
4892 continue;
4893
4894 chan->channel_num = ieee80211_mhz2ieee(c->ic_freq, 0);
4895 chan->iter_count = 1;
4896 chan->iter_interval = htole16(0);
4897 chan->flags = htole32(IWM_SCAN_CHANNEL_UMAC_NSSIDS(n_ssids));
4898 chan++;
4899 nchan++;
4900 }
4901
4902 return nchan;
4903 }
4904
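/*
 * Construct the probe request template for firmware-driven scans.
 * The frame is built once; mac_header, band_data[] and common_data
 * record (offset, length) pairs so the firmware can splice in the
 * MAC header, the per-band rate IEs, and the common IEs as
 * appropriate for each channel it visits.
 */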
4905 static int
4906 iwm_fill_probe_req(struct iwm_softc *sc, struct iwm_scan_probe_req *preq)
4907 {
4908 struct ieee80211com *ic = &sc->sc_ic;
4909 struct ieee80211_frame *wh = (struct ieee80211_frame *)preq->buf;
4910 struct ieee80211_rateset *rs;
4911 size_t remain = sizeof(preq->buf);
4912 uint8_t *frm, *pos;
4913
4914 memset(preq, 0, sizeof(*preq));
4915
4916 if (remain < sizeof(*wh) + 2 + ic->ic_des_esslen)
4917 return ENOBUFS;
4918
4919 /*
4920 * Build a probe request frame. Most of the following code is a
4921 * copy & paste of what is done in net80211.
4922 */
4923 wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
4924 IEEE80211_FC0_SUBTYPE_PROBE_REQ;
4925 wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
4926 IEEE80211_ADDR_COPY(wh->i_addr1, etherbroadcastaddr);
4927 IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_myaddr);
4928 IEEE80211_ADDR_COPY(wh->i_addr3, etherbroadcastaddr);
4929 *(uint16_t *)&wh->i_dur[0] = 0; /* filled by HW */
4930 *(uint16_t *)&wh->i_seq[0] = 0; /* filled by HW */
4931
4932 frm = (uint8_t *)(wh + 1);
4933 frm = ieee80211_add_ssid(frm, ic->ic_des_essid, ic->ic_des_esslen);
4934
4935 /* Tell the firmware where the MAC header is. */
4936 preq->mac_header.offset = 0;
4937 preq->mac_header.len = htole16(frm - (uint8_t *)wh);
4938 remain -= frm - (uint8_t *)wh;
4939
4940 /* Fill in 2GHz IEs and tell firmware where they are. */
4941 rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
4942 if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
4943 if (remain < 4 + rs->rs_nrates)
4944 return ENOBUFS;
4945 } else if (remain < 2 + rs->rs_nrates)
4946 return ENOBUFS;
4947 preq->band_data[0].offset = htole16(frm - (uint8_t *)wh);
4948 pos = frm;
4949 frm = ieee80211_add_rates(frm, rs);
4950 if (rs->rs_nrates > IEEE80211_RATE_SIZE)
4951 frm = ieee80211_add_xrates(frm, rs);
4952 preq->band_data[0].len = htole16(frm - pos);
4953 remain -= frm - pos;
4954
4955 if (isset(sc->sc_enabled_capa,
4956 IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)) {
4957 if (remain < 3)
4958 return ENOBUFS;
4959 *frm++ = IEEE80211_ELEMID_DSPARMS;
4960 *frm++ = 1;
4961 *frm++ = 0;
4962 remain -= 3;
4963 }
4964
4965 if (sc->sc_nvm.sku_cap_band_52GHz_enable) {
4966 /* Fill in 5GHz IEs. */
4967 rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
4968 if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
4969 if (remain < 4 + rs->rs_nrates)
4970 return ENOBUFS;
4971 } else if (remain < 2 + rs->rs_nrates)
4972 return ENOBUFS;
4973 preq->band_data[1].offset = htole16(frm - (uint8_t *)wh);
4974 pos = frm;
4975 frm = ieee80211_add_rates(frm, rs);
4976 if (rs->rs_nrates > IEEE80211_RATE_SIZE)
4977 frm = ieee80211_add_xrates(frm, rs);
4978 preq->band_data[1].len = htole16(frm - pos);
4979 remain -= frm - pos;
4980 }
4981
4982 #ifndef IEEE80211_NO_HT
4983 /* Send 11n IEs on both 2GHz and 5GHz bands. */
4984 preq->common_data.offset = htole16(frm - (uint8_t *)wh);
4985 pos = frm;
4986 if (ic->ic_flags & IEEE80211_F_HTON) {
4987 if (remain < 28)
4988 return ENOBUFS;
4989 frm = ieee80211_add_htcaps(frm, ic);
4990 /* XXX add WME info? */
4991 }
4992 #endif
4993
4994 preq->common_data.len = htole16(frm - pos);
4995
4996 return 0;
4997 }
4998
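/*
 * Start a one-shot scan using the older LMAC scan API. The request
 * is built in temporary storage because its size, which depends on
 * how many scan channels the firmware supports, can exceed the
 * static command buffer.
 */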
4999 static int
5000 iwm_lmac_scan(struct iwm_softc *sc)
5001 {
5002 struct ieee80211com *ic = &sc->sc_ic;
5003 struct iwm_host_cmd hcmd = {
5004 .id = IWM_SCAN_OFFLOAD_REQUEST_CMD,
5005 .len = { 0, },
5006 .data = { NULL, },
5007 .flags = 0,
5008 };
5009 struct iwm_scan_req_lmac *req;
5010 size_t req_len;
5011 int err;
5012
5013 DPRINTF(("%s: %s\n", DEVNAME(sc), __func__));
5014
5015 req_len = sizeof(struct iwm_scan_req_lmac) +
5016 (sizeof(struct iwm_scan_channel_cfg_lmac) *
5017 sc->sc_capa_n_scan_channels) + sizeof(struct iwm_scan_probe_req);
5018 if (req_len > IWM_MAX_CMD_PAYLOAD_SIZE)
5019 return ENOMEM;
5020 req = kmem_zalloc(req_len, KM_SLEEP);
5021 if (req == NULL)
5022 return ENOMEM;
5023
5024 hcmd.len[0] = (uint16_t)req_len;
5025 hcmd.data[0] = (void *)req;
5026
5027 /* These timings correspond to iwlwifi's UNASSOC scan. */
5028 req->active_dwell = 10;
5029 req->passive_dwell = 110;
5030 req->fragmented_dwell = 44;
5031 req->extended_dwell = 90;
5032 req->max_out_time = 0;
5033 req->suspend_time = 0;
5034
5035 req->scan_prio = htole32(IWM_SCAN_PRIORITY_HIGH);
5036 req->rx_chain_select = iwm_scan_rx_chain(sc);
5037 req->iter_num = htole32(1);
5038 req->delay = 0;
5039
5040 req->scan_flags = htole32(IWM_LMAC_SCAN_FLAG_PASS_ALL |
5041 IWM_LMAC_SCAN_FLAG_ITER_COMPLETE |
5042 IWM_LMAC_SCAN_FLAG_EXTENDED_DWELL);
5043 if (ic->ic_des_esslen == 0)
5044 req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAG_PASSIVE);
5045 else
5046 req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAG_PRE_CONNECTION);
5047 if (isset(sc->sc_enabled_capa,
5048 IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
5049 req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAGS_RRM_ENABLED);
5050
5051 req->flags = htole32(IWM_PHY_BAND_24);
5052 if (sc->sc_nvm.sku_cap_band_52GHz_enable)
5053 req->flags |= htole32(IWM_PHY_BAND_5);
5054 req->filter_flags =
5055 htole32(IWM_MAC_FILTER_ACCEPT_GRP | IWM_MAC_FILTER_IN_BEACON);
5056
5057 /* Tx flags 2 GHz. */
5058 req->tx_cmd[0].tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
5059 IWM_TX_CMD_FLG_BT_DIS);
5060 req->tx_cmd[0].rate_n_flags =
5061 iwm_scan_rate_n_flags(sc, IEEE80211_CHAN_2GHZ, 1/*XXX*/);
5062 req->tx_cmd[0].sta_id = IWM_AUX_STA_ID;
5063
5064 /* Tx flags 5 GHz. */
5065 req->tx_cmd[1].tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
5066 IWM_TX_CMD_FLG_BT_DIS);
5067 req->tx_cmd[1].rate_n_flags =
5068 iwm_scan_rate_n_flags(sc, IEEE80211_CHAN_5GHZ, 1/*XXX*/);
5069 req->tx_cmd[1].sta_id = IWM_AUX_STA_ID;
5070
5071 /* Check if we're doing an active directed scan. */
5072 if (ic->ic_des_esslen != 0) {
5073 req->direct_scan[0].id = IEEE80211_ELEMID_SSID;
5074 req->direct_scan[0].len = ic->ic_des_esslen;
5075 memcpy(req->direct_scan[0].ssid, ic->ic_des_essid,
5076 ic->ic_des_esslen);
5077 }
5078
5079 req->n_channels = iwm_lmac_scan_fill_channels(sc,
5080 (struct iwm_scan_channel_cfg_lmac *)req->data,
5081 ic->ic_des_esslen != 0);
5082
5083 err = iwm_fill_probe_req(sc,
5084 (struct iwm_scan_probe_req *)(req->data +
5085 (sizeof(struct iwm_scan_channel_cfg_lmac) *
5086 sc->sc_capa_n_scan_channels)));
5087 if (err) {
5088 kmem_free(req, req_len);
5089 return err;
5090 }
5091
5092 /* Specify the scan plan: We'll do one iteration. */
5093 req->schedule[0].iterations = 1;
5094 req->schedule[0].full_scan_mul = 1;
5095
5096 /* Disable EBS. */
5097 req->channel_opt[0].non_ebs_ratio = 1;
5098 req->channel_opt[1].non_ebs_ratio = 1;
5099
5100 err = iwm_send_cmd(sc, &hcmd);
5101 kmem_free(req, req_len);
5102 return err;
5103 }
5104
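/*
 * Send the global UMAC scan configuration: chain masks, legacy rate
 * set, dwell times, our MAC address and the list of channels to
 * consider. This is expected to be in place before individual UMAC
 * scan requests are issued.
 */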
5105 static int
5106 iwm_config_umac_scan(struct iwm_softc *sc)
5107 {
5108 struct ieee80211com *ic = &sc->sc_ic;
5109 struct iwm_scan_config *scan_config;
5110 int err, nchan;
5111 size_t cmd_size;
5112 struct ieee80211_channel *c;
5113 struct iwm_host_cmd hcmd = {
5114 .id = iwm_cmd_id(IWM_SCAN_CFG_CMD, IWM_ALWAYS_LONG_GROUP, 0),
5115 .flags = 0,
5116 };
5117 static const uint32_t rates = (IWM_SCAN_CONFIG_RATE_1M |
5118 IWM_SCAN_CONFIG_RATE_2M | IWM_SCAN_CONFIG_RATE_5M |
5119 IWM_SCAN_CONFIG_RATE_11M | IWM_SCAN_CONFIG_RATE_6M |
5120 IWM_SCAN_CONFIG_RATE_9M | IWM_SCAN_CONFIG_RATE_12M |
5121 IWM_SCAN_CONFIG_RATE_18M | IWM_SCAN_CONFIG_RATE_24M |
5122 IWM_SCAN_CONFIG_RATE_36M | IWM_SCAN_CONFIG_RATE_48M |
5123 IWM_SCAN_CONFIG_RATE_54M);
5124
5125 cmd_size = sizeof(*scan_config) + sc->sc_capa_n_scan_channels;
5126
5127 scan_config = kmem_zalloc(cmd_size, KM_SLEEP);
5128 if (scan_config == NULL)
5129 return ENOMEM;
5130
5131 scan_config->tx_chains = htole32(iwm_fw_valid_tx_ant(sc));
5132 scan_config->rx_chains = htole32(iwm_fw_valid_rx_ant(sc));
5133 scan_config->legacy_rates = htole32(rates |
5134 IWM_SCAN_CONFIG_SUPPORTED_RATE(rates));
5135
5136 /* These timings correspond to iwlwifi's UNASSOC scan. */
5137 scan_config->dwell_active = 10;
5138 scan_config->dwell_passive = 110;
5139 scan_config->dwell_fragmented = 44;
5140 scan_config->dwell_extended = 90;
5141 scan_config->out_of_channel_time = htole32(0);
5142 scan_config->suspend_time = htole32(0);
5143
5144 IEEE80211_ADDR_COPY(scan_config->mac_addr, sc->sc_ic.ic_myaddr);
5145
5146 scan_config->bcast_sta_id = IWM_AUX_STA_ID;
5147 scan_config->channel_flags = IWM_CHANNEL_FLAG_EBS |
5148 IWM_CHANNEL_FLAG_ACCURATE_EBS | IWM_CHANNEL_FLAG_EBS_ADD |
5149 IWM_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE;
5150
5151 for (c = &ic->ic_channels[1], nchan = 0;
5152 c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
5153 nchan < sc->sc_capa_n_scan_channels; c++) {
5154 if (c->ic_flags == 0)
5155 continue;
5156 scan_config->channel_array[nchan++] =
5157 ieee80211_mhz2ieee(c->ic_freq, 0);
5158 }
5159
5160 scan_config->flags = htole32(IWM_SCAN_CONFIG_FLAG_ACTIVATE |
5161 IWM_SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
5162 IWM_SCAN_CONFIG_FLAG_SET_TX_CHAINS |
5163 IWM_SCAN_CONFIG_FLAG_SET_RX_CHAINS |
5164 IWM_SCAN_CONFIG_FLAG_SET_AUX_STA_ID |
5165 IWM_SCAN_CONFIG_FLAG_SET_ALL_TIMES |
5166 IWM_SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
5167 IWM_SCAN_CONFIG_FLAG_SET_MAC_ADDR |
5168 IWM_SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS|
5169 IWM_SCAN_CONFIG_N_CHANNELS(nchan) |
5170 IWM_SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED);
5171
5172 hcmd.data[0] = scan_config;
5173 hcmd.len[0] = cmd_size;
5174
5175 err = iwm_send_cmd(sc, &hcmd);
5176 kmem_free(scan_config, cmd_size);
5177 return err;
5178 }
5179
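/*
 * Start a one-shot scan using the newer UMAC scan API. The channel
 * configuration follows the request header; SSIDs, the scan plan and
 * the probe template live in the tail structure after it.
 */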
5180 static int
5181 iwm_umac_scan(struct iwm_softc *sc)
5182 {
5183 struct ieee80211com *ic = &sc->sc_ic;
5184 struct iwm_host_cmd hcmd = {
5185 .id = iwm_cmd_id(IWM_SCAN_REQ_UMAC, IWM_ALWAYS_LONG_GROUP, 0),
5186 .len = { 0, },
5187 .data = { NULL, },
5188 .flags = 0,
5189 };
5190 struct iwm_scan_req_umac *req;
5191 struct iwm_scan_req_umac_tail *tail;
5192 size_t req_len;
5193 int err;
5194
5195 DPRINTF(("%s: %s\n", DEVNAME(sc), __func__));
5196
5197 req_len = sizeof(struct iwm_scan_req_umac) +
5198 (sizeof(struct iwm_scan_channel_cfg_umac) *
5199 sc->sc_capa_n_scan_channels) +
5200 sizeof(struct iwm_scan_req_umac_tail);
5201 if (req_len > IWM_MAX_CMD_PAYLOAD_SIZE)
5202 return ENOMEM;
5203 req = kmem_zalloc(req_len, KM_SLEEP);
5204 if (req == NULL)
5205 return ENOMEM;
5206
5207 hcmd.len[0] = (uint16_t)req_len;
5208 hcmd.data[0] = (void *)req;
5209
5210 /* These timings correspond to iwlwifi's UNASSOC scan. */
5211 req->active_dwell = 10;
5212 req->passive_dwell = 110;
5213 req->fragmented_dwell = 44;
5214 req->extended_dwell = 90;
5215 req->max_out_time = 0;
5216 req->suspend_time = 0;
5217
5218 req->scan_priority = htole32(IWM_SCAN_PRIORITY_HIGH);
5219 req->ooc_priority = htole32(IWM_SCAN_PRIORITY_HIGH);
5220
5221 req->n_channels = iwm_umac_scan_fill_channels(sc,
5222 (struct iwm_scan_channel_cfg_umac *)req->data,
5223 ic->ic_des_esslen != 0);
5224
5225 req->general_flags = htole32(IWM_UMAC_SCAN_GEN_FLAGS_PASS_ALL |
5226 IWM_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE |
5227 IWM_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL);
5228
5229 tail = (struct iwm_scan_req_umac_tail *)(req->data +
5230 sizeof(struct iwm_scan_channel_cfg_umac) *
5231 sc->sc_capa_n_scan_channels);
5232
5233 /* Check if we're doing an active directed scan. */
5234 if (ic->ic_des_esslen != 0) {
5235 tail->direct_scan[0].id = IEEE80211_ELEMID_SSID;
5236 tail->direct_scan[0].len = ic->ic_des_esslen;
5237 memcpy(tail->direct_scan[0].ssid, ic->ic_des_essid,
5238 ic->ic_des_esslen);
5239 req->general_flags |=
5240 htole32(IWM_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT);
5241 } else
5242 req->general_flags |= htole32(IWM_UMAC_SCAN_GEN_FLAGS_PASSIVE);
5243
5244 if (isset(sc->sc_enabled_capa,
5245 IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
5246 req->general_flags |=
5247 htole32(IWM_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED);
5248
5249 err = iwm_fill_probe_req(sc, &tail->preq);
5250 if (err) {
5251 kmem_free(req, req_len);
5252 return err;
5253 }
5254
5255 /* Specify the scan plan: We'll do one iteration. */
5256 tail->schedule[0].interval = 0;
5257 tail->schedule[0].iter_count = 1;
5258
5259 err = iwm_send_cmd(sc, &hcmd);
5260 kmem_free(req, req_len);
5261 return err;
5262 }
5263
5264 static uint8_t
5265 iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
5266 {
5267 int i;
5268 uint8_t rval;
5269
5270 for (i = 0; i < rs->rs_nrates; i++) {
5271 rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
5272 if (rval == iwm_rates[ridx].rate)
5273 return rs->rs_rates[i];
5274 }
5275 return 0;
5276 }
5277
5278 static void
5279 iwm_ack_rates(struct iwm_softc *sc, struct iwm_node *in, int *cck_rates,
5280 int *ofdm_rates)
5281 {
5282 struct ieee80211_node *ni = &in->in_ni;
5283 struct ieee80211_rateset *rs = &ni->ni_rates;
5284 int lowest_present_ofdm = -1;
5285 int lowest_present_cck = -1;
5286 uint8_t cck = 0;
5287 uint8_t ofdm = 0;
5288 int i;
5289
5290 if (ni->ni_chan == IEEE80211_CHAN_ANYC ||
5291 IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) {
5292 for (i = IWM_FIRST_CCK_RATE; i < IWM_FIRST_OFDM_RATE; i++) {
5293 if ((iwm_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
5294 continue;
5295 cck |= (1 << i);
5296 if (lowest_present_cck == -1 || lowest_present_cck > i)
5297 lowest_present_cck = i;
5298 }
5299 }
5300 for (i = IWM_FIRST_OFDM_RATE; i <= IWM_LAST_NON_HT_RATE; i++) {
5301 if ((iwm_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
5302 continue;
5303 ofdm |= (1 << (i - IWM_FIRST_OFDM_RATE));
5304 if (lowest_present_ofdm == -1 || lowest_present_ofdm > i)
5305 lowest_present_ofdm = i;
5306 }
5307
5308 /*
5309 * Now we've got the basic rates as bitmaps in the ofdm and cck
5310 * variables. This isn't sufficient though, as there might not
5311 * be all the right rates in the bitmap. E.g. if the only basic
5312 * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
5313 * and 6 Mbps because the 802.11-2007 standard says in 9.6:
5314 *
5315 * [...] a STA responding to a received frame shall transmit
5316 * its Control Response frame [...] at the highest rate in the
5317 * BSSBasicRateSet parameter that is less than or equal to the
5318 * rate of the immediately previous frame in the frame exchange
5319 * sequence ([...]) and that is of the same modulation class
5320 * ([...]) as the received frame. If no rate contained in the
5321 * BSSBasicRateSet parameter meets these conditions, then the
5322 * control frame sent in response to a received frame shall be
5323 * transmitted at the highest mandatory rate of the PHY that is
5324 * less than or equal to the rate of the received frame, and
5325 * that is of the same modulation class as the received frame.
5326 *
5327 * As a consequence, we need to add all mandatory rates that are
5328 * lower than all of the basic rates to these bitmaps.
5329 */
5330
5331 if (IWM_RATE_24M_INDEX < lowest_present_ofdm)
5332 ofdm |= IWM_RATE_BIT_MSK(24) >> IWM_FIRST_OFDM_RATE;
5333 if (IWM_RATE_12M_INDEX < lowest_present_ofdm)
5334 ofdm |= IWM_RATE_BIT_MSK(12) >> IWM_FIRST_OFDM_RATE;
5335 /* 6M already there or needed so always add */
5336 ofdm |= IWM_RATE_BIT_MSK(6) >> IWM_FIRST_OFDM_RATE;
5337
5338 /*
5339 * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
5340 * Note, however:
5341 * - if no CCK rates are basic, it must be ERP since there must
5342 * be some basic rates at all, so they're OFDM => ERP PHY
5343 * (or we're in 5 GHz, and the cck bitmap will never be used)
5344 * - if 11M is a basic rate, it must be ERP as well, so add 5.5M
5345 * - if 5.5M is basic, 1M and 2M are mandatory
5346 * - if 2M is basic, 1M is mandatory
5347 * - if 1M is basic, that's the only valid ACK rate.
5348 * As a consequence, it's not as complicated as it sounds, just add
5349 * any lower rates to the ACK rate bitmap.
5350 */
5351 if (IWM_RATE_11M_INDEX < lowest_present_cck)
5352 cck |= IWM_RATE_BIT_MSK(11) >> IWM_FIRST_CCK_RATE;
5353 if (IWM_RATE_5M_INDEX < lowest_present_cck)
5354 cck |= IWM_RATE_BIT_MSK(5) >> IWM_FIRST_CCK_RATE;
5355 if (IWM_RATE_2M_INDEX < lowest_present_cck)
5356 cck |= IWM_RATE_BIT_MSK(2) >> IWM_FIRST_CCK_RATE;
5357 /* 1M already there or needed so always add */
5358 cck |= IWM_RATE_BIT_MSK(1) >> IWM_FIRST_CCK_RATE;
5359
5360 *cck_rates = cck;
5361 *ofdm_rates = ofdm;
5362 }
5363
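/*
 * Fill the fields of the MAC context command which are common to the
 * add and modify actions: addresses, basic ACK rates for both
 * modulation classes, slot time and preamble flags, and per-AC EDCA
 * parameters (note the CWmin/CWmax conversion from exponent form via
 * IWM_EXP2).
 */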
5364 static void
5365 iwm_mac_ctxt_cmd_common(struct iwm_softc *sc, struct iwm_node *in,
5366 struct iwm_mac_ctx_cmd *cmd, uint32_t action, int assoc)
5367 {
5368 #define IWM_EXP2(x) ((1 << (x)) - 1) /* CWmin = 2^ECWmin - 1 */
5369 struct ieee80211com *ic = &sc->sc_ic;
5370 struct ieee80211_node *ni = ic->ic_bss;
5371 int cck_ack_rates, ofdm_ack_rates;
5372 int i;
5373
5374 cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
5375 in->in_color));
5376 cmd->action = htole32(action);
5377
5378 cmd->mac_type = htole32(IWM_FW_MAC_TYPE_BSS_STA);
5379 cmd->tsf_id = htole32(IWM_TSF_ID_A);
5380
5381 IEEE80211_ADDR_COPY(cmd->node_addr, ic->ic_myaddr);
5382 IEEE80211_ADDR_COPY(cmd->bssid_addr, ni->ni_bssid);
5383
5384 iwm_ack_rates(sc, in, &cck_ack_rates, &ofdm_ack_rates);
5385 cmd->cck_rates = htole32(cck_ack_rates);
5386 cmd->ofdm_rates = htole32(ofdm_ack_rates);
5387
5388 cmd->cck_short_preamble
5389 = htole32((ic->ic_flags & IEEE80211_F_SHPREAMBLE)
5390 ? IWM_MAC_FLG_SHORT_PREAMBLE : 0);
5391 cmd->short_slot
5392 = htole32((ic->ic_flags & IEEE80211_F_SHSLOT)
5393 ? IWM_MAC_FLG_SHORT_SLOT : 0);
5394
5395 for (i = 0; i < WME_NUM_AC; i++) {
5396 struct wmeParams *wmep = &ic->ic_wme.wme_params[i];
5397 int txf = iwm_ac_to_tx_fifo[i];
5398
5399 cmd->ac[txf].cw_min = htole16(IWM_EXP2(wmep->wmep_logcwmin));
5400 cmd->ac[txf].cw_max = htole16(IWM_EXP2(wmep->wmep_logcwmax));
5401 cmd->ac[txf].aifsn = wmep->wmep_aifsn;
5402 cmd->ac[txf].fifos_mask = (1 << txf);
5403 cmd->ac[txf].edca_txop = htole16(wmep->wmep_txopLimit * 32);
5404 }
5405 if (ni->ni_flags & IEEE80211_NODE_QOS)
5406 cmd->qos_flags |= htole32(IWM_MAC_QOS_FLG_UPDATE_EDCA);
5407
5408 #ifndef IEEE80211_NO_HT
5409 if (ni->ni_flags & IEEE80211_NODE_HT) {
5410 enum ieee80211_htprot htprot =
5411 (ni->ni_htop1 & IEEE80211_HTOP1_PROT_MASK);
5412 switch (htprot) {
5413 case IEEE80211_HTPROT_NONE:
5414 break;
5415 case IEEE80211_HTPROT_NONMEMBER:
5416 case IEEE80211_HTPROT_NONHT_MIXED:
5417 cmd->protection_flags |=
5418 htole32(IWM_MAC_PROT_FLG_HT_PROT);
break;
5419 case IEEE80211_HTPROT_20MHZ:
5420 cmd->protection_flags |=
5421 htole32(IWM_MAC_PROT_FLG_HT_PROT |
5422 IWM_MAC_PROT_FLG_FAT_PROT);
5423 break;
5424 default:
5425 break;
5426 }
5427
5428 cmd->qos_flags |= htole32(IWM_MAC_QOS_FLG_TGN);
5429 }
5430 #endif
5431
5432 if (ic->ic_flags & IEEE80211_F_USEPROT)
5433 cmd->protection_flags |= htole32(IWM_MAC_PROT_FLG_TGG_PROTECT);
5434
5435 cmd->filter_flags = htole32(IWM_MAC_FILTER_ACCEPT_GRP);
5436 #undef IWM_EXP2
5437 }
5438
5439 static void
5440 iwm_mac_ctxt_cmd_fill_sta(struct iwm_softc *sc, struct iwm_node *in,
5441 struct iwm_mac_data_sta *sta, int assoc)
5442 {
5443 struct ieee80211_node *ni = &in->in_ni;
5444 uint32_t dtim_off;
5445 uint64_t tsf;
5446
5447 dtim_off = ni->ni_dtim_count * ni->ni_intval * IEEE80211_DUR_TU;
5448 tsf = le64toh(ni->ni_tstamp.tsf);
5449
5450 sta->is_assoc = htole32(assoc);
5451 sta->dtim_time = htole32(ni->ni_rstamp + dtim_off);
5452 sta->dtim_tsf = htole64(tsf + dtim_off);
5453 sta->bi = htole32(ni->ni_intval);
5454 sta->bi_reciprocal = htole32(iwm_reciprocal(ni->ni_intval));
5455 sta->dtim_interval = htole32(ni->ni_intval * ni->ni_dtim_period);
5456 sta->dtim_reciprocal = htole32(iwm_reciprocal(sta->dtim_interval));
5457 sta->listen_interval = htole32(10);
5458 sta->assoc_id = htole32(ni->ni_associd);
5459 sta->assoc_beacon_arrive_time = htole32(ni->ni_rstamp);
5460 }
5461
5462 static int
5463 iwm_mac_ctxt_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action,
5464 int assoc)
5465 {
5466 struct ieee80211_node *ni = &in->in_ni;
5467 struct iwm_mac_ctx_cmd cmd;
5468
5469 memset(&cmd, 0, sizeof(cmd));
5470
5471 iwm_mac_ctxt_cmd_common(sc, in, &cmd, action, assoc);
5472
5473 /* Allow beacons to pass through as long as we are not associated or we
5474 * do not have DTIM period information. */
5475 if (!assoc || !ni->ni_associd || !ni->ni_dtim_period)
5476 cmd.filter_flags |= htole32(IWM_MAC_FILTER_IN_BEACON);
5477 else
5478 iwm_mac_ctxt_cmd_fill_sta(sc, in, &cmd.sta, assoc);
5479
5480 return iwm_send_cmd_pdu(sc, IWM_MAC_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
5481 }
5482
5483 #define IWM_MISSED_BEACONS_THRESHOLD 8
5484
5485 static void
5486 iwm_rx_missed_beacons_notif(struct iwm_softc *sc,
5487 struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
5488 {
5489 struct iwm_missed_beacons_notif *mb = (void *)pkt->data;
5490
5491 DPRINTF(("missed bcn mac_id=%u, consecutive=%u (%u, %u, %u)\n",
5492 le32toh(mb->mac_id),
5493 le32toh(mb->consec_missed_beacons),
5494 le32toh(mb->consec_missed_beacons_since_last_rx),
5495 le32toh(mb->num_recvd_beacons),
5496 le32toh(mb->num_expected_beacons)));
5497
5498 /*
5499 * TODO: the threshold should be adjusted based on latency conditions,
5500 * and/or in case of a CS flow on one of the other AP vifs.
5501 */
5502 if (le32toh(mb->consec_missed_beacons_since_last_rx) >
5503 IWM_MISSED_BEACONS_THRESHOLD)
5504 ieee80211_beacon_miss(&sc->sc_ic);
5505 }
5506
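/*
 * Distribute the firmware's IWM_MAX_QUOTA scheduling fragments over
 * all active bindings; any remainder goes to the first binding. For
 * the usual client case with a single active MAC this simply hands
 * the whole session to that binding.
 */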
5507 static int
5508 iwm_update_quotas(struct iwm_softc *sc, struct iwm_node *in)
5509 {
5510 struct iwm_time_quota_cmd cmd;
5511 int i, idx, num_active_macs, quota, quota_rem;
5512 int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
5513 int n_ifs[IWM_MAX_BINDINGS] = {0, };
5514 uint16_t id;
5515
5516 memset(&cmd, 0, sizeof(cmd));
5517
5518 /* currently, PHY ID == binding ID */
5519 if (in) {
5520 id = in->in_phyctxt->id;
5521 KASSERT(id < IWM_MAX_BINDINGS);
5522 colors[id] = in->in_phyctxt->color;
5523
5524 		n_ifs[id] = 1;
5526 }
5527
5528 /*
5529 * The FW's scheduling session consists of
5530 * IWM_MAX_QUOTA fragments. Divide these fragments
5531 * equally between all the bindings that require quota
5532 */
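	/*
	 * For example, with two active bindings each would get
	 * IWM_MAX_QUOTA / 2 fragments; any division remainder is
	 * credited to the first binding further below.
	 */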
5533 num_active_macs = 0;
5534 for (i = 0; i < IWM_MAX_BINDINGS; i++) {
5535 cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
5536 num_active_macs += n_ifs[i];
5537 }
5538
5539 quota = 0;
5540 quota_rem = 0;
5541 if (num_active_macs) {
5542 quota = IWM_MAX_QUOTA / num_active_macs;
5543 quota_rem = IWM_MAX_QUOTA % num_active_macs;
5544 }
5545
5546 for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
5547 if (colors[i] < 0)
5548 continue;
5549
5550 cmd.quotas[idx].id_and_color =
5551 htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
5552
5553 if (n_ifs[i] <= 0) {
5554 cmd.quotas[idx].quota = htole32(0);
5555 cmd.quotas[idx].max_duration = htole32(0);
5556 } else {
5557 cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
5558 cmd.quotas[idx].max_duration = htole32(0);
5559 }
5560 idx++;
5561 }
5562
5563 /* Give the remainder of the session to the first binding */
5564 cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
5565
5566 return iwm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, 0, sizeof(cmd), &cmd);
5567 }
5568
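/*
 * Prepare the firmware for authentication: configure the smart FIFO
 * and multicast filter, point PHY context 0 at the target channel,
 * add the MAC context, binding and station, and finally start a time
 * event so the firmware stays on-channel while the exchange completes.
 */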
5569 static int
5570 iwm_auth(struct iwm_softc *sc)
5571 {
5572 struct ieee80211com *ic = &sc->sc_ic;
5573 struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
5574 uint32_t duration;
5575 int err;
5576
5577 err = iwm_sf_config(sc, IWM_SF_FULL_ON);
5578 if (err)
5579 return err;
5580
5581 err = iwm_allow_mcast(sc);
5582 if (err)
5583 return err;
5584
5585 sc->sc_phyctxt[0].channel = in->in_ni.ni_chan;
5586 err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[0], 1, 1,
5587 IWM_FW_CTXT_ACTION_MODIFY, 0);
5588 if (err)
5589 return err;
5590 in->in_phyctxt = &sc->sc_phyctxt[0];
5591
5592 err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD, 0);
5593 if (err) {
5594 aprint_error_dev(sc->sc_dev,
5595 "could not add MAC context (error %d)\n", err);
5596 return err;
5597 }
5598
5599 err = iwm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD);
5600 if (err)
5601 return err;
5602
5603 err = iwm_add_sta_cmd(sc, in, 0);
5604 if (err)
5605 return err;
5606
5607 err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 0);
5608 if (err) {
5609 aprint_error_dev(sc->sc_dev, "failed to update MAC\n");
5610 return err;
5611 }
5612
5613 /*
5614 * Prevent the FW from wandering off channel during association
5615 * by "protecting" the session with a time event.
5616 */
5617 if (in->in_ni.ni_intval)
5618 duration = in->in_ni.ni_intval * 2;
5619 else
5620 duration = IEEE80211_DUR_TU;
5621 iwm_protect_session(sc, in, duration, in->in_ni.ni_intval / 2);
5622 DELAY(100);
5623
5624 return 0;
5625 }
5626
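/*
 * Complete association: re-issue the station command (final argument
 * nonzero) to update the existing entry rather than add a new one.
 */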
5627 static int
5628 iwm_assoc(struct iwm_softc *sc)
5629 {
5630 struct ieee80211com *ic = &sc->sc_ic;
5631 struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
5632 int err;
5633
5634 err = iwm_add_sta_cmd(sc, in, 1);
5635 if (err)
5636 return err;
5637
5638 return 0;
5639 }
5640
5641 static struct ieee80211_node *
5642 iwm_node_alloc(struct ieee80211_node_table *nt)
5643 {
5644 return malloc(sizeof(struct iwm_node), M_80211_NODE, M_NOWAIT | M_ZERO);
5645 }
5646
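/*
 * Runs every 500ms while associated: let AMRR re-evaluate the TX rate
 * from recent ACK statistics and, for HT, kick the LQ update softint.
 */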
5647 static void
5648 iwm_calib_timeout(void *arg)
5649 {
5650 struct iwm_softc *sc = arg;
5651 struct ieee80211com *ic = &sc->sc_ic;
5652 struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
5653 #ifndef IEEE80211_NO_HT
5654 struct ieee80211_node *ni = &in->in_ni;
5655 int otxrate;
5656 #endif
5657 int s;
5658
5659 s = splnet();
5660 if ((ic->ic_fixed_rate == -1
5661 #ifndef IEEE80211_NO_HT
5662 || ic->ic_fixed_mcs == -1
5663 #endif
5664 ) &&
5665 ic->ic_opmode == IEEE80211_M_STA && ic->ic_bss) {
5666 #ifndef IEEE80211_NO_HT
5667 if (ni->ni_flags & IEEE80211_NODE_HT)
5668 otxrate = ni->ni_txmcs;
5669 else
5670 otxrate = ni->ni_txrate;
5671 #endif
5672 ieee80211_amrr_choose(&sc->sc_amrr, &in->in_ni, &in->in_amn);
5673
5674 #ifndef IEEE80211_NO_HT
5675 /*
5676 * If AMRR has chosen a new TX rate we must update
5677 		 * the firmware's LQ rate table from process context.
5678 */
5679 if ((ni->ni_flags & IEEE80211_NODE_HT) &&
5680 otxrate != ni->ni_txmcs)
5681 softint_schedule(sc->setrates_task);
5682 else if (otxrate != ni->ni_txrate)
5683 softint_schedule(sc->setrates_task);
5684 #endif
5685 }
5686 splx(s);
5687
5688 callout_schedule(&sc->sc_calib_to, mstohz(500));
5689 }
5690
5691 #ifndef IEEE80211_NO_HT
5692 static void
5693 iwm_setrates_task(void *arg)
5694 {
5695 struct iwm_softc *sc = arg;
5696 struct ieee80211com *ic = &sc->sc_ic;
5697 struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
5698
5699 /* Update rates table based on new TX rate determined by AMRR. */
5700 iwm_setrates(in);
5701 }
5702
5703 static int
5704 iwm_setrates(struct iwm_node *in)
5705 {
5706 struct ieee80211_node *ni = &in->in_ni;
5707 struct ieee80211com *ic = ni->ni_ic;
5708 struct iwm_softc *sc = IC2IFP(ic)->if_softc;
5709 struct iwm_lq_cmd *lq = &in->in_lq;
5710 struct ieee80211_rateset *rs = &ni->ni_rates;
5711 int i, j, ridx, ridx_min, tab = 0;
5712 #ifndef IEEE80211_NO_HT
5713 int sgi_ok;
5714 #endif
5715 struct iwm_host_cmd cmd = {
5716 .id = IWM_LQ_CMD,
5717 .len = { sizeof(in->in_lq), },
5718 };
5719
5720 memset(lq, 0, sizeof(*lq));
5721 lq->sta_id = IWM_STATION_ID;
5722
5723 if (ic->ic_flags & IEEE80211_F_USEPROT)
5724 lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;
5725
5726 #ifndef IEEE80211_NO_HT
5727 sgi_ok = ((ni->ni_flags & IEEE80211_NODE_HT) &&
5728 (ni->ni_htcaps & IEEE80211_HTCAP_SGI20));
5729 #endif
5730
5731
5732 /*
5733 * Fill the LQ rate selection table with legacy and/or HT rates
5734 * in descending order, i.e. with the node's current TX rate first.
5735 * In cases where throughput of an HT rate corresponds to a legacy
5736 * rate it makes no sense to add both. We rely on the fact that
5737 * iwm_rates is laid out such that equivalent HT/legacy rates share
5738 * the same IWM_RATE_*_INDEX value. Also, rates not applicable to
5739 * legacy/HT are assumed to be marked with an 'invalid' PLCP value.
5740 */
5741 j = 0;
5742 ridx_min = (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) ?
5743 IWM_RIDX_OFDM : IWM_RIDX_CCK;
5744 for (ridx = IWM_RIDX_MAX; ridx >= ridx_min; ridx--) {
5745 if (j >= __arraycount(lq->rs_table))
5746 break;
5747 tab = 0;
5748 #ifndef IEEE80211_NO_HT
5749 if ((ni->ni_flags & IEEE80211_NODE_HT) &&
5750 iwm_rates[ridx].ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP) {
5751 for (i = ni->ni_txmcs; i >= 0; i--) {
5752 if (isclr(ni->ni_rxmcs, i))
5753 continue;
5754 if (ridx == iwm_mcs2ridx[i]) {
5755 tab = iwm_rates[ridx].ht_plcp;
5756 tab |= IWM_RATE_MCS_HT_MSK;
5757 if (sgi_ok)
5758 tab |= IWM_RATE_MCS_SGI_MSK;
5759 break;
5760 }
5761 }
5762 }
5763 #endif
5764 if (tab == 0 && iwm_rates[ridx].plcp != IWM_RATE_INVM_PLCP) {
5765 for (i = ni->ni_txrate; i >= 0; i--) {
5766 if (iwm_rates[ridx].rate == (rs->rs_rates[i] &
5767 IEEE80211_RATE_VAL)) {
5768 tab = iwm_rates[ridx].plcp;
5769 break;
5770 }
5771 }
5772 }
5773
5774 if (tab == 0)
5775 continue;
5776
5777 tab |= 1 << IWM_RATE_MCS_ANT_POS;
5778 if (IWM_RIDX_IS_CCK(ridx))
5779 tab |= IWM_RATE_MCS_CCK_MSK;
5780 DPRINTFN(2, ("station rate %d %x\n", i, tab));
5781 lq->rs_table[j++] = htole32(tab);
5782 }
5783
5784 /* Fill the rest with the lowest possible rate */
5785 i = j > 0 ? j - 1 : 0;
5786 while (j < __arraycount(lq->rs_table))
5787 lq->rs_table[j++] = lq->rs_table[i];
5788
5789 lq->single_stream_ant_msk = IWM_ANT_A;
5790 lq->dual_stream_ant_msk = IWM_ANT_AB;
5791
5792 lq->agg_time_limit = htole16(4000); /* 4ms */
5793 lq->agg_disable_start_th = 3;
5794 #ifdef notyet
5795 lq->agg_frame_cnt_limit = 0x3f;
5796 #else
5797 lq->agg_frame_cnt_limit = 1; /* tx agg disabled */
5798 #endif
5799
5800 cmd.data[0] = &in->in_lq;
5801 return iwm_send_cmd(sc, &cmd);
5802 }
5803 #endif
5804
5805 static int
5806 iwm_media_change(struct ifnet *ifp)
5807 {
5808 struct iwm_softc *sc = ifp->if_softc;
5809 struct ieee80211com *ic = &sc->sc_ic;
5810 uint8_t rate, ridx;
5811 int err;
5812
5813 err = ieee80211_media_change(ifp);
5814 if (err != ENETRESET)
5815 return err;
5816
5817 #ifndef IEEE80211_NO_HT
5818 if (ic->ic_fixed_mcs != -1)
5819 sc->sc_fixed_ridx = iwm_mcs2ridx[ic->ic_fixed_mcs];
5820 else
5821 #endif
5822 if (ic->ic_fixed_rate != -1) {
5823 rate = ic->ic_sup_rates[ic->ic_curmode].
5824 rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL;
5825 /* Map 802.11 rate to HW rate index. */
5826 for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
5827 if (iwm_rates[ridx].rate == rate)
5828 break;
5829 sc->sc_fixed_ridx = ridx;
5830 }
5831
5832 if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
5833 (IFF_UP | IFF_RUNNING)) {
5834 iwm_stop(ifp, 0);
5835 err = iwm_init(ifp);
5836 }
5837 return err;
5838 }
5839
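/*
 * Deferred half of the 802.11 state machine; iwm_newstate() queues the
 * transition here because the firmware commands involved may sleep.
 */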
5840 static void
5841 iwm_newstate_cb(struct work *wk, void *v)
5842 {
5843 struct iwm_softc *sc = v;
5844 struct ieee80211com *ic = &sc->sc_ic;
5845 struct iwm_newstate_state *iwmns = (struct iwm_newstate_state *)wk;
5846 enum ieee80211_state nstate = iwmns->ns_nstate;
5847 enum ieee80211_state ostate = ic->ic_state;
5848 int generation = iwmns->ns_generation;
5849 struct iwm_node *in;
5850 int arg = iwmns->ns_arg;
5851 int err;
5852
5853 kmem_free(iwmns, sizeof(*iwmns));
5854
5855 DPRINTF(("Prepare to switch state %d->%d\n", ostate, nstate));
5856 if (sc->sc_generation != generation) {
5857 DPRINTF(("newstate_cb: someone pulled the plug meanwhile\n"));
5858 if (nstate == IEEE80211_S_INIT) {
5859 DPRINTF(("newstate_cb: nstate == IEEE80211_S_INIT: calling sc_newstate()\n"));
5860 sc->sc_newstate(ic, nstate, arg);
5861 }
5862 return;
5863 }
5864
5865 DPRINTF(("switching state %s->%s\n", ieee80211_state_name[ostate],
5866 ieee80211_state_name[nstate]));
5867
5868 if (ostate == IEEE80211_S_SCAN && nstate != ostate)
5869 iwm_led_blink_stop(sc);
5870
5871 if (ostate == IEEE80211_S_RUN && nstate != ostate)
5872 iwm_disable_beacon_filter(sc);
5873
5874 /* Reset the device if moving out of AUTH, ASSOC, or RUN. */
5875 /* XXX Is there a way to switch states without a full reset? */
5876 if (ostate > IEEE80211_S_SCAN && nstate < ostate) {
5877 iwm_stop_device(sc);
5878 iwm_init_hw(sc);
5879
5880 /*
5881 		 * Upon receiving a deauth frame from the AP, the net80211
5882 		 * stack puts the driver into AUTH state.  This will fail with
5883 		 * this driver, so bring the FSM from RUN to SCAN in this case.
5884 */
5885 if (nstate == IEEE80211_S_SCAN ||
5886 nstate == IEEE80211_S_AUTH ||
5887 nstate == IEEE80211_S_ASSOC) {
5888 DPRINTF(("Force transition to INIT; MGT=%d\n", arg));
5889 /* Always pass arg as -1 since we can't Tx right now. */
5890 sc->sc_newstate(ic, IEEE80211_S_INIT, -1);
5891 DPRINTF(("Going INIT->SCAN\n"));
5892 nstate = IEEE80211_S_SCAN;
5893 }
5894 }
5895
5896 switch (nstate) {
5897 case IEEE80211_S_INIT:
5898 break;
5899
5900 case IEEE80211_S_SCAN:
5901 if (ostate == nstate &&
5902 ISSET(sc->sc_flags, IWM_FLAG_SCANNING))
5903 return;
5904 if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
5905 err = iwm_umac_scan(sc);
5906 else
5907 err = iwm_lmac_scan(sc);
5908 if (err) {
5909 DPRINTF(("%s: could not initiate scan\n", DEVNAME(sc)));
5910 return;
5911 }
5912 SET(sc->sc_flags, IWM_FLAG_SCANNING);
5913 ic->ic_state = nstate;
5914 iwm_led_blink_start(sc);
5915 return;
5916
5917 case IEEE80211_S_AUTH:
5918 err = iwm_auth(sc);
5919 if (err) {
5920 DPRINTF(("%s: could not move to auth state: %d\n",
5921 DEVNAME(sc), err));
5922 return;
5923 }
5924 break;
5925
5926 case IEEE80211_S_ASSOC:
5927 err = iwm_assoc(sc);
5928 if (err) {
5929 DPRINTF(("%s: failed to associate: %d\n", DEVNAME(sc),
5930 err));
5931 return;
5932 }
5933 break;
5934
5935 case IEEE80211_S_RUN:
5936 in = (struct iwm_node *)ic->ic_bss;
5937
5938 /* We have now been assigned an associd by the AP. */
5939 err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 1);
5940 if (err) {
5941 aprint_error_dev(sc->sc_dev, "failed to update MAC\n");
5942 return;
5943 }
5944
5945 err = iwm_power_update_device(sc);
5946 if (err) {
5947 aprint_error_dev(sc->sc_dev,
5948 			    "could not send power command (error %d)\n", err);
5949 return;
5950 }
5951 #ifdef notyet
5952 /*
5953 * Disabled for now. Default beacon filter settings
5954 * prevent net80211 from getting ERP and HT protection
5955 * updates from beacons.
5956 */
5957 err = iwm_enable_beacon_filter(sc, in);
5958 if (err) {
5959 aprint_error_dev(sc->sc_dev,
5960 "could not enable beacon filter\n");
5961 return;
5962 }
5963 #endif
5964 err = iwm_power_mac_update_mode(sc, in);
5965 if (err) {
5966 aprint_error_dev(sc->sc_dev,
5967 "could not update MAC power (error %d)\n", err);
5968 return;
5969 }
5970
5971 err = iwm_update_quotas(sc, in);
5972 if (err) {
5973 aprint_error_dev(sc->sc_dev,
5974 "could not update quotas (error %d)\n", err);
5975 return;
5976 }
5977
5978 ieee80211_amrr_node_init(&sc->sc_amrr, &in->in_amn);
5979
5980 /* Start at lowest available bit-rate, AMRR will raise. */
5981 in->in_ni.ni_txrate = 0;
5982 #ifndef IEEE80211_NO_HT
5983 in->in_ni.ni_txmcs = 0;
5984 iwm_setrates(in);
5985 #endif
5986
5987 callout_schedule(&sc->sc_calib_to, mstohz(500));
5988 iwm_led_enable(sc);
5989 break;
5990
5991 default:
5992 break;
5993 }
5994
5995 sc->sc_newstate(ic, nstate, arg);
5996 }
5997
5998 static int
5999 iwm_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
6000 {
6001 struct iwm_newstate_state *iwmns;
6002 struct ifnet *ifp = IC2IFP(ic);
6003 struct iwm_softc *sc = ifp->if_softc;
6004
6005 callout_stop(&sc->sc_calib_to);
6006
6007 iwmns = kmem_intr_alloc(sizeof(*iwmns), KM_NOSLEEP);
6008 if (!iwmns) {
6009 DPRINTF(("%s: allocating state cb mem failed\n", DEVNAME(sc)));
6010 return ENOMEM;
6011 }
6012
6013 iwmns->ns_nstate = nstate;
6014 iwmns->ns_arg = arg;
6015 iwmns->ns_generation = sc->sc_generation;
6016
6017 workqueue_enqueue(sc->sc_nswq, &iwmns->ns_wk, NULL);
6018
6019 return 0;
6020 }
6021
6022 static void
6023 iwm_endscan(struct iwm_softc *sc)
6024 {
6025 struct ieee80211com *ic = &sc->sc_ic;
6026 int s;
6027
6028 DPRINTF(("%s: scan ended\n", DEVNAME(sc)));
6029
6030 s = splnet();
6031 if (ic->ic_state == IEEE80211_S_SCAN)
6032 ieee80211_end_scan(ic);
6033 splx(s);
6034 }
6035
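/*
 * "SF" refers to the device's smart FIFO, which buffers RX traffic to
 * let the host sleep longer; the tables below set its aging and idle
 * thresholds per traffic scenario (naming follows Linux iwlwifi).
 */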
6036 /*
6037 * Aging and idle timeouts for the different possible scenarios
6038 * in default configuration
6039 */
6040 static const uint32_t
6041 iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
6042 {
6043 htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
6044 htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
6045 },
6046 {
6047 htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF),
6048 htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)
6049 },
6050 {
6051 htole32(IWM_SF_MCAST_AGING_TIMER_DEF),
6052 htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)
6053 },
6054 {
6055 htole32(IWM_SF_BA_AGING_TIMER_DEF),
6056 htole32(IWM_SF_BA_IDLE_TIMER_DEF)
6057 },
6058 {
6059 htole32(IWM_SF_TX_RE_AGING_TIMER_DEF),
6060 htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)
6061 },
6062 };
6063
6064 /*
6065 * Aging and idle timeouts for the different possible scenarios
6066 * in single BSS MAC configuration.
6067 */
6068 static const uint32_t
6069 iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
6070 {
6071 htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER),
6072 htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)
6073 },
6074 {
6075 htole32(IWM_SF_AGG_UNICAST_AGING_TIMER),
6076 htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)
6077 },
6078 {
6079 htole32(IWM_SF_MCAST_AGING_TIMER),
6080 htole32(IWM_SF_MCAST_IDLE_TIMER)
6081 },
6082 {
6083 htole32(IWM_SF_BA_AGING_TIMER),
6084 htole32(IWM_SF_BA_IDLE_TIMER)
6085 },
6086 {
6087 htole32(IWM_SF_TX_RE_AGING_TIMER),
6088 htole32(IWM_SF_TX_RE_IDLE_TIMER)
6089 },
6090 };
6091
6092 static void
6093 iwm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
6094 struct ieee80211_node *ni)
6095 {
6096 int i, j, watermark;
6097
6098 sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN);
6099
6100 /*
6101 * If we are in association flow - check antenna configuration
6102 * capabilities of the AP station, and choose the watermark accordingly.
6103 */
6104 if (ni) {
6105 #ifndef IEEE80211_NO_HT
6106 if (ni->ni_flags & IEEE80211_NODE_HT) {
6107 #ifdef notyet
6108 if (ni->ni_rxmcs[2] != 0)
6109 watermark = IWM_SF_W_MARK_MIMO3;
6110 else if (ni->ni_rxmcs[1] != 0)
6111 watermark = IWM_SF_W_MARK_MIMO2;
6112 else
6113 #endif
6114 watermark = IWM_SF_W_MARK_SISO;
6115 } else
6116 #endif
6117 watermark = IWM_SF_W_MARK_LEGACY;
6118 	} else {
6119 		/* Default watermark value for unassociated mode. */
6120 		watermark = IWM_SF_W_MARK_MIMO2;
6121 }
6122 sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark);
6123
6124 for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) {
6125 for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) {
6126 sf_cmd->long_delay_timeouts[i][j] =
6127 htole32(IWM_SF_LONG_DELAY_AGING_TIMER);
6128 }
6129 }
6130
6131 if (ni) {
6132 memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
6133 sizeof(iwm_sf_full_timeout));
6134 } else {
6135 memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
6136 sizeof(iwm_sf_full_timeout_def));
6137 }
6138 }
6139
6140 static int
6141 iwm_sf_config(struct iwm_softc *sc, int new_state)
6142 {
6143 struct ieee80211com *ic = &sc->sc_ic;
6144 struct iwm_sf_cfg_cmd sf_cmd = {
6145 .state = htole32(IWM_SF_FULL_ON),
6146 };
6147
6148 if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
6149 sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF);
6150
6151 switch (new_state) {
6152 case IWM_SF_UNINIT:
6153 case IWM_SF_INIT_OFF:
6154 iwm_fill_sf_command(sc, &sf_cmd, NULL);
6155 break;
6156 case IWM_SF_FULL_ON:
6157 iwm_fill_sf_command(sc, &sf_cmd, ic->ic_bss);
6158 break;
6159 default:
6160 return EINVAL;
6161 }
6162
6163 return iwm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
6164 sizeof(sf_cmd), &sf_cmd);
6165 }
6166
6167 static int
6168 iwm_send_bt_init_conf(struct iwm_softc *sc)
6169 {
6170 struct iwm_bt_coex_cmd bt_cmd;
6171
6172 bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
6173 bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
6174
6175 return iwm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd), &bt_cmd);
6176 }
6177
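/*
 * LAR (location aware regulatory) lets the firmware apply per-country
 * regulatory limits based on an MCC (mobile country code) we supply.
 */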
6178 static bool
6179 iwm_is_lar_supported(struct iwm_softc *sc)
6180 {
6181 bool nvm_lar = sc->sc_nvm.lar_enabled;
6182 bool tlv_lar = isset(sc->sc_enabled_capa,
6183 IWM_UCODE_TLV_CAPA_LAR_SUPPORT);
6184
6185 if (iwm_lar_disable)
6186 return false;
6187
6188 /*
6189 * Enable LAR only if it is supported by the FW (TLV) &&
6190 * enabled in the NVM
6191 */
6192 if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
6193 return nvm_lar && tlv_lar;
6194 else
6195 return tlv_lar;
6196 }
6197
6198 static int
6199 iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
6200 {
6201 struct iwm_mcc_update_cmd mcc_cmd;
6202 struct iwm_host_cmd hcmd = {
6203 .id = IWM_MCC_UPDATE_CMD,
6204 .flags = IWM_CMD_WANT_SKB,
6205 .data = { &mcc_cmd },
6206 };
6207 int err;
6208 int resp_v2 = isset(sc->sc_enabled_capa,
6209 IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
6210
6211 if (!iwm_is_lar_supported(sc)) {
6212 DPRINTF(("%s: no LAR support\n", __func__));
6213 return 0;
6214 }
6215
6216 memset(&mcc_cmd, 0, sizeof(mcc_cmd));
6217 mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
6218 if ((sc->sc_ucode_api & IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
6219 isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC))
6220 mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
6221 else
6222 mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
6223
6224 if (resp_v2)
6225 hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
6226 else
6227 hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
6228
6229 err = iwm_send_cmd(sc, &hcmd);
6230 if (err)
6231 return err;
6232
6233 iwm_free_resp(sc, &hcmd);
6234
6235 return 0;
6236 }
6237
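/*
 * Thermal throttling: tell the firmware how long to back off between
 * transmissions when running hot.  A value of 0, as sent at init time,
 * disables the backoff.
 */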
6238 static void
6239 iwm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
6240 {
6241 struct iwm_host_cmd cmd = {
6242 .id = IWM_REPLY_THERMAL_MNG_BACKOFF,
6243 .len = { sizeof(uint32_t), },
6244 .data = { &backoff, },
6245 };
6246
6247 iwm_send_cmd(sc, &cmd);
6248 }
6249
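/*
 * Full firmware bring-up: run the INIT image once for calibration and
 * NVM access, restart with the regular runtime image, then push the
 * static configuration (BT coexistence, antennas, PHY database, aux
 * station, PHY contexts, power, regulatory, scan and Tx queues).
 */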
6250 static int
6251 iwm_init_hw(struct iwm_softc *sc)
6252 {
6253 struct ieee80211com *ic = &sc->sc_ic;
6254 int err, i, ac;
6255
6256 err = iwm_preinit(sc);
6257 if (err)
6258 return err;
6259
6260 err = iwm_start_hw(sc);
6261 if (err) {
6262 aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
6263 return err;
6264 }
6265
6266 err = iwm_run_init_mvm_ucode(sc, 0);
6267 if (err)
6268 return err;
6269
6270 /* Should stop and start HW since INIT image just loaded. */
6271 iwm_stop_device(sc);
6272 err = iwm_start_hw(sc);
6273 if (err) {
6274 aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
6275 return err;
6276 }
6277
6278 /* Restart, this time with the regular firmware */
6279 err = iwm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_REGULAR);
6280 if (err) {
6281 aprint_error_dev(sc->sc_dev,
6282 "could not load firmware (error %d)\n", err);
6283 goto err;
6284 }
6285
6286 err = iwm_send_bt_init_conf(sc);
6287 if (err) {
6288 aprint_error_dev(sc->sc_dev,
6289 "could not init bt coex (error %d)\n", err);
6290 goto err;
6291 }
6292
6293 err = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc));
6294 if (err) {
6295 aprint_error_dev(sc->sc_dev,
6296 "could not init tx ant config (error %d)\n", err);
6297 goto err;
6298 }
6299
6300 	/* Send phy db control command and then phy db calibration. */
6301 err = iwm_send_phy_db_data(sc);
6302 if (err) {
6303 aprint_error_dev(sc->sc_dev,
6304 "could not init phy db (error %d)\n", err);
6305 goto err;
6306 }
6307
6308 err = iwm_send_phy_cfg_cmd(sc);
6309 if (err) {
6310 aprint_error_dev(sc->sc_dev,
6311 "could not send phy config (error %d)\n", err);
6312 goto err;
6313 }
6314
6315 /* Add auxiliary station for scanning */
6316 err = iwm_add_aux_sta(sc);
6317 if (err) {
6318 aprint_error_dev(sc->sc_dev,
6319 "could not add aux station (error %d)\n", err);
6320 goto err;
6321 }
6322
6323 for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
6324 /*
6325 * The channel used here isn't relevant as it's
6326 * going to be overwritten in the other flows.
6327 * For now use the first channel we have.
6328 */
6329 sc->sc_phyctxt[i].channel = &ic->ic_channels[1];
6330 err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[i], 1, 1,
6331 IWM_FW_CTXT_ACTION_ADD, 0);
6332 if (err) {
6333 aprint_error_dev(sc->sc_dev,
6334 "could not add phy context %d (error %d)\n",
6335 i, err);
6336 goto err;
6337 }
6338 }
6339
6340 /* Initialize tx backoffs to the minimum. */
6341 if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
6342 iwm_tt_tx_backoff(sc, 0);
6343
6344 err = iwm_power_update_device(sc);
6345 if (err) {
6346 aprint_error_dev(sc->sc_dev,
6347 		    "could not send power command (error %d)\n", err);
6348 goto err;
6349 }
6350
6351 err = iwm_send_update_mcc_cmd(sc, iwm_default_mcc);
6352 if (err) {
6353 aprint_error_dev(sc->sc_dev,
6354 "could not init LAR (error %d)\n", err);
6355 goto err;
6356 }
6357
6358 if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
6359 err = iwm_config_umac_scan(sc);
6360 if (err) {
6361 aprint_error_dev(sc->sc_dev,
6362 "could not configure scan (error %d)\n", err);
6363 goto err;
6364 }
6365 }
6366
6367 for (ac = 0; ac < WME_NUM_AC; ac++) {
6368 err = iwm_enable_txq(sc, IWM_STATION_ID, ac,
6369 iwm_ac_to_tx_fifo[ac]);
6370 if (err) {
6371 aprint_error_dev(sc->sc_dev,
6372 "could not enable Tx queue %d (error %d)\n",
6373 			    ac, err);
6374 goto err;
6375 }
6376 }
6377
6378 err = iwm_disable_beacon_filter(sc);
6379 if (err) {
6380 aprint_error_dev(sc->sc_dev,
6381 "could not disable beacon filter (error %d)\n", err);
6382 goto err;
6383 }
6384
6385 return 0;
6386
6387 err:
6388 iwm_stop_device(sc);
6389 return err;
6390 }
6391
6392 /* Allow multicast from our BSSID. */
6393 static int
6394 iwm_allow_mcast(struct iwm_softc *sc)
6395 {
6396 struct ieee80211com *ic = &sc->sc_ic;
6397 struct ieee80211_node *ni = ic->ic_bss;
6398 struct iwm_mcast_filter_cmd *cmd;
6399 size_t size;
6400 int err;
6401
6402 size = roundup(sizeof(*cmd), 4);
6403 cmd = kmem_intr_zalloc(size, KM_NOSLEEP);
6404 if (cmd == NULL)
6405 return ENOMEM;
6406 cmd->filter_own = 1;
6407 cmd->port_id = 0;
6408 cmd->count = 0;
6409 cmd->pass_all = 1;
6410 IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
6411
6412 err = iwm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD, 0, size, cmd);
6413 kmem_intr_free(cmd, size);
6414 return err;
6415 }
6416
6417 static int
6418 iwm_init(struct ifnet *ifp)
6419 {
6420 struct iwm_softc *sc = ifp->if_softc;
6421 int err;
6422 int s;
6423
6424 if (ISSET(sc->sc_flags, IWM_FLAG_HW_INITED))
6425 return 0;
6426
6427 sc->sc_generation++;
6428 sc->sc_flags &= ~IWM_FLAG_STOPPED;
6429
6430 err = iwm_init_hw(sc);
6431 if (err) {
6432 iwm_stop(ifp, 1);
6433 return err;
6434 }
6435
6436 ifp->if_flags &= ~IFF_OACTIVE;
6437 ifp->if_flags |= IFF_RUNNING;
6438
6439 s = splnet();
6440 ieee80211_begin_scan(&sc->sc_ic, 0);
6441 splx(s);
6442
6443 SET(sc->sc_flags, IWM_FLAG_HW_INITED);
6444
6445 return 0;
6446 }
6447
6448 static void
6449 iwm_start(struct ifnet *ifp)
6450 {
6451 struct iwm_softc *sc = ifp->if_softc;
6452 struct ieee80211com *ic = &sc->sc_ic;
6453 struct ieee80211_node *ni;
6454 struct ether_header *eh;
6455 struct mbuf *m;
6456 int ac;
6457
6458 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
6459 return;
6460
6461 for (;;) {
6462 /* why isn't this done per-queue? */
6463 if (sc->qfullmsk != 0) {
6464 ifp->if_flags |= IFF_OACTIVE;
6465 break;
6466 }
6467
6468 /* need to send management frames even if we're not RUNning */
6469 IF_DEQUEUE(&ic->ic_mgtq, m);
6470 if (m) {
6471 ni = M_GETCTX(m, struct ieee80211_node *);
6472 M_CLEARCTX(m);
6473 ac = WME_AC_BE;
6474 goto sendit;
6475 }
6476 if (ic->ic_state != IEEE80211_S_RUN) {
6477 break;
6478 }
6479
6480 IFQ_DEQUEUE(&ifp->if_snd, m);
6481 if (m == NULL)
6482 break;
6483
6484 if (m->m_len < sizeof (*eh) &&
6485 (m = m_pullup(m, sizeof (*eh))) == NULL) {
6486 ifp->if_oerrors++;
6487 continue;
6488 }
6489
6490 eh = mtod(m, struct ether_header *);
6491 ni = ieee80211_find_txnode(ic, eh->ether_dhost);
6492 if (ni == NULL) {
6493 m_freem(m);
6494 ifp->if_oerrors++;
6495 continue;
6496 }
6497
6498 /* classify mbuf so we can find which tx ring to use */
6499 if (ieee80211_classify(ic, m, ni) != 0) {
6500 m_freem(m);
6501 ieee80211_free_node(ni);
6502 ifp->if_oerrors++;
6503 continue;
6504 }
6505
6506 /* No QoS encapsulation for EAPOL frames. */
6507 ac = (eh->ether_type != htons(ETHERTYPE_PAE)) ?
6508 M_WME_GETAC(m) : WME_AC_BE;
6509
6510 bpf_mtap(ifp, m);
6511
6512 if ((m = ieee80211_encap(ic, m, ni)) == NULL) {
6513 ieee80211_free_node(ni);
6514 ifp->if_oerrors++;
6515 continue;
6516 }
6517
6518 sendit:
6519 bpf_mtap3(ic->ic_rawbpf, m);
6520
6521 if (iwm_tx(sc, m, ni, ac) != 0) {
6522 ieee80211_free_node(ni);
6523 ifp->if_oerrors++;
6524 continue;
6525 }
6526
6527 if (ifp->if_flags & IFF_UP) {
6528 sc->sc_tx_timer = 15;
6529 ifp->if_timer = 1;
6530 }
6531 }
6532 }
6533
6534 static void
6535 iwm_stop(struct ifnet *ifp, int disable)
6536 {
6537 struct iwm_softc *sc = ifp->if_softc;
6538 struct ieee80211com *ic = &sc->sc_ic;
6539 struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
6540 int s;
6541
6542 sc->sc_flags &= ~IWM_FLAG_HW_INITED;
6543 sc->sc_flags |= IWM_FLAG_STOPPED;
6544 sc->sc_generation++;
6545 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
6546
6547 if (in)
6548 in->in_phyctxt = NULL;
6549
6550 s = splnet();
6551 if (ic->ic_state != IEEE80211_S_INIT)
6552 ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
6553 splx(s);
6554
6555 callout_stop(&sc->sc_calib_to);
6556 iwm_led_blink_stop(sc);
6557 ifp->if_timer = sc->sc_tx_timer = 0;
6558 iwm_stop_device(sc);
6559 }
6560
6561 static void
6562 iwm_watchdog(struct ifnet *ifp)
6563 {
6564 struct iwm_softc *sc = ifp->if_softc;
6565
6566 ifp->if_timer = 0;
6567 if (sc->sc_tx_timer > 0) {
6568 if (--sc->sc_tx_timer == 0) {
6569 aprint_error_dev(sc->sc_dev, "device timeout\n");
6570 #ifdef IWM_DEBUG
6571 iwm_nic_error(sc);
6572 #endif
6573 ifp->if_flags &= ~IFF_UP;
6574 iwm_stop(ifp, 1);
6575 ifp->if_oerrors++;
6576 return;
6577 }
6578 ifp->if_timer = 1;
6579 }
6580
6581 ieee80211_watchdog(&sc->sc_ic);
6582 }
6583
6584 static int
6585 iwm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
6586 {
6587 struct iwm_softc *sc = ifp->if_softc;
6588 struct ieee80211com *ic = &sc->sc_ic;
6589 const struct sockaddr *sa;
6590 int s, err = 0;
6591
6592 s = splnet();
6593
6594 switch (cmd) {
6595 case SIOCSIFADDR:
6596 ifp->if_flags |= IFF_UP;
6597 /* FALLTHROUGH */
6598 case SIOCSIFFLAGS:
6599 err = ifioctl_common(ifp, cmd, data);
6600 if (err)
6601 break;
6602 if (ifp->if_flags & IFF_UP) {
6603 if (!(ifp->if_flags & IFF_RUNNING)) {
6604 err = iwm_init(ifp);
6605 if (err)
6606 ifp->if_flags &= ~IFF_UP;
6607 }
6608 } else {
6609 if (ifp->if_flags & IFF_RUNNING)
6610 iwm_stop(ifp, 1);
6611 }
6612 break;
6613
6614 case SIOCADDMULTI:
6615 case SIOCDELMULTI:
6616 if (!ISSET(sc->sc_flags, IWM_FLAG_ATTACHED)) {
6617 err = ENXIO;
6618 break;
6619 }
6620 sa = ifreq_getaddr(SIOCADDMULTI, (struct ifreq *)data);
6621 err = (cmd == SIOCADDMULTI) ?
6622 ether_addmulti(sa, &sc->sc_ec) :
6623 ether_delmulti(sa, &sc->sc_ec);
6624 if (err == ENETRESET)
6625 err = 0;
6626 break;
6627
6628 default:
6629 if (!ISSET(sc->sc_flags, IWM_FLAG_ATTACHED)) {
6630 err = ether_ioctl(ifp, cmd, data);
6631 break;
6632 }
6633 err = ieee80211_ioctl(ic, cmd, data);
6634 break;
6635 }
6636
6637 if (err == ENETRESET) {
6638 err = 0;
6639 if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
6640 (IFF_UP | IFF_RUNNING)) {
6641 iwm_stop(ifp, 0);
6642 err = iwm_init(ifp);
6643 }
6644 }
6645
6646 splx(s);
6647 return err;
6648 }
6649
6650 /*
6651 * Note: This structure is read from the device with IO accesses,
6652 * and the reading already does the endian conversion. As it is
6653 * read with uint32_t-sized accesses, any members with a different size
6654 * need to be ordered correctly though!
6655 */
6656 struct iwm_error_event_table {
6657 uint32_t valid; /* (nonzero) valid, (0) log is empty */
6658 uint32_t error_id; /* type of error */
6659 uint32_t trm_hw_status0; /* TRM HW status */
6660 uint32_t trm_hw_status1; /* TRM HW status */
6661 uint32_t blink2; /* branch link */
6662 uint32_t ilink1; /* interrupt link */
6663 uint32_t ilink2; /* interrupt link */
6664 uint32_t data1; /* error-specific data */
6665 uint32_t data2; /* error-specific data */
6666 uint32_t data3; /* error-specific data */
6667 uint32_t bcon_time; /* beacon timer */
6668 uint32_t tsf_low; /* network timestamp function timer */
6669 uint32_t tsf_hi; /* network timestamp function timer */
6670 uint32_t gp1; /* GP1 timer register */
6671 uint32_t gp2; /* GP2 timer register */
6672 uint32_t fw_rev_type; /* firmware revision type */
6673 uint32_t major; /* uCode version major */
6674 uint32_t minor; /* uCode version minor */
6675 uint32_t hw_ver; /* HW Silicon version */
6676 uint32_t brd_ver; /* HW board version */
6677 uint32_t log_pc; /* log program counter */
6678 uint32_t frame_ptr; /* frame pointer */
6679 uint32_t stack_ptr; /* stack pointer */
6680 uint32_t hcmd; /* last host command header */
6681 uint32_t isr0; /* isr status register LMPM_NIC_ISR0:
6682 * rxtx_flag */
6683 uint32_t isr1; /* isr status register LMPM_NIC_ISR1:
6684 * host_flag */
6685 uint32_t isr2; /* isr status register LMPM_NIC_ISR2:
6686 * enc_flag */
6687 uint32_t isr3; /* isr status register LMPM_NIC_ISR3:
6688 * time_flag */
6689 uint32_t isr4; /* isr status register LMPM_NIC_ISR4:
6690 * wico interrupt */
6691 uint32_t last_cmd_id; /* last HCMD id handled by the firmware */
6692 uint32_t wait_event; /* wait event() caller address */
6693 uint32_t l2p_control; /* L2pControlField */
6694 uint32_t l2p_duration; /* L2pDurationField */
6695 uint32_t l2p_mhvalid; /* L2pMhValidBits */
6696 uint32_t l2p_addr_match; /* L2pAddrMatchStat */
6697 uint32_t lmpm_pmg_sel; /* indicate which clocks are turned on
6698 * (LMPM_PMG_SEL) */
6699 	uint32_t u_timestamp;	/* date and time of the
6700 				 * compilation */
6701 uint32_t flow_handler; /* FH read/write pointers, RX credit */
6702 } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
6703
6704 /*
6705 * UMAC error struct - relevant starting from family 8000 chip.
6706 * Note: This structure is read from the device with IO accesses,
6707 * and the reading already does the endian conversion. As it is
6708 * read with u32-sized accesses, any members with a different size
6709 * need to be ordered correctly though!
6710 */
6711 struct iwm_umac_error_event_table {
6712 uint32_t valid; /* (nonzero) valid, (0) log is empty */
6713 uint32_t error_id; /* type of error */
6714 uint32_t blink1; /* branch link */
6715 uint32_t blink2; /* branch link */
6716 uint32_t ilink1; /* interrupt link */
6717 uint32_t ilink2; /* interrupt link */
6718 uint32_t data1; /* error-specific data */
6719 uint32_t data2; /* error-specific data */
6720 uint32_t data3; /* error-specific data */
6721 uint32_t umac_major;
6722 uint32_t umac_minor;
6723 uint32_t frame_pointer; /* core register 27 */
6724 uint32_t stack_pointer; /* core register 28 */
6725 uint32_t cmd_header; /* latest host cmd sent to UMAC */
6726 uint32_t nic_isr_pref; /* ISR status register */
6727 } __packed;
6728
6729 #define ERROR_START_OFFSET (1 * sizeof(uint32_t))
6730 #define ERROR_ELEM_SIZE (7 * sizeof(uint32_t))
6731
6732 #ifdef IWM_DEBUG
6733 static const struct {
6734 const char *name;
6735 uint8_t num;
6736 } advanced_lookup[] = {
6737 { "NMI_INTERRUPT_WDG", 0x34 },
6738 { "SYSASSERT", 0x35 },
6739 { "UCODE_VERSION_MISMATCH", 0x37 },
6740 { "BAD_COMMAND", 0x38 },
6741 { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
6742 { "FATAL_ERROR", 0x3D },
6743 { "NMI_TRM_HW_ERR", 0x46 },
6744 { "NMI_INTERRUPT_TRM", 0x4C },
6745 { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
6746 { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
6747 { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
6748 { "NMI_INTERRUPT_HOST", 0x66 },
6749 { "NMI_INTERRUPT_ACTION_PT", 0x7C },
6750 { "NMI_INTERRUPT_UNKNOWN", 0x84 },
6751 { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
6752 { "ADVANCED_SYSASSERT", 0 },
6753 };
6754
6755 static const char *
6756 iwm_desc_lookup(uint32_t num)
6757 {
6758 int i;
6759
6760 for (i = 0; i < __arraycount(advanced_lookup) - 1; i++)
6761 if (advanced_lookup[i].num == num)
6762 return advanced_lookup[i].name;
6763
6764 /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
6765 return advanced_lookup[i].name;
6766 }
6767
6768 /*
6769 * Support for dumping the error log seemed like a good idea ...
6770 * but it's mostly hex junk and the only sensible thing is the
6771 * hw/ucode revision (which we know anyway). Since it's here,
6772 * I'll just leave it in, just in case e.g. the Intel guys want to
6773 * help us decipher some "ADVANCED_SYSASSERT" later.
6774 */
6775 static void
6776 iwm_nic_error(struct iwm_softc *sc)
6777 {
6778 struct iwm_error_event_table t;
6779 uint32_t base;
6780
6781 aprint_error_dev(sc->sc_dev, "dumping device error log\n");
6782 base = sc->sc_uc.uc_error_event_table;
6783 if (base < 0x800000) {
6784 aprint_error_dev(sc->sc_dev,
6785 "Invalid error log pointer 0x%08x\n", base);
6786 return;
6787 }
6788
6789 if (iwm_read_mem(sc, base, &t, sizeof(t)/sizeof(uint32_t))) {
6790 aprint_error_dev(sc->sc_dev, "reading errlog failed\n");
6791 return;
6792 }
6793
6794 if (!t.valid) {
6795 aprint_error_dev(sc->sc_dev, "errlog not found, skipping\n");
6796 return;
6797 }
6798
6799 if (ERROR_START_OFFSET <= t.valid * ERROR_ELEM_SIZE) {
6800 aprint_error_dev(sc->sc_dev, "Start Error Log Dump:\n");
6801 aprint_error_dev(sc->sc_dev, "Status: 0x%x, count: %d\n",
6802 sc->sc_flags, t.valid);
6803 }
6804
6805 aprint_error_dev(sc->sc_dev, "%08X | %-28s\n", t.error_id,
6806 iwm_desc_lookup(t.error_id));
6807 aprint_error_dev(sc->sc_dev, "%08X | trm_hw_status0\n",
6808 t.trm_hw_status0);
6809 aprint_error_dev(sc->sc_dev, "%08X | trm_hw_status1\n",
6810 t.trm_hw_status1);
6811 aprint_error_dev(sc->sc_dev, "%08X | branchlink2\n", t.blink2);
6812 aprint_error_dev(sc->sc_dev, "%08X | interruptlink1\n", t.ilink1);
6813 aprint_error_dev(sc->sc_dev, "%08X | interruptlink2\n", t.ilink2);
6814 aprint_error_dev(sc->sc_dev, "%08X | data1\n", t.data1);
6815 aprint_error_dev(sc->sc_dev, "%08X | data2\n", t.data2);
6816 aprint_error_dev(sc->sc_dev, "%08X | data3\n", t.data3);
6817 aprint_error_dev(sc->sc_dev, "%08X | beacon time\n", t.bcon_time);
6818 aprint_error_dev(sc->sc_dev, "%08X | tsf low\n", t.tsf_low);
6819 aprint_error_dev(sc->sc_dev, "%08X | tsf hi\n", t.tsf_hi);
6820 aprint_error_dev(sc->sc_dev, "%08X | time gp1\n", t.gp1);
6821 aprint_error_dev(sc->sc_dev, "%08X | time gp2\n", t.gp2);
6822 aprint_error_dev(sc->sc_dev, "%08X | uCode revision type\n",
6823 t.fw_rev_type);
6824 aprint_error_dev(sc->sc_dev, "%08X | uCode version major\n",
6825 t.major);
6826 aprint_error_dev(sc->sc_dev, "%08X | uCode version minor\n",
6827 t.minor);
6828 aprint_error_dev(sc->sc_dev, "%08X | hw version\n", t.hw_ver);
6829 aprint_error_dev(sc->sc_dev, "%08X | board version\n", t.brd_ver);
6830 aprint_error_dev(sc->sc_dev, "%08X | hcmd\n", t.hcmd);
6831 aprint_error_dev(sc->sc_dev, "%08X | isr0\n", t.isr0);
6832 aprint_error_dev(sc->sc_dev, "%08X | isr1\n", t.isr1);
6833 aprint_error_dev(sc->sc_dev, "%08X | isr2\n", t.isr2);
6834 aprint_error_dev(sc->sc_dev, "%08X | isr3\n", t.isr3);
6835 aprint_error_dev(sc->sc_dev, "%08X | isr4\n", t.isr4);
6836 aprint_error_dev(sc->sc_dev, "%08X | last cmd Id\n", t.last_cmd_id);
6837 aprint_error_dev(sc->sc_dev, "%08X | wait_event\n", t.wait_event);
6838 aprint_error_dev(sc->sc_dev, "%08X | l2p_control\n", t.l2p_control);
6839 aprint_error_dev(sc->sc_dev, "%08X | l2p_duration\n", t.l2p_duration);
6840 aprint_error_dev(sc->sc_dev, "%08X | l2p_mhvalid\n", t.l2p_mhvalid);
6841 aprint_error_dev(sc->sc_dev, "%08X | l2p_addr_match\n",
6842 t.l2p_addr_match);
6843 aprint_error_dev(sc->sc_dev, "%08X | lmpm_pmg_sel\n", t.lmpm_pmg_sel);
6844 aprint_error_dev(sc->sc_dev, "%08X | timestamp\n", t.u_timestamp);
6845 aprint_error_dev(sc->sc_dev, "%08X | flow_handler\n", t.flow_handler);
6846
6847 if (sc->sc_uc.uc_umac_error_event_table)
6848 iwm_nic_umac_error(sc);
6849 }
6850
6851 static void
6852 iwm_nic_umac_error(struct iwm_softc *sc)
6853 {
6854 struct iwm_umac_error_event_table t;
6855 uint32_t base;
6856
6857 base = sc->sc_uc.uc_umac_error_event_table;
6858
6859 if (base < 0x800000) {
6860 aprint_error_dev(sc->sc_dev,
6861 "Invalid error log pointer 0x%08x\n", base);
6862 return;
6863 }
6864
6865 if (iwm_read_mem(sc, base, &t, sizeof(t)/sizeof(uint32_t))) {
6866 aprint_error_dev(sc->sc_dev, "reading errlog failed\n");
6867 return;
6868 }
6869
6870 if (ERROR_START_OFFSET <= t.valid * ERROR_ELEM_SIZE) {
6871 aprint_error_dev(sc->sc_dev, "Start UMAC Error Log Dump:\n");
6872 aprint_error_dev(sc->sc_dev, "Status: 0x%x, count: %d\n",
6873 sc->sc_flags, t.valid);
6874 }
6875
6876 aprint_error_dev(sc->sc_dev, "0x%08X | %s\n", t.error_id,
6877 iwm_desc_lookup(t.error_id));
6878 aprint_error_dev(sc->sc_dev, "0x%08X | umac branchlink1\n", t.blink1);
6879 aprint_error_dev(sc->sc_dev, "0x%08X | umac branchlink2\n", t.blink2);
6880 aprint_error_dev(sc->sc_dev, "0x%08X | umac interruptlink1\n",
6881 t.ilink1);
6882 aprint_error_dev(sc->sc_dev, "0x%08X | umac interruptlink2\n",
6883 t.ilink2);
6884 aprint_error_dev(sc->sc_dev, "0x%08X | umac data1\n", t.data1);
6885 aprint_error_dev(sc->sc_dev, "0x%08X | umac data2\n", t.data2);
6886 aprint_error_dev(sc->sc_dev, "0x%08X | umac data3\n", t.data3);
6887 aprint_error_dev(sc->sc_dev, "0x%08X | umac major\n", t.umac_major);
6888 aprint_error_dev(sc->sc_dev, "0x%08X | umac minor\n", t.umac_minor);
6889 aprint_error_dev(sc->sc_dev, "0x%08X | frame pointer\n",
6890 t.frame_pointer);
6891 aprint_error_dev(sc->sc_dev, "0x%08X | stack pointer\n",
6892 t.stack_pointer);
6893 aprint_error_dev(sc->sc_dev, "0x%08X | last host cmd\n", t.cmd_header);
6894 aprint_error_dev(sc->sc_dev, "0x%08X | isr status reg\n",
6895 t.nic_isr_pref);
6896 }
6897 #endif
6898
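/*
 * The helpers below sync the response payload which follows the
 * iwm_rx_packet header in the RX buffer and yield a pointer to it.
 */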
6899 #define SYNC_RESP_STRUCT(_var_, _pkt_) \
6900 do { \
6901 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)), \
6902 sizeof(*(_var_)), BUS_DMASYNC_POSTREAD); \
6903 _var_ = (void *)((_pkt_)+1); \
6904 } while (/*CONSTCOND*/0)
6905
6906 #define SYNC_RESP_PTR(_ptr_, _len_, _pkt_) \
6907 do { \
6908 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)), \
6909 	    (_len_), BUS_DMASYNC_POSTREAD);				\
6910 _ptr_ = (void *)((_pkt_)+1); \
6911 } while (/*CONSTCOND*/0)
6912
6913 #define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % IWM_RX_RING_COUNT);
6914
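/*
 * Drain the RX ring: process every notification the firmware has
 * posted up to the "closed" index it maintains in the status page.
 */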
6915 static void
6916 iwm_notif_intr(struct iwm_softc *sc)
6917 {
6918 uint16_t hw;
6919
6920 bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
6921 0, sc->rxq.stat_dma.size, BUS_DMASYNC_POSTREAD);
6922
6923 hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
6924 while (sc->rxq.cur != hw) {
6925 struct iwm_rx_data *data = &sc->rxq.data[sc->rxq.cur];
6926 struct iwm_rx_packet *pkt;
6927 struct iwm_cmd_response *cresp;
6928 int orig_qid, qid, idx, code;
6929
6930 bus_dmamap_sync(sc->sc_dmat, data->map, 0, sizeof(*pkt),
6931 BUS_DMASYNC_POSTREAD);
6932 pkt = mtod(data->m, struct iwm_rx_packet *);
6933
6934 orig_qid = pkt->hdr.qid;
6935 qid = orig_qid & ~0x80;
6936 idx = pkt->hdr.idx;
6937
6938 code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
6939
6940 /*
6941 * randomly get these from the firmware, no idea why.
6942 * they at least seem harmless, so just ignore them for now
6943 */
6944 if (__predict_false((pkt->hdr.code == 0 && qid == 0 && idx == 0)
6945 || pkt->len_n_flags == htole32(0x55550000))) {
6946 ADVANCE_RXQ(sc);
6947 continue;
6948 }
6949
6950 switch (code) {
6951 case IWM_REPLY_RX_PHY_CMD:
6952 iwm_rx_rx_phy_cmd(sc, pkt, data);
6953 break;
6954
6955 case IWM_REPLY_RX_MPDU_CMD:
6956 iwm_rx_rx_mpdu(sc, pkt, data);
6957 break;
6958
6959 case IWM_TX_CMD:
6960 iwm_rx_tx_cmd(sc, pkt, data);
6961 break;
6962
6963 case IWM_MISSED_BEACONS_NOTIFICATION:
6964 iwm_rx_missed_beacons_notif(sc, pkt, data);
6965 break;
6966
6967 case IWM_MFUART_LOAD_NOTIFICATION:
6968 break;
6969
6970 case IWM_ALIVE: {
6971 struct iwm_alive_resp_v1 *resp1;
6972 struct iwm_alive_resp_v2 *resp2;
6973 struct iwm_alive_resp_v3 *resp3;
6974
6975 if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp1)) {
6976 SYNC_RESP_STRUCT(resp1, pkt);
6977 sc->sc_uc.uc_error_event_table
6978 = le32toh(resp1->error_event_table_ptr);
6979 sc->sc_uc.uc_log_event_table
6980 = le32toh(resp1->log_event_table_ptr);
6981 sc->sched_base = le32toh(resp1->scd_base_ptr);
6982 if (resp1->status == IWM_ALIVE_STATUS_OK)
6983 sc->sc_uc.uc_ok = 1;
6984 else
6985 sc->sc_uc.uc_ok = 0;
6986 }
6987 if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp2)) {
6988 SYNC_RESP_STRUCT(resp2, pkt);
6989 sc->sc_uc.uc_error_event_table
6990 = le32toh(resp2->error_event_table_ptr);
6991 sc->sc_uc.uc_log_event_table
6992 = le32toh(resp2->log_event_table_ptr);
6993 sc->sched_base = le32toh(resp2->scd_base_ptr);
6994 sc->sc_uc.uc_umac_error_event_table
6995 = le32toh(resp2->error_info_addr);
6996 if (resp2->status == IWM_ALIVE_STATUS_OK)
6997 sc->sc_uc.uc_ok = 1;
6998 else
6999 sc->sc_uc.uc_ok = 0;
7000 }
7001 if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp3)) {
7002 SYNC_RESP_STRUCT(resp3, pkt);
7003 sc->sc_uc.uc_error_event_table
7004 = le32toh(resp3->error_event_table_ptr);
7005 sc->sc_uc.uc_log_event_table
7006 = le32toh(resp3->log_event_table_ptr);
7007 sc->sched_base = le32toh(resp3->scd_base_ptr);
7008 sc->sc_uc.uc_umac_error_event_table
7009 = le32toh(resp3->error_info_addr);
7010 if (resp3->status == IWM_ALIVE_STATUS_OK)
7011 sc->sc_uc.uc_ok = 1;
7012 else
7013 sc->sc_uc.uc_ok = 0;
7014 }
7015
7016 sc->sc_uc.uc_intr = 1;
7017 wakeup(&sc->sc_uc);
7018 break;
7019 }
7020
7021 case IWM_CALIB_RES_NOTIF_PHY_DB: {
7022 struct iwm_calib_res_notif_phy_db *phy_db_notif;
7023 SYNC_RESP_STRUCT(phy_db_notif, pkt);
7024 uint16_t size = le16toh(phy_db_notif->length);
7025 bus_dmamap_sync(sc->sc_dmat, data->map,
7026 sizeof(*pkt) + sizeof(*phy_db_notif),
7027 size, BUS_DMASYNC_POSTREAD);
7028 iwm_phy_db_set_section(sc, phy_db_notif, size);
7029 break;
7030 }
7031
7032 case IWM_STATISTICS_NOTIFICATION: {
7033 struct iwm_notif_statistics *stats;
7034 SYNC_RESP_STRUCT(stats, pkt);
7035 memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
7036 sc->sc_noise = iwm_get_noise(&stats->rx.general);
7037 break;
7038 }
7039
7040 case IWM_NVM_ACCESS_CMD:
7041 case IWM_MCC_UPDATE_CMD:
7042 if (sc->sc_wantresp == ((qid << 16) | idx)) {
7043 bus_dmamap_sync(sc->sc_dmat, data->map, 0,
7044 sizeof(sc->sc_cmd_resp),
7045 BUS_DMASYNC_POSTREAD);
7046 memcpy(sc->sc_cmd_resp,
7047 pkt, sizeof(sc->sc_cmd_resp));
7048 }
7049 break;
7050
7051 case IWM_MCC_CHUB_UPDATE_CMD: {
7052 struct iwm_mcc_chub_notif *notif;
7053 SYNC_RESP_STRUCT(notif, pkt);
7054
7055 			sc->sc_fw_mcc[0] = (le16toh(notif->mcc) & 0xff00) >> 8;
7056 			sc->sc_fw_mcc[1] = le16toh(notif->mcc) & 0xff;
7057 sc->sc_fw_mcc[2] = '\0';
7058 break;
7059 }
7060
7061 case IWM_DTS_MEASUREMENT_NOTIFICATION:
7062 case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
7063 IWM_DTS_MEASUREMENT_NOTIF_WIDE): {
7064 struct iwm_dts_measurement_notif_v1 *notif1;
7065 struct iwm_dts_measurement_notif_v2 *notif2;
7066
7067 if (iwm_rx_packet_payload_len(pkt) == sizeof(*notif1)) {
7068 SYNC_RESP_STRUCT(notif1, pkt);
7069 DPRINTF(("%s: DTS temp=%d \n",
7070 DEVNAME(sc), notif1->temp));
7071 break;
7072 }
7073 if (iwm_rx_packet_payload_len(pkt) == sizeof(*notif2)) {
7074 SYNC_RESP_STRUCT(notif2, pkt);
7075 DPRINTF(("%s: DTS temp=%d \n",
7076 DEVNAME(sc), notif2->temp));
7077 break;
7078 }
7079 break;
7080 }
7081
7082 case IWM_PHY_CONFIGURATION_CMD:
7083 case IWM_TX_ANT_CONFIGURATION_CMD:
7084 case IWM_ADD_STA:
7085 case IWM_MAC_CONTEXT_CMD:
7086 case IWM_REPLY_SF_CFG_CMD:
7087 case IWM_POWER_TABLE_CMD:
7088 case IWM_PHY_CONTEXT_CMD:
7089 case IWM_BINDING_CONTEXT_CMD:
7090 case IWM_TIME_EVENT_CMD:
7091 case IWM_SCAN_REQUEST_CMD:
7092 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
7093 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
7094 case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_ABORT_UMAC):
7095 case IWM_SCAN_OFFLOAD_REQUEST_CMD:
7096 case IWM_SCAN_OFFLOAD_ABORT_CMD:
7097 case IWM_REPLY_BEACON_FILTERING_CMD:
7098 case IWM_MAC_PM_POWER_TABLE:
7099 case IWM_TIME_QUOTA_CMD:
7100 case IWM_REMOVE_STA:
7101 case IWM_TXPATH_FLUSH:
7102 case IWM_LQ_CMD:
7103 case IWM_BT_CONFIG:
7104 case IWM_REPLY_THERMAL_MNG_BACKOFF:
7105 SYNC_RESP_STRUCT(cresp, pkt);
7106 if (sc->sc_wantresp == ((qid << 16) | idx)) {
7107 memcpy(sc->sc_cmd_resp,
7108 pkt, sizeof(*pkt) + sizeof(*cresp));
7109 }
7110 break;
7111
7112 /* ignore */
7113 case IWM_PHY_DB_CMD:
7114 break;
7115
7116 case IWM_INIT_COMPLETE_NOTIF:
7117 sc->sc_init_complete = 1;
7118 wakeup(&sc->sc_init_complete);
7119 break;
7120
7121 case IWM_SCAN_OFFLOAD_COMPLETE: {
7122 struct iwm_periodic_scan_complete *notif;
7123 SYNC_RESP_STRUCT(notif, pkt);
7124 break;
7125 }
7126
7127 case IWM_SCAN_ITERATION_COMPLETE: {
7128 struct iwm_lmac_scan_complete_notif *notif;
7129 SYNC_RESP_STRUCT(notif, pkt);
7130 if (ISSET(sc->sc_flags, IWM_FLAG_SCANNING)) {
7131 CLR(sc->sc_flags, IWM_FLAG_SCANNING);
7132 iwm_endscan(sc);
7133 }
7134 break;
7135 }
7136
7137 case IWM_SCAN_COMPLETE_UMAC: {
7138 struct iwm_umac_scan_complete *notif;
7139 SYNC_RESP_STRUCT(notif, pkt);
7140 if (ISSET(sc->sc_flags, IWM_FLAG_SCANNING)) {
7141 CLR(sc->sc_flags, IWM_FLAG_SCANNING);
7142 iwm_endscan(sc);
7143 }
7144 break;
7145 }
7146
7147 case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
7148 struct iwm_umac_scan_iter_complete_notif *notif;
7149 SYNC_RESP_STRUCT(notif, pkt);
7150 if (ISSET(sc->sc_flags, IWM_FLAG_SCANNING)) {
7151 CLR(sc->sc_flags, IWM_FLAG_SCANNING);
7152 iwm_endscan(sc);
7153 }
7154 break;
7155 }
7156
7157 case IWM_REPLY_ERROR: {
7158 struct iwm_error_resp *resp;
7159 SYNC_RESP_STRUCT(resp, pkt);
7160 aprint_error_dev(sc->sc_dev,
7161 "firmware error 0x%x, cmd 0x%x\n",
7162 le32toh(resp->error_type), resp->cmd_id);
7163 break;
7164 }
7165
7166 case IWM_TIME_EVENT_NOTIFICATION: {
7167 struct iwm_time_event_notif *notif;
7168 SYNC_RESP_STRUCT(notif, pkt);
7169 break;
7170 }
7171
7172 case IWM_MCAST_FILTER_CMD:
7173 break;
7174
7175 case IWM_SCD_QUEUE_CFG: {
7176 struct iwm_scd_txq_cfg_rsp *rsp;
7177 SYNC_RESP_STRUCT(rsp, pkt);
7178 break;
7179 }
7180
7181 default:
7182 aprint_error_dev(sc->sc_dev,
7183 "unhandled firmware response 0x%x 0x%x/0x%x "
7184 "rx ring %d[%d]\n",
7185 code, pkt->hdr.code, pkt->len_n_flags, qid, idx);
7186 break;
7187 }
7188
7189 /*
7190 * uCode sets bit 0x80 when it originates the notification,
7191 * i.e. when the notification is not a direct response to a
7192 * command sent by the driver.
7193 * For example, uCode issues IWM_REPLY_RX when it sends a
7194 * received frame to the driver.
7195 */
7196 if (!(orig_qid & (1 << 7))) {
7197 iwm_cmd_done(sc, qid, idx);
7198 }
7199
7200 ADVANCE_RXQ(sc);
7201 }
7202
7203 /*
7204 * Seems like the hardware gets upset unless we align the write by 8??
7205 */
7206 hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
7207 IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
7208 }
7209
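/*
 * Hardware interrupt handler: gather the interrupt causes (from the
 * ICT table if enabled, else from IWM_CSR_INT), acknowledge them, and
 * punt the actual work to the soft interrupt handler.
 */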
7210 static int
7211 iwm_intr(void *arg)
7212 {
7213 struct iwm_softc *sc = arg;
7214 int r1, r2;
7215
7216 IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
7217
7218 if (__predict_true(sc->sc_flags & IWM_FLAG_USE_ICT)) {
7219 uint32_t *ict = sc->ict_dma.vaddr;
7220 int tmp;
7221
7222 bus_dmamap_sync(sc->sc_dmat, sc->ict_dma.map,
7223 0, sc->ict_dma.size, BUS_DMASYNC_POSTREAD);
7224 		tmp = le32toh(ict[sc->ict_cur]);
7225 if (!tmp)
7226 goto out_ena;
7227
7228 /*
7229 * ok, there was something. keep plowing until we have all.
7230 */
7231 r1 = r2 = 0;
7232 while (tmp) {
7233 r1 |= tmp;
7234 ict[sc->ict_cur] = 0; /* Acknowledge. */
7235 sc->ict_cur = (sc->ict_cur + 1) % IWM_ICT_COUNT;
7236 			tmp = le32toh(ict[sc->ict_cur]);
7237 }
7238
7239 bus_dmamap_sync(sc->sc_dmat, sc->ict_dma.map,
7240 0, sc->ict_dma.size, BUS_DMASYNC_PREWRITE);
7241
7242 /* this is where the fun begins. don't ask */
7243 if (r1 == 0xffffffff)
7244 r1 = 0;
7245
7246 /* i am not expected to understand this */
7247 if (r1 & 0xc0000)
7248 r1 |= 0x8000;
7249 r1 = (0xff & r1) | ((0xff00 & r1) << 16);
7250 } else {
7251 r1 = IWM_READ(sc, IWM_CSR_INT);
7252 if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
7253 goto out;
7254 r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
7255 }
7256 if (r1 == 0 && r2 == 0) {
7257 goto out_ena;
7258 }
7259
7260 /* Acknowledge interrupts. */
7261 IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
7262 if (__predict_false(!(sc->sc_flags & IWM_FLAG_USE_ICT)))
7263 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, r2);
7264
7265 atomic_or_32(&sc->sc_soft_flags, r1);
7266 softint_schedule(sc->sc_soft_ih);
7267 return 1;
7268
7269 out_ena:
7270 iwm_restore_interrupts(sc);
7271 out:
7272 return 0;
7273 }
7274
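/*
 * Soft interrupt handler: acts on the causes collected by iwm_intr(),
 * i.e. firmware/hardware errors, rfkill toggles, firmware-chunk load
 * completion and RX notifications.
 */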
7275 static void
7276 iwm_softintr(void *arg)
7277 {
7278 struct iwm_softc *sc = arg;
7279 struct ifnet *ifp = IC2IFP(&sc->sc_ic);
7280 uint32_t r1;
7281 int isperiodic = 0;
7282
7283 r1 = atomic_swap_32(&sc->sc_soft_flags, 0);
7284
7285 if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
7286 #ifdef IWM_DEBUG
7287 int i;
7288
7289 iwm_nic_error(sc);
7290
7291 /* Dump driver status (TX and RX rings) while we're here. */
7292 DPRINTF(("driver status:\n"));
7293 for (i = 0; i < IWM_MAX_QUEUES; i++) {
7294 struct iwm_tx_ring *ring = &sc->txq[i];
7295 DPRINTF((" tx ring %2d: qid=%-2d cur=%-3d "
7296 "queued=%-3d\n",
7297 i, ring->qid, ring->cur, ring->queued));
7298 }
7299 DPRINTF((" rx ring: cur=%d\n", sc->rxq.cur));
7300 DPRINTF((" 802.11 state %s\n",
7301 ieee80211_state_name[sc->sc_ic.ic_state]));
7302 #endif
7303
7304 aprint_error_dev(sc->sc_dev, "fatal firmware error\n");
7305 fatal:
7306 ifp->if_flags &= ~IFF_UP;
7307 iwm_stop(ifp, 1);
7308 /* Don't restore interrupt mask */
7309 return;
7310
7311 }
7312
7313 if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
7314 aprint_error_dev(sc->sc_dev,
7315 "hardware error, stopping device\n");
7316 goto fatal;
7317 }
7318
7319 /* firmware chunk loaded */
7320 if (r1 & IWM_CSR_INT_BIT_FH_TX) {
7321 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
7322 sc->sc_fw_chunk_done = 1;
7323 wakeup(&sc->sc_fw);
7324 }
7325
7326 if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
7327 if (iwm_check_rfkill(sc) && (ifp->if_flags & IFF_UP)) {
7328 ifp->if_flags &= ~IFF_UP;
7329 iwm_stop(ifp, 1);
7330 }
7331 }
7332
7333 if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
7334 IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
7335 if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
7336 IWM_WRITE_1(sc,
7337 IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
7338 isperiodic = 1;
7339 }
7340
7341 if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) ||
7342 isperiodic) {
7343 IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
7344
7345 iwm_notif_intr(sc);
7346
7347 /* enable periodic interrupt, see above */
7348 if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) &&
7349 !isperiodic)
7350 IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
7351 IWM_CSR_INT_PERIODIC_ENA);
7352 }
7353
7354 iwm_restore_interrupts(sc);
7355 }
7356
7357 /*
7358 * Autoconf glue-sniffing
7359 */
7360
7361 static const pci_product_id_t iwm_devices[] = {
7362 PCI_PRODUCT_INTEL_WIFI_LINK_7260_1,
7363 PCI_PRODUCT_INTEL_WIFI_LINK_7260_2,
7364 PCI_PRODUCT_INTEL_WIFI_LINK_3160_1,
7365 PCI_PRODUCT_INTEL_WIFI_LINK_3160_2,
7366 PCI_PRODUCT_INTEL_WIFI_LINK_7265_1,
7367 PCI_PRODUCT_INTEL_WIFI_LINK_7265_2,
7368 PCI_PRODUCT_INTEL_WIFI_LINK_3165_1,
7369 PCI_PRODUCT_INTEL_WIFI_LINK_3165_2,
7370 PCI_PRODUCT_INTEL_WIFI_LINK_8260_1,
7371 PCI_PRODUCT_INTEL_WIFI_LINK_8260_2,
7372 PCI_PRODUCT_INTEL_WIFI_LINK_4165_1,
7373 PCI_PRODUCT_INTEL_WIFI_LINK_4165_2,
7374 };
7375
7376 static int
7377 iwm_match(device_t parent, cfdata_t match __unused, void *aux)
7378 {
7379 struct pci_attach_args *pa = aux;
7380
7381 if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_INTEL)
7382 return 0;
7383
7384 for (size_t i = 0; i < __arraycount(iwm_devices); i++)
7385 if (PCI_PRODUCT(pa->pa_id) == iwm_devices[i])
7386 return 1;
7387
7388 return 0;
7389 }
7390
7391 static int
7392 iwm_preinit(struct iwm_softc *sc)
7393 {
7394 struct ieee80211com *ic = &sc->sc_ic;
7395 int err;
7396
7397 if (ISSET(sc->sc_flags, IWM_FLAG_ATTACHED))
7398 return 0;
7399
7400 err = iwm_start_hw(sc);
7401 if (err) {
7402 aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
7403 return err;
7404 }
7405
7406 err = iwm_run_init_mvm_ucode(sc, 1);
7407 iwm_stop_device(sc);
7408 if (err)
7409 return err;
7410
7411 sc->sc_flags |= IWM_FLAG_ATTACHED;
7412
7413 aprint_normal_dev(sc->sc_dev, "hw rev 0x%x, fw ver %s, address %s\n",
7414 sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK, sc->sc_fwver,
7415 ether_sprintf(sc->sc_nvm.hw_addr));
7416
7417 #ifndef IEEE80211_NO_HT
7418 if (sc->sc_nvm.sku_cap_11n_enable)
7419 iwm_setup_ht_rates(sc);
7420 #endif
7421
7422 /* not all hardware can do 5GHz band */
7423 if (sc->sc_nvm.sku_cap_band_52GHz_enable)
7424 ic->ic_sup_rates[IEEE80211_MODE_11A] = ieee80211_std_rateset_11a;
7425
7426 ieee80211_ifattach(ic);
7427
7428 ic->ic_node_alloc = iwm_node_alloc;
7429
7430 /* Override 802.11 state transition machine. */
7431 sc->sc_newstate = ic->ic_newstate;
7432 ic->ic_newstate = iwm_newstate;
7433 ieee80211_media_init(ic, iwm_media_change, ieee80211_media_status);
7434 ieee80211_announce(ic);
7435
7436 iwm_radiotap_attach(sc);
7437
7438 return 0;
7439 }

static void
iwm_attach_hook(device_t dev)
{
	struct iwm_softc *sc = device_private(dev);

	iwm_preinit(sc);
}

static void
iwm_attach(device_t parent, device_t self, void *aux)
{
	struct iwm_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &sc->sc_ec.ec_if;
	pcireg_t reg, memtype;
	char intrbuf[PCI_INTRSTR_LEN];
	const char *intrstr;
	int err;
	int txq_i;
	const struct sysctlnode *node;

	sc->sc_dev = self;
	sc->sc_pct = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;
	sc->sc_dmat = pa->pa_dmat;
	sc->sc_pciid = pa->pa_id;

	pci_aprint_devinfo(pa, NULL);

	if (workqueue_create(&sc->sc_nswq, "iwmns",
	    iwm_newstate_cb, sc, PRI_NONE, IPL_NET, 0))
		panic("%s: could not create workqueue: newstate",
		    device_xname(self));
	sc->sc_soft_ih = softint_establish(SOFTINT_NET, iwm_softintr, sc);
	if (sc->sc_soft_ih == NULL)
		panic("%s: could not establish softint", device_xname(self));

	/*
	 * Get the offset of the PCI Express Capability Structure in PCI
	 * Configuration Space.
	 */
	err = pci_get_capability(sc->sc_pct, sc->sc_pcitag,
	    PCI_CAP_PCIEXPRESS, &sc->sc_cap_off, NULL);
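	/*
	 * Note that pci_get_capability() returns non-zero on success,
	 * so err == 0 below means the capability is absent.
	 */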
	if (err == 0) {
		aprint_error_dev(self,
		    "PCIe capability structure not found!\n");
		return;
	}

	/* Clear device-specific "PCI retry timeout" register (41h). */
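	/*
	 * Per a long-standing note in Intel's Linux wireless drivers,
	 * this keeps PCI Tx retries from interfering with C3 CPU state.
	 */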
	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);

	/* Enable bus-mastering */
	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
	reg |= PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, reg);

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START);
	err = pci_mapreg_map(pa, PCI_MAPREG_START, memtype, 0,
	    &sc->sc_st, &sc->sc_sh, NULL, &sc->sc_sz);
	if (err) {
		aprint_error_dev(self, "can't map mem space\n");
		return;
	}

	/* Install interrupt handler. */
	err = pci_intr_alloc(pa, &sc->sc_pihp, NULL, 0);
	if (err) {
		aprint_error_dev(self, "can't allocate interrupt\n");
		return;
	}
	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
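	/*
	 * The command register's "interrupt disable" bit gates only
	 * legacy INTx assertion; MSI/MSI-X delivery ignores it, so set
	 * it in that case to keep spurious pin-based interrupts masked.
	 */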
	if (pci_intr_type(sc->sc_pct, sc->sc_pihp[0]) == PCI_INTR_TYPE_INTX)
		CLR(reg, PCI_COMMAND_INTERRUPT_DISABLE);
	else
		SET(reg, PCI_COMMAND_INTERRUPT_DISABLE);
	pci_conf_write(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, reg);
	intrstr = pci_intr_string(sc->sc_pct, sc->sc_pihp[0], intrbuf,
	    sizeof(intrbuf));
	sc->sc_ih = pci_intr_establish_xname(sc->sc_pct, sc->sc_pihp[0],
	    IPL_NET, iwm_intr, sc, device_xname(self));
	if (sc->sc_ih == NULL) {
		aprint_error_dev(self, "can't establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(self, "interrupting at %s\n", intrstr);

	sc->sc_wantresp = IWM_CMD_RESP_IDLE;

	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
	switch (PCI_PRODUCT(sc->sc_pciid)) {
	case PCI_PRODUCT_INTEL_WIFI_LINK_3160_1:
	case PCI_PRODUCT_INTEL_WIFI_LINK_3160_2:
		sc->sc_fwname = "iwlwifi-3160-16.ucode";
		sc->host_interrupt_operation_mode = 1;
		sc->apmg_wake_up_wa = 1;
		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
		break;
	case PCI_PRODUCT_INTEL_WIFI_LINK_3165_1:
	case PCI_PRODUCT_INTEL_WIFI_LINK_3165_2:
		sc->sc_fwname = "iwlwifi-7265D-17.ucode";
		sc->host_interrupt_operation_mode = 0;
		sc->apmg_wake_up_wa = 1;
		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
		break;
	case PCI_PRODUCT_INTEL_WIFI_LINK_7260_1:
	case PCI_PRODUCT_INTEL_WIFI_LINK_7260_2:
		sc->sc_fwname = "iwlwifi-7260-16.ucode";
		sc->host_interrupt_operation_mode = 1;
		sc->apmg_wake_up_wa = 1;
		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
		break;
	case PCI_PRODUCT_INTEL_WIFI_LINK_7265_1:
	case PCI_PRODUCT_INTEL_WIFI_LINK_7265_2:
		sc->sc_fwname = (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) ==
		    IWM_CSR_HW_REV_TYPE_7265D ?
		    "iwlwifi-7265D-17.ucode" : "iwlwifi-7265-16.ucode";
		sc->host_interrupt_operation_mode = 0;
		sc->apmg_wake_up_wa = 1;
		sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
		break;
	case PCI_PRODUCT_INTEL_WIFI_LINK_8260_1:
	case PCI_PRODUCT_INTEL_WIFI_LINK_8260_2:
	case PCI_PRODUCT_INTEL_WIFI_LINK_4165_1:
	case PCI_PRODUCT_INTEL_WIFI_LINK_4165_2:
		sc->sc_fwname = "iwlwifi-8000C-16.ucode";
		sc->host_interrupt_operation_mode = 0;
		sc->apmg_wake_up_wa = 0;
		sc->sc_device_family = IWM_DEVICE_FAMILY_8000;
		sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
		break;
	default:
		aprint_error_dev(self, "unknown product %#x\n",
		    PCI_PRODUCT(sc->sc_pciid));
		return;
	}
	DPRINTF(("%s: firmware=%s\n", DEVNAME(sc), sc->sc_fwname));

	/*
	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV
	 * has changed, and the revision step now also includes bits 0-1
	 * (there is no more "dash" value).  To keep hw_rev backwards
	 * compatible, store it in the old format.
	 */
	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
		sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
		    (IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
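	/*
	 * The shifts above move bits [1:0] of the new-format revision
	 * (now part of the step field) into bits [3:2], where the old
	 * format keeps the step; the low two bits end up zero.
	 */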

	if (iwm_prepare_card_hw(sc) != 0) {
		aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
		return;
	}

	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
		uint32_t hw_step;

		/*
		 * To recognize a C-step chip, read the chip version ID
		 * located at the AUX bus MISC address.
		 */
		IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
		    IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
		DELAY(2);

		err = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
		    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
		    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
		    25000);
		if (!err) {
			aprint_error_dev(sc->sc_dev,
			    "failed to wake up the NIC\n");
			return;
		}

		if (iwm_nic_lock(sc)) {
			hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
			hw_step |= IWM_ENABLE_WFPM;
			iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
			hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
			hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
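			/* A raw step value of 3 identifies C-step silicon. */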
			if (hw_step == 0x3)
				sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
				    (IWM_SILICON_C_STEP << 2);
			iwm_nic_unlock(sc);
		} else {
			aprint_error_dev(sc->sc_dev,
			    "failed to lock the NIC\n");
			return;
		}
	}

	/*
	 * Allocate DMA memory for firmware transfers.
	 * Must be aligned on a 16-byte boundary.
	 */
	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma, sc->sc_fwdmasegsz,
	    16);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate memory for firmware\n");
		return;
	}

	/* Allocate "Keep Warm" page, used internally by the card. */
	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate keep warm page\n");
		goto fail1;
	}

	/* Allocate interrupt cause table (ICT). */
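	/*
	 * The device stores only the ICT base address bits above
	 * IWM_ICT_PADDR_SHIFT, so the table has to be naturally aligned
	 * to 1 << IWM_ICT_PADDR_SHIFT bytes.
	 */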
	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma, IWM_ICT_SIZE,
	    1 << IWM_ICT_PADDR_SHIFT);
	if (err) {
		aprint_error_dev(sc->sc_dev, "could not allocate ICT table\n");
		goto fail2;
	}

	/* TX scheduler rings must be aligned on a 1KB boundary. */
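	/* One struct iwm_agn_scd_bc_tbl (byte count table) per TX queue. */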
	err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
	    __arraycount(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate TX scheduler rings\n");
		goto fail3;
	}

	for (txq_i = 0; txq_i < __arraycount(sc->txq); txq_i++) {
		err = iwm_alloc_tx_ring(sc, &sc->txq[txq_i], txq_i);
		if (err) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate TX ring %d\n", txq_i);
			goto fail4;
		}
	}

	err = iwm_alloc_rx_ring(sc, &sc->rxq);
	if (err) {
		aprint_error_dev(sc->sc_dev, "could not allocate RX ring\n");
		goto fail4;
	}

	/* Clear pending interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);

	if ((err = sysctl_createv(&sc->sc_clog, 0, NULL, &node,
	    0, CTLTYPE_NODE, device_xname(sc->sc_dev),
	    SYSCTL_DESCR("iwm per-controller controls"),
	    NULL, 0, NULL, 0,
	    CTL_HW, iwm_sysctl_root_num, CTL_CREATE,
	    CTL_EOL)) != 0) {
		aprint_normal_dev(sc->sc_dev,
		    "couldn't create iwm per-controller sysctl node\n");
	}
	if (err == 0) {
		int iwm_nodenum = node->sysctl_num;

		/* Reload firmware sysctl node */
		if ((err = sysctl_createv(&sc->sc_clog, 0, NULL, &node,
		    CTLFLAG_READWRITE, CTLTYPE_INT, "fw_loaded",
		    SYSCTL_DESCR("Reload firmware"),
		    iwm_sysctl_fw_loaded_handler, 0, (void *)sc, 0,
		    CTL_HW, iwm_sysctl_root_num, iwm_nodenum, CTL_CREATE,
		    CTL_EOL)) != 0) {
			aprint_normal_dev(sc->sc_dev,
			    "couldn't create fw_loaded sysctl node\n");
		}
	}

	/*
	 * Attach interface
	 */
	ic->ic_ifp = ifp;
	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
	ic->ic_state = IEEE80211_S_INIT;

	/* Set device capabilities. */
	ic->ic_caps =
	    IEEE80211_C_WEP |		/* WEP */
	    IEEE80211_C_WPA |		/* 802.11i */
#ifdef notyet
	    IEEE80211_C_SCANALL |	/* device scans all channels at once */
	    IEEE80211_C_SCANALLBAND |	/* device scans all bands at once */
#endif
	    IEEE80211_C_SHSLOT |	/* short slot time supported */
	    IEEE80211_C_SHPREAMBLE;	/* short preamble supported */

#ifndef IEEE80211_NO_HT
	ic->ic_htcaps = IEEE80211_HTCAP_SGI20;
	ic->ic_htxcaps = 0;
	ic->ic_txbfcaps = 0;
	ic->ic_aselcaps = 0;
	ic->ic_ampdu_params = (IEEE80211_AMPDU_PARAM_SS_4 | 0x3 /* 64k */);
#endif

	/* all hardware can do 2.4GHz band */
	ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b;
	ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g;

	for (int i = 0; i < __arraycount(sc->sc_phyctxt); i++) {
		sc->sc_phyctxt[i].id = i;
	}

	sc->sc_amrr.amrr_min_success_threshold = 1;
	sc->sc_amrr.amrr_max_success_threshold = 15;

	/* IBSS channel undefined for now. */
	ic->ic_ibss_chan = &ic->ic_channels[1];

#if 0
	ic->ic_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
#endif

	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = iwm_init;
	ifp->if_stop = iwm_stop;
	ifp->if_ioctl = iwm_ioctl;
	ifp->if_start = iwm_start;
	ifp->if_watchdog = iwm_watchdog;
	IFQ_SET_READY(&ifp->if_snd);
	memcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);

	if_initialize(ifp);
#if 0
	ieee80211_ifattach(ic);
#else
	ether_ifattach(ifp, ic->ic_myaddr);	/* XXX */
#endif
	/* Use common softint-based if_input */
	ifp->if_percpuq = if_percpuq_create(ifp);
	if_register(ifp);

	callout_init(&sc->sc_calib_to, 0);
	callout_setfunc(&sc->sc_calib_to, iwm_calib_timeout, sc);
	callout_init(&sc->sc_led_blink_to, 0);
	callout_setfunc(&sc->sc_led_blink_to, iwm_led_blink_timeout, sc);
#ifndef IEEE80211_NO_HT
	if (workqueue_create(&sc->sc_setratewq, "iwmsr",
	    iwm_setrates_task, sc, PRI_NONE, IPL_NET, 0))
		panic("%s: could not create workqueue: setrates",
		    device_xname(self));
	if (workqueue_create(&sc->sc_bawq, "iwmba",
	    iwm_ba_task, sc, PRI_NONE, IPL_NET, 0))
		panic("%s: could not create workqueue: blockack",
		    device_xname(self));
	if (workqueue_create(&sc->sc_htprowq, "iwmhtpro",
	    iwm_htprot_task, sc, PRI_NONE, IPL_NET, 0))
		panic("%s: could not create workqueue: htprot",
		    device_xname(self));
#endif

	if (pmf_device_register(self, NULL, NULL))
		pmf_class_network_register(self, ifp);
	else
		aprint_error_dev(self, "couldn't establish power handler\n");
	/*
	 * We can't do normal attach before the file system is mounted
	 * because we cannot read the MAC address without loading the
	 * firmware from disk.  So we postpone until mountroot is done.
	 * Notably, this requires a full driver unload/load cycle (or
	 * reboot) if the firmware is not present when the hook runs.
	 */
	config_mountroot(self, iwm_attach_hook);

	return;

fail4:	while (--txq_i >= 0)
		iwm_free_tx_ring(sc, &sc->txq[txq_i]);
	iwm_free_rx_ring(sc, &sc->rxq);
	iwm_dma_contig_free(&sc->sched_dma);
fail3:	if (sc->ict_dma.vaddr != NULL)
		iwm_dma_contig_free(&sc->ict_dma);
fail2:	iwm_dma_contig_free(&sc->kw_dma);
fail1:	iwm_dma_contig_free(&sc->fw_dma);
}

void
iwm_radiotap_attach(struct iwm_softc *sc)
{
	struct ifnet *ifp = IC2IFP(&sc->sc_ic);

	bpf_attach2(ifp, DLT_IEEE802_11_RADIO,
	    sizeof (struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN,
	    &sc->sc_drvbpf);

	sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
	sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
	sc->sc_rxtap.wr_ihdr.it_present = htole32(IWM_RX_RADIOTAP_PRESENT);

	sc->sc_txtap_len = sizeof sc->sc_txtapu;
	sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
	sc->sc_txtap.wt_ihdr.it_present = htole32(IWM_TX_RADIOTAP_PRESENT);
}

#if 0
static void
iwm_init_task(void *arg)
{
	struct iwm_softc *sc = arg;
	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
	int s;

	rw_enter_write(&sc->ioctl_rwl);
	s = splnet();

	iwm_stop(ifp, 0);
	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP)
		iwm_init(ifp);

	splx(s);
	rw_exit(&sc->ioctl_rwl);
}

static void
iwm_wakeup(struct iwm_softc *sc)
{
	pcireg_t reg;

	/* Clear device-specific "PCI retry timeout" register (41h). */
	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);

	iwm_init_task(sc);
}

static int
iwm_activate(device_t self, enum devact act)
{
	struct iwm_softc *sc = device_private(self);
	struct ifnet *ifp = IC2IFP(&sc->sc_ic);

	switch (act) {
	case DVACT_DEACTIVATE:
		if (ifp->if_flags & IFF_RUNNING)
			iwm_stop(ifp, 0);
		return 0;
	default:
		return EOPNOTSUPP;
	}
}
#endif

CFATTACH_DECL_NEW(iwm, sizeof(struct iwm_softc), iwm_match, iwm_attach,
    NULL, NULL);

static int
iwm_sysctl_fw_loaded_handler(SYSCTLFN_ARGS)
{
	struct sysctlnode node;
	struct iwm_softc *sc;
	int err, t;

	node = *rnode;
	sc = node.sysctl_data;
	t = ISSET(sc->sc_flags, IWM_FLAG_FW_LOADED) ? 1 : 0;
	node.sysctl_data = &t;
	err = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (err || newp == NULL)
		return err;

	if (t == 0)
		CLR(sc->sc_flags, IWM_FLAG_FW_LOADED);
	return 0;
}
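
/*
 * Example (assuming this controller attached as iwm0):
 *
 *	sysctl -w hw.iwm0.fw_loaded=0
 *
 * clears IWM_FLAG_FW_LOADED, so the firmware file is loaded from disk
 * again the next time the interface is initialized.
 */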

SYSCTL_SETUP(sysctl_iwm, "sysctl iwm(4) subtree setup")
{
	const struct sysctlnode *rnode;
#ifdef IWM_DEBUG
	const struct sysctlnode *cnode;
#endif /* IWM_DEBUG */
	int rc;

	if ((rc = sysctl_createv(clog, 0, NULL, &rnode,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "iwm",
	    SYSCTL_DESCR("iwm global controls"),
	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
		goto err;

	iwm_sysctl_root_num = rnode->sysctl_num;

#ifdef IWM_DEBUG
	/* control debugging printfs */
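	/*
	 * Writing the node created below, e.g. "sysctl -w hw.iwm.debug=1",
	 * raises iwm_debug, which gates this driver's debug printfs.
	 */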
	if ((rc = sysctl_createv(clog, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT,
	    "debug", SYSCTL_DESCR("Enable debugging output"),
	    NULL, 0, &iwm_debug, 0, CTL_CREATE, CTL_EOL)) != 0)
		goto err;
#endif /* IWM_DEBUG */

	return;

err:
	aprint_error("%s: sysctl_createv failed (rc = %d)\n", __func__, rc);
}
