if_iwm.c revision 1.33 1 /* $NetBSD: if_iwm.c,v 1.33 2015/05/15 08:44:15 knakahara Exp $ */
2 /* OpenBSD: if_iwm.c,v 1.39 2015/03/23 00:35:19 jsg Exp */
3
4 /*
5 * Copyright (c) 2014 genua mbh <info (at) genua.de>
6 * Copyright (c) 2014 Fixup Software Ltd.
7 *
8 * Permission to use, copy, modify, and distribute this software for any
9 * purpose with or without fee is hereby granted, provided that the above
10 * copyright notice and this permission notice appear in all copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 */
20
21 /*-
22 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
23 * which were used as the reference documentation for this implementation.
24 *
25 * Driver version we are currently based off of is
26 * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
27 *
28 ***********************************************************************
29 *
30 * This file is provided under a dual BSD/GPLv2 license. When using or
31 * redistributing this file, you may do so under either license.
32 *
33 * GPL LICENSE SUMMARY
34 *
35 * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
36 *
37 * This program is free software; you can redistribute it and/or modify
38 * it under the terms of version 2 of the GNU General Public License as
39 * published by the Free Software Foundation.
40 *
41 * This program is distributed in the hope that it will be useful, but
42 * WITHOUT ANY WARRANTY; without even the implied warranty of
43 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
44 * General Public License for more details.
45 *
46 * You should have received a copy of the GNU General Public License
47 * along with this program; if not, write to the Free Software
48 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
49 * USA
50 *
51 * The full GNU General Public License is included in this distribution
52 * in the file called COPYING.
53 *
54 * Contact Information:
55 * Intel Linux Wireless <ilw (at) linux.intel.com>
56 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
57 *
58 *
59 * BSD LICENSE
60 *
61 * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
62 * All rights reserved.
63 *
64 * Redistribution and use in source and binary forms, with or without
65 * modification, are permitted provided that the following conditions
66 * are met:
67 *
68 * * Redistributions of source code must retain the above copyright
69 * notice, this list of conditions and the following disclaimer.
70 * * Redistributions in binary form must reproduce the above copyright
71 * notice, this list of conditions and the following disclaimer in
72 * the documentation and/or other materials provided with the
73 * distribution.
74 * * Neither the name Intel Corporation nor the names of its
75 * contributors may be used to endorse or promote products derived
76 * from this software without specific prior written permission.
77 *
78 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
79 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
80 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
81 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
82 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
83 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
84 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
85 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
86 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
87 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
88 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
89 */
90
91 /*-
92 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini (at) free.fr>
93 *
94 * Permission to use, copy, modify, and distribute this software for any
95 * purpose with or without fee is hereby granted, provided that the above
96 * copyright notice and this permission notice appear in all copies.
97 *
98 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
99 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
100 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
101 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
102 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
103 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
104 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
105 */
106
107 #include <sys/cdefs.h>
108 __KERNEL_RCSID(0, "$NetBSD: if_iwm.c,v 1.33 2015/05/15 08:44:15 knakahara Exp $");
109
110 #include <sys/param.h>
111 #include <sys/conf.h>
112 #include <sys/kernel.h>
113 #include <sys/kmem.h>
114 #include <sys/mbuf.h>
115 #include <sys/mutex.h>
116 #include <sys/proc.h>
117 #include <sys/socket.h>
118 #include <sys/sockio.h>
119 #include <sys/sysctl.h>
120 #include <sys/systm.h>
121
122 #include <sys/cpu.h>
123 #include <sys/bus.h>
124 #include <sys/workqueue.h>
125 #include <machine/endian.h>
126 #include <machine/intr.h>
127
128 #include <dev/pci/pcireg.h>
129 #include <dev/pci/pcivar.h>
130 #include <dev/pci/pcidevs.h>
131 #include <dev/firmload.h>
132
133 #include <net/bpf.h>
134 #include <net/if.h>
135 #include <net/if_arp.h>
136 #include <net/if_dl.h>
137 #include <net/if_media.h>
138 #include <net/if_types.h>
139 #include <net/if_ether.h>
140
141 #include <netinet/in.h>
142 #include <netinet/in_systm.h>
143 #include <netinet/ip.h>
144
145 #include <net80211/ieee80211_var.h>
146 #include <net80211/ieee80211_amrr.h>
147 #include <net80211/ieee80211_radiotap.h>
148
149 #define DEVNAME(_s) device_xname((_s)->sc_dev)
150 #define IC2IFP(_ic_) ((_ic_)->ic_ifp)
151
152 #define le16_to_cpup(_a_) (le16toh(*(const uint16_t *)(_a_)))
153 #define le32_to_cpup(_a_) (le32toh(*(const uint32_t *)(_a_)))
154
155 #ifdef IWM_DEBUG
156 #define DPRINTF(x) do { if (iwm_debug > 0) printf x; } while (0)
157 #define DPRINTFN(n, x) do { if (iwm_debug >= (n)) printf x; } while (0)
158 int iwm_debug = 0;
159 #else
160 #define DPRINTF(x) do { ; } while (0)
161 #define DPRINTFN(n, x) do { ; } while (0)
162 #endif
163
164 #include <dev/pci/if_iwmreg.h>
165 #include <dev/pci/if_iwmvar.h>
166
/*
 * IEEE channel numbers corresponding to the channel slots in the
 * device's NVM; the slot index maps into this table.
 */
static const uint8_t iwm_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44 , 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};
/* The first 14 entries of iwm_nvm_channels are 2.4 GHz channels. */
#define IWM_NUM_2GHZ_CHANNELS	14
176
/*
 * Rate table mapping net80211 rates (in units of 500 kbps) to the PLCP
 * signal values the firmware uses.  CCK rates come first, then OFDM;
 * the IWM_RIDX_* macros below encode that split.
 */
static const struct iwm_rate {
	uint8_t rate;	/* rate in 500 kbps units */
	uint8_t plcp;	/* PLCP signal value for the firmware */
} iwm_rates[] = {
	{ 2, IWM_RATE_1M_PLCP },
	{ 4, IWM_RATE_2M_PLCP },
	{ 11, IWM_RATE_5M_PLCP },
	{ 22, IWM_RATE_11M_PLCP },
	{ 12, IWM_RATE_6M_PLCP },
	{ 18, IWM_RATE_9M_PLCP },
	{ 24, IWM_RATE_12M_PLCP },
	{ 36, IWM_RATE_18M_PLCP },
	{ 48, IWM_RATE_24M_PLCP },
	{ 72, IWM_RATE_36M_PLCP },
	{ 96, IWM_RATE_48M_PLCP },
	{ 108, IWM_RATE_54M_PLCP },
};
/* Index of the first CCK rate in iwm_rates. */
#define IWM_RIDX_CCK	0
/* Index of the first OFDM rate in iwm_rates. */
#define IWM_RIDX_OFDM	4
#define IWM_RIDX_MAX	(__arraycount(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
199
/*
 * Deferred 802.11 state-transition request, queued to a workqueue and
 * processed by iwm_newstate_cb().
 */
struct iwm_newstate_state {
	struct work ns_wk;		/* workqueue linkage */
	enum ieee80211_state ns_nstate;	/* requested new state */
	int ns_arg;			/* argument passed to newstate */
	int ns_generation;		/* detects stale/superseded requests */
};
206
207 static int iwm_store_cscheme(struct iwm_softc *, uint8_t *, size_t);
208 static int iwm_firmware_store_section(struct iwm_softc *,
209 enum iwm_ucode_type, uint8_t *, size_t);
210 static int iwm_set_default_calib(struct iwm_softc *, const void *);
211 static int iwm_read_firmware(struct iwm_softc *);
212 static uint32_t iwm_read_prph(struct iwm_softc *, uint32_t);
213 static void iwm_write_prph(struct iwm_softc *, uint32_t, uint32_t);
214 #ifdef IWM_DEBUG
215 static int iwm_read_mem(struct iwm_softc *, uint32_t, void *, int);
216 #endif
217 static int iwm_write_mem(struct iwm_softc *, uint32_t, const void *, int);
218 static int iwm_write_mem32(struct iwm_softc *, uint32_t, uint32_t);
219 static int iwm_poll_bit(struct iwm_softc *, int, uint32_t, uint32_t, int);
220 static int iwm_nic_lock(struct iwm_softc *);
221 static void iwm_nic_unlock(struct iwm_softc *);
222 static void iwm_set_bits_mask_prph(struct iwm_softc *, uint32_t, uint32_t,
223 uint32_t);
224 static void iwm_set_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
225 static void iwm_clear_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
226 static int iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *,
227 bus_size_t, bus_size_t);
228 static void iwm_dma_contig_free(struct iwm_dma_info *);
229 static int iwm_alloc_fwmem(struct iwm_softc *);
230 static void iwm_free_fwmem(struct iwm_softc *);
231 static int iwm_alloc_sched(struct iwm_softc *);
232 static void iwm_free_sched(struct iwm_softc *);
233 static int iwm_alloc_kw(struct iwm_softc *);
234 static void iwm_free_kw(struct iwm_softc *);
235 static int iwm_alloc_ict(struct iwm_softc *);
236 static void iwm_free_ict(struct iwm_softc *);
237 static int iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
238 static void iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
239 static void iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
240 static int iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
241 int);
242 static void iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
243 static void iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
244 static void iwm_enable_rfkill_int(struct iwm_softc *);
245 static int iwm_check_rfkill(struct iwm_softc *);
246 static void iwm_enable_interrupts(struct iwm_softc *);
247 static void iwm_restore_interrupts(struct iwm_softc *);
248 static void iwm_disable_interrupts(struct iwm_softc *);
249 static void iwm_ict_reset(struct iwm_softc *);
250 static int iwm_set_hw_ready(struct iwm_softc *);
251 static int iwm_prepare_card_hw(struct iwm_softc *);
252 static void iwm_apm_config(struct iwm_softc *);
253 static int iwm_apm_init(struct iwm_softc *);
254 static void iwm_apm_stop(struct iwm_softc *);
255 static int iwm_allow_mcast(struct iwm_softc *);
256 static int iwm_start_hw(struct iwm_softc *);
257 static void iwm_stop_device(struct iwm_softc *);
258 static void iwm_set_pwr(struct iwm_softc *);
259 static void iwm_mvm_nic_config(struct iwm_softc *);
260 static int iwm_nic_rx_init(struct iwm_softc *);
261 static int iwm_nic_tx_init(struct iwm_softc *);
262 static int iwm_nic_init(struct iwm_softc *);
263 static void iwm_enable_txq(struct iwm_softc *, int, int);
264 static int iwm_post_alive(struct iwm_softc *);
265 static int iwm_is_valid_channel(uint16_t);
266 static uint8_t iwm_ch_id_to_ch_index(uint16_t);
267 static uint16_t iwm_channel_id_to_papd(uint16_t);
268 static uint16_t iwm_channel_id_to_txp(struct iwm_softc *, uint16_t);
269 static int iwm_phy_db_get_section_data(struct iwm_softc *, uint32_t,
270 uint8_t **, uint16_t *, uint16_t);
271 static int iwm_send_phy_db_cmd(struct iwm_softc *, uint16_t, uint16_t,
272 void *);
/* Was declared twice; one declaration suffices. */
static int	iwm_send_phy_db_data(struct iwm_softc *);
275 static void iwm_mvm_te_v2_to_v1(const struct iwm_time_event_cmd_v2 *,
276 struct iwm_time_event_cmd_v1 *);
277 static int iwm_mvm_send_time_event_cmd(struct iwm_softc *,
278 const struct iwm_time_event_cmd_v2 *);
279 static int iwm_mvm_time_event_send_add(struct iwm_softc *,
280 struct iwm_node *, void *, struct iwm_time_event_cmd_v2 *);
281 static void iwm_mvm_protect_session(struct iwm_softc *, struct iwm_node *,
282 uint32_t, uint32_t, uint32_t);
283 static int iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
284 uint16_t, uint8_t *, uint16_t *);
285 static int iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
286 uint16_t *);
287 static void iwm_init_channel_map(struct iwm_softc *,
288 const uint16_t * const);
289 static int iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
290 const uint16_t *, const uint16_t *, uint8_t, uint8_t);
291 static int iwm_nvm_init(struct iwm_softc *);
292 static int iwm_firmware_load_chunk(struct iwm_softc *, uint32_t,
293 const uint8_t *, uint32_t);
294 static int iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
295 static int iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
296 static int iwm_fw_alive(struct iwm_softc *, uint32_t);
297 static int iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
298 static int iwm_send_phy_cfg_cmd(struct iwm_softc *);
299 static int iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
300 enum iwm_ucode_type);
301 static int iwm_run_init_mvm_ucode(struct iwm_softc *, int);
302 static int iwm_rx_addbuf(struct iwm_softc *, int, int);
303 static int iwm_mvm_calc_rssi(struct iwm_softc *, struct iwm_rx_phy_info *);
304 static int iwm_mvm_get_signal_strength(struct iwm_softc *,
305 struct iwm_rx_phy_info *);
306 static void iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
307 struct iwm_rx_packet *, struct iwm_rx_data *);
308 static int iwm_get_noise(const struct iwm_mvm_statistics_rx_non_phy *);
309 static void iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct iwm_rx_packet *,
310 struct iwm_rx_data *);
311 static void iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
312 struct iwm_rx_packet *, struct iwm_node *);
313 static void iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
314 struct iwm_rx_data *);
315 static int iwm_mvm_binding_cmd(struct iwm_softc *, struct iwm_node *,
316 uint32_t);
317 static int iwm_mvm_binding_update(struct iwm_softc *, struct iwm_node *,
318 int);
319 static int iwm_mvm_binding_add_vif(struct iwm_softc *, struct iwm_node *);
320 static void iwm_mvm_phy_ctxt_cmd_hdr(struct iwm_softc *,
321 struct iwm_mvm_phy_ctxt *, struct iwm_phy_context_cmd *,
322 uint32_t, uint32_t);
323 static void iwm_mvm_phy_ctxt_cmd_data(struct iwm_softc *,
324 struct iwm_phy_context_cmd *, struct ieee80211_channel *,
325 uint8_t, uint8_t);
326 static int iwm_mvm_phy_ctxt_apply(struct iwm_softc *,
327 struct iwm_mvm_phy_ctxt *, uint8_t, uint8_t, uint32_t,
328 uint32_t);
329 static int iwm_mvm_phy_ctxt_add(struct iwm_softc *,
330 struct iwm_mvm_phy_ctxt *, struct ieee80211_channel *,
331 uint8_t, uint8_t);
332 static int iwm_mvm_phy_ctxt_changed(struct iwm_softc *,
333 struct iwm_mvm_phy_ctxt *, struct ieee80211_channel *,
334 uint8_t, uint8_t);
335 static int iwm_send_cmd(struct iwm_softc *, struct iwm_host_cmd *);
336 static int iwm_mvm_send_cmd_pdu(struct iwm_softc *, uint8_t, uint32_t,
337 uint16_t, const void *);
338 static int iwm_mvm_send_cmd_status(struct iwm_softc *,
339 struct iwm_host_cmd *, uint32_t *);
340 static int iwm_mvm_send_cmd_pdu_status(struct iwm_softc *, uint8_t,
341 uint16_t, const void *, uint32_t *);
342 static void iwm_free_resp(struct iwm_softc *, struct iwm_host_cmd *);
343 static void iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
344 #if 0
345 static void iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
346 uint16_t);
347 #endif
348 static const struct iwm_rate *iwm_tx_fill_cmd(struct iwm_softc *,
349 struct iwm_node *, struct ieee80211_frame *,
350 struct iwm_tx_cmd *);
351 static int iwm_tx(struct iwm_softc *, struct mbuf *,
352 struct ieee80211_node *, int);
353 static int iwm_mvm_beacon_filter_send_cmd(struct iwm_softc *,
354 struct iwm_beacon_filter_cmd *);
355 static void iwm_mvm_beacon_filter_set_cqm_params(struct iwm_softc *,
356 struct iwm_node *, struct iwm_beacon_filter_cmd *);
357 static int iwm_mvm_update_beacon_abort(struct iwm_softc *,
358 struct iwm_node *, int);
359 static void iwm_mvm_power_log(struct iwm_softc *,
360 struct iwm_mac_power_cmd *);
361 static void iwm_mvm_power_build_cmd(struct iwm_softc *, struct iwm_node *,
362 struct iwm_mac_power_cmd *);
363 static int iwm_mvm_power_mac_update_mode(struct iwm_softc *,
364 struct iwm_node *);
365 static int iwm_mvm_power_update_device(struct iwm_softc *);
366 static int iwm_mvm_enable_beacon_filter(struct iwm_softc *,
367 struct iwm_node *);
368 static int iwm_mvm_disable_beacon_filter(struct iwm_softc *,
369 struct iwm_node *);
370 static void iwm_mvm_add_sta_cmd_v6_to_v5(struct iwm_mvm_add_sta_cmd_v6 *,
371 struct iwm_mvm_add_sta_cmd_v5 *);
372 static int iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *,
373 struct iwm_mvm_add_sta_cmd_v6 *, int *);
374 static int iwm_mvm_sta_send_to_fw(struct iwm_softc *, struct iwm_node *,
375 int);
376 static int iwm_mvm_add_sta(struct iwm_softc *, struct iwm_node *);
377 static int iwm_mvm_update_sta(struct iwm_softc *, struct iwm_node *);
378 static int iwm_mvm_add_int_sta_common(struct iwm_softc *,
379 struct iwm_int_sta *, const uint8_t *, uint16_t, uint16_t);
380 static int iwm_mvm_add_aux_sta(struct iwm_softc *);
381 static uint16_t iwm_mvm_scan_rx_chain(struct iwm_softc *);
382 static uint32_t iwm_mvm_scan_max_out_time(struct iwm_softc *, uint32_t, int);
383 static uint32_t iwm_mvm_scan_suspend_time(struct iwm_softc *, int);
384 static uint32_t iwm_mvm_scan_rxon_flags(struct iwm_softc *, int);
385 static uint32_t iwm_mvm_scan_rate_n_flags(struct iwm_softc *, int, int);
386 static uint16_t iwm_mvm_get_active_dwell(struct iwm_softc *, int, int);
387 static uint16_t iwm_mvm_get_passive_dwell(struct iwm_softc *, int);
388 static int iwm_mvm_scan_fill_channels(struct iwm_softc *,
389 struct iwm_scan_cmd *, int, int, int);
390 static uint16_t iwm_mvm_fill_probe_req(struct iwm_softc *,
391 struct ieee80211_frame *, const uint8_t *, int,
392 const uint8_t *, int, const uint8_t *, int, int);
393 static int iwm_mvm_scan_request(struct iwm_softc *, int, int, uint8_t *,
394 int);
395 static void iwm_mvm_ack_rates(struct iwm_softc *, struct iwm_node *, int *,
396 int *);
397 static void iwm_mvm_mac_ctxt_cmd_common(struct iwm_softc *,
398 struct iwm_node *, struct iwm_mac_ctx_cmd *, uint32_t);
399 static int iwm_mvm_mac_ctxt_send_cmd(struct iwm_softc *,
400 struct iwm_mac_ctx_cmd *);
401 static void iwm_mvm_mac_ctxt_cmd_fill_sta(struct iwm_softc *,
402 struct iwm_node *, struct iwm_mac_data_sta *, int);
403 static int iwm_mvm_mac_ctxt_cmd_station(struct iwm_softc *,
404 struct iwm_node *, uint32_t);
405 static int iwm_mvm_mac_ctx_send(struct iwm_softc *, struct iwm_node *,
406 uint32_t);
407 static int iwm_mvm_mac_ctxt_add(struct iwm_softc *, struct iwm_node *);
408 static int iwm_mvm_mac_ctxt_changed(struct iwm_softc *, struct iwm_node *);
409 static int iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_node *);
410 static int iwm_auth(struct iwm_softc *);
411 static int iwm_assoc(struct iwm_softc *);
412 static int iwm_release(struct iwm_softc *, struct iwm_node *);
413 static void iwm_calib_timeout(void *);
414 static void iwm_setrates(struct iwm_node *);
415 static int iwm_media_change(struct ifnet *);
416 static void iwm_newstate_cb(struct work *, void *);
417 static int iwm_newstate(struct ieee80211com *, enum ieee80211_state, int);
418 static void iwm_endscan_cb(struct work *, void *);
419 static int iwm_init_hw(struct iwm_softc *);
420 static int iwm_init(struct ifnet *);
421 static void iwm_start(struct ifnet *);
422 static void iwm_stop(struct ifnet *, int);
423 static void iwm_watchdog(struct ifnet *);
424 static int iwm_ioctl(struct ifnet *, u_long, void *);
425 #ifdef IWM_DEBUG
426 static const char *iwm_desc_lookup(uint32_t);
427 static void iwm_nic_error(struct iwm_softc *);
428 #endif
429 static void iwm_notif_intr(struct iwm_softc *);
430 static int iwm_intr(void *);
431 static int iwm_preinit(struct iwm_softc *);
432 static void iwm_attach_hook(device_t);
433 static void iwm_attach(device_t, device_t, void *);
434 #if 0
435 static void iwm_init_task(void *);
436 static int iwm_activate(device_t, enum devact);
437 static void iwm_wakeup(struct iwm_softc *);
438 #endif
439 static void iwm_radiotap_attach(struct iwm_softc *);
440
441 static int
442 iwm_firmload(struct iwm_softc *sc)
443 {
444 struct iwm_fw_info *fw = &sc->sc_fw;
445 firmware_handle_t fwh;
446 int error;
447
448 /* Open firmware image. */
449 if ((error = firmware_open("if_iwm", sc->sc_fwname, &fwh)) != 0) {
450 aprint_error_dev(sc->sc_dev,
451 "could not get firmware handle %s\n", sc->sc_fwname);
452 return error;
453 }
454
455 fw->fw_rawsize = firmware_get_size(fwh);
456 /*
457 * Well, this is how the Linux driver checks it ....
458 */
459 if (fw->fw_rawsize < sizeof(uint32_t)) {
460 aprint_error_dev(sc->sc_dev,
461 "firmware too short: %zd bytes\n", fw->fw_rawsize);
462 error = EINVAL;
463 goto out;
464 }
465
466 /* some sanity */
467 if (fw->fw_rawsize > IWM_FWMAXSIZE) {
468 aprint_error_dev(sc->sc_dev,
469 "firmware size is ridiculous: %zd bytes\n",
470 fw->fw_rawsize);
471 error = EINVAL;
472 goto out;
473 }
474
475 /* Read the firmware. */
476 fw->fw_rawdata = kmem_alloc(fw->fw_rawsize, KM_SLEEP);
477 if (fw->fw_rawdata == NULL) {
478 aprint_error_dev(sc->sc_dev,
479 "not enough memory to stock firmware %s\n", sc->sc_fwname);
480 error = ENOMEM;
481 goto out;
482 }
483 error = firmware_read(fwh, 0, fw->fw_rawdata, fw->fw_rawsize);
484 if (error) {
485 aprint_error_dev(sc->sc_dev,
486 "could not read firmware %s\n", sc->sc_fwname);
487 goto out;
488 }
489
490 out:
491 /* caller will release memory, if necessary */
492
493 firmware_close(fwh);
494 return error;
495 }
496
497 /*
498 * just maintaining status quo.
499 */
500 static void
501 iwm_fix_channel(struct ieee80211com *ic, struct mbuf *m)
502 {
503 struct ieee80211_frame *wh;
504 uint8_t subtype;
505 uint8_t *frm, *efrm;
506
507 wh = mtod(m, struct ieee80211_frame *);
508
509 if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_MGT)
510 return;
511
512 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
513
514 if (subtype != IEEE80211_FC0_SUBTYPE_BEACON &&
515 subtype != IEEE80211_FC0_SUBTYPE_PROBE_RESP)
516 return;
517
518 frm = (uint8_t *)(wh + 1);
519 efrm = mtod(m, uint8_t *) + m->m_len;
520
521 frm += 12; /* skip tstamp, bintval and capinfo fields */
522 while (frm < efrm) {
523 if (*frm == IEEE80211_ELEMID_DSPARMS) {
524 #if IEEE80211_CHAN_MAX < 255
525 if (frm[2] <= IEEE80211_CHAN_MAX)
526 #endif
527 ic->ic_curchan = &ic->ic_channels[frm[2]];
528 }
529 frm += frm[1] + 2;
530 }
531 }
532
533 /*
534 * Firmware parser.
535 */
536
537 static int
538 iwm_store_cscheme(struct iwm_softc *sc, uint8_t *data, size_t dlen)
539 {
540 struct iwm_fw_cscheme_list *l = (void *)data;
541
542 if (dlen < sizeof(*l) ||
543 dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
544 return EINVAL;
545
546 /* we don't actually store anything for now, always use s/w crypto */
547
548 return 0;
549 }
550
551 static int
552 iwm_firmware_store_section(struct iwm_softc *sc,
553 enum iwm_ucode_type type, uint8_t *data, size_t dlen)
554 {
555 struct iwm_fw_sects *fws;
556 struct iwm_fw_onesect *fwone;
557
558 if (type >= IWM_UCODE_TYPE_MAX)
559 return EINVAL;
560 if (dlen < sizeof(uint32_t))
561 return EINVAL;
562
563 fws = &sc->sc_fw.fw_sects[type];
564 if (fws->fw_count >= IWM_UCODE_SECT_MAX)
565 return EINVAL;
566
567 fwone = &fws->fw_sect[fws->fw_count];
568
569 /* first 32bit are device load offset */
570 memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));
571
572 /* rest is data */
573 fwone->fws_data = data + sizeof(uint32_t);
574 fwone->fws_len = dlen - sizeof(uint32_t);
575
576 /* for freeing the buffer during driver unload */
577 fwone->fws_alloc = data;
578 fwone->fws_allocsize = dlen;
579
580 fws->fw_count++;
581 fws->fw_totlen += fwone->fws_len;
582
583 return 0;
584 }
585
/* iwlwifi: iwl-drv.c */
/*
 * Wire layout of the IWM_UCODE_TLV_DEF_CALIB TLV payload: the ucode
 * image type it applies to plus its default calibration control words.
 */
struct iwm_tlv_calib_data {
	uint32_t ucode_type;		/* little-endian on the wire */
	struct iwm_tlv_calib_ctrl calib;
} __packed;
591
592 static int
593 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
594 {
595 const struct iwm_tlv_calib_data *def_calib = data;
596 uint32_t ucode_type = le32toh(def_calib->ucode_type);
597
598 if (ucode_type >= IWM_UCODE_TYPE_MAX) {
599 DPRINTF(("%s: Wrong ucode_type %u for default "
600 "calibration.\n", DEVNAME(sc), ucode_type));
601 return EINVAL;
602 }
603
604 sc->sc_default_calib[ucode_type].flow_trigger =
605 def_calib->calib.flow_trigger;
606 sc->sc_default_calib[ucode_type].event_trigger =
607 def_calib->calib.event_trigger;
608
609 return 0;
610 }
611
612 static int
613 iwm_read_firmware(struct iwm_softc *sc)
614 {
615 struct iwm_fw_info *fw = &sc->sc_fw;
616 struct iwm_tlv_ucode_header *uhdr;
617 struct iwm_ucode_tlv tlv;
618 enum iwm_ucode_tlv_type tlv_type;
619 uint8_t *data;
620 int error, status;
621 size_t len;
622
623 if (fw->fw_status == IWM_FW_STATUS_NONE) {
624 fw->fw_status = IWM_FW_STATUS_INPROGRESS;
625 } else {
626 while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
627 tsleep(&sc->sc_fw, 0, "iwmfwp", 0);
628 }
629 status = fw->fw_status;
630
631 if (status == IWM_FW_STATUS_DONE)
632 return 0;
633
634 /*
635 * Load firmware into driver memory.
636 * fw_rawdata and fw_rawsize will be set.
637 */
638 error = iwm_firmload(sc);
639 if (error != 0) {
640 aprint_error_dev(sc->sc_dev,
641 "could not read firmware %s (error %d)\n",
642 sc->sc_fwname, error);
643 goto out;
644 }
645
646 /*
647 * Parse firmware contents
648 */
649
650 uhdr = (void *)fw->fw_rawdata;
651 if (*(uint32_t *)fw->fw_rawdata != 0
652 || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
653 aprint_error_dev(sc->sc_dev, "invalid firmware %s\n",
654 sc->sc_fwname);
655 error = EINVAL;
656 goto out;
657 }
658
659 sc->sc_fwver = le32toh(uhdr->ver);
660 data = uhdr->data;
661 len = fw->fw_rawsize - sizeof(*uhdr);
662
663 while (len >= sizeof(tlv)) {
664 size_t tlv_len;
665 void *tlv_data;
666
667 memcpy(&tlv, data, sizeof(tlv));
668 tlv_len = le32toh(tlv.length);
669 tlv_type = le32toh(tlv.type);
670
671 len -= sizeof(tlv);
672 data += sizeof(tlv);
673 tlv_data = data;
674
675 if (len < tlv_len) {
676 aprint_error_dev(sc->sc_dev,
677 "firmware too short: %zu bytes\n", len);
678 error = EINVAL;
679 goto parse_out;
680 }
681
682 switch ((int)tlv_type) {
683 case IWM_UCODE_TLV_PROBE_MAX_LEN:
684 if (tlv_len < sizeof(uint32_t)) {
685 error = EINVAL;
686 goto parse_out;
687 }
688 sc->sc_capa_max_probe_len
689 = le32toh(*(uint32_t *)tlv_data);
690 /* limit it to something sensible */
691 if (sc->sc_capa_max_probe_len > (1<<16)) {
692 DPRINTF(("%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
693 "ridiculous\n", DEVNAME(sc)));
694 error = EINVAL;
695 goto parse_out;
696 }
697 break;
698 case IWM_UCODE_TLV_PAN:
699 if (tlv_len) {
700 error = EINVAL;
701 goto parse_out;
702 }
703 sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
704 break;
705 case IWM_UCODE_TLV_FLAGS:
706 if (tlv_len < sizeof(uint32_t)) {
707 error = EINVAL;
708 goto parse_out;
709 }
710 /*
711 * Apparently there can be many flags, but Linux driver
712 * parses only the first one, and so do we.
713 *
714 * XXX: why does this override IWM_UCODE_TLV_PAN?
715 * Intentional or a bug? Observations from
716 * current firmware file:
717 * 1) TLV_PAN is parsed first
718 * 2) TLV_FLAGS contains TLV_FLAGS_PAN
719 * ==> this resets TLV_PAN to itself... hnnnk
720 */
721 sc->sc_capaflags = le32toh(*(uint32_t *)tlv_data);
722 break;
723 case IWM_UCODE_TLV_CSCHEME:
724 if ((error = iwm_store_cscheme(sc,
725 tlv_data, tlv_len)) != 0)
726 goto parse_out;
727 break;
728 case IWM_UCODE_TLV_NUM_OF_CPU:
729 if (tlv_len != sizeof(uint32_t)) {
730 error = EINVAL;
731 goto parse_out;
732 }
733 if (le32toh(*(uint32_t*)tlv_data) != 1) {
734 DPRINTF(("%s: driver supports "
735 "only TLV_NUM_OF_CPU == 1", DEVNAME(sc)));
736 error = EINVAL;
737 goto parse_out;
738 }
739 break;
740 case IWM_UCODE_TLV_SEC_RT:
741 if ((error = iwm_firmware_store_section(sc,
742 IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len)) != 0)
743 goto parse_out;
744 break;
745 case IWM_UCODE_TLV_SEC_INIT:
746 if ((error = iwm_firmware_store_section(sc,
747 IWM_UCODE_TYPE_INIT, tlv_data, tlv_len)) != 0)
748 goto parse_out;
749 break;
750 case IWM_UCODE_TLV_SEC_WOWLAN:
751 if ((error = iwm_firmware_store_section(sc,
752 IWM_UCODE_TYPE_WOW, tlv_data, tlv_len)) != 0)
753 goto parse_out;
754 break;
755 case IWM_UCODE_TLV_DEF_CALIB:
756 if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
757 error = EINVAL;
758 goto parse_out;
759 }
760 if ((error = iwm_set_default_calib(sc, tlv_data)) != 0)
761 goto parse_out;
762 break;
763 case IWM_UCODE_TLV_PHY_SKU:
764 if (tlv_len != sizeof(uint32_t)) {
765 error = EINVAL;
766 goto parse_out;
767 }
768 sc->sc_fw_phy_config = le32toh(*(uint32_t *)tlv_data);
769 break;
770
771 case IWM_UCODE_TLV_API_CHANGES_SET:
772 case IWM_UCODE_TLV_ENABLED_CAPABILITIES:
773 /* ignore, not used by current driver */
774 break;
775
776 default:
777 DPRINTF(("%s: unknown firmware section %d, abort\n",
778 DEVNAME(sc), tlv_type));
779 error = EINVAL;
780 goto parse_out;
781 }
782
783 len -= roundup(tlv_len, 4);
784 data += roundup(tlv_len, 4);
785 }
786
787 KASSERT(error == 0);
788
789 parse_out:
790 if (error) {
791 aprint_error_dev(sc->sc_dev,
792 "firmware parse error, section type %d\n", tlv_type);
793 }
794
795 if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
796 aprint_error_dev(sc->sc_dev,
797 "device uses unsupported power ops\n");
798 error = ENOTSUP;
799 }
800
801 out:
802 if (error)
803 fw->fw_status = IWM_FW_STATUS_NONE;
804 else
805 fw->fw_status = IWM_FW_STATUS_DONE;
806 wakeup(&sc->sc_fw);
807
808 if (error && fw->fw_rawdata != NULL) {
809 kmem_free(fw->fw_rawdata, fw->fw_rawsize);
810 fw->fw_rawdata = NULL;
811 }
812 return error;
813 }
814
815 /*
816 * basic device access
817 */
818
819 static uint32_t
820 iwm_read_prph(struct iwm_softc *sc, uint32_t addr)
821 {
822 IWM_WRITE(sc,
823 IWM_HBUS_TARG_PRPH_RADDR, ((addr & 0x000fffff) | (3 << 24)));
824 IWM_BARRIER_READ_WRITE(sc);
825 return IWM_READ(sc, IWM_HBUS_TARG_PRPH_RDAT);
826 }
827
828 static void
829 iwm_write_prph(struct iwm_softc *sc, uint32_t addr, uint32_t val)
830 {
831 IWM_WRITE(sc,
832 IWM_HBUS_TARG_PRPH_WADDR, ((addr & 0x000fffff) | (3 << 24)));
833 IWM_BARRIER_WRITE(sc);
834 IWM_WRITE(sc, IWM_HBUS_TARG_PRPH_WDAT, val);
835 }
836
#ifdef IWM_DEBUG
/* iwlwifi: pcie/trans.c */
/*
 * Read 'dwords' 32-bit words of device memory at 'addr' into 'buf'.
 * Returns 0, or EBUSY if the NIC could not be locked.  Successive
 * reads of MEM_RDAT return consecutive words (cf. the write path,
 * where WADDR auto-increments).
 */
static int
iwm_read_mem(struct iwm_softc *sc, uint32_t addr, void *buf, int dwords)
{
	uint32_t *out = buf;
	int i;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	IWM_WRITE(sc, IWM_HBUS_TARG_MEM_RADDR, addr);
	for (i = 0; i < dwords; i++)
		out[i] = IWM_READ(sc, IWM_HBUS_TARG_MEM_RDAT);
	iwm_nic_unlock(sc);

	return 0;
}
#endif
856
857 /* iwlwifi: pcie/trans.c */
858 static int
859 iwm_write_mem(struct iwm_softc *sc, uint32_t addr, const void *buf, int dwords)
860 {
861 int offs;
862 const uint32_t *vals = buf;
863
864 if (iwm_nic_lock(sc)) {
865 IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WADDR, addr);
866 /* WADDR auto-increments */
867 for (offs = 0; offs < dwords; offs++) {
868 uint32_t val = vals ? vals[offs] : 0;
869 IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WDAT, val);
870 }
871 iwm_nic_unlock(sc);
872 } else {
873 DPRINTF(("%s: write_mem failed\n", DEVNAME(sc)));
874 return EBUSY;
875 }
876 return 0;
877 }
878
/* Convenience wrapper: write a single 32-bit word of device memory. */
static int
iwm_write_mem32(struct iwm_softc *sc, uint32_t addr, uint32_t val)
{
	return iwm_write_mem(sc, addr, &val, 1);
}
884
/*
 * Busy-wait until the masked register value matches 'bits', or 'timo'
 * microseconds (in 10us steps) elapse.  Returns 1 on match, 0 on
 * timeout.
 */
static int
iwm_poll_bit(struct iwm_softc *sc, int reg,
	uint32_t bits, uint32_t mask, int timo)
{
	const uint32_t want = bits & mask;

	while ((IWM_READ(sc, reg) & mask) != want) {
		if (timo < 10)
			return 0;
		timo -= 10;
		DELAY(10);
	}
	return 1;
}
900
static int
iwm_nic_lock(struct iwm_softc *sc)
{
	int rv = 0;

	/*
	 * Request MAC access: ask the device to stay awake so that
	 * subsequent register/SRAM accesses are reliable.  Pair with
	 * iwm_nic_unlock().  Returns 1 on success, 0 on failure.
	 */
	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/*
	 * Wait (up to 15000us) for the clock to be ready and the
	 * device to not be on its way to sleep.
	 */
	if (iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
	     | IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP, 15000)) {
		rv = 1;
	} else {
		/* jolt: force an NMI to try to wake the device up */
		IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_FORCE_NMI);
	}

	return rv;
}
921
static void
iwm_nic_unlock(struct iwm_softc *sc)
{
	/* Drop the MAC access request taken by iwm_nic_lock(). */
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}
928
929 static void
930 iwm_set_bits_mask_prph(struct iwm_softc *sc,
931 uint32_t reg, uint32_t bits, uint32_t mask)
932 {
933 uint32_t val;
934
935 /* XXX: no error path? */
936 if (iwm_nic_lock(sc)) {
937 val = iwm_read_prph(sc, reg) & mask;
938 val |= bits;
939 iwm_write_prph(sc, reg, val);
940 iwm_nic_unlock(sc);
941 }
942 }
943
static void
iwm_set_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
{
	/* Set 'bits' in periphery register 'reg', preserving the rest. */
	iwm_set_bits_mask_prph(sc, reg, bits, ~0);
}
949
static void
iwm_clear_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
{
	/* Clear 'bits' in periphery register 'reg', preserving the rest. */
	iwm_set_bits_mask_prph(sc, reg, 0, ~bits);
}
955
956 /*
957 * DMA resource routines
958 */
959
static int
iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
	bus_size_t size, bus_size_t alignment)
{
	int nsegs, error;
	void *va;

	/*
	 * Allocate a physically contiguous, zeroed DMA area of 'size'
	 * bytes with the given alignment, recording the map, kernel
	 * vaddr and bus paddr in 'dma'.  On failure everything
	 * acquired so far is torn down via iwm_dma_contig_free().
	 */
	dma->tag = tag;
	dma->size = size;

	/* One segment only: the area must be contiguous. */
	error = bus_dmamap_create(tag, size, 1, size, 0, BUS_DMA_NOWAIT,
	    &dma->map);
	if (error != 0)
		goto fail;

	error = bus_dmamem_alloc(tag, size, alignment, 0, &dma->seg, 1, &nsegs,
	    BUS_DMA_NOWAIT);
	if (error != 0)
		goto fail;

	error = bus_dmamem_map(tag, &dma->seg, 1, size, &va,
	    BUS_DMA_NOWAIT);
	if (error != 0)
		goto fail;
	dma->vaddr = va;

	error = bus_dmamap_load(tag, dma->map, dma->vaddr, size, NULL,
	    BUS_DMA_NOWAIT);
	if (error != 0)
		goto fail;

	/* Hand the device a zeroed buffer. */
	memset(dma->vaddr, 0, size);
	bus_dmamap_sync(tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE);
	dma->paddr = dma->map->dm_segs[0].ds_addr;

	return 0;

fail:	iwm_dma_contig_free(dma);
	return error;
}
1000
static void
iwm_dma_contig_free(struct iwm_dma_info *dma)
{
	/*
	 * Tear down an area set up by iwm_dma_contig_alloc().  Safe on
	 * a partially constructed 'dma': each stage is only undone if
	 * it was reached (map first, then vaddr).
	 */
	if (dma->map != NULL) {
		if (dma->vaddr != NULL) {
			bus_dmamap_sync(dma->tag, dma->map, 0, dma->size,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(dma->tag, dma->map);
			bus_dmamem_unmap(dma->tag, dma->vaddr, dma->size);
			bus_dmamem_free(dma->tag, &dma->seg, 1);
			dma->vaddr = NULL;
		}
		bus_dmamap_destroy(dma->tag, dma->map);
		dma->map = NULL;
	}
}
1017
1018 /* fwmem is used to load firmware onto the card */
static int
iwm_alloc_fwmem(struct iwm_softc *sc)
{
	/*
	 * Staging area for firmware sections being uploaded to the
	 * card; sized by sc_fwdmasegsz.
	 * Must be aligned on a 16-byte boundary.
	 */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
	    sc->sc_fwdmasegsz, 16);
}
1026
static void
iwm_free_fwmem(struct iwm_softc *sc)
{
	/* Release the firmware-upload DMA area. */
	iwm_dma_contig_free(&sc->fw_dma);
}
1032
1033 /* tx scheduler rings. not used? */
1034 static int
1035 iwm_alloc_sched(struct iwm_softc *sc)
1036 {
1037 int rv;
1038
1039 /* TX scheduler rings must be aligned on a 1KB boundary. */
1040 rv = iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
1041 __arraycount(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
1042 return rv;
1043 }
1044
static void
iwm_free_sched(struct iwm_softc *sc)
{
	/* Release the TX scheduler DMA area. */
	iwm_dma_contig_free(&sc->sched_dma);
}
1050
1051 /* keep-warm page is used internally by the card. see iwl-fh.h for more info */
static int
iwm_alloc_kw(struct iwm_softc *sc)
{
	/* One page, page-aligned. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
}
1057
static void
iwm_free_kw(struct iwm_softc *sc)
{
	/* Release the keep-warm page. */
	iwm_dma_contig_free(&sc->kw_dma);
}
1063
1064 /* interrupt cause table */
static int
iwm_alloc_ict(struct iwm_softc *sc)
{
	/*
	 * The ICT base address register only holds the bits above
	 * IWM_ICT_PADDR_SHIFT, hence the alignment requirement.
	 */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
	    IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
}
1071
static void
iwm_free_ict(struct iwm_softc *sc)
{
	/* Release the interrupt cause table. */
	iwm_dma_contig_free(&sc->ict_dma);
}
1077
static int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	bus_size_t size;
	int i, error;

	/*
	 * Set up the RX ring: the descriptor array, the status area,
	 * and a DMA map plus receive buffer for each of the
	 * IWM_RX_RING_COUNT slots.  On any failure everything
	 * allocated so far is released by iwm_free_rx_ring().
	 */
	ring->cur = 0;

	/*
	 * Allocate RX descriptors (256-byte aligned); one 32-bit
	 * word per slot.
	 */
	size = IWM_RX_RING_COUNT * sizeof(uint32_t);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate RX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/* Allocate RX status area (16-byte aligned). */
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
	    sizeof(*ring->stat), 16);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate RX status DMA memory\n");
		goto fail;
	}
	ring->stat = ring->stat_dma.vaddr;

	/*
	 * Allocate and map RX buffers.
	 */
	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];

		memset(data, 0, sizeof(*data));
		error = bus_dmamap_create(sc->sc_dmat, IWM_RBUF_SIZE, 1,
		    IWM_RBUF_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &data->map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create RX buf DMA map\n");
			goto fail;
		}

		/* Populate the slot with an actual receive buffer. */
		if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
			goto fail;
		}
	}
	return 0;

fail:	iwm_free_rx_ring(sc, ring);
	return error;
}
1131
static void
iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	int ntries;

	/*
	 * Stop RX DMA channel 0 and wait (up to 10ms: 1000 * 10us)
	 * for the channel to report idle, then rewind the software
	 * ring index.  Buffers are kept; see iwm_free_rx_ring() for
	 * full teardown.
	 */
	if (iwm_nic_lock(sc)) {
		IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
		for (ntries = 0; ntries < 1000; ntries++) {
			if (IWM_READ(sc, IWM_FH_MEM_RSSR_RX_STATUS_REG) &
			    IWM_FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE)
				break;
			DELAY(10);
		}
		iwm_nic_unlock(sc);
	}
	ring->cur = 0;
}
1149
static void
iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	int i;

	/*
	 * Full RX ring teardown: descriptor and status DMA areas,
	 * then each slot's mbuf and DMA map.  Also used as the error
	 * path of iwm_alloc_rx_ring(), so each slot may be only
	 * partially constructed.
	 */
	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->stat_dma);

	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
		}
		if (data->map != NULL)
			bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}
1171
static int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	int i, error;

	/*
	 * Set up TX ring 'qid': the TFD descriptor array and, for the
	 * rings at or below the command queue, a per-slot command
	 * buffer and DMA map.  On failure everything allocated so far
	 * is released by iwm_free_tx_ring().
	 */
	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate TX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/*
	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
	 * to allocate commands space for other rings.
	 */
	if (qid > IWM_MVM_CMD_QUEUE)
		return 0;

	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate TX cmd DMA memory\n");
		goto fail;
	}
	ring->cmd = ring->cmd_dma.vaddr;

	/*
	 * Walk the command area and record, per slot, the bus address
	 * of its command and of the scratch field inside the TX
	 * command.
	 */
	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		data->cmd_paddr = paddr;
		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
		    + offsetof(struct iwm_tx_cmd, scratch);
		paddr += sizeof(struct iwm_device_cmd);

		error = bus_dmamap_create(sc->sc_dmat, IWM_RBUF_SIZE,
		    IWM_NUM_OF_TBS, IWM_RBUF_SIZE, 0, BUS_DMA_NOWAIT,
		    &data->map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create TX buf DMA map\n");
			goto fail;
		}
	}
	/* The walk above must have covered the command area exactly. */
	KASSERT(paddr == ring->cmd_dma.paddr + size);
	return 0;

fail:	iwm_free_tx_ring(sc, ring);
	return error;
}
1233
static void
iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	/*
	 * Drop every queued frame, zero the descriptors, and rewind
	 * the ring.  DMA maps and the command area are kept; see
	 * iwm_free_tx_ring() for full teardown.
	 */
	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	/* Clear TX descriptors. */
	memset(ring->desc, 0, ring->desc_dma.size);
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0,
	    ring->desc_dma.size, BUS_DMASYNC_PREWRITE);
	/* This queue can no longer be full. */
	sc->qfullmsk &= ~(1 << ring->qid);
	ring->queued = 0;
	ring->cur = 0;
}
1258
static void
iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	/*
	 * Full TX ring teardown: descriptor and command DMA areas,
	 * then each slot's mbuf and DMA map.  Also used as the error
	 * path of iwm_alloc_tx_ring(), so slots may be only partially
	 * constructed.
	 */
	iwm_dma_contig_free(&ring->desc_dma);
	iwm_dma_contig_free(&ring->cmd_dma);

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
		}
		if (data->map != NULL)
			bus_dmamap_destroy(sc->sc_dmat, data->map);
	}
}
1280
1281 /*
1282 * High-level hardware frobbing routines
1283 */
1284
static void
iwm_enable_rfkill_int(struct iwm_softc *sc)
{
	/* Mask every interrupt source except RF-kill state changes. */
	sc->sc_intmask = IWM_CSR_INT_BIT_RF_KILL;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1291
1292 static int
1293 iwm_check_rfkill(struct iwm_softc *sc)
1294 {
1295 uint32_t v;
1296 int s;
1297 int rv;
1298
1299 s = splnet();
1300
1301 /*
1302 * "documentation" is not really helpful here:
1303 * 27: HW_RF_KILL_SW
1304 * Indicates state of (platform's) hardware RF-Kill switch
1305 *
1306 * But apparently when it's off, it's on ...
1307 */
1308 v = IWM_READ(sc, IWM_CSR_GP_CNTRL);
1309 rv = (v & IWM_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) == 0;
1310 if (rv) {
1311 sc->sc_flags |= IWM_FLAG_RFKILL;
1312 } else {
1313 sc->sc_flags &= ~IWM_FLAG_RFKILL;
1314 }
1315
1316 splx(s);
1317 return rv;
1318 }
1319
static void
iwm_enable_interrupts(struct iwm_softc *sc)
{
	/*
	 * Enable the full default interrupt set and remember it in
	 * sc_intmask for iwm_restore_interrupts().
	 */
	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1326
static void
iwm_restore_interrupts(struct iwm_softc *sc)
{
	/* Re-program the interrupt set last stored in sc_intmask. */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1332
static void
iwm_disable_interrupts(struct iwm_softc *sc)
{
	/*
	 * Mask everything and ack anything pending.  sc_intmask is
	 * left untouched so iwm_restore_interrupts() can bring the
	 * previous set back.
	 */
	int s = splnet();

	/* disable interrupts */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	/* acknowledge all interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);

	splx(s);
}
1347
static void
iwm_ict_reset(struct iwm_softc *sc)
{
	/*
	 * Clear and re-arm the interrupt cause table, then switch the
	 * driver into ICT interrupt mode and re-enable interrupts.
	 */
	iwm_disable_interrupts(sc);

	/* Reset ICT table. */
	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT table (4KB aligned). */
	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
	    IWM_CSR_DRAM_INT_TBL_ENABLE
	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWM_FLAG_USE_ICT;

	/* Re-enable interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);
}
1370
#define IWM_HW_READY_TIMEOUT 50
/*
 * Tell the device the driver is ready and poll (up to
 * IWM_HW_READY_TIMEOUT usec) for the NIC_READY acknowledgement.
 * Returns nonzero once the bit is observed.
 */
static int
iwm_set_hw_ready(struct iwm_softc *sc)
{
	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	return iwm_poll_bit(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
	    IWM_HW_READY_TIMEOUT);
}
#undef IWM_HW_READY_TIMEOUT
1384
1385 static int
1386 iwm_prepare_card_hw(struct iwm_softc *sc)
1387 {
1388 int rv = 0;
1389 int t = 0;
1390
1391 if (iwm_set_hw_ready(sc))
1392 goto out;
1393
1394 /* If HW is not ready, prepare the conditions to check again */
1395 IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1396 IWM_CSR_HW_IF_CONFIG_REG_PREPARE);
1397
1398 do {
1399 if (iwm_set_hw_ready(sc))
1400 goto out;
1401 DELAY(200);
1402 t += 200;
1403 } while (t < 150000);
1404
1405 rv = ETIMEDOUT;
1406
1407 out:
1408 return rv;
1409 }
1410
1411 static void
1412 iwm_apm_config(struct iwm_softc *sc)
1413 {
1414 pcireg_t reg;
1415
1416 reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
1417 sc->sc_cap_off + PCIE_LCSR);
1418 if (reg & PCIE_LCSR_ASPM_L1) {
1419 /* Um the Linux driver prints "Disabling L0S for this one ... */
1420 IWM_SETBITS(sc, IWM_CSR_GIO_REG,
1421 IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
1422 } else {
1423 /* ... and "Enabling" here */
1424 IWM_CLRBITS(sc, IWM_CSR_GIO_REG,
1425 IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
1426 }
1427 }
1428
1429 /*
1430 * Start up NIC's basic functionality after it has been reset
1431 * (e.g. after platform boot, or shutdown via iwm_pcie_apm_stop())
1432 * NOTE: This does not load uCode nor start the embedded processor
1433 */
1434 static int
1435 iwm_apm_init(struct iwm_softc *sc)
1436 {
1437 int error = 0;
1438
1439 DPRINTF(("iwm apm start\n"));
1440
1441 /* Disable L0S exit timer (platform NMI Work/Around) */
1442 IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
1443 IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
1444
1445 /*
1446 * Disable L0s without affecting L1;
1447 * don't wait for ICH L0s (ICH bug W/A)
1448 */
1449 IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
1450 IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
1451
1452 /* Set FH wait threshold to maximum (HW error during stress W/A) */
1453 IWM_SETBITS(sc, IWM_CSR_DBG_HPET_MEM_REG, IWM_CSR_DBG_HPET_MEM_REG_VAL);
1454
1455 /*
1456 * Enable HAP INTA (interrupt from management bus) to
1457 * wake device's PCI Express link L1a -> L0s
1458 */
1459 IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1460 IWM_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
1461
1462 iwm_apm_config(sc);
1463
1464 #if 0 /* not for 7k */
1465 /* Configure analog phase-lock-loop before activating to D0A */
1466 if (trans->cfg->base_params->pll_cfg_val)
1467 IWM_SETBITS(trans, IWM_CSR_ANA_PLL_CFG,
1468 trans->cfg->base_params->pll_cfg_val);
1469 #endif
1470
1471 /*
1472 * Set "initialization complete" bit to move adapter from
1473 * D0U* --> D0A* (powered-up active) state.
1474 */
1475 IWM_SETBITS(sc, IWM_CSR_GP_CNTRL, IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1476
1477 /*
1478 * Wait for clock stabilization; once stabilized, access to
1479 * device-internal resources is supported, e.g. iwm_write_prph()
1480 * and accesses to uCode SRAM.
1481 */
1482 if (!iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
1483 IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
1484 IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
1485 aprint_error_dev(sc->sc_dev,
1486 "timeout waiting for clock stabilization\n");
1487 goto out;
1488 }
1489
1490 if (sc->host_interrupt_operation_mode) {
1491 /*
1492 * This is a bit of an abuse - This is needed for 7260 / 3160
1493 * only check host_interrupt_operation_mode even if this is
1494 * not related to host_interrupt_operation_mode.
1495 *
1496 * Enable the oscillator to count wake up time for L1 exit. This
1497 * consumes slightly more power (100uA) - but allows to be sure
1498 * that we wake up from L1 on time.
1499 *
1500 * This looks weird: read twice the same register, discard the
1501 * value, set a bit, and yet again, read that same register
1502 * just to discard the value. But that's the way the hardware
1503 * seems to like it.
1504 */
1505 iwm_read_prph(sc, IWM_OSC_CLK);
1506 iwm_read_prph(sc, IWM_OSC_CLK);
1507 iwm_set_bits_prph(sc, IWM_OSC_CLK, IWM_OSC_CLK_FORCE_CONTROL);
1508 iwm_read_prph(sc, IWM_OSC_CLK);
1509 iwm_read_prph(sc, IWM_OSC_CLK);
1510 }
1511
1512 /*
1513 * Enable DMA clock and wait for it to stabilize.
1514 *
1515 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
1516 * do not disable clocks. This preserves any hardware bits already
1517 * set by default in "CLK_CTRL_REG" after reset.
1518 */
1519 iwm_write_prph(sc, IWM_APMG_CLK_EN_REG, IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1520 //kpause("iwmapm", 0, mstohz(20), NULL);
1521 DELAY(20);
1522
1523 /* Disable L1-Active */
1524 iwm_set_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1525 IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1526
1527 /* Clear the interrupt in APMG if the NIC is in RFKILL */
1528 iwm_write_prph(sc, IWM_APMG_RTC_INT_STT_REG,
1529 IWM_APMG_RTC_INT_STT_RFKILL);
1530
1531 out:
1532 if (error)
1533 aprint_error_dev(sc->sc_dev, "apm init error %d\n", error);
1534 return error;
1535 }
1536
1537 /* iwlwifi/pcie/trans.c */
static void
iwm_apm_stop(struct iwm_softc *sc)
{
	/* stop device's busmaster DMA activity */
	IWM_SETBITS(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_STOP_MASTER);

	/* Wait (up to 100us) for the bus master to report disabled. */
	if (!iwm_poll_bit(sc, IWM_CSR_RESET,
	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED,
	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100))
		aprint_error_dev(sc->sc_dev, "timeout waiting for master\n");
	DPRINTF(("iwm apm stop\n"));
}
1550
1551 /* iwlwifi pcie/trans.c */
static int
iwm_start_hw(struct iwm_softc *sc)
{
	int error;

	/*
	 * Power up the adapter: wait for it to be ready, issue a full
	 * software reset, run APM init, then arm the RF-kill
	 * interrupt and sample the switch.  Returns 0 or an errno.
	 */
	if ((error = iwm_prepare_card_hw(sc)) != 0)
		return error;

	/* Reset the entire device */
	IWM_WRITE(sc, IWM_CSR_RESET,
	    IWM_CSR_RESET_REG_FLAG_SW_RESET |
	    IWM_CSR_RESET_REG_FLAG_NEVO_RESET);
	DELAY(10);

	if ((error = iwm_apm_init(sc)) != 0)
		return error;

	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);

	return 0;
}
1574
1575 /* iwlwifi pcie/trans.c */
1576
static void
iwm_stop_device(struct iwm_softc *sc)
{
	int chnl, ntries;
	int qid;

	/*
	 * Bring the whole device to a stop: silence interrupts, halt
	 * the scheduler and every DMA channel, drain the rings, power
	 * down the APM, and reset the on-board processor — leaving
	 * only the RF-kill interrupt armed.
	 */

	/* tell the device to stop sending interrupts */
	iwm_disable_interrupts(sc);

	/* device going down, Stop using ICT table */
	sc->sc_flags &= ~IWM_FLAG_USE_ICT;

	/* stop tx and rx. tx and rx bits, as usual, are from if_iwn */

	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Stop all DMA channels. */
	if (iwm_nic_lock(sc)) {
		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
			IWM_WRITE(sc,
			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
			/* Wait up to 4ms (200 * 20us) for idle. */
			for (ntries = 0; ntries < 200; ntries++) {
				uint32_t r;

				r = IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG);
				if (r & IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(
				    chnl))
					break;
				DELAY(20);
			}
		}
		iwm_nic_unlock(sc);
	}

	/* Stop RX ring. */
	iwm_reset_rx_ring(sc, &sc->rxq);

	/* Reset all TX rings. */
	for (qid = 0; qid < __arraycount(sc->txq); qid++)
		iwm_reset_tx_ring(sc, &sc->txq[qid]);

	/*
	 * Power-down device's busmaster DMA clocks
	 */
	iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG, IWM_APMG_CLK_VAL_DMA_CLK_RQT);
	DELAY(5);

	/* Make sure (redundant) we've released our request to stay awake */
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwm_apm_stop(sc);

	/* Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * Clean again the interrupt here
	 */
	iwm_disable_interrupts(sc);
	/* stop and reset the on-board processor */
	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_NEVO_RESET);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);
}
1645
1646 /* iwlwifi pcie/trans.c (always main power) */
static void
iwm_set_pwr(struct iwm_softc *sc)
{
	/* Select VMAIN as the power source in the power-source field. */
	iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
	    IWM_APMG_PS_CTRL_VAL_PWR_SRC_VMAIN, ~IWM_APMG_PS_CTRL_MSK_PWR_SRC);
}
1653
1654 /* iwlwifi: mvm/ops.c */
static void
iwm_mvm_nic_config(struct iwm_softc *sc)
{
	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
	uint32_t reg_val = 0;

	/*
	 * Program the HW interface config register with the MAC
	 * step/dash (from the hardware revision) and the radio
	 * type/step/dash (from the firmware's PHY configuration).
	 */
	radio_cfg_type = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
	radio_cfg_step = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
	radio_cfg_dash = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
	    IWM_FW_PHY_CFG_RADIO_DASH_POS;

	/* SKU control */
	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

	/* radio configuration */
	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);

	DPRINTF(("Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
	    radio_cfg_step, radio_cfg_dash));

	/*
	 * W/A : NIC is stuck in a reset state after Early PCIe power off
	 * (PCIe power is lost before PERST# is asserted), causing ME FW
	 * to lose ownership and not being able to obtain it back.
	 */
	iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
	    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
	    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
}
1693
static int
iwm_nic_rx_init(struct iwm_softc *sc)
{
	/*
	 * Program the RX DMA engine: point it at the descriptor ring
	 * and status area, then enable channel 0.  Returns 0 on
	 * success, EBUSY if the NIC lock could not be taken.
	 */
	if (!iwm_nic_lock(sc))
		return EBUSY;

	/*
	 * Initialize RX ring. This is from the iwn driver.
	 */
	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));

	/* stop DMA */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Set physical address of RX ring (256-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);

	/* Set physical address of RX status (16-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);

	/* Enable RX. */
	/*
	 * Note: Linux driver also sets this:
	 *  (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
	 *
	 * It causes weird behavior.  YMMV.
	 */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL		|
	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY		|  /* HW bug */
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL	|
	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K		|
	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);

	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (sc->host_interrupt_operation_mode)
		IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);

	/*
	 * Thus sayeth el jefe (iwlwifi) via a comment:
	 *
	 * This value should initially be 0 (before preparing any
	 * RBs), should be 8 after preparing the first 8 RBs (for example)
	 */
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);

	iwm_nic_unlock(sc);

	return 0;
}
1752
static int
iwm_nic_tx_init(struct iwm_softc *sc)
{
	int qid;

	/*
	 * Program the TX side: keep-warm page address and, per queue,
	 * the descriptor ring base.  Returns 0 on success, EBUSY if
	 * the NIC lock could not be taken.
	 */
	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Deactivate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Set physical address of "keep warm" page (16-byte aligned). */
	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);

	/* Initialize TX rings. */
	for (qid = 0; qid < __arraycount(sc->txq); qid++) {
		struct iwm_tx_ring *txq = &sc->txq[qid];

		/* Set physical address of TX ring (256-byte aligned). */
		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
		    txq->desc_dma.paddr >> 8);
		DPRINTF(("loading ring %d descriptors (%p) at %"PRIxMAX"\n",
		    qid, txq->desc, (uintmax_t)(txq->desc_dma.paddr >> 8)));
	}
	iwm_nic_unlock(sc);

	return 0;
}
1781
1782 static int
1783 iwm_nic_init(struct iwm_softc *sc)
1784 {
1785 int error;
1786
1787 iwm_apm_init(sc);
1788 iwm_set_pwr(sc);
1789
1790 iwm_mvm_nic_config(sc);
1791
1792 if ((error = iwm_nic_rx_init(sc)) != 0)
1793 return error;
1794
1795 /*
1796 * Ditto for TX, from iwn
1797 */
1798 if ((error = iwm_nic_tx_init(sc)) != 0)
1799 return error;
1800
1801 DPRINTF(("shadow registers enabled\n"));
1802 IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
1803
1804 return 0;
1805 }
1806
1807 #if 0
1808 enum iwm_mvm_tx_fifo {
1809 IWM_MVM_TX_FIFO_BK = 0,
1810 IWM_MVM_TX_FIFO_BE,
1811 IWM_MVM_TX_FIFO_VI,
1812 IWM_MVM_TX_FIFO_VO,
1813 IWM_MVM_TX_FIFO_MCAST = 5,
1814 };
1815
1816 static const uint8_t iwm_mvm_ac_to_tx_fifo[] = {
1817 IWM_MVM_TX_FIFO_VO,
1818 IWM_MVM_TX_FIFO_VI,
1819 IWM_MVM_TX_FIFO_BE,
1820 IWM_MVM_TX_FIFO_BK,
1821 };
1822 #endif
1823
static void
iwm_enable_txq(struct iwm_softc *sc, int qid, int fifo)
{
	/*
	 * Activate TX queue 'qid' in the scheduler and bind it to
	 * transmit FIFO 'fifo'.  Queue state lives partly in
	 * periphery registers and partly in SRAM at sc->sched_base.
	 */
	if (!iwm_nic_lock(sc)) {
		DPRINTF(("%s: cannot enable txq %d\n", DEVNAME(sc), qid));
		return; /* XXX return EBUSY */
	}

	/* unactivate before configuration */
	iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
	    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
	    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));

	if (qid != IWM_MVM_CMD_QUEUE) {
		/* queue-chain select; skipped for the command queue */
		iwm_set_bits_prph(sc, IWM_SCD_QUEUECHAIN_SEL, (1 << qid));
	}

	iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));

	/* Rewind write and read pointers to slot 0. */
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
	iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);

	/* Clear the queue's scheduler context in SRAM. */
	iwm_write_mem32(sc, sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
	/* Set scheduler window size and frame limit. */
	iwm_write_mem32(sc,
	    sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
	    sizeof(uint32_t),
	    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
	    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
	    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
	    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

	/* Mark the queue active and attached to 'fifo'. */
	iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
	    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
	    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
	    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
	    IWM_SCD_QUEUE_STTS_REG_MSK);

	iwm_nic_unlock(sc);

	DPRINTF(("enabled txq %d FIFO %d\n", qid, fifo));
}
1866
static int
iwm_post_alive(struct iwm_softc *sc)
{
	int nwords;
	int error, chnl;

	/*
	 * Finish bring-up after the firmware reports "alive": verify
	 * the scheduler SRAM base, reset the ICT table, clear
	 * scheduler context, enable the command queue and the TX DMA
	 * channels.  Returns 0 or an errno.
	 */
	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Firmware and driver must agree on the scheduler SRAM base. */
	if (sc->sched_base != iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR)) {
		DPRINTF(("%s: sched addr mismatch\n", DEVNAME(sc)));
		error = EINVAL;
		goto out;
	}

	iwm_ict_reset(sc);

	/* Clear TX scheduler state in SRAM. */
	nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND)
	    / sizeof(uint32_t);
	error = iwm_write_mem(sc,
	    sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
	    NULL, nwords);
	if (error)
		goto out;

	/* Set physical address of TX scheduler rings (1KB aligned). */
	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);

	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);

	/* enable command channel */
	iwm_enable_txq(sc, IWM_MVM_CMD_QUEUE, 7);

	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);

	/* Enable DMA channels. */
	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
	}

	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Enable L1-Active */
	iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
	    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

 out:
	iwm_nic_unlock(sc);
	return error;
}
1922
1923 /*
1924 * PHY db
1925 * iwlwifi/iwl-phy-db.c
1926 */
1927
1928 /*
1929 * BEGIN iwl-phy-db.c
1930 */
1931
/*
 * PHY database section types; the values correspond to the 'type'
 * field of the firmware's calibration notifications (see
 * iwm_phy_db_set_section()).
 */
enum iwm_phy_db_section_type {
	IWM_PHY_DB_CFG = 1,
	IWM_PHY_DB_CALIB_NCH,
	IWM_PHY_DB_UNUSED,
	IWM_PHY_DB_CALIB_CHG_PAPD,	/* per-channel-group PAPD calib */
	IWM_PHY_DB_CALIB_CHG_TXP,	/* per-channel-group TX power calib */
	IWM_PHY_DB_MAX
};
1940
1941 #define IWM_PHY_DB_CMD 0x6c /* TEMP API - The actual is 0x8c */
1942
1943 /*
1944 * phy db - configure operational ucode
1945 */
/* Command payload used to send a PHY db section to the firmware. */
struct iwm_phy_db_cmd {
	uint16_t type;		/* enum iwm_phy_db_section_type */
	uint16_t length;	/* length of data[] */
	uint8_t data[];
} __packed;
1951
1952 /* for parsing of tx power channel group data that comes from the firmware*/
/* for parsing of tx power channel group data that comes from the firmware */
struct iwm_phy_db_chg_txp {
	uint32_t space;			/* not interpreted by this driver */
	uint16_t max_channel_idx;	/* highest channel index the group covers */
} __packed;
1957
1958 /*
1959 * phy db - Receive phy db chunk after calibrations
1960 */
/*
 * Firmware notification carrying one PHY db chunk produced by the
 * calibration run; consumed by iwm_phy_db_set_section().
 */
struct iwm_calib_res_notif_phy_db {
	uint16_t type;		/* enum iwm_phy_db_section_type (LE) */
	uint16_t length;	/* length of data[] (LE) */
	uint8_t data[];
} __packed;
1966
1967 /*
1968 * get phy db section: returns a pointer to a phy db section specified by
1969 * type and channel group id.
1970 */
1971 static struct iwm_phy_db_entry *
1972 iwm_phy_db_get_section(struct iwm_softc *sc,
1973 enum iwm_phy_db_section_type type, uint16_t chg_id)
1974 {
1975 struct iwm_phy_db *phy_db = &sc->sc_phy_db;
1976
1977 if (type >= IWM_PHY_DB_MAX)
1978 return NULL;
1979
1980 switch (type) {
1981 case IWM_PHY_DB_CFG:
1982 return &phy_db->cfg;
1983 case IWM_PHY_DB_CALIB_NCH:
1984 return &phy_db->calib_nch;
1985 case IWM_PHY_DB_CALIB_CHG_PAPD:
1986 if (chg_id >= IWM_NUM_PAPD_CH_GROUPS)
1987 return NULL;
1988 return &phy_db->calib_ch_group_papd[chg_id];
1989 case IWM_PHY_DB_CALIB_CHG_TXP:
1990 if (chg_id >= IWM_NUM_TXP_CH_GROUPS)
1991 return NULL;
1992 return &phy_db->calib_ch_group_txp[chg_id];
1993 default:
1994 return NULL;
1995 }
1996 return NULL;
1997 }
1998
static int
iwm_phy_db_set_section(struct iwm_softc *sc,
	struct iwm_calib_res_notif_phy_db *phy_db_notif, uint16_t size)
{
	enum iwm_phy_db_section_type type = le16toh(phy_db_notif->type);
	struct iwm_phy_db_entry *entry;
	uint16_t chg_id = 0;

	/*
	 * Store one PHY db chunk received from the firmware.  For the
	 * per-channel-group calibration sections the group id is the
	 * first 16-bit little-endian word of the payload.
	 */
	if (type == IWM_PHY_DB_CALIB_CHG_PAPD ||
	    type == IWM_PHY_DB_CALIB_CHG_TXP)
		chg_id = le16toh(*(uint16_t *)phy_db_notif->data);

	entry = iwm_phy_db_get_section(sc, type, chg_id);
	if (!entry)
		return EINVAL;

	/* Drop any previously stored copy of this section. */
	if (entry->data)
		kmem_intr_free(entry->data, entry->size);
	/* KM_NOSLEEP: may be called from interrupt context. */
	entry->data = kmem_intr_alloc(size, KM_NOSLEEP);
	if (!entry->data) {
		entry->size = 0;
		return ENOMEM;
	}
	memcpy(entry->data, phy_db_notif->data, size);
	entry->size = size;

	DPRINTFN(10, ("%s(%d): [PHYDB]SET: Type %d, Size: %d, data: %p\n",
	    __func__, __LINE__, type, size, entry->data));

	return 0;
}
2030
/*
 * Return nonzero when 'ch_id' is a channel number the PHY db knows
 * about: 2GHz channels (<= 14) plus the valid 5GHz sub-bands.
 * (Values 0..14 are all accepted here, matching iwlwifi.)
 */
static int
iwm_is_valid_channel(uint16_t ch_id)
{
	if (ch_id <= 14)
		return 1;
	if (ch_id >= 36 && ch_id <= 64)
		return (ch_id % 4) == 0;
	if (ch_id >= 100 && ch_id <= 140)
		return (ch_id % 4) == 0;
	if (ch_id >= 145 && ch_id <= 165)
		return (ch_id % 4) == 1;
	return 0;
}
2041
/*
 * Map a channel number to its index in the phy db channel table.
 * Returns 0xff for channels iwm_is_valid_channel() rejects.
 */
static uint8_t
iwm_ch_id_to_ch_index(uint16_t ch_id)
{
	uint8_t idx;

	if (!iwm_is_valid_channel(ch_id))
		return 0xff;

	if (ch_id <= 14)
		idx = ch_id - 1;
	else if (ch_id <= 64)
		idx = (ch_id + 20) / 4;
	else if (ch_id <= 140)
		idx = (ch_id - 12) / 4;
	else
		idx = (ch_id - 13) / 4;
	return idx;
}
2056
2057
/*
 * Map a channel number to its PAPD calibration channel group:
 * 0 = 2GHz (1-14), 1 = 36-64, 2 = 100-140, 3 = remaining 5GHz.
 * Returns 0xff for invalid channels.
 */
static uint16_t
iwm_channel_id_to_papd(uint16_t ch_id)
{
	if (!iwm_is_valid_channel(ch_id))
		return 0xff;

	if (ch_id >= 1 && ch_id <= 14)
		return 0;
	if (ch_id >= 36 && ch_id <= 64)
		return 1;
	if (ch_id >= 100 && ch_id <= 140)
		return 2;
	return 3;
}
2072
2073 static uint16_t
2074 iwm_channel_id_to_txp(struct iwm_softc *sc, uint16_t ch_id)
2075 {
2076 struct iwm_phy_db *phy_db = &sc->sc_phy_db;
2077 struct iwm_phy_db_chg_txp *txp_chg;
2078 int i;
2079 uint8_t ch_index = iwm_ch_id_to_ch_index(ch_id);
2080
2081 if (ch_index == 0xff)
2082 return 0xff;
2083
2084 for (i = 0; i < IWM_NUM_TXP_CH_GROUPS; i++) {
2085 txp_chg = (void *)phy_db->calib_ch_group_txp[i].data;
2086 if (!txp_chg)
2087 return 0xff;
2088 /*
2089 * Looking for the first channel group that its max channel is
2090 * higher then wanted channel.
2091 */
2092 if (le16toh(txp_chg->max_channel_idx) >= ch_index)
2093 return i;
2094 }
2095 return 0xff;
2096 }
2097
2098 static int
2099 iwm_phy_db_get_section_data(struct iwm_softc *sc,
2100 uint32_t type, uint8_t **data, uint16_t *size, uint16_t ch_id)
2101 {
2102 struct iwm_phy_db_entry *entry;
2103 uint16_t ch_group_id = 0;
2104
2105 /* find wanted channel group */
2106 if (type == IWM_PHY_DB_CALIB_CHG_PAPD)
2107 ch_group_id = iwm_channel_id_to_papd(ch_id);
2108 else if (type == IWM_PHY_DB_CALIB_CHG_TXP)
2109 ch_group_id = iwm_channel_id_to_txp(sc, ch_id);
2110
2111 entry = iwm_phy_db_get_section(sc, type, ch_group_id);
2112 if (!entry)
2113 return EINVAL;
2114
2115 *data = entry->data;
2116 *size = entry->size;
2117
2118 DPRINTFN(10, ("%s(%d): [PHYDB] GET: Type %d , Size: %d\n",
2119 __func__, __LINE__, type, *size));
2120
2121 return 0;
2122 }
2123
2124 static int
2125 iwm_send_phy_db_cmd(struct iwm_softc *sc, uint16_t type,
2126 uint16_t length, void *data)
2127 {
2128 struct iwm_phy_db_cmd phy_db_cmd;
2129 struct iwm_host_cmd cmd = {
2130 .id = IWM_PHY_DB_CMD,
2131 .flags = IWM_CMD_SYNC,
2132 };
2133
2134 DPRINTFN(10, ("Sending PHY-DB hcmd of type %d, of length %d\n",
2135 type, length));
2136
2137 /* Set phy db cmd variables */
2138 phy_db_cmd.type = le16toh(type);
2139 phy_db_cmd.length = le16toh(length);
2140
2141 /* Set hcmd variables */
2142 cmd.data[0] = &phy_db_cmd;
2143 cmd.len[0] = sizeof(struct iwm_phy_db_cmd);
2144 cmd.data[1] = data;
2145 cmd.len[1] = length;
2146 cmd.dataflags[1] = IWM_HCMD_DFL_NOCOPY;
2147
2148 return iwm_send_cmd(sc, &cmd);
2149 }
2150
2151 static int
2152 iwm_phy_db_send_all_channel_groups(struct iwm_softc *sc,
2153 enum iwm_phy_db_section_type type, uint8_t max_ch_groups)
2154 {
2155 uint16_t i;
2156 int err;
2157 struct iwm_phy_db_entry *entry;
2158
2159 /* Send all the channel-specific groups to operational fw */
2160 for (i = 0; i < max_ch_groups; i++) {
2161 entry = iwm_phy_db_get_section(sc, type, i);
2162 if (!entry)
2163 return EINVAL;
2164
2165 if (!entry->size)
2166 continue;
2167
2168 /* Send the requested PHY DB section */
2169 err = iwm_send_phy_db_cmd(sc, type, entry->size, entry->data);
2170 if (err) {
2171 DPRINTF(("%s: Can't SEND phy_db section %d (%d), "
2172 "err %d\n", DEVNAME(sc), type, i, err));
2173 return err;
2174 }
2175
2176 DPRINTFN(10, ("Sent PHY_DB HCMD, type = %d num = %d\n", type, i));
2177 }
2178
2179 return 0;
2180 }
2181
/*
 * Upload the complete phy db — configuration, non-channel calibration
 * and all per-channel-group calibration sections — to the runtime
 * firmware image.
 */
static int
iwm_send_phy_db_data(struct iwm_softc *sc)
{
	uint8_t *data = NULL;
	uint16_t size = 0;
	int err;

	DPRINTF(("Sending phy db data and configuration to runtime image\n"));

	/* Send PHY DB CFG section */
	err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CFG, &data, &size, 0);
	if (err) {
		DPRINTF(("%s: Cannot get Phy DB cfg section, %d\n",
		    DEVNAME(sc), err));
		return err;
	}

	err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CFG, size, data);
	if (err) {
		DPRINTF(("%s: Cannot send HCMD of Phy DB cfg section, %d\n",
		    DEVNAME(sc), err));
		return err;
	}

	/* Send the non-channel-specific calibration section. */
	err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CALIB_NCH,
	    &data, &size, 0);
	if (err) {
		DPRINTF(("%s: Cannot get Phy DB non specific channel section, "
		    "%d\n", DEVNAME(sc), err));
		return err;
	}

	err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CALIB_NCH, size, data);
	if (err) {
		DPRINTF(("%s: Cannot send HCMD of Phy DB non specific channel "
		    "sect, %d\n", DEVNAME(sc), err));
		return err;
	}

	/* Send all the PAPD channel specific data */
	err = iwm_phy_db_send_all_channel_groups(sc,
	    IWM_PHY_DB_CALIB_CHG_PAPD, IWM_NUM_PAPD_CH_GROUPS);
	if (err) {
		DPRINTF(("%s: Cannot send channel specific PAPD groups, %d\n",
		    DEVNAME(sc), err));
		return err;
	}

	/* Send all the TXP channel specific data */
	err = iwm_phy_db_send_all_channel_groups(sc,
	    IWM_PHY_DB_CALIB_CHG_TXP, IWM_NUM_TXP_CH_GROUPS);
	if (err) {
		DPRINTF(("%s: Cannot send channel specific TX power groups, "
		    "%d\n", DEVNAME(sc), err));
		return err;
	}

	DPRINTF(("Finished sending phy db non channel data\n"));
	return 0;
}
2242
2243 /*
2244 * END iwl-phy-db.c
2245 */
2246
2247 /*
2248 * BEGIN iwlwifi/mvm/time-event.c
2249 */
2250
2251 /*
2252 * For the high priority TE use a time event type that has similar priority to
2253 * the FW's action scan priority.
2254 */
2255 #define IWM_MVM_ROC_TE_TYPE_NORMAL IWM_TE_P2P_DEVICE_DISCOVERABLE
2256 #define IWM_MVM_ROC_TE_TYPE_MGMT_TX IWM_TE_P2P_CLIENT_ASSOC
2257
2258 /* used to convert from time event API v2 to v1 */
2259 #define IWM_TE_V2_DEP_POLICY_MSK (IWM_TE_V2_DEP_OTHER | IWM_TE_V2_DEP_TSF |\
2260 IWM_TE_V2_EVENT_SOCIOPATHIC)
2261 static inline uint16_t
2262 iwm_te_v2_get_notify(uint16_t policy)
2263 {
2264 return le16toh(policy) & IWM_TE_V2_NOTIF_MSK;
2265 }
2266
2267 static inline uint16_t
2268 iwm_te_v2_get_dep_policy(uint16_t policy)
2269 {
2270 return (le16toh(policy) & IWM_TE_V2_DEP_POLICY_MSK) >>
2271 IWM_TE_V2_PLACEMENT_POS;
2272 }
2273
2274 static inline uint16_t
2275 iwm_te_v2_get_absence(uint16_t policy)
2276 {
2277 return (le16toh(policy) & IWM_TE_V2_ABSENCE) >> IWM_TE_V2_ABSENCE_POS;
2278 }
2279
2280 static void
2281 iwm_mvm_te_v2_to_v1(const struct iwm_time_event_cmd_v2 *cmd_v2,
2282 struct iwm_time_event_cmd_v1 *cmd_v1)
2283 {
2284 cmd_v1->id_and_color = cmd_v2->id_and_color;
2285 cmd_v1->action = cmd_v2->action;
2286 cmd_v1->id = cmd_v2->id;
2287 cmd_v1->apply_time = cmd_v2->apply_time;
2288 cmd_v1->max_delay = cmd_v2->max_delay;
2289 cmd_v1->depends_on = cmd_v2->depends_on;
2290 cmd_v1->interval = cmd_v2->interval;
2291 cmd_v1->duration = cmd_v2->duration;
2292 if (cmd_v2->repeat == IWM_TE_V2_REPEAT_ENDLESS)
2293 cmd_v1->repeat = htole32(IWM_TE_V1_REPEAT_ENDLESS);
2294 else
2295 cmd_v1->repeat = htole32(cmd_v2->repeat);
2296 cmd_v1->max_frags = htole32(cmd_v2->max_frags);
2297 cmd_v1->interval_reciprocal = 0; /* unused */
2298
2299 cmd_v1->dep_policy = htole32(iwm_te_v2_get_dep_policy(cmd_v2->policy));
2300 cmd_v1->is_present = htole32(!iwm_te_v2_get_absence(cmd_v2->policy));
2301 cmd_v1->notify = htole32(iwm_te_v2_get_notify(cmd_v2->policy));
2302 }
2303
2304 static int
2305 iwm_mvm_send_time_event_cmd(struct iwm_softc *sc,
2306 const struct iwm_time_event_cmd_v2 *cmd)
2307 {
2308 struct iwm_time_event_cmd_v1 cmd_v1;
2309
2310 if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_TIME_EVENT_API_V2)
2311 return iwm_mvm_send_cmd_pdu(sc, IWM_TIME_EVENT_CMD,
2312 IWM_CMD_SYNC, sizeof(*cmd), cmd);
2313
2314 iwm_mvm_te_v2_to_v1(cmd, &cmd_v1);
2315 return iwm_mvm_send_cmd_pdu(sc, IWM_TIME_EVENT_CMD, IWM_CMD_SYNC,
2316 sizeof(cmd_v1), &cmd_v1);
2317 }
2318
2319 static int
2320 iwm_mvm_time_event_send_add(struct iwm_softc *sc, struct iwm_node *in,
2321 void *te_data, struct iwm_time_event_cmd_v2 *te_cmd)
2322 {
2323 int ret;
2324
2325 DPRINTF(("Add new TE, duration %d TU\n", le32toh(te_cmd->duration)));
2326
2327 ret = iwm_mvm_send_time_event_cmd(sc, te_cmd);
2328 if (ret) {
2329 DPRINTF(("%s: Couldn't send IWM_TIME_EVENT_CMD: %d\n",
2330 DEVNAME(sc), ret));
2331 }
2332
2333 return ret;
2334 }
2335
/*
 * Ask the firmware to stay on the current channel for `duration' TUs
 * (starting within at most `max_delay' TUs) so e.g. an association
 * exchange can complete without the device wandering off-channel.
 *
 * NOTE(review): min_duration is accepted but never used here — the
 * iwlwifi original feeds it into its time-event bookkeeping; confirm
 * whether it should be honored or dropped.
 */
static void
iwm_mvm_protect_session(struct iwm_softc *sc, struct iwm_node *in,
	uint32_t duration, uint32_t min_duration, uint32_t max_delay)
{
	struct iwm_time_event_cmd_v2 time_cmd;

	memset(&time_cmd, 0, sizeof(time_cmd));

	time_cmd.action = htole32(IWM_FW_CTXT_ACTION_ADD);
	time_cmd.id_and_color =
	    htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
	time_cmd.id = htole32(IWM_TE_BSS_STA_AGGRESSIVE_ASSOC);

	/* Anchor the event at the device's current internal timestamp. */
	time_cmd.apply_time = htole32(iwm_read_prph(sc,
	    IWM_DEVICE_SYSTEM_TIME_REG));

	time_cmd.max_frags = IWM_TE_V2_FRAG_NONE;
	time_cmd.max_delay = htole32(max_delay);
	/* TODO: why do we need to interval = bi if it is not periodic? */
	time_cmd.interval = htole32(1);
	time_cmd.duration = htole32(duration);
	time_cmd.repeat = 1;
	/* Ask for notifications at both start and end of the event. */
	time_cmd.policy
	    = htole32(IWM_TE_V2_NOTIF_HOST_EVENT_START |
		IWM_TE_V2_NOTIF_HOST_EVENT_END);

	iwm_mvm_time_event_send_add(sc, in, /*te_data*/NULL, &time_cmd);
}
2364
2365 /*
2366 * END iwlwifi/mvm/time-event.c
2367 */
2368
2369 /*
2370 * NVM read access and content parsing. We do not support
2371 * external NVM or writing NVM.
2372 * iwlwifi/mvm/nvm.c
2373 */
2374
2375 /* list of NVM sections we are allowed/need to read */
static const int nvm_to_read[] = {
	IWM_NVM_SECTION_TYPE_HW,		/* MAC address (IWM_HW_ADDR) */
	IWM_NVM_SECTION_TYPE_SW,		/* version, radio cfg, SKU, channels */
	IWM_NVM_SECTION_TYPE_CALIBRATION,	/* xtal calibration words */
	IWM_NVM_SECTION_TYPE_PRODUCTION,	/* read but not parsed below */
};
2382
2383 /* Default NVM size to read */
2384 #define IWM_NVM_DEFAULT_CHUNK_SIZE (2*1024)
2385 #define IWM_MAX_NVM_SECTION_SIZE 7000
2386
2387 #define IWM_NVM_WRITE_OPCODE 1
2388 #define IWM_NVM_READ_OPCODE 0
2389
/*
 * Read one chunk of an NVM section via IWM_NVM_ACCESS_CMD.  On
 * success *len is set to the number of bytes the firmware actually
 * returned (which may be less than `length' at the end of a section).
 *
 * NOTE(review): the caller-supplied offset is immediately forced to 0
 * below, so every request reads from the start of the section and the
 * data always lands at data + 0.  For sections that fit in a single
 * IWM_NVM_DEFAULT_CHUNK_SIZE read this is harmless, but it means
 * iwm_nvm_read_section()'s multi-chunk loop can never advance past
 * the first chunk.  Upstream drivers do not zero the offset — confirm
 * whether this is a deliberate firmware workaround or a leftover.
 */
static int
iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
	uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
{
	offset = 0;
	struct iwm_nvm_access_cmd nvm_access_cmd = {
		.offset = htole16(offset),
		.length = htole16(length),
		.type = htole16(section),
		.op_code = IWM_NVM_READ_OPCODE,
	};
	struct iwm_nvm_access_resp *nvm_resp;
	struct iwm_rx_packet *pkt;
	struct iwm_host_cmd cmd = {
		.id = IWM_NVM_ACCESS_CMD,
		.flags = IWM_CMD_SYNC | IWM_CMD_WANT_SKB |
		    IWM_CMD_SEND_IN_RFKILL,
		.data = { &nvm_access_cmd, },
	};
	int ret, bytes_read, offset_read;
	uint8_t *resp_data;

	cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);

	ret = iwm_send_cmd(sc, &cmd);
	if (ret)
		return ret;

	pkt = cmd.resp_pkt;
	if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
		DPRINTF(("%s: Bad return from IWM_NVM_ACCES_COMMAND (0x%08X)\n",
		    DEVNAME(sc), pkt->hdr.flags));
		ret = EIO;
		goto exit;
	}

	/* Extract NVM response */
	nvm_resp = (void *)pkt->data;

	ret = le16toh(nvm_resp->status);
	bytes_read = le16toh(nvm_resp->length);
	offset_read = le16toh(nvm_resp->offset);
	resp_data = nvm_resp->data;
	if (ret) {
		DPRINTF(("%s: NVM access command failed with status %d\n",
		    DEVNAME(sc), ret));
		ret = EINVAL;
		goto exit;
	}

	/* The firmware echoes the offset it served; it must match. */
	if (offset_read != offset) {
		DPRINTF(("%s: NVM ACCESS response with invalid offset %d\n",
		    DEVNAME(sc), offset_read));
		ret = EINVAL;
		goto exit;
	}

	memcpy(data + offset, resp_data, bytes_read);
	*len = bytes_read;

 exit:
	iwm_free_resp(sc, &cmd);
	return ret;
}
2454
2455 /*
2456 * Reads an NVM section completely.
2457 * NICs prior to 7000 family doesn't have a real NVM, but just read
2458 * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
2459 * by uCode, we need to manually check in this case that we don't
2460 * overflow and try to read more than the EEPROM size.
2461 * For 7000 family NICs, we supply the maximal size we can read, and
2462 * the uCode fills the response with as much data as we can,
2463 * without overflowing, so no check is needed.
2464 */
static int
iwm_nvm_read_section(struct iwm_softc *sc,
	uint16_t section, uint8_t *data, uint16_t *len)
{
	uint16_t length, seglen;
	int error;

	/* Set nvm section read length */
	length = seglen = IWM_NVM_DEFAULT_CHUNK_SIZE;
	*len = 0;

	/*
	 * Read the NVM until exhausted (reading less than requested).
	 *
	 * NOTE(review): chunks are accumulated at data + *len with no
	 * explicit bound check here; the caller's buffer (sized
	 * IWM_OTP_LOW_IMAGE_SIZE in iwm_nvm_init()) must be large
	 * enough for the whole section — confirm.
	 */
	while (seglen == length) {
		error = iwm_nvm_read_chunk(sc,
		    section, *len, length, data, &seglen);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "Cannot read NVM from section %d offset %d, "
			    "length %d\n", section, *len, length);
			return error;
		}
		*len += seglen;
	}

	DPRINTFN(4, ("NVM section %d read completed\n", section));
	return 0;
}
2492
2493 /*
2494 * BEGIN IWM_NVM_PARSE
2495 */
2496
2497 /* iwlwifi/iwl-nvm-parse.c */
2498
2499 /* NVM offsets (in words) definitions */
enum wkp_nvm_offsets {
	/* NVM HW-Section offset (in words) definitions */
	IWM_HW_ADDR = 0x15,		/* MAC address, 3 words (6 bytes) */

	/* NVM SW-Section offset (in words) definitions */
	IWM_NVM_SW_SECTION = 0x1C0,	/* absolute base of the SW section */
	IWM_NVM_VERSION = 0,		/* offsets below are SW-relative */
	IWM_RADIO_CFG = 1,
	IWM_SKU = 2,
	IWM_N_HW_ADDRS = 3,
	IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,

	/* NVM calibration section offset (in words) definitions */
	IWM_NVM_CALIB_SECTION = 0x2B8,
	IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
};
2516
2517 /* SKU Capabilities (actual values from NVM definition) */
enum nvm_sku_bits {
	IWM_NVM_SKU_CAP_BAND_24GHZ = (1 << 0),	/* 2.4GHz band enabled */
	IWM_NVM_SKU_CAP_BAND_52GHZ = (1 << 1),	/* 5.2GHz band enabled */
	IWM_NVM_SKU_CAP_11N_ENABLE = (1 << 2),	/* 802.11n allowed */
	IWM_NVM_SKU_CAP_11AC_ENABLE = (1 << 3),	/* 802.11ac allowed */
};
2524
2525 /* radio config bits (actual values from NVM definition) */
2526 #define IWM_NVM_RF_CFG_DASH_MSK(x) (x & 0x3) /* bits 0-1 */
2527 #define IWM_NVM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */
2528 #define IWM_NVM_RF_CFG_TYPE_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */
2529 #define IWM_NVM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */
2530 #define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */
2531 #define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
2532
2533 #define DEFAULT_MAX_TX_POWER 16
2534
2535 /**
2536 * enum iwm_nvm_channel_flags - channel flags in NVM
2537 * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
2538 * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
2539 * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
2540 * @IWM_NVM_CHANNEL_RADAR: radar detection required
2541 * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
2542 * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
2543 * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
2544 * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
2545 * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
2546 */
enum iwm_nvm_channel_flags {
	/* Note: bits 2, 5 and 6 are not defined here. */
	IWM_NVM_CHANNEL_VALID = (1 << 0),
	IWM_NVM_CHANNEL_IBSS = (1 << 1),
	IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
	IWM_NVM_CHANNEL_RADAR = (1 << 4),
	IWM_NVM_CHANNEL_DFS = (1 << 7),
	IWM_NVM_CHANNEL_WIDE = (1 << 8),
	IWM_NVM_CHANNEL_40MHZ = (1 << 9),
	IWM_NVM_CHANNEL_80MHZ = (1 << 10),
	IWM_NVM_CHANNEL_160MHZ = (1 << 11),
};
2558
/*
 * Build the net80211 channel table from the NVM channel flags.
 * nvm_ch_flags points at the per-channel flag words in the NVM SW
 * section, one word per entry of iwm_nvm_channels[].
 */
static void
iwm_init_channel_map(struct iwm_softc *sc, const uint16_t * const nvm_ch_flags)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_nvm_data *data = &sc->sc_nvm;
	int ch_idx;
	struct ieee80211_channel *channel;
	uint16_t ch_flags;
	int is_5ghz;
	int flags, hw_value;

	for (ch_idx = 0; ch_idx < __arraycount(iwm_nvm_channels); ch_idx++) {
		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);

		/* 5GHz entries are invalid if the SKU lacks 5GHz support. */
		if (ch_idx >= IWM_NUM_2GHZ_CHANNELS &&
		    !data->sku_cap_band_52GHz_enable)
			ch_flags &= ~IWM_NVM_CHANNEL_VALID;

		if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
			DPRINTF(("Ch. %d Flags %x [%sGHz] - No traffic\n",
			    iwm_nvm_channels[ch_idx],
			    ch_flags,
			    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
			    "5.2" : "2.4"));
			continue;
		}

		/*
		 * NOTE(review): the channel number indexes ic_channels[]
		 * directly — assumes every iwm_nvm_channels[] entry is
		 * within the ic_channels array bounds; confirm.
		 */
		hw_value = iwm_nvm_channels[ch_idx];
		channel = &ic->ic_channels[hw_value];

		is_5ghz = ch_idx >= IWM_NUM_2GHZ_CHANNELS;
		if (!is_5ghz) {
			flags = IEEE80211_CHAN_2GHZ;
			channel->ic_flags
			    = IEEE80211_CHAN_CCK
			    | IEEE80211_CHAN_OFDM
			    | IEEE80211_CHAN_DYN
			    | IEEE80211_CHAN_2GHZ;
		} else {
			flags = IEEE80211_CHAN_5GHZ;
			channel->ic_flags =
			    IEEE80211_CHAN_A;
		}
		channel->ic_freq = ieee80211_ieee2mhz(hw_value, flags);

		/* Channels not marked active may only be scanned passively. */
		if (!(ch_flags & IWM_NVM_CHANNEL_ACTIVE))
			channel->ic_flags |= IEEE80211_CHAN_PASSIVE;
	}
}
2608
/*
 * Parse the interesting fields out of the raw NVM section words into
 * sc->sc_nvm: version, radio configuration, SKU capabilities, crystal
 * calibration, MAC address and the channel map.  Returns EINVAL if
 * the radio config advertises no usable TX or RX antennas.
 *
 * NOTE(review): the tx_chains/rx_chains parameters are currently
 * unused here — confirm whether they should constrain valid_*_ant.
 */
static int
iwm_parse_nvm_data(struct iwm_softc *sc,
	const uint16_t *nvm_hw, const uint16_t *nvm_sw,
	const uint16_t *nvm_calib, uint8_t tx_chains, uint8_t rx_chains)
{
	struct iwm_nvm_data *data = &sc->sc_nvm;
	uint8_t hw_addr[ETHER_ADDR_LEN];
	uint16_t radio_cfg, sku;

	data->nvm_version = le16_to_cpup(nvm_sw + IWM_NVM_VERSION);

	/* Split the radio configuration word into its bit fields. */
	radio_cfg = le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
	data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
	data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
	data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
	data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
	data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK(radio_cfg);
	data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK(radio_cfg);

	sku = le16_to_cpup(nvm_sw + IWM_SKU);
	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
	/* 11n is force-disabled regardless of the SKU bit. */
	data->sku_cap_11n_enable = 0;

	if (!data->valid_tx_ant || !data->valid_rx_ant) {
		DPRINTF(("%s: invalid antennas (0x%x, 0x%x)\n", DEVNAME(sc),
		    data->valid_tx_ant, data->valid_rx_ant));
		return EINVAL;
	}

	data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);

	/*
	 * NOTE(review): the xtal calibration words are copied without
	 * le16 conversion — confirm this matches how they are consumed.
	 */
	data->xtal_calib[0] = *(nvm_calib + IWM_XTAL_CALIB);
	data->xtal_calib[1] = *(nvm_calib + IWM_XTAL_CALIB + 1);

	/* The byte order is little endian 16 bit, meaning 214365 */
	memcpy(hw_addr, nvm_hw + IWM_HW_ADDR, ETHER_ADDR_LEN);
	data->hw_addr[0] = hw_addr[1];
	data->hw_addr[1] = hw_addr[0];
	data->hw_addr[2] = hw_addr[3];
	data->hw_addr[3] = hw_addr[2];
	data->hw_addr[4] = hw_addr[5];
	data->hw_addr[5] = hw_addr[4];

	iwm_init_channel_map(sc, &nvm_sw[IWM_NVM_CHANNELS]);
	data->calib_version = 255;   /* TODO:
					this value will prevent some checks from
					failing, we need to check if this
					field is still needed, and if it does,
					where is it in the NVM */

	return 0;
}
2662
2663 /*
2664 * END NVM PARSE
2665 */
2666
/*
 * One NVM section as read from the firmware: `length' bytes of
 * kmem_alloc()ed payload (filled in by iwm_nvm_init()).
 */
struct iwm_nvm_section {
	uint16_t length;
	const uint8_t *data;
};
2671
2672 #define IWM_FW_VALID_TX_ANT(sc) \
2673 ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_TX_CHAIN) \
2674 >> IWM_FW_PHY_CFG_TX_CHAIN_POS)
2675 #define IWM_FW_VALID_RX_ANT(sc) \
2676 ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RX_CHAIN) \
2677 >> IWM_FW_PHY_CFG_RX_CHAIN_POS)
2678
/*
 * Hand the HW/SW/CALIBRATION section payloads to iwm_parse_nvm_data().
 *
 * NOTE(review): only the SW and HW sections are checked for presence,
 * but iwm_parse_nvm_data() also dereferences the CALIBRATION section;
 * iwm_nvm_init() is relied upon to have filled it in — confirm.
 */
static int
iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
{
	const uint16_t *hw, *sw, *calib;

	/* Checking for required sections */
	if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
	    !sections[IWM_NVM_SECTION_TYPE_HW].data) {
		DPRINTF(("%s: Can't parse empty NVM sections\n", DEVNAME(sc)));
		return ENOENT;
	}

	hw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_HW].data;
	sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
	calib = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
	return iwm_parse_nvm_data(sc, hw, sw, calib,
	    IWM_FW_VALID_TX_ANT(sc), IWM_FW_VALID_RX_ANT(sc));
}
2697
2698 static int
2699 iwm_nvm_init(struct iwm_softc *sc)
2700 {
2701 struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
2702 int i, section, error;
2703 uint16_t len;
2704 uint8_t *nvm_buffer, *temp;
2705
2706 /* Read From FW NVM */
2707 DPRINTF(("Read NVM\n"));
2708
2709 /* TODO: find correct NVM max size for a section */
2710 nvm_buffer = kmem_alloc(IWM_OTP_LOW_IMAGE_SIZE, KM_SLEEP);
2711 for (i = 0; i < __arraycount(nvm_to_read); i++) {
2712 section = nvm_to_read[i];
2713 KASSERT(section <= __arraycount(nvm_sections));
2714
2715 error = iwm_nvm_read_section(sc, section, nvm_buffer, &len);
2716 if (error)
2717 break;
2718
2719 temp = kmem_alloc(len, KM_SLEEP);
2720 memcpy(temp, nvm_buffer, len);
2721 nvm_sections[section].data = temp;
2722 nvm_sections[section].length = len;
2723 }
2724 kmem_free(nvm_buffer, IWM_OTP_LOW_IMAGE_SIZE);
2725 if (error)
2726 return error;
2727
2728 return iwm_parse_nvm_sections(sc, nvm_sections);
2729 }
2730
2731 /*
2732 * Firmware loading gunk. This is kind of a weird hybrid between the
2733 * iwn driver and the Linux iwlwifi driver.
2734 */
2735
2736 static int
2737 iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
2738 const uint8_t *section, uint32_t byte_cnt)
2739 {
2740 struct iwm_dma_info *dma = &sc->fw_dma;
2741 int error;
2742
2743 /* Copy firmware section into pre-allocated DMA-safe memory. */
2744 memcpy(dma->vaddr, section, byte_cnt);
2745 bus_dmamap_sync(sc->sc_dmat,
2746 dma->map, 0, byte_cnt, BUS_DMASYNC_PREWRITE);
2747
2748 if (!iwm_nic_lock(sc))
2749 return EBUSY;
2750
2751 sc->sc_fw_chunk_done = 0;
2752
2753 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2754 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
2755 IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
2756 dst_addr);
2757 IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
2758 dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
2759 IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
2760 (iwm_get_dma_hi_addr(dma->paddr)
2761 << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
2762 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
2763 1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
2764 1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
2765 IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
2766 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2767 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
2768 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
2769 IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
2770
2771 iwm_nic_unlock(sc);
2772
2773 /* wait 1s for this segment to load */
2774 while (!sc->sc_fw_chunk_done)
2775 if ((error = tsleep(&sc->sc_fw, 0, "iwmfw", hz)) != 0)
2776 break;
2777
2778 return error;
2779 }
2780
2781 static int
2782 iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2783 {
2784 struct iwm_fw_sects *fws;
2785 int error, i, w;
2786 void *data;
2787 uint32_t dlen;
2788 uint32_t offset;
2789
2790 sc->sc_uc.uc_intr = 0;
2791
2792 fws = &sc->sc_fw.fw_sects[ucode_type];
2793 for (i = 0; i < fws->fw_count; i++) {
2794 data = fws->fw_sect[i].fws_data;
2795 dlen = fws->fw_sect[i].fws_len;
2796 offset = fws->fw_sect[i].fws_devoff;
2797 DPRINTF(("LOAD FIRMWARE type %d offset %u len %d\n",
2798 ucode_type, offset, dlen));
2799 error = iwm_firmware_load_chunk(sc, offset, data, dlen);
2800 if (error) {
2801 DPRINTF(("iwm_firmware_load_chunk() chunk %u of %u "
2802 "returned error %02d\n", i, fws->fw_count, error));
2803 return error;
2804 }
2805 }
2806
2807 /* wait for the firmware to load */
2808 IWM_WRITE(sc, IWM_CSR_RESET, 0);
2809
2810 for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++) {
2811 error = tsleep(&sc->sc_uc, 0, "iwmuc", hz/10);
2812 }
2813
2814 return error;
2815 }
2816
2817 /* iwlwifi: pcie/trans.c */
/*
 * Initialize the NIC, clear the rfkill handshake bits, enable host
 * interrupts and load the requested ucode image into the device.
 */
static int
iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
{
	int error;

	/* Ack/clear any pending interrupts before touching the device. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);

	if ((error = iwm_nic_init(sc)) != 0) {
		aprint_error_dev(sc->sc_dev, "Unable to init nic\n");
		return error;
	}

	/* make sure rfkill handshake bits are cleared */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);

	/* really make sure rfkill handshake bits are cleared */
	/* maybe we should write a few times more? just to make sure */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);

	/* Load the given image to the HW */
	error = iwm_load_firmware(sc, ucode_type);
	if (error) {
		aprint_error_dev(sc->sc_dev, "failed to load firmware: %d\n",
		    error);
	}
	return error;
}
2852
/*
 * Hook run once the firmware has reported alive.  sched_base is
 * currently unused; we just finish post-alive initialization.
 */
static int
iwm_fw_alive(struct iwm_softc *sc, uint32_t sched_base)
{
	return iwm_post_alive(sc);
}
2858
/*
 * Inform the firmware which TX antenna chains are usable.
 */
static int
iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
{
	struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
		.valid = htole32(valid_tx_ant),
	};

	return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
	    IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
}
2869
2870 /* iwlwifi: mvm/fw.c */
/*
 * Tell the firmware which phy configuration to use and which
 * calibrations to trigger, based on the defaults recorded for the
 * currently selected ucode image.
 *
 * NOTE(review): only phy_cfg and the two calib_control fields are
 * assigned; confirm the remaining bytes of phy_cfg_cmd need no
 * initialization (this mirrors the iwlwifi original).
 */
static int
iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
{
	struct iwm_phy_cfg_cmd phy_cfg_cmd;
	enum iwm_ucode_type ucode_type = sc->sc_uc_current;

	/* Set parameters */
	phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
	phy_cfg_cmd.calib_control.event_trigger =
	    sc->sc_default_calib[ucode_type].event_trigger;
	phy_cfg_cmd.calib_control.flow_trigger =
	    sc->sc_default_calib[ucode_type].flow_trigger;

	DPRINTFN(10, ("Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg));
	return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
}
2888
2889 static int
2890 iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
2891 enum iwm_ucode_type ucode_type)
2892 {
2893 enum iwm_ucode_type old_type = sc->sc_uc_current;
2894 int error;
2895
2896 if ((error = iwm_read_firmware(sc)) != 0)
2897 return error;
2898
2899 sc->sc_uc_current = ucode_type;
2900 error = iwm_start_fw(sc, ucode_type);
2901 if (error) {
2902 sc->sc_uc_current = old_type;
2903 return error;
2904 }
2905
2906 return iwm_fw_alive(sc, sc->sched_base);
2907 }
2908
2909 /*
2910 * mvm misc bits
2911 */
2912
2913 /*
2914 * follows iwlwifi/fw.c
2915 */
/*
 * Run the init firmware image: load it, wait for alive, then either
 * just pull the NVM contents (justnvm) or kick off the phy
 * calibrations and wait for the init-complete notification.
 */
static int
iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
{
	int error;

	/* do not operate with rfkill switch turned on */
	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
		aprint_error_dev(sc->sc_dev,
		    "radio is disabled by hardware switch\n");
		return EPERM;
	}

	sc->sc_init_complete = 0;
	if ((error = iwm_mvm_load_ucode_wait_alive(sc,
	    IWM_UCODE_TYPE_INIT)) != 0)
		return error;

	if (justnvm) {
		/* Only the MAC address and the scan command sizing are needed. */
		if ((error = iwm_nvm_init(sc)) != 0) {
			aprint_error_dev(sc->sc_dev, "failed to read nvm\n");
			return error;
		}
		memcpy(&sc->sc_ic.ic_myaddr,
		    &sc->sc_nvm.hw_addr, ETHER_ADDR_LEN);

		/* Pre-size the scan command buffer for later use. */
		sc->sc_scan_cmd_len = sizeof(struct iwm_scan_cmd)
		    + sc->sc_capa_max_probe_len
		    + IWM_MAX_NUM_SCAN_CHANNELS
		    * sizeof(struct iwm_scan_channel);
		sc->sc_scan_cmd = kmem_alloc(sc->sc_scan_cmd_len, KM_SLEEP);

		return 0;
	}

	/* Send TX valid antennas before triggering calibrations */
	if ((error = iwm_send_tx_ant_cfg(sc, IWM_FW_VALID_TX_ANT(sc))) != 0)
		return error;

	/*
	 * Send phy configurations command to init uCode
	 * to start the 16.0 uCode init image internal calibrations.
	 */
	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0 ) {
		DPRINTF(("%s: failed to run internal calibration: %d\n",
		    DEVNAME(sc), error));
		return error;
	}

	/*
	 * Nothing to do but wait for the init complete notification
	 * from the firmware (each wait is bounded by a 2s timeout).
	 */
	while (!sc->sc_init_complete)
		if ((error = tsleep(&sc->sc_init_complete,
		    0, "iwminit", 2*hz)) != 0)
			break;

	return error;
}
2975
2976 /*
2977 * receive side
2978 */
2979
2980 /* (re)stock rx ring, called at init-time and at runtime */
/*
 * Allocate (or re-link) an RX mbuf for ring slot `idx', load it into
 * the slot's DMA map and write its bus address into the RX
 * descriptor.  Used for the initial ring fill and to restock a slot
 * after its packet has been passed up.
 */
static int
iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
{
	struct iwm_rx_ring *ring = &sc->rxq;
	struct iwm_rx_data *data = &ring->data[idx];
	struct mbuf *m;
	int error;
	int fatal = 0;

	m = m_gethdr(M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;

	/* Attach cluster or larger external storage depending on size. */
	if (size <= MCLBYTES) {
		MCLGET(m, M_DONTWAIT);
	} else {
		MEXTMALLOC(m, IWM_RBUF_SIZE, M_DONTWAIT);
	}
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return ENOBUFS;
	}

	/*
	 * If the slot already had a buffer we are replacing it; failing
	 * to reload afterwards would leave the ring inconsistent, hence
	 * the panic below.
	 */
	if (data->m != NULL) {
		bus_dmamap_unload(sc->sc_dmat, data->map);
		fatal = 1;
	}

	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	if ((error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT)) != 0) {
		/* XXX */
		if (fatal)
			panic("iwm: could not load RX mbuf");
		m_freem(m);
		return error;
	}
	data->m = m;
	bus_dmamap_sync(sc->sc_dmat, data->map, 0, size, BUS_DMASYNC_PREREAD);

	/* Update RX descriptor (device wants the address shifted by 8). */
	ring->desc[idx] = htole32(data->map->dm_segs[0].ds_addr >> 8);
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
	    idx * sizeof(uint32_t), sizeof(uint32_t), BUS_DMASYNC_PREWRITE);

	return 0;
}
3028
3029 /* iwlwifi: mvm/rx.c */
3030 #define IWM_RSSI_OFFSET 50
/*
 * Compute the maximum RSSI (dBm) over the two receive chains from the
 * legacy rx PHY info, compensating for the AGC gain.
 *
 * NOTE(review): the dBm expressions mix int (rssi) with uint32_t
 * (agc); the masked field values are small so the result stays in
 * range on the supported platforms — confirm.
 */
static int
iwm_mvm_calc_rssi(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
{
	int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
	uint32_t agc_a, agc_b;
	uint32_t val;

	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_AGC_IDX]);
	agc_a = (val & IWM_OFDM_AGC_A_MSK) >> IWM_OFDM_AGC_A_POS;
	agc_b = (val & IWM_OFDM_AGC_B_MSK) >> IWM_OFDM_AGC_B_POS;

	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_RSSI_AB_IDX]);
	rssi_a = (val & IWM_OFDM_RSSI_INBAND_A_MSK) >> IWM_OFDM_RSSI_A_POS;
	rssi_b = (val & IWM_OFDM_RSSI_INBAND_B_MSK) >> IWM_OFDM_RSSI_B_POS;

	/*
	 * dBm = rssi dB - agc dB - constant.
	 * Higher AGC (higher radio gain) means lower signal.
	 */
	rssi_a_dbm = rssi_a - IWM_RSSI_OFFSET - agc_a;
	rssi_b_dbm = rssi_b - IWM_RSSI_OFFSET - agc_b;
	max_rssi_dbm = MAX(rssi_a_dbm, rssi_b_dbm);

	DPRINTF(("Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
	    rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b));

	return max_rssi_dbm;
}
3059
3060 /* iwlwifi: mvm/rx.c */
3061 /*
3062 * iwm_mvm_get_signal_strength - use new rx PHY INFO API
3063 * values are reported by the fw as positive values - need to negate
3064 * to obtain their dBM. Account for missing antennas by replacing 0
3065 * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
3066 */
3067 static int
3068 iwm_mvm_get_signal_strength(struct iwm_softc *sc,
3069 struct iwm_rx_phy_info *phy_info)
3070 {
3071 int energy_a, energy_b, energy_c, max_energy;
3072 uint32_t val;
3073
3074 val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
3075 energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3076 IWM_RX_INFO_ENERGY_ANT_A_POS;
3077 energy_a = energy_a ? -energy_a : -256;
3078 energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3079 IWM_RX_INFO_ENERGY_ANT_B_POS;
3080 energy_b = energy_b ? -energy_b : -256;
3081 energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3082 IWM_RX_INFO_ENERGY_ANT_C_POS;
3083 energy_c = energy_c ? -energy_c : -256;
3084 max_energy = MAX(energy_a, energy_b);
3085 max_energy = MAX(max_energy, energy_c);
3086
3087 DPRINTFN(12, ("energy In A %d B %d C %d, and max %d\n",
3088 energy_a, energy_b, energy_c, max_energy));
3089
3090 return max_energy;
3091 }
3092
/*
 * Handle an IWM_RX_PHY_INFO notification: cache the PHY info so the
 * following IWM_REPLY_RX_MPDU_CMD (handled in iwm_mvm_rx_rx_mpdu())
 * can read it from sc_last_phy_info.
 */
static void
iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc,
	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
{
	struct iwm_rx_phy_info *phy_info = (void *)pkt->data;

	DPRINTFN(20, ("received PHY stats\n"));
	/* Make the device's writes to the payload visible before copying. */
	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
	    sizeof(*phy_info), BUS_DMASYNC_POSTREAD);

	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
}
3105
3106 /*
3107 * Retrieve the average noise (in dBm) among receivers.
3108 */
3109 static int
3110 iwm_get_noise(const struct iwm_mvm_statistics_rx_non_phy *stats)
3111 {
3112 int i, total, nbant, noise;
3113
3114 total = nbant = noise = 0;
3115 for (i = 0; i < 3; i++) {
3116 noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
3117 if (noise) {
3118 total += noise;
3119 nbant++;
3120 }
3121 }
3122
3123 /* There should be at least one antenna but check anyway. */
3124 return (nbant == 0) ? -127 : (total / nbant) - 107;
3125 }
3126
3127 /*
3128 * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
3129 *
3130 * Handles the actual data of the Rx packet from the fw
3131 */
3132 static void
3133 iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc,
3134 struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
3135 {
3136 struct ieee80211com *ic = &sc->sc_ic;
3137 struct ieee80211_frame *wh;
3138 struct ieee80211_node *ni;
3139 struct ieee80211_channel *c = NULL;
3140 struct mbuf *m;
3141 struct iwm_rx_phy_info *phy_info;
3142 struct iwm_rx_mpdu_res_start *rx_res;
3143 int device_timestamp;
3144 uint32_t len;
3145 uint32_t rx_pkt_status;
3146 int rssi;
3147
3148 bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
3149 BUS_DMASYNC_POSTREAD);
3150
3151 phy_info = &sc->sc_last_phy_info;
3152 rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
3153 wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
3154 len = le16toh(rx_res->byte_count);
3155 rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
3156
3157 m = data->m;
3158 m->m_data = pkt->data + sizeof(*rx_res);
3159 m->m_pkthdr.len = m->m_len = len;
3160
3161 if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
3162 DPRINTF(("dsp size out of range [0,20]: %d\n",
3163 phy_info->cfg_phy_cnt));
3164 return;
3165 }
3166
3167 if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
3168 !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
3169 DPRINTF(("Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status));
3170 return; /* drop */
3171 }
3172
3173 device_timestamp = le32toh(phy_info->system_timestamp);
3174
3175 if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_RX_ENERGY_API) {
3176 rssi = iwm_mvm_get_signal_strength(sc, phy_info);
3177 } else {
3178 rssi = iwm_mvm_calc_rssi(sc, phy_info);
3179 }
3180 rssi = -rssi;
3181
3182 if (ic->ic_state == IEEE80211_S_SCAN)
3183 iwm_fix_channel(ic, m);
3184
3185 /* replenish ring for the buffer we're going to feed to the sharks */
3186 if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0)
3187 return;
3188
3189 m->m_pkthdr.rcvif = IC2IFP(ic);
3190
3191 if (sc->sc_scanband == IEEE80211_CHAN_5GHZ) {
3192 if (le32toh(phy_info->channel) < __arraycount(ic->ic_channels))
3193 c = &ic->ic_channels[le32toh(phy_info->channel)];
3194 }
3195
3196 ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3197 if (c)
3198 ni->ni_chan = c;
3199
3200 if (sc->sc_drvbpf != NULL) {
3201 struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3202
3203 tap->wr_flags = 0;
3204 if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3205 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3206 tap->wr_chan_freq =
3207 htole16(ic->ic_channels[phy_info->channel].ic_freq);
3208 tap->wr_chan_flags =
3209 htole16(ic->ic_channels[phy_info->channel].ic_flags);
3210 tap->wr_dbm_antsignal = (int8_t)rssi;
3211 tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3212 tap->wr_tsft = phy_info->system_timestamp;
3213 switch (phy_info->rate) {
3214 /* CCK rates. */
3215 case 10: tap->wr_rate = 2; break;
3216 case 20: tap->wr_rate = 4; break;
3217 case 55: tap->wr_rate = 11; break;
3218 case 110: tap->wr_rate = 22; break;
3219 /* OFDM rates. */
3220 case 0xd: tap->wr_rate = 12; break;
3221 case 0xf: tap->wr_rate = 18; break;
3222 case 0x5: tap->wr_rate = 24; break;
3223 case 0x7: tap->wr_rate = 36; break;
3224 case 0x9: tap->wr_rate = 48; break;
3225 case 0xb: tap->wr_rate = 72; break;
3226 case 0x1: tap->wr_rate = 96; break;
3227 case 0x3: tap->wr_rate = 108; break;
3228 /* Unknown rate: should not happen. */
3229 default: tap->wr_rate = 0;
3230 }
3231
3232 bpf_mtap2(sc->sc_drvbpf, tap, sc->sc_rxtap_len, m);
3233 }
3234 ieee80211_input(ic, m, ni, rssi, device_timestamp);
3235 ieee80211_free_node(ni);
3236 }
3237
3238 static void
3239 iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3240 struct iwm_node *in)
3241 {
3242 struct ieee80211com *ic = &sc->sc_ic;
3243 struct ifnet *ifp = IC2IFP(ic);
3244 struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
3245 int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3246 int failack = tx_resp->failure_frame;
3247
3248 KASSERT(tx_resp->frame_count == 1);
3249
3250 /* Update rate control statistics. */
3251 in->in_amn.amn_txcnt++;
3252 if (failack > 0) {
3253 in->in_amn.amn_retrycnt++;
3254 }
3255
3256 if (status != IWM_TX_STATUS_SUCCESS &&
3257 status != IWM_TX_STATUS_DIRECT_DONE)
3258 ifp->if_oerrors++;
3259 else
3260 ifp->if_opackets++;
3261 }
3262
/*
 * IWM_TX_CMD response handler: reclaim the TX ring slot used by the
 * completed frame, release its mbuf and node reference, and restart
 * the interface queue if the ring drains below the low-water mark.
 */
static void
iwm_mvm_rx_tx_cmd(struct iwm_softc *sc,
	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = IC2IFP(ic);
	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
	int idx = cmd_hdr->idx;
	int qid = cmd_hdr->qid;
	struct iwm_tx_ring *ring = &sc->txq[qid];
	struct iwm_tx_data *txd = &ring->data[idx];
	struct iwm_node *in = txd->in;

	/* Guard against duplicate completion interrupts for this slot. */
	if (txd->done) {
		DPRINTF(("%s: got tx interrupt that's already been handled!\n",
		    DEVNAME(sc)));
		return;
	}

	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
	    BUS_DMASYNC_POSTREAD);

	/* A completion arrived; reset the watchdog. */
	sc->sc_tx_timer = 0;

	iwm_mvm_rx_tx_cmd_single(sc, pkt, in);

	/* Unmap and free mbuf. */
	bus_dmamap_sync(sc->sc_dmat, txd->map, 0, txd->map->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, txd->map);
	m_freem(txd->m);

	DPRINTFN(8, ("free txd %p, in %p\n", txd, txd->in));
	KASSERT(txd->done == 0);
	txd->done = 1;
	KASSERT(txd->in);

	txd->m = NULL;
	txd->in = NULL;
	/* Drop the node reference taken when the frame was queued. */
	ieee80211_free_node(&in->in_ni);

	if (--ring->queued < IWM_TX_RING_LOMARK) {
		sc->qfullmsk &= ~(1 << ring->qid);
		if (sc->qfullmsk == 0 && (ifp->if_flags & IFF_OACTIVE)) {
			ifp->if_flags &= ~IFF_OACTIVE;
			/*
			 * Well, we're in interrupt context, but then again
			 * I guess net80211 does all sorts of stunts in
			 * interrupt context, so maybe this is no biggie.
			 */
			(*ifp->if_start)(ifp);
		}
	}
}
3317
3318 /*
3319 * BEGIN iwlwifi/mvm/binding.c
3320 */
3321
/*
 * Send an IWM_BINDING_CONTEXT_CMD to the firmware, tying the node's
 * MAC context to its PHY context.  'action' is one of the
 * IWM_FW_CTXT_ACTION_* values.  Returns 0 on success, an errno
 * (EIO if the firmware rejected the command) otherwise.
 */
static int
iwm_mvm_binding_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action)
{
	struct iwm_binding_cmd cmd;
	struct iwm_mvm_phy_ctxt *phyctxt = in->in_phyctxt;
	int i, ret;
	uint32_t status;

	memset(&cmd, 0, sizeof(cmd));

	/* Binding id shares the PHY context's id/color. */
	cmd.id_and_color
	    = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
	cmd.action = htole32(action);
	cmd.phy = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));

	/* Only one MAC per binding; mark the remaining slots invalid. */
	cmd.macs[0] = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
	for (i = 1; i < IWM_MAX_MACS_IN_BINDING; i++)
		cmd.macs[i] = htole32(IWM_FW_CTXT_INVALID);

	status = 0;
	ret = iwm_mvm_send_cmd_pdu_status(sc, IWM_BINDING_CONTEXT_CMD,
	    sizeof(cmd), &cmd, &status);
	if (ret) {
		DPRINTF(("%s: Failed to send binding (action:%d): %d\n",
		    DEVNAME(sc), action, ret));
		return ret;
	}

	/* Command delivered but rejected by firmware. */
	if (status) {
		DPRINTF(("%s: Binding command failed: %u\n", DEVNAME(sc),
		    status));
		ret = EIO;
	}

	return ret;
}
3358
3359 static int
3360 iwm_mvm_binding_update(struct iwm_softc *sc, struct iwm_node *in, int add)
3361 {
3362 return iwm_mvm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD);
3363 }
3364
3365 static int
3366 iwm_mvm_binding_add_vif(struct iwm_softc *sc, struct iwm_node *in)
3367 {
3368 return iwm_mvm_binding_update(sc, in, IWM_FW_CTXT_ACTION_ADD);
3369 }
3370
3371 /*
3372 * END iwlwifi/mvm/binding.c
3373 */
3374
3375 /*
3376 * BEGIN iwlwifi/mvm/phy-ctxt.c
3377 */
3378
3379 /*
3380 * Construct the generic fields of the PHY context command
3381 */
3382 static void
3383 iwm_mvm_phy_ctxt_cmd_hdr(struct iwm_softc *sc, struct iwm_mvm_phy_ctxt *ctxt,
3384 struct iwm_phy_context_cmd *cmd, uint32_t action, uint32_t apply_time)
3385 {
3386 memset(cmd, 0, sizeof(struct iwm_phy_context_cmd));
3387
3388 cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(ctxt->id,
3389 ctxt->color));
3390 cmd->action = htole32(action);
3391 cmd->apply_time = htole32(apply_time);
3392 }
3393
3394 /*
3395 * Add the phy configuration to the PHY context command
3396 */
3397 static void
3398 iwm_mvm_phy_ctxt_cmd_data(struct iwm_softc *sc,
3399 struct iwm_phy_context_cmd *cmd, struct ieee80211_channel *chan,
3400 uint8_t chains_static, uint8_t chains_dynamic)
3401 {
3402 struct ieee80211com *ic = &sc->sc_ic;
3403 uint8_t active_cnt, idle_cnt;
3404
3405 cmd->ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
3406 IWM_PHY_BAND_24 : IWM_PHY_BAND_5;
3407
3408 cmd->ci.channel = ieee80211_chan2ieee(ic, chan);
3409 cmd->ci.width = IWM_PHY_VHT_CHANNEL_MODE20;
3410 cmd->ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
3411
3412 /* Set rx the chains */
3413 idle_cnt = chains_static;
3414 active_cnt = chains_dynamic;
3415
3416 cmd->rxchain_info = htole32(IWM_FW_VALID_RX_ANT(sc) <<
3417 IWM_PHY_RX_CHAIN_VALID_POS);
3418 cmd->rxchain_info |= htole32(idle_cnt << IWM_PHY_RX_CHAIN_CNT_POS);
3419 cmd->rxchain_info |= htole32(active_cnt <<
3420 IWM_PHY_RX_CHAIN_MIMO_CNT_POS);
3421
3422 cmd->txchain_info = htole32(IWM_FW_VALID_TX_ANT(sc));
3423 }
3424
3425 /*
3426 * Send a command
3427 * only if something in the configuration changed: in case that this is the
3428 * first time that the phy configuration is applied or in case that the phy
3429 * configuration changed from the previous apply.
3430 */
3431 static int
3432 iwm_mvm_phy_ctxt_apply(struct iwm_softc *sc,
3433 struct iwm_mvm_phy_ctxt *ctxt,
3434 uint8_t chains_static, uint8_t chains_dynamic,
3435 uint32_t action, uint32_t apply_time)
3436 {
3437 struct iwm_phy_context_cmd cmd;
3438 int ret;
3439
3440 /* Set the command header fields */
3441 iwm_mvm_phy_ctxt_cmd_hdr(sc, ctxt, &cmd, action, apply_time);
3442
3443 /* Set the command data */
3444 iwm_mvm_phy_ctxt_cmd_data(sc, &cmd, ctxt->channel,
3445 chains_static, chains_dynamic);
3446
3447 ret = iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONTEXT_CMD, IWM_CMD_SYNC,
3448 sizeof(struct iwm_phy_context_cmd), &cmd);
3449 if (ret) {
3450 DPRINTF(("PHY ctxt cmd error. ret=%d\n", ret));
3451 }
3452 return ret;
3453 }
3454
3455 /*
3456 * Send a command to add a PHY context based on the current HW configuration.
3457 */
3458 static int
3459 iwm_mvm_phy_ctxt_add(struct iwm_softc *sc, struct iwm_mvm_phy_ctxt *ctxt,
3460 struct ieee80211_channel *chan,
3461 uint8_t chains_static, uint8_t chains_dynamic)
3462 {
3463 ctxt->channel = chan;
3464 return iwm_mvm_phy_ctxt_apply(sc, ctxt,
3465 chains_static, chains_dynamic, IWM_FW_CTXT_ACTION_ADD, 0);
3466 }
3467
3468 /*
3469 * Send a command to modify the PHY context based on the current HW
3470 * configuration. Note that the function does not check that the configuration
3471 * changed.
3472 */
3473 static int
3474 iwm_mvm_phy_ctxt_changed(struct iwm_softc *sc,
3475 struct iwm_mvm_phy_ctxt *ctxt, struct ieee80211_channel *chan,
3476 uint8_t chains_static, uint8_t chains_dynamic)
3477 {
3478 ctxt->channel = chan;
3479 return iwm_mvm_phy_ctxt_apply(sc, ctxt,
3480 chains_static, chains_dynamic, IWM_FW_CTXT_ACTION_MODIFY, 0);
3481 }
3482
3483 /*
3484 * END iwlwifi/mvm/phy-ctxt.c
3485 */
3486
3487 /*
3488 * transmit side
3489 */
3490
3491 /*
3492 * Send a command to the firmware. We try to implement the Linux
3493 * driver interface for the routine.
3494 * mostly from if_iwn (iwn_cmd()).
3495 *
3496 * For now, we always copy the first part and map the second one (if it exists).
3497 */
3498 static int
3499 iwm_send_cmd(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
3500 {
3501 struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
3502 struct iwm_tfd *desc;
3503 struct iwm_tx_data *data;
3504 struct iwm_device_cmd *cmd;
3505 struct mbuf *m;
3506 bus_addr_t paddr;
3507 uint32_t addr_lo;
3508 int error = 0, i, paylen, off, s;
3509 int code;
3510 int async, wantresp;
3511
3512 code = hcmd->id;
3513 async = hcmd->flags & IWM_CMD_ASYNC;
3514 wantresp = hcmd->flags & IWM_CMD_WANT_SKB;
3515
3516 for (i = 0, paylen = 0; i < __arraycount(hcmd->len); i++) {
3517 paylen += hcmd->len[i];
3518 }
3519
3520 /* if the command wants an answer, busy sc_cmd_resp */
3521 if (wantresp) {
3522 KASSERT(!async);
3523 while (sc->sc_wantresp != -1)
3524 tsleep(&sc->sc_wantresp, 0, "iwmcmdsl", 0);
3525 sc->sc_wantresp = ring->qid << 16 | ring->cur;
3526 DPRINTFN(12, ("wantresp is %x\n", sc->sc_wantresp));
3527 }
3528
3529 /*
3530 * Is the hardware still available? (after e.g. above wait).
3531 */
3532 s = splnet();
3533 if (sc->sc_flags & IWM_FLAG_STOPPED) {
3534 error = ENXIO;
3535 goto out;
3536 }
3537
3538 desc = &ring->desc[ring->cur];
3539 data = &ring->data[ring->cur];
3540
3541 if (paylen > sizeof(cmd->data)) {
3542 /* Command is too large */
3543 if (sizeof(cmd->hdr) + paylen > IWM_RBUF_SIZE) {
3544 error = EINVAL;
3545 goto out;
3546 }
3547 m = m_gethdr(M_DONTWAIT, MT_DATA);
3548 if (m == NULL) {
3549 error = ENOMEM;
3550 goto out;
3551 }
3552 MEXTMALLOC(m, IWM_RBUF_SIZE, M_DONTWAIT);
3553 if (!(m->m_flags & M_EXT)) {
3554 m_freem(m);
3555 error = ENOMEM;
3556 goto out;
3557 }
3558 cmd = mtod(m, struct iwm_device_cmd *);
3559 error = bus_dmamap_load(sc->sc_dmat, data->map, cmd,
3560 IWM_RBUF_SIZE, NULL, BUS_DMA_NOWAIT | BUS_DMA_WRITE);
3561 if (error != 0) {
3562 m_freem(m);
3563 goto out;
3564 }
3565 data->m = m;
3566 paddr = data->map->dm_segs[0].ds_addr;
3567 } else {
3568 cmd = &ring->cmd[ring->cur];
3569 paddr = data->cmd_paddr;
3570 }
3571
3572 cmd->hdr.code = code;
3573 cmd->hdr.flags = 0;
3574 cmd->hdr.qid = ring->qid;
3575 cmd->hdr.idx = ring->cur;
3576
3577 for (i = 0, off = 0; i < __arraycount(hcmd->data); i++) {
3578 if (hcmd->len[i] == 0)
3579 continue;
3580 memcpy(cmd->data + off, hcmd->data[i], hcmd->len[i]);
3581 off += hcmd->len[i];
3582 }
3583 KASSERT(off == paylen);
3584
3585 /* lo field is not aligned */
3586 addr_lo = htole32((uint32_t)paddr);
3587 memcpy(&desc->tbs[0].lo, &addr_lo, sizeof(uint32_t));
3588 desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(paddr)
3589 | ((sizeof(cmd->hdr) + paylen) << 4));
3590 desc->num_tbs = 1;
3591
3592 DPRINTFN(8, ("iwm_send_cmd 0x%x size=%zu %s\n",
3593 code, sizeof(cmd->hdr) + paylen, async ? " (async)" : ""));
3594
3595 if (paylen > sizeof(cmd->data)) {
3596 bus_dmamap_sync(sc->sc_dmat, data->map, 0,
3597 sizeof(cmd->hdr) + paylen, BUS_DMASYNC_PREWRITE);
3598 } else {
3599 bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
3600 (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
3601 sizeof(cmd->hdr) + paylen, BUS_DMASYNC_PREWRITE);
3602 }
3603 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
3604 (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
3605 sizeof (*desc), BUS_DMASYNC_PREWRITE);
3606
3607 IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
3608 IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
3609 if (!iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
3610 IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
3611 (IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
3612 IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000)) {
3613 DPRINTF(("%s: acquiring device failed\n", DEVNAME(sc)));
3614 error = EBUSY;
3615 goto out;
3616 }
3617
3618 #if 0
3619 iwm_update_sched(sc, ring->qid, ring->cur, 0, 0);
3620 #endif
3621 DPRINTF(("sending command 0x%x qid %d, idx %d\n",
3622 code, ring->qid, ring->cur));
3623
3624 /* Kick command ring. */
3625 ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
3626 IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3627
3628 if (!async) {
3629 /* m..m-mmyy-mmyyyy-mym-ym m-my generation */
3630 int generation = sc->sc_generation;
3631 error = tsleep(desc, PCATCH, "iwmcmd", hz);
3632 if (error == 0) {
3633 /* if hardware is no longer up, return error */
3634 if (generation != sc->sc_generation) {
3635 error = ENXIO;
3636 } else {
3637 hcmd->resp_pkt = (void *)sc->sc_cmd_resp;
3638 }
3639 }
3640 }
3641 out:
3642 if (wantresp && error != 0) {
3643 iwm_free_resp(sc, hcmd);
3644 }
3645 splx(s);
3646
3647 return error;
3648 }
3649
3650 /* iwlwifi: mvm/utils.c */
3651 static int
3652 iwm_mvm_send_cmd_pdu(struct iwm_softc *sc, uint8_t id,
3653 uint32_t flags, uint16_t len, const void *data)
3654 {
3655 struct iwm_host_cmd cmd = {
3656 .id = id,
3657 .len = { len, },
3658 .data = { data, },
3659 .flags = flags,
3660 };
3661
3662 return iwm_send_cmd(sc, &cmd);
3663 }
3664
/* iwlwifi: mvm/utils.c */
/*
 * Send a command synchronously and extract the firmware's 32-bit
 * status word from the response.  Returns an errno for transport
 * failures; firmware-level failures are reported via *status.
 */
static int
iwm_mvm_send_cmd_status(struct iwm_softc *sc,
	struct iwm_host_cmd *cmd, uint32_t *status)
{
	struct iwm_rx_packet *pkt;
	struct iwm_cmd_response *resp;
	int error, resp_len;

	//lockdep_assert_held(&mvm->mutex);

	/* We request the response buffer ourselves; caller must not. */
	KASSERT((cmd->flags & IWM_CMD_WANT_SKB) == 0);
	cmd->flags |= IWM_CMD_SYNC | IWM_CMD_WANT_SKB;

	if ((error = iwm_send_cmd(sc, cmd)) != 0)
		return error;
	pkt = cmd->resp_pkt;

	/* Can happen if RFKILL is asserted */
	if (!pkt) {
		error = 0;
		goto out_free_resp;
	}

	if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
		error = EIO;
		goto out_free_resp;
	}

	/* The response must be exactly one iwm_cmd_response. */
	resp_len = iwm_rx_packet_payload_len(pkt);
	if (resp_len != sizeof(*resp)) {
		error = EIO;
		goto out_free_resp;
	}

	resp = (void *)pkt->data;
	*status = le32toh(resp->status);
 out_free_resp:
	/* Always release the response slot reserved by iwm_send_cmd(). */
	iwm_free_resp(sc, cmd);
	return error;
}
3706
3707 /* iwlwifi/mvm/utils.c */
3708 static int
3709 iwm_mvm_send_cmd_pdu_status(struct iwm_softc *sc, uint8_t id,
3710 uint16_t len, const void *data, uint32_t *status)
3711 {
3712 struct iwm_host_cmd cmd = {
3713 .id = id,
3714 .len = { len, },
3715 .data = { data, },
3716 };
3717
3718 return iwm_mvm_send_cmd_status(sc, &cmd, status);
3719 }
3720
/*
 * Release the synchronous-response slot held by a IWM_CMD_WANT_SKB
 * command and wake any thread waiting in iwm_send_cmd() to claim it.
 */
static void
iwm_free_resp(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
{
	KASSERT(sc->sc_wantresp != -1);
	KASSERT((hcmd->flags & (IWM_CMD_WANT_SKB|IWM_CMD_SYNC))
	    == (IWM_CMD_WANT_SKB|IWM_CMD_SYNC));
	sc->sc_wantresp = -1;
	wakeup(&sc->sc_wantresp);
}
3730
3731 /*
3732 * Process a "command done" firmware notification. This is where we wakeup
3733 * processes waiting for a synchronous command completion.
3734 * from if_iwn
3735 */
3736 static void
3737 iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3738 {
3739 struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
3740 struct iwm_tx_data *data;
3741
3742 if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
3743 return; /* Not a command ack. */
3744 }
3745
3746 data = &ring->data[pkt->hdr.idx];
3747
3748 /* If the command was mapped in an mbuf, free it. */
3749 if (data->m != NULL) {
3750 bus_dmamap_sync(sc->sc_dmat, data->map, 0,
3751 data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
3752 bus_dmamap_unload(sc->sc_dmat, data->map);
3753 m_freem(data->m);
3754 data->m = NULL;
3755 }
3756 wakeup(&ring->desc[pkt->hdr.idx]);
3757 }
3758
#if 0
/*
 * necessary only for block ack mode
 *
 * Update the byte-count table used by the TX scheduler.
 *
 * Fix: the original bus_dmamap_sync() offset computations referenced an
 * undefined variable 'w', so this (currently #if 0'd) code would not
 * compile if enabled.  Introduce 'w' as a pointer to the table entry
 * being updated and use it consistently for the store and both syncs.
 */
void
iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
	uint16_t len)
{
	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
	uint16_t *w, w_val;

	scd_bc_tbl = sc->sched_dma.vaddr;

	len += 8; /* magic numbers came naturally from paris */
	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
		len = roundup(len, 4) / 4;

	w_val = htole16(sta_id << 12 | len);

	/* Update TX scheduler. */
	w = &scd_bc_tbl[qid].tfd_offset[idx];
	*w = w_val;
	bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
	    (char *)(void *)w - (char *)(void *)sc->sched_dma.vaddr,
	    sizeof(uint16_t), BUS_DMASYNC_PREWRITE);

	/*
	 * The first IWM_TFD_QUEUE_SIZE_BC_DUP entries are duplicated
	 * past the end of the table so the hardware can read a full
	 * window without wrapping.
	 */
	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
		w[IWM_TFD_QUEUE_SIZE_MAX] = w_val;
		bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
		    (char *)(void *)(w + IWM_TFD_QUEUE_SIZE_MAX) -
		    (char *)(void *)sc->sched_dma.vaddr,
		    sizeof (uint16_t), BUS_DMASYNC_PREWRITE);
	}
}
#endif
3794
3795 /*
3796 * Fill in various bit for management frames, and leave them
3797 * unfilled for data frames (firmware takes care of that).
3798 * Return the selected TX rate.
3799 */
3800 static const struct iwm_rate *
3801 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
3802 struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
3803 {
3804 struct ieee80211com *ic = &sc->sc_ic;
3805 struct ieee80211_node *ni = &in->in_ni;
3806 const struct iwm_rate *rinfo;
3807 int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3808 int ridx, rate_flags;
3809 int nrates = ni->ni_rates.rs_nrates;
3810
3811 tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
3812 tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
3813
3814 if (type != IEEE80211_FC0_TYPE_DATA) {
3815 /* for non-data, use the lowest supported rate */
3816 ridx = (ic->ic_curmode == IEEE80211_MODE_11A) ?
3817 IWM_RIDX_OFDM : IWM_RIDX_CCK;
3818 } else if (ic->ic_fixed_rate != -1) {
3819 ridx = sc->sc_fixed_ridx;
3820 } else {
3821 /* for data frames, use RS table */
3822 tx->initial_rate_index = (nrates - 1) - ni->ni_txrate;
3823 tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
3824 DPRINTFN(12, ("start with txrate %d\n", tx->initial_rate_index));
3825 ridx = in->in_ridx[ni->ni_txrate];
3826 return &iwm_rates[ridx];
3827 }
3828
3829 rinfo = &iwm_rates[ridx];
3830 rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
3831 if (IWM_RIDX_IS_CCK(ridx))
3832 rate_flags |= IWM_RATE_MCS_CCK_MSK;
3833 tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
3834
3835 return rinfo;
3836 }
3837
#define TB0_SIZE 16
/*
 * Queue one frame for transmission on the given AC's TX ring.
 *
 * Builds the iwm_tx_cmd in the ring's command slot (with the 802.11
 * header copied in after it), DMA-maps the payload, fills the TFD, and
 * kicks the ring's write pointer.  Consumes the mbuf on both success
 * and failure paths.  Returns 0 or an errno.
 */
static int
iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (void *)ni;
	struct iwm_tx_ring *ring;
	struct iwm_tx_data *data;
	struct iwm_tfd *desc;
	struct iwm_device_cmd *cmd;
	struct iwm_tx_cmd *tx;
	struct ieee80211_frame *wh;
	struct ieee80211_key *k = NULL;
	struct mbuf *m1;
	const struct iwm_rate *rinfo;
	uint32_t flags;
	u_int hdrlen;
	bus_dma_segment_t *seg;
	uint8_t tid, type;
	int i, totlen, error, pad;
	int hdrlen2;

	wh = mtod(m, struct ieee80211_frame *);
	hdrlen = ieee80211_anyhdrsize(wh);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;

	/* Sanity cross-check of the computed header length. */
	hdrlen2 = (ieee80211_has_qos(wh)) ?
	    sizeof (struct ieee80211_qosframe) :
	    sizeof (struct ieee80211_frame);

	if (hdrlen != hdrlen2)
		DPRINTF(("%s: hdrlen error (%d != %d)\n",
		    DEVNAME(sc), hdrlen, hdrlen2));

	/* No QoS/aggregation support yet: everything goes on TID 0. */
	tid = 0;

	ring = &sc->txq[ac];
	desc = &ring->desc[ring->cur];
	memset(desc, 0, sizeof(*desc));
	data = &ring->data[ring->cur];

	/* Fill out iwm_tx_cmd to send to the firmware */
	cmd = &ring->cmd[ring->cur];
	cmd->hdr.code = IWM_TX_CMD;
	cmd->hdr.flags = 0;
	cmd->hdr.qid = ring->qid;
	cmd->hdr.idx = ring->cur;

	tx = (void *)cmd->data;
	memset(tx, 0, sizeof(*tx));

	rinfo = iwm_tx_fill_cmd(sc, in, wh, tx);

	if (sc->sc_drvbpf != NULL) {
		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;

		tap->wt_flags = 0;
		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
		tap->wt_rate = rinfo->rate;
		tap->wt_hwqueue = ac;
		if (wh->i_fc[1] & IEEE80211_FC1_WEP)
			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;

		bpf_mtap2(sc->sc_drvbpf, tap, sc->sc_txtap_len, m);
	}

	/* Encrypt the frame if need be. */
	if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
		k = ieee80211_crypto_encap(ic, ni, m);
		if (k == NULL) {
			m_freem(m);
			return ENOBUFS;
		}
		/* Packet header may have moved, reset our local pointer. */
		wh = mtod(m, struct ieee80211_frame *);
	}
	totlen = m->m_pkthdr.len;

	flags = 0;
	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_ACK;
	}

	/*
	 * NOTE(review): protection is requested only for non-data frames
	 * over the RTS threshold here -- confirm against the firmware TX
	 * API whether data frames get protection elsewhere.
	 */
	if (type != IEEE80211_FC0_TYPE_DATA
	    && (totlen + IEEE80211_CRC_LEN > ic->ic_rtsthreshold)
	    && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
	}

	/* Non-data and multicast frames go via the auxiliary station. */
	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
	    type != IEEE80211_FC0_TYPE_DATA)
		tx->sta_id = sc->sc_aux_sta.sta_id;
	else
		tx->sta_id = IWM_STATION_ID;

	if (type == IEEE80211_FC0_TYPE_MGT) {
		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

		/* Stay awake a bit longer while association is pending. */
		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
			tx->pm_frame_timeout = htole16(3);
		else
			tx->pm_frame_timeout = htole16(2);
	} else {
		tx->pm_frame_timeout = htole16(0);
	}

	if (hdrlen & 3) {
		/* First segment length must be a multiple of 4. */
		flags |= IWM_TX_CMD_FLG_MH_PAD;
		pad = 4 - (hdrlen & 3);
	} else
		pad = 0;

	tx->driver_txop = 0;
	tx->next_frame_len = 0;

	tx->len = htole16(totlen);
	tx->tid_tspec = tid;
	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);

	/* Set physical address of "scratch area". */
	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);

	/* Copy 802.11 header in TX command. */
	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);

	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;

	tx->sec_ctl = 0;
	tx->tx_flags |= htole32(flags);

	/* Trim 802.11 header. */
	/* The header travels with the command; only payload is mapped. */
	m_adj(m, hdrlen);

	error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
	    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
	if (error != 0) {
		if (error != EFBIG) {
			aprint_error_dev(sc->sc_dev,
			    "can't map mbuf (error %d)\n", error);
			m_freem(m);
			return error;
		}
		/* Too many DMA segments, linearize mbuf. */
		MGETHDR(m1, M_DONTWAIT, MT_DATA);
		if (m1 == NULL) {
			m_freem(m);
			return ENOBUFS;
		}
		if (m->m_pkthdr.len > MHLEN) {
			MCLGET(m1, M_DONTWAIT);
			if (!(m1->m_flags & M_EXT)) {
				m_freem(m);
				m_freem(m1);
				return ENOBUFS;
			}
		}
		m_copydata(m, 0, m->m_pkthdr.len, mtod(m1, void *));
		m1->m_pkthdr.len = m1->m_len = m->m_pkthdr.len;
		m_freem(m);
		m = m1;

		error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
		    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "can't map mbuf (error %d)\n", error);
			m_freem(m);
			return error;
		}
	}
	data->m = m;
	data->in = in;
	data->done = 0;

	DPRINTFN(8, ("sending txd %p, in %p\n", data, data->in));
	KASSERT(data->in != NULL);

	DPRINTFN(8, ("sending data: qid=%d idx=%d len=%d nsegs=%d\n",
	    ring->qid, ring->cur, totlen, data->map->dm_nsegs));

	/* Fill TX descriptor. */
	/* TB0/TB1 cover the command+header; payload follows in TB2+. */
	desc->num_tbs = 2 + data->map->dm_nsegs;

	desc->tbs[0].lo = htole32(data->cmd_paddr);
	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    (TB0_SIZE << 4);
	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
	      + hdrlen + pad - TB0_SIZE) << 4);

	/* Other DMA segments are for data payload. */
	seg = data->map->dm_segs;
	for (i = 0; i < data->map->dm_nsegs; i++, seg++) {
		desc->tbs[i+2].lo = htole32(seg->ds_addr);
		desc->tbs[i+2].hi_n_len = \
		    htole16(iwm_get_dma_hi_addr(seg->ds_addr))
		    | ((seg->ds_len) << 4);
	}

	/* Flush payload, command, and descriptor before the doorbell. */
	bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
	    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
	    sizeof (*cmd), BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
	    sizeof (*desc), BUS_DMASYNC_PREWRITE);

#if 0
	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
#endif

	/* Kick TX ring. */
	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	/* Mark TX ring as full if we reach a certain threshold. */
	if (++ring->queued > IWM_TX_RING_HIMARK) {
		sc->qfullmsk |= 1 << ring->qid;
	}

	return 0;
}
4066
#if 0
/* not necessary? */
/*
 * Ask the firmware to flush the TX FIFOs selected by 'tfd_msk'.
 * Currently unused (compiled out).
 */
static int
iwm_mvm_flush_tx_path(struct iwm_softc *sc, int tfd_msk, int sync)
{
	struct iwm_tx_path_flush_cmd flush_cmd = {
		.queues_ctl = htole32(tfd_msk),
		.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
	};
	int ret;

	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH,
	    sync ? IWM_CMD_SYNC : IWM_CMD_ASYNC,
	    sizeof(flush_cmd), &flush_cmd);
	if (ret)
		aprint_error_dev(sc->sc_dev, "Flushing tx queue failed: %d\n",
		    ret);
	return ret;
}
#endif
4087
4088
4089 /*
4090 * BEGIN mvm/power.c
4091 */
4092
4093 #define IWM_POWER_KEEP_ALIVE_PERIOD_SEC 25
4094
/*
 * Push a beacon filtering configuration to the firmware and, on
 * success, dump the parameters that were sent (debug builds only).
 */
static int
iwm_mvm_beacon_filter_send_cmd(struct iwm_softc *sc,
	struct iwm_beacon_filter_cmd *cmd)
{
	int ret;

	ret = iwm_mvm_send_cmd_pdu(sc, IWM_REPLY_BEACON_FILTERING_CMD,
	    IWM_CMD_SYNC, sizeof(struct iwm_beacon_filter_cmd), cmd);

	if (!ret) {
		DPRINTF(("ba_enable_beacon_abort is: %d\n",
		    le32toh(cmd->ba_enable_beacon_abort)));
		DPRINTF(("ba_escape_timer is: %d\n",
		    le32toh(cmd->ba_escape_timer)));
		DPRINTF(("bf_debug_flag is: %d\n",
		    le32toh(cmd->bf_debug_flag)));
		DPRINTF(("bf_enable_beacon_filter is: %d\n",
		    le32toh(cmd->bf_enable_beacon_filter)));
		DPRINTF(("bf_energy_delta is: %d\n",
		    le32toh(cmd->bf_energy_delta)));
		DPRINTF(("bf_escape_timer is: %d\n",
		    le32toh(cmd->bf_escape_timer)));
		DPRINTF(("bf_roaming_energy_delta is: %d\n",
		    le32toh(cmd->bf_roaming_energy_delta)));
		DPRINTF(("bf_roaming_state is: %d\n",
		    le32toh(cmd->bf_roaming_state)));
		DPRINTF(("bf_temp_threshold is: %d\n",
		    le32toh(cmd->bf_temp_threshold)));
		DPRINTF(("bf_temp_fast_filter is: %d\n",
		    le32toh(cmd->bf_temp_fast_filter)));
		DPRINTF(("bf_temp_slow_filter is: %d\n",
		    le32toh(cmd->bf_temp_slow_filter)));
	}
	return ret;
}
4130
/*
 * Fold the driver's current beacon-abort state into a beacon filter
 * command before it is sent.
 */
static void
iwm_mvm_beacon_filter_set_cqm_params(struct iwm_softc *sc,
	struct iwm_node *in, struct iwm_beacon_filter_cmd *cmd)
{
	cmd->ba_enable_beacon_abort = htole32(sc->sc_bf.ba_enabled);
}
4137
/*
 * Enable or disable beacon abort in the firmware.  Beacon abort rides
 * on top of beacon filtering, so this is a no-op unless filtering is
 * currently enabled.  Returns 0 on success or an errno.
 */
static int
iwm_mvm_update_beacon_abort(struct iwm_softc *sc, struct iwm_node *in,
	int enable)
{
	struct iwm_beacon_filter_cmd cmd = {
		IWM_BF_CMD_CONFIG_DEFAULTS,
		.bf_enable_beacon_filter = htole32(1),
		.ba_enable_beacon_abort = htole32(enable),
	};

	/* Nothing to do while beacon filtering is off. */
	if (!sc->sc_bf.bf_enabled)
		return 0;

	sc->sc_bf.ba_enabled = enable;
	iwm_mvm_beacon_filter_set_cqm_params(sc, in, &cmd);
	return iwm_mvm_beacon_filter_send_cmd(sc, &cmd);
}
4155
/*
 * Debug-log the contents of a MAC power command before it is sent.
 * The command itself is not modified.
 */
static void
iwm_mvm_power_log(struct iwm_softc *sc, struct iwm_mac_power_cmd *cmd)
{
	DPRINTF(("Sending power table command on mac id 0x%X for "
	    "power level %d, flags = 0x%X\n",
	    cmd->id_and_color, IWM_POWER_SCHEME_CAM, le16toh(cmd->flags)));
	DPRINTF(("Keep alive = %u sec\n", le16toh(cmd->keep_alive_seconds)));

	if (!(cmd->flags & htole16(IWM_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK))) {
		DPRINTF(("Disable power management\n"));
		return;
	}
	/*
	 * This driver never builds a command with power management
	 * enabled (the extra fields below are never filled in), so
	 * reaching this point indicates an inconsistency.
	 */
	KASSERT(0);

#if 0
	DPRINTF(mvm, "Rx timeout = %u usec\n",
	    le32_to_cpu(cmd->rx_data_timeout));
	DPRINTF(mvm, "Tx timeout = %u usec\n",
	    le32_to_cpu(cmd->tx_data_timeout));
	if (cmd->flags & cpu_to_le16(IWM_POWER_FLAGS_SKIP_OVER_DTIM_MSK))
		DPRINTF(mvm, "DTIM periods to skip = %u\n",
		    cmd->skip_dtim_periods);
	if (cmd->flags & cpu_to_le16(IWM_POWER_FLAGS_LPRX_ENA_MSK))
		DPRINTF(mvm, "LP RX RSSI threshold = %u\n",
		    cmd->lprx_rssi_threshold);
	if (cmd->flags & cpu_to_le16(IWM_POWER_FLAGS_ADVANCE_PM_ENA_MSK)) {
		DPRINTF(mvm, "uAPSD enabled\n");
		DPRINTF(mvm, "Rx timeout (uAPSD) = %u usec\n",
		    le32_to_cpu(cmd->rx_data_timeout_uapsd));
		DPRINTF(mvm, "Tx timeout (uAPSD) = %u usec\n",
		    le32_to_cpu(cmd->tx_data_timeout_uapsd));
		DPRINTF(mvm, "QNDP TID = %d\n", cmd->qndp_tid);
		DPRINTF(mvm, "ACs flags = 0x%x\n", cmd->uapsd_ac_flags);
		DPRINTF(mvm, "Max SP = %d\n", cmd->uapsd_max_sp);
	}
#endif
}
4193
4194 static void
4195 iwm_mvm_power_build_cmd(struct iwm_softc *sc, struct iwm_node *in,
4196 struct iwm_mac_power_cmd *cmd)
4197 {
4198 struct ieee80211com *ic = &sc->sc_ic;
4199 struct ieee80211_node *ni = &in->in_ni;
4200 int dtimper, dtimper_msec;
4201 int keep_alive;
4202
4203 cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
4204 in->in_color));
4205 dtimper = ic->ic_dtim_period ?: 1;
4206
4207 /*
4208 * Regardless of power management state the driver must set
4209 * keep alive period. FW will use it for sending keep alive NDPs
4210 * immediately after association. Check that keep alive period
4211 * is at least 3 * DTIM
4212 */
4213 dtimper_msec = dtimper * ni->ni_intval;
4214 keep_alive
4215 = MAX(3 * dtimper_msec, 1000 * IWM_POWER_KEEP_ALIVE_PERIOD_SEC);
4216 keep_alive = roundup(keep_alive, 1000) / 1000;
4217 cmd->keep_alive_seconds = htole16(keep_alive);
4218 }
4219
4220 static int
4221 iwm_mvm_power_mac_update_mode(struct iwm_softc *sc, struct iwm_node *in)
4222 {
4223 int ret;
4224 int ba_enable;
4225 struct iwm_mac_power_cmd cmd;
4226
4227 memset(&cmd, 0, sizeof(cmd));
4228
4229 iwm_mvm_power_build_cmd(sc, in, &cmd);
4230 iwm_mvm_power_log(sc, &cmd);
4231
4232 if ((ret = iwm_mvm_send_cmd_pdu(sc, IWM_MAC_PM_POWER_TABLE,
4233 IWM_CMD_SYNC, sizeof(cmd), &cmd)) != 0)
4234 return ret;
4235
4236 ba_enable = !!(cmd.flags &
4237 htole16(IWM_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
4238 return iwm_mvm_update_beacon_abort(sc, in, ba_enable);
4239 }
4240
4241 static int
4242 iwm_mvm_power_update_device(struct iwm_softc *sc)
4243 {
4244 struct iwm_device_power_cmd cmd = {
4245 .flags = htole16(IWM_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK),
4246 };
4247
4248 if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
4249 return 0;
4250
4251 cmd.flags |= htole16(IWM_DEVICE_POWER_FLAGS_CAM_MSK);
4252 DPRINTF(("Sending device power command with flags = 0x%X\n", cmd.flags));
4253
4254 return iwm_mvm_send_cmd_pdu(sc,
4255 IWM_POWER_TABLE_CMD, IWM_CMD_SYNC, sizeof(cmd), &cmd);
4256 }
4257
4258 static int
4259 iwm_mvm_enable_beacon_filter(struct iwm_softc *sc, struct iwm_node *in)
4260 {
4261 struct iwm_beacon_filter_cmd cmd = {
4262 IWM_BF_CMD_CONFIG_DEFAULTS,
4263 .bf_enable_beacon_filter = htole32(1),
4264 };
4265 int ret;
4266
4267 iwm_mvm_beacon_filter_set_cqm_params(sc, in, &cmd);
4268 ret = iwm_mvm_beacon_filter_send_cmd(sc, &cmd);
4269
4270 if (ret == 0)
4271 sc->sc_bf.bf_enabled = 1;
4272
4273 return ret;
4274 }
4275
4276 static int
4277 iwm_mvm_disable_beacon_filter(struct iwm_softc *sc, struct iwm_node *in)
4278 {
4279 struct iwm_beacon_filter_cmd cmd;
4280 int ret;
4281
4282 memset(&cmd, 0, sizeof(cmd));
4283 if ((sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_BF_UPDATED) == 0)
4284 return 0;
4285
4286 ret = iwm_mvm_beacon_filter_send_cmd(sc, &cmd);
4287 if (ret == 0)
4288 sc->sc_bf.bf_enabled = 0;
4289
4290 return ret;
4291 }
4292
#if 0
/*
 * Re-send the current beacon filter configuration if filtering is
 * active.  Currently compiled out; no callers in this file.
 */
static int
iwm_mvm_update_beacon_filter(struct iwm_softc *sc, struct iwm_node *in)
{
	if (!sc->sc_bf.bf_enabled)
		return 0;

	return iwm_mvm_enable_beacon_filter(sc, in);
}
#endif
4303
4304 /*
4305 * END mvm/power.c
4306 */
4307
4308 /*
4309 * BEGIN mvm/sta.c
4310 */
4311
/*
 * Convert a version-6 ADD_STA command into the version-5 layout, for
 * firmware that does not understand the v6 format.  All fields shared
 * by both layouts are copied; v6-only fields are dropped by the
 * initial memset.
 */
static void
iwm_mvm_add_sta_cmd_v6_to_v5(struct iwm_mvm_add_sta_cmd_v6 *cmd_v6,
	struct iwm_mvm_add_sta_cmd_v5 *cmd_v5)
{
	memset(cmd_v5, 0, sizeof(*cmd_v5));

	cmd_v5->add_modify = cmd_v6->add_modify;
	cmd_v5->tid_disable_tx = cmd_v6->tid_disable_tx;
	cmd_v5->mac_id_n_color = cmd_v6->mac_id_n_color;
	memcpy(cmd_v5->addr, cmd_v6->addr, ETHER_ADDR_LEN);
	cmd_v5->sta_id = cmd_v6->sta_id;
	cmd_v5->modify_mask = cmd_v6->modify_mask;
	cmd_v5->station_flags = cmd_v6->station_flags;
	cmd_v5->station_flags_msk = cmd_v6->station_flags_msk;
	cmd_v5->add_immediate_ba_tid = cmd_v6->add_immediate_ba_tid;
	cmd_v5->remove_immediate_ba_tid = cmd_v6->remove_immediate_ba_tid;
	cmd_v5->add_immediate_ba_ssn = cmd_v6->add_immediate_ba_ssn;
	cmd_v5->sleep_tx_count = cmd_v6->sleep_tx_count;
	cmd_v5->sleep_state_flags = cmd_v6->sleep_state_flags;
	cmd_v5->assoc_id = cmd_v6->assoc_id;
	cmd_v5->beamform_flags = cmd_v6->beamform_flags;
	cmd_v5->tfd_queue_msk = cmd_v6->tfd_queue_msk;
}
4335
/*
 * Send an ADD_STA command and wait for the firmware's completion
 * status, transparently downgrading the v6 command layout to v5 when
 * the firmware does not advertise STA_KEY_CMD support.
 *
 * NOTE(review): "status" is declared int * but callers in this file
 * pass pointers to uint32_t -- the types should probably be made to
 * agree.
 */
static int
iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *sc,
	struct iwm_mvm_add_sta_cmd_v6 *cmd, int *status)
{
	struct iwm_mvm_add_sta_cmd_v5 cmd_v5;

	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_STA_KEY_CMD) {
		return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA,
		    sizeof(*cmd), cmd, status);
	}

	iwm_mvm_add_sta_cmd_v6_to_v5(cmd, &cmd_v5);

	return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(cmd_v5),
	    &cmd_v5, status);
}
4352
/*
 * Send a station add/update command to the firmware for the AP node.
 * "update" selects modify (1) versus initial add (0); on the initial
 * add the TX queue mask and BSSID are also configured.  Returns 0 on
 * success, an errno from command submission, or EIO if the firmware
 * rejected the station.
 */
static int
iwm_mvm_sta_send_to_fw(struct iwm_softc *sc, struct iwm_node *in, int update)
{
	struct iwm_mvm_add_sta_cmd_v6 add_sta_cmd;
	int ret;
	uint32_t status;

	memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));

	/* This driver manages a single (AP) station. */
	add_sta_cmd.sta_id = IWM_STATION_ID;
	add_sta_cmd.mac_id_n_color
	    = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
	if (!update) {
		/* Initial add: enable the first four TX queues. */
		add_sta_cmd.tfd_queue_msk = htole32(0xf);
		IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
	}
	add_sta_cmd.add_modify = update ? 1 : 0;
	add_sta_cmd.station_flags_msk
	    |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);

	status = IWM_ADD_STA_SUCCESS;
	ret = iwm_mvm_send_add_sta_cmd_status(sc, &add_sta_cmd, &status);
	if (ret)
		return ret;

	/* Any firmware status other than success maps to EIO. */
	switch (status) {
	case IWM_ADD_STA_SUCCESS:
		break;
	default:
		ret = EIO;
		DPRINTF(("IWM_ADD_STA failed\n"));
		break;
	}

	return ret;
}
4390
/*
 * Add the AP station to the firmware (initial add, not an update).
 * Returns 0 on success or an errno.
 */
static int
iwm_mvm_add_sta(struct iwm_softc *sc, struct iwm_node *in)
{
	return iwm_mvm_sta_send_to_fw(sc, in, 0);
}
4402
/*
 * Update the firmware's view of the existing AP station.
 * Returns 0 on success or an errno.
 */
static int
iwm_mvm_update_sta(struct iwm_softc *sc, struct iwm_node *in)
{
	return iwm_mvm_sta_send_to_fw(sc, in, 1);
}
4408
4409 static int
4410 iwm_mvm_add_int_sta_common(struct iwm_softc *sc, struct iwm_int_sta *sta,
4411 const uint8_t *addr, uint16_t mac_id, uint16_t color)
4412 {
4413 struct iwm_mvm_add_sta_cmd_v6 cmd;
4414 int ret;
4415 uint32_t status;
4416
4417 memset(&cmd, 0, sizeof(cmd));
4418 cmd.sta_id = sta->sta_id;
4419 cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(mac_id, color));
4420
4421 cmd.tfd_queue_msk = htole32(sta->tfd_queue_msk);
4422
4423 if (addr)
4424 memcpy(cmd.addr, addr, ETHER_ADDR_LEN);
4425
4426 ret = iwm_mvm_send_add_sta_cmd_status(sc, &cmd, &status);
4427 if (ret)
4428 return ret;
4429
4430 switch (status) {
4431 case IWM_ADD_STA_SUCCESS:
4432 DPRINTF(("Internal station added.\n"));
4433 return 0;
4434 default:
4435 DPRINTF(("%s: Add internal station failed, status=0x%x\n",
4436 DEVNAME(sc), status));
4437 ret = EIO;
4438 break;
4439 }
4440 return ret;
4441 }
4442
/*
 * Register the auxiliary station with the firmware; on failure the
 * cached state is cleared again.
 * NOTE(review): sta_id 3 looks like the firmware's fixed auxiliary
 * station id -- confirm against the firmware API definitions.
 */
static int
iwm_mvm_add_aux_sta(struct iwm_softc *sc)
{
	int ret;

	sc->sc_aux_sta.sta_id = 3;
	sc->sc_aux_sta.tfd_queue_msk = 0;

	ret = iwm_mvm_add_int_sta_common(sc,
	    &sc->sc_aux_sta, NULL, IWM_MAC_INDEX_AUX, 0);

	if (ret)
		memset(&sc->sc_aux_sta, 0, sizeof(sc->sc_aux_sta));
	return ret;
}
4458
4459 /*
4460 * END mvm/sta.c
4461 */
4462
4463 /*
4464 * BEGIN mvm/scan.c
4465 */
4466
4467 #define IWM_PLCP_QUIET_THRESH 1
4468 #define IWM_ACTIVE_QUIET_TIME 10
4469 #define LONG_OUT_TIME_PERIOD 600
4470 #define SHORT_OUT_TIME_PERIOD 200
4471 #define SUSPEND_TIME_PERIOD 100
4472
4473 static uint16_t
4474 iwm_mvm_scan_rx_chain(struct iwm_softc *sc)
4475 {
4476 uint16_t rx_chain;
4477 uint8_t rx_ant;
4478
4479 rx_ant = IWM_FW_VALID_RX_ANT(sc);
4480 rx_chain = rx_ant << IWM_PHY_RX_CHAIN_VALID_POS;
4481 rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
4482 rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_SEL_POS;
4483 rx_chain |= 0x1 << IWM_PHY_RX_CHAIN_DRIVER_FORCE_POS;
4484 return htole16(rx_chain);
4485 }
4486
4487 #define ieee80211_tu_to_usec(a) (1024*(a))
4488
4489 static uint32_t
4490 iwm_mvm_scan_max_out_time(struct iwm_softc *sc, uint32_t flags, int is_assoc)
4491 {
4492 if (!is_assoc)
4493 return 0;
4494 if (flags & 0x1)
4495 return htole32(ieee80211_tu_to_usec(SHORT_OUT_TIME_PERIOD));
4496 return htole32(ieee80211_tu_to_usec(LONG_OUT_TIME_PERIOD));
4497 }
4498
4499 static uint32_t
4500 iwm_mvm_scan_suspend_time(struct iwm_softc *sc, int is_assoc)
4501 {
4502 if (!is_assoc)
4503 return 0;
4504 return htole32(ieee80211_tu_to_usec(SUSPEND_TIME_PERIOD));
4505 }
4506
4507 static uint32_t
4508 iwm_mvm_scan_rxon_flags(struct iwm_softc *sc, int flags)
4509 {
4510 if (flags & IEEE80211_CHAN_2GHZ)
4511 return htole32(IWM_PHY_BAND_24);
4512 else
4513 return htole32(IWM_PHY_BAND_5);
4514 }
4515
/*
 * Pick the rate and TX antenna for scan probe requests.  The valid TX
 * antennas are used round-robin (state kept in sc_scan_last_antenna so
 * successive scans rotate), and the lowest mandatory rate is chosen:
 * 1 Mbps CCK on 2 GHz unless CCK is suppressed, else 6 Mbps OFDM.
 * Returns the little-endian rate_n_flags value.
 */
static uint32_t
iwm_mvm_scan_rate_n_flags(struct iwm_softc *sc, int flags, int no_cck)
{
	uint32_t tx_ant;
	int i, ind;

	/* Advance to the next valid antenna, wrapping around. */
	for (i = 0, ind = sc->sc_scan_last_antenna;
	    i < IWM_RATE_MCS_ANT_NUM; i++) {
		ind = (ind + 1) % IWM_RATE_MCS_ANT_NUM;
		if (IWM_FW_VALID_TX_ANT(sc) & (1 << ind)) {
			sc->sc_scan_last_antenna = ind;
			break;
		}
	}
	tx_ant = (1 << sc->sc_scan_last_antenna) << IWM_RATE_MCS_ANT_POS;

	if ((flags & IEEE80211_CHAN_2GHZ) && !no_cck)
		return htole32(IWM_RATE_1M_PLCP | IWM_RATE_MCS_CCK_MSK |
		    tx_ant);
	else
		return htole32(IWM_RATE_6M_PLCP | tx_ant);
}
4538
4539 /*
4540 * If req->n_ssids > 0, it means we should do an active scan.
4541 * In case of active scan w/o directed scan, we receive a zero-length SSID
4542 * just to notify that this scan is active and not passive.
4543 * In order to notify the FW of the number of SSIDs we wish to scan (including
4544 * the zero-length one), we need to set the corresponding bits in chan->type,
4545 * one for each SSID, and set the active bit (first). If the first SSID is
4546 * already included in the probe template, so we need to set only
4547 * req->n_ssids - 1 bits in addition to the first bit.
4548 */
4549 static uint16_t
4550 iwm_mvm_get_active_dwell(struct iwm_softc *sc, int flags, int n_ssids)
4551 {
4552 if (flags & IEEE80211_CHAN_2GHZ)
4553 return 30 + 3 * (n_ssids + 1);
4554 return 20 + 2 * (n_ssids + 1);
4555 }
4556
4557 static uint16_t
4558 iwm_mvm_get_passive_dwell(struct iwm_softc *sc, int flags)
4559 {
4560 return (flags & IEEE80211_CHAN_2GHZ) ? 100 + 20 : 100 + 10;
4561 }
4562
/*
 * Append the channel list to a scan command.  Every channel in the
 * channel table matching "flags" is written after the probe request
 * template (at cmd->data + tx_cmd.len).  Returns the number of
 * channels written.
 */
static int
iwm_mvm_scan_fill_channels(struct iwm_softc *sc, struct iwm_scan_cmd *cmd,
	int flags, int n_ssids, int basic_ssid)
{
	struct ieee80211com *ic = &sc->sc_ic;
	uint16_t passive_dwell = iwm_mvm_get_passive_dwell(sc, flags);
	uint16_t active_dwell = iwm_mvm_get_active_dwell(sc, flags, n_ssids);
	/* Channel entries follow the probe request in the command body. */
	struct iwm_scan_channel *chan = (struct iwm_scan_channel *)
		(cmd->data + le16toh(cmd->tx_cmd.len));
	/* One "SSID bit" per SSID to probe; see the comment above. */
	int type = (1 << n_ssids) - 1;
	struct ieee80211_channel *c;
	int nchan;

	if (!basic_ssid)
		type |= (1 << n_ssids);

	/* Walk the channel table (entries 1..IEEE80211_CHAN_MAX). */
	for (nchan = 0, c = &ic->ic_channels[1];
	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX];
	    c++) {
		if ((c->ic_flags & flags) != flags)
			continue;

		chan->channel = htole16(ieee80211_mhz2ieee(c->ic_freq, flags));
		chan->type = htole32(type);
		/* Never actively probe channels marked passive-only. */
		if (c->ic_flags & IEEE80211_CHAN_PASSIVE)
			chan->type &= htole32(~IWM_SCAN_CHANNEL_TYPE_ACTIVE);
		chan->active_dwell = htole16(active_dwell);
		chan->passive_dwell = htole16(passive_dwell);
		chan->iteration_count = htole16(1);
		chan++;
		nchan++;
	}
	if (nchan == 0)
		DPRINTF(("%s: NO CHANNEL!\n", DEVNAME(sc)));
	return nchan;
}
4599
4600 /*
4601 * Fill in probe request with the following parameters:
4602 * TA is our vif HW address, which mac80211 ensures we have.
4603 * Packet is broadcasted, so this is both SA and DA.
4604 * The probe request IE is made out of two: first comes the most prioritized
4605 * SSID if a directed scan is requested. Second comes whatever extra
4606 * information was given to us as the scan request IE.
4607 */
/*
 * Build a probe request frame (header + SSID IE + optional extra IEs)
 * into "frame", with "left" bytes of buffer space available.  Returns
 * the number of bytes written, or 0 if the buffer is too small for the
 * mandatory parts.
 */
static uint16_t
iwm_mvm_fill_probe_req(struct iwm_softc *sc, struct ieee80211_frame *frame,
	const uint8_t *ta, int n_ssids, const uint8_t *ssid, int ssid_len,
	const uint8_t *ie, int ie_len, int left)
{
	int len = 0;
	uint8_t *pos = NULL;

	/* Make sure there is enough space for the probe request,
	 * two mandatory IEs and the data */
	left -= sizeof(*frame);
	if (left < 0)
		return 0;

	/* Broadcast probe request: DA and BSSID are broadcast, SA is us. */
	frame->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
	frame->i_fc[1] = IEEE80211_FC1_DIR_NODS;
	IEEE80211_ADDR_COPY(frame->i_addr1, etherbroadcastaddr);
	memcpy(frame->i_addr2, ta, ETHER_ADDR_LEN);
	IEEE80211_ADDR_COPY(frame->i_addr3, etherbroadcastaddr);

	len += sizeof(*frame);
	CTASSERT(sizeof(*frame) == 24);

	/* for passive scans, no need to fill anything */
	if (n_ssids == 0)
		return (uint16_t)len;

	/* points to the payload of the request */
	pos = (uint8_t *)frame + sizeof(*frame);

	/* fill in our SSID IE */
	left -= ssid_len + 2;
	if (left < 0)
		return 0;
	*pos++ = IEEE80211_ELEMID_SSID;
	*pos++ = ssid_len;
	if (ssid && ssid_len) { /* ssid_len may be == 0 even if ssid is valid */
		memcpy(pos, ssid, ssid_len);
		pos += ssid_len;
	}

	len += ssid_len + 2;

	/*
	 * NOTE(review): when the extra IEs do not fit, the frame built
	 * so far is returned rather than failing with 0 -- presumably
	 * intentional (a probe request without extra IEs is still
	 * valid); confirm against the upstream driver.
	 */
	if (left < ie_len)
		return len;

	if (ie && ie_len) {
		memcpy(pos, ie, ie_len);
		len += ie_len;
	}

	return (uint16_t)len;
}
4662
/*
 * Build and send a full scan request for the band(s) selected by
 * "flags".  The pre-allocated sc_scan_cmd buffer receives the scan
 * command, the probe request template and the channel list, and is
 * handed to the firmware without copying (NOCOPY).  Returns 0 when the
 * firmware accepted the scan, else an errno (EIO on firmware refusal).
 */
static int
iwm_mvm_scan_request(struct iwm_softc *sc, int flags,
	int n_ssids, uint8_t *ssid, int ssid_len)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_host_cmd hcmd = {
		.id = IWM_SCAN_REQUEST_CMD,
		.len = { 0, },
		.data = { sc->sc_scan_cmd, },
		.flags = IWM_CMD_SYNC,
		.dataflags = { IWM_HCMD_DFL_NOCOPY, },
	};
	struct iwm_scan_cmd *cmd = sc->sc_scan_cmd;
	int is_assoc = 0;
	int ret;
	uint32_t status;
	int basic_ssid = !(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_NO_BASIC_SSID);

	//lockdep_assert_held(&mvm->mutex);

	/* Remember which band(s) this scan covers. */
	sc->sc_scanband = flags & (IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_5GHZ);

	DPRINTF(("Handling ieee80211 scan request\n"));
	memset(cmd, 0, sc->sc_scan_cmd_len);

	cmd->quiet_time = htole16(IWM_ACTIVE_QUIET_TIME);
	cmd->quiet_plcp_th = htole16(IWM_PLCP_QUIET_THRESH);
	cmd->rxchain_sel_flags = iwm_mvm_scan_rx_chain(sc);
	cmd->max_out_time = iwm_mvm_scan_max_out_time(sc, 0, is_assoc);
	cmd->suspend_time = iwm_mvm_scan_suspend_time(sc, is_assoc);
	cmd->rxon_flags = iwm_mvm_scan_rxon_flags(sc, flags);
	cmd->filter_flags = htole32(IWM_MAC_FILTER_ACCEPT_GRP |
	    IWM_MAC_FILTER_IN_BEACON);

	cmd->type = htole32(IWM_SCAN_TYPE_FORCED);
	cmd->repeats = htole32(1);

	/*
	 * If the user asked for passive scan, don't change to active scan if
	 * you see any activity on the channel - remain passive.
	 */
	if (n_ssids > 0) {
		cmd->passive2active = htole16(1);
		cmd->scan_flags |= IWM_SCAN_FLAGS_PASSIVE2ACTIVE;
#if 0
		if (basic_ssid) {
			ssid = req->ssids[0].ssid;
			ssid_len = req->ssids[0].ssid_len;
		}
#endif
	} else {
		cmd->passive2active = 0;
		cmd->scan_flags &= ~IWM_SCAN_FLAGS_PASSIVE2ACTIVE;
	}

	/* TX parameters for the embedded probe request. */
	cmd->tx_cmd.tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
	    IWM_TX_CMD_FLG_BT_DIS);
	cmd->tx_cmd.sta_id = sc->sc_aux_sta.sta_id;
	cmd->tx_cmd.life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
	cmd->tx_cmd.rate_n_flags = iwm_mvm_scan_rate_n_flags(sc, flags, 1/*XXX*/);

	/* Probe request template; channels are appended after it. */
	cmd->tx_cmd.len = htole16(iwm_mvm_fill_probe_req(sc,
	    (struct ieee80211_frame *)cmd->data,
	    ic->ic_myaddr, n_ssids, ssid, ssid_len,
	    NULL, 0, sc->sc_capa_max_probe_len));

	cmd->channel_count
	    = iwm_mvm_scan_fill_channels(sc, cmd, flags, n_ssids, basic_ssid);

	/* Total length: header + probe template + channel entries. */
	cmd->len = htole16(sizeof(struct iwm_scan_cmd) +
	    le16toh(cmd->tx_cmd.len) +
	    (cmd->channel_count * sizeof(struct iwm_scan_channel)));
	hcmd.len[0] = le16toh(cmd->len);

	status = IWM_SCAN_RESPONSE_OK;
	ret = iwm_mvm_send_cmd_status(sc, &hcmd, &status);
	if (!ret && status == IWM_SCAN_RESPONSE_OK) {
		DPRINTF(("Scan request was sent successfully\n"));
	} else {
		/*
		 * If the scan failed, it usually means that the FW was unable
		 * to allocate the time events. Warn on it, but maybe we
		 * should try to send the command again with different params.
		 */
		sc->sc_scanband = 0;
		ret = EIO;
	}
	return ret;
}
4752
4753 /*
4754 * END mvm/scan.c
4755 */
4756
4757 /*
4758 * BEGIN mvm/mac-ctxt.c
4759 */
4760
/*
 * Compute the CCK and OFDM basic-rate bitmaps ("ACK rates") for the
 * MAC context, starting from all supported rates of the band and then
 * adding the mandatory lower rates required by 802.11-2007 9.6 (see
 * the long comment below).  Results are host-order bitmaps, bit 0 =
 * first rate of the respective modulation class.
 */
static void
iwm_mvm_ack_rates(struct iwm_softc *sc, struct iwm_node *in,
	int *cck_rates, int *ofdm_rates)
{
	struct ieee80211_node *ni = &in->in_ni;
	int lowest_present_ofdm = 100;
	int lowest_present_cck = 100;
	uint8_t cck = 0;
	uint8_t ofdm = 0;
	int i;

	/* All CCK rates only apply on the 2 GHz band. */
	if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) {
		for (i = 0; i <= IWM_LAST_CCK_RATE; i++) {
			cck |= (1 << i);
			if (lowest_present_cck > i)
				lowest_present_cck = i;
		}
	}
	for (i = IWM_FIRST_OFDM_RATE; i <= IWM_LAST_NON_HT_RATE; i++) {
		int adj = i - IWM_FIRST_OFDM_RATE;
		ofdm |= (1 << adj);
		if (lowest_present_ofdm > i)
			lowest_present_ofdm = i;
	}

	/*
	 * Now we've got the basic rates as bitmaps in the ofdm and cck
	 * variables. This isn't sufficient though, as there might not
	 * be all the right rates in the bitmap. E.g. if the only basic
	 * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
	 * and 6 Mbps because the 802.11-2007 standard says in 9.6:
	 *
	 * [...] a STA responding to a received frame shall transmit
	 * its Control Response frame [...] at the highest rate in the
	 * BSSBasicRateSet parameter that is less than or equal to the
	 * rate of the immediately previous frame in the frame exchange
	 * sequence ([...]) and that is of the same modulation class
	 * ([...]) as the received frame. If no rate contained in the
	 * BSSBasicRateSet parameter meets these conditions, then the
	 * control frame sent in response to a received frame shall be
	 * transmitted at the highest mandatory rate of the PHY that is
	 * less than or equal to the rate of the received frame, and
	 * that is of the same modulation class as the received frame.
	 *
	 * As a consequence, we need to add all mandatory rates that are
	 * lower than all of the basic rates to these bitmaps.
	 */

	if (IWM_RATE_24M_INDEX < lowest_present_ofdm)
		ofdm |= IWM_RATE_BIT_MSK(24) >> IWM_FIRST_OFDM_RATE;
	if (IWM_RATE_12M_INDEX < lowest_present_ofdm)
		ofdm |= IWM_RATE_BIT_MSK(12) >> IWM_FIRST_OFDM_RATE;
	/* 6M already there or needed so always add */
	ofdm |= IWM_RATE_BIT_MSK(6) >> IWM_FIRST_OFDM_RATE;

	/*
	 * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
	 * Note, however:
	 *  - if no CCK rates are basic, it must be ERP since there must
	 *    be some basic rates at all, so they're OFDM => ERP PHY
	 *    (or we're in 5 GHz, and the cck bitmap will never be used)
	 *  - if 11M is a basic rate, it must be ERP as well, so add 5.5M
	 *  - if 5.5M is basic, 1M and 2M are mandatory
	 *  - if 2M is basic, 1M is mandatory
	 *  - if 1M is basic, that's the only valid ACK rate.
	 * As a consequence, it's not as complicated as it sounds, just add
	 * any lower rates to the ACK rate bitmap.
	 */
	if (IWM_RATE_11M_INDEX < lowest_present_cck)
		cck |= IWM_RATE_BIT_MSK(11) >> IWM_FIRST_CCK_RATE;
	if (IWM_RATE_5M_INDEX < lowest_present_cck)
		cck |= IWM_RATE_BIT_MSK(5) >> IWM_FIRST_CCK_RATE;
	if (IWM_RATE_2M_INDEX < lowest_present_cck)
		cck |= IWM_RATE_BIT_MSK(2) >> IWM_FIRST_CCK_RATE;
	/* 1M already there or needed so always add */
	cck |= IWM_RATE_BIT_MSK(1) >> IWM_FIRST_CCK_RATE;

	*cck_rates = cck;
	*ofdm_rates = ofdm;
}
4841
/*
 * Fill in the fields of a MAC context command that are common to all
 * actions (add/modify/remove): id/color, MAC type, TSF id, addresses,
 * ACK rates, preamble/slot flags, per-AC EDCA parameters and filter
 * flags.
 */
static void
iwm_mvm_mac_ctxt_cmd_common(struct iwm_softc *sc, struct iwm_node *in,
	struct iwm_mac_ctx_cmd *cmd, uint32_t action)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = ic->ic_bss;
	int cck_ack_rates, ofdm_ack_rates;
	int i;

	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
	    in->in_color));
	cmd->action = htole32(action);

	/* Only station mode is supported. */
	cmd->mac_type = htole32(IWM_FW_MAC_TYPE_BSS_STA);
	cmd->tsf_id = htole32(in->in_tsfid);

	IEEE80211_ADDR_COPY(cmd->node_addr, ic->ic_myaddr);
	/* BSSID is only meaningful once associated. */
	if (in->in_assoc) {
		IEEE80211_ADDR_COPY(cmd->bssid_addr, ni->ni_bssid);
	} else {
		memset(cmd->bssid_addr, 0, sizeof(cmd->bssid_addr));
	}
	iwm_mvm_ack_rates(sc, in, &cck_ack_rates, &ofdm_ack_rates);
	cmd->cck_rates = htole32(cck_ack_rates);
	cmd->ofdm_rates = htole32(ofdm_ack_rates);

	cmd->cck_short_preamble
	    = htole32((ic->ic_flags & IEEE80211_F_SHPREAMBLE)
	      ? IWM_MAC_FLG_SHORT_PREAMBLE : 0);
	cmd->short_slot
	    = htole32((ic->ic_flags & IEEE80211_F_SHSLOT)
	      ? IWM_MAC_FLG_SHORT_SLOT : 0);

	/* Identical default EDCA parameters for every AC entry. */
	for (i = 0; i < IWM_AC_NUM+1; i++) {
		int txf = i;

		cmd->ac[txf].cw_min = htole16(0x0f);
		cmd->ac[txf].cw_max = htole16(0x3f);
		cmd->ac[txf].aifsn = 1;
		cmd->ac[txf].fifos_mask = (1 << txf);
		cmd->ac[txf].edca_txop = 0;
	}

	if (ic->ic_flags & IEEE80211_F_USEPROT)
		cmd->protection_flags |= htole32(IWM_MAC_PROT_FLG_TGG_PROTECT);

	cmd->filter_flags = htole32(IWM_MAC_FILTER_ACCEPT_GRP);
}
4890
4891 static int
4892 iwm_mvm_mac_ctxt_send_cmd(struct iwm_softc *sc, struct iwm_mac_ctx_cmd *cmd)
4893 {
4894 int ret = iwm_mvm_send_cmd_pdu(sc, IWM_MAC_CONTEXT_CMD, IWM_CMD_SYNC,
4895 sizeof(*cmd), cmd);
4896 if (ret)
4897 DPRINTF(("%s: Failed to send MAC context (action:%d): %d\n",
4898 DEVNAME(sc), le32toh(cmd->action), ret));
4899 return ret;
4900 }
4901
4902 /*
4903 * Fill the specific data for mac context of type station or p2p client
4904 */
/*
 * Fill the station-specific part of a MAC context command: beacon and
 * DTIM intervals (plus their firmware "reciprocal" forms), association
 * state and, when associated, the predicted time of the next DTIM
 * TBTT.  force_assoc_off forces the unassociated state regardless of
 * in->in_assoc.
 */
static void
iwm_mvm_mac_ctxt_cmd_fill_sta(struct iwm_softc *sc, struct iwm_node *in,
	struct iwm_mac_data_sta *ctxt_sta, int force_assoc_off)
{
	struct ieee80211_node *ni = &in->in_ni;
	unsigned dtim_period, dtim_count;

	dtim_period = ni->ni_dtim_period;
	dtim_count = ni->ni_dtim_count;

	/* We need the dtim_period to set the MAC as associated */
	if (in->in_assoc && dtim_period && !force_assoc_off) {
		uint64_t tsf;
		uint32_t dtim_offs;

		/*
		 * The DTIM count counts down, so when it is N that means N
		 * more beacon intervals happen until the DTIM TBTT. Therefore
		 * add this to the current time. If that ends up being in the
		 * future, the firmware will handle it.
		 *
		 * Also note that the system_timestamp (which we get here as
		 * "sync_device_ts") and TSF timestamp aren't at exactly the
		 * same offset in the frame -- the TSF is at the first symbol
		 * of the TSF, the system timestamp is at signal acquisition
		 * time. This means there's an offset between them of at most
		 * a few hundred microseconds (24 * 8 bits + PLCP time gives
		 * 384us in the longest case), this is currently not relevant
		 * as the firmware wakes up around 2ms before the TBTT.
		 */
		dtim_offs = dtim_count * ni->ni_intval;
		/* convert TU to usecs */
		dtim_offs *= 1024;

		tsf = ni->ni_tstamp.tsf;

		ctxt_sta->dtim_tsf = htole64(tsf + dtim_offs);
		ctxt_sta->dtim_time = htole64(ni->ni_rstamp + dtim_offs);

		DPRINTF(("DTIM TBTT is 0x%llx/0x%x, offset %d\n",
		    (long long)le64toh(ctxt_sta->dtim_tsf),
		    le32toh(ctxt_sta->dtim_time), dtim_offs));

		ctxt_sta->is_assoc = htole32(1);
	} else {
		ctxt_sta->is_assoc = htole32(0);
	}

	ctxt_sta->bi = htole32(ni->ni_intval);
	ctxt_sta->bi_reciprocal = htole32(iwm_mvm_reciprocal(ni->ni_intval));
	ctxt_sta->dtim_interval = htole32(ni->ni_intval * dtim_period);
	ctxt_sta->dtim_reciprocal =
	    htole32(iwm_mvm_reciprocal(ni->ni_intval * dtim_period));

	/* 10 = CONN_MAX_LISTEN_INTERVAL */
	ctxt_sta->listen_interval = htole32(10);
	ctxt_sta->assoc_id = htole32(ni->ni_associd);
}
4963
/*
 * Build and send a station-mode MAC context command for the given
 * action (add/modify).  Returns 0 on success or an errno.
 */
static int
iwm_mvm_mac_ctxt_cmd_station(struct iwm_softc *sc, struct iwm_node *in,
	uint32_t action)
{
	struct iwm_mac_ctx_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));

	/* Fill the common data for all mac context types */
	iwm_mvm_mac_ctxt_cmd_common(sc, in, &cmd, action);

	/* Allow beacons to pass through as long as we are not associated,or we
	 * do not have dtim period information */
	if (!in->in_assoc || !sc->sc_ic.ic_dtim_period)
		cmd.filter_flags |= htole32(IWM_MAC_FILTER_IN_BEACON);
	else
		cmd.filter_flags &= ~htole32(IWM_MAC_FILTER_IN_BEACON);

	/* Fill the data specific for station mode */
	iwm_mvm_mac_ctxt_cmd_fill_sta(sc, in,
	    &cmd.sta, action == IWM_FW_CTXT_ACTION_ADD);

	return iwm_mvm_mac_ctxt_send_cmd(sc, &cmd);
}
4988
/*
 * Dispatch a MAC context action.  Only station mode is supported, so
 * this simply forwards to the station variant.
 */
static int
iwm_mvm_mac_ctx_send(struct iwm_softc *sc, struct iwm_node *in, uint32_t action)
{
	return iwm_mvm_mac_ctxt_cmd_station(sc, in, action);
}
4994
4995 static int
4996 iwm_mvm_mac_ctxt_add(struct iwm_softc *sc, struct iwm_node *in)
4997 {
4998 int ret;
4999
5000 ret = iwm_mvm_mac_ctx_send(sc, in, IWM_FW_CTXT_ACTION_ADD);
5001 if (ret)
5002 return ret;
5003
5004 return 0;
5005 }
5006
/*
 * Push an updated MAC context to the firmware (modify action).
 * Returns 0 on success or an errno.
 */
static int
iwm_mvm_mac_ctxt_changed(struct iwm_softc *sc, struct iwm_node *in)
{
	return iwm_mvm_mac_ctx_send(sc, in, IWM_FW_CTXT_ACTION_MODIFY);
}
5012
#if 0
/*
 * Remove a previously uploaded MAC context from the firmware.
 * Currently compiled out; no callers in this file.
 */
static int
iwm_mvm_mac_ctxt_remove(struct iwm_softc *sc, struct iwm_node *in)
{
	struct iwm_mac_ctx_cmd cmd;
	int ret;

	if (!in->in_uploaded) {
		/* was print(), which does not exist in the kernel */
		aprint_error_dev(sc->sc_dev,
		    "attempt to remove !uploaded node %p\n", in);
		return EIO;
	}

	memset(&cmd, 0, sizeof(cmd));

	cmd.id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
	    in->in_color));
	cmd.action = htole32(IWM_FW_CTXT_ACTION_REMOVE);

	ret = iwm_mvm_send_cmd_pdu(sc,
	    IWM_MAC_CONTEXT_CMD, IWM_CMD_SYNC, sizeof(cmd), &cmd);
	if (ret) {
		aprint_error_dev(sc->sc_dev,
		    "Failed to remove MAC context: %d\n", ret);
		return ret;
	}
	in->in_uploaded = 0;

	return 0;
}
#endif
5043
5044 #define IWM_MVM_MISSED_BEACONS_THRESHOLD 8
5045
/*
 * Handle a missed-beacons notification from the firmware.  When the
 * number of consecutive beacons missed since the last RX exceeds the
 * threshold, report a beacon miss to net80211.
 */
static void
iwm_mvm_rx_missed_beacons_notif(struct iwm_softc *sc,
	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
{
	struct iwm_missed_beacons_notif *mb = (void *)pkt->data;

	DPRINTF(("missed bcn mac_id=%u, consecutive=%u (%u, %u, %u)\n",
	    le32toh(mb->mac_id),
	    le32toh(mb->consec_missed_beacons),
	    le32toh(mb->consec_missed_beacons_since_last_rx),
	    le32toh(mb->num_recvd_beacons),
	    le32toh(mb->num_expected_beacons)));

	/*
	 * TODO: the threshold should be adjusted based on latency conditions,
	 * and/or in case of a CS flow on one of the other AP vifs.
	 */
	if (le32toh(mb->consec_missed_beacons_since_last_rx) >
	    IWM_MVM_MISSED_BEACONS_THRESHOLD)
		ieee80211_beacon_miss(&sc->sc_ic);
}
5067
5068 /*
5069 * END mvm/mac-ctxt.c
5070 */
5071
5072 /*
5073 * BEGIN mvm/quota.c
5074 */
5075
/*
 * Update the firmware's time-quota allocation.  The firmware's
 * scheduling session (IWM_MVM_MAX_QUOTA fragments) is divided equally
 * among all bindings with active interfaces; with this driver there is
 * at most one ("in" non-NULL).  Passing NULL invalidates all quotas.
 * Returns 0 on success or an errno.
 */
static int
iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_node *in)
{
	struct iwm_time_quota_cmd cmd;
	int i, idx, ret, num_active_macs, quota, quota_rem;
	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
	int n_ifs[IWM_MAX_BINDINGS] = {0, };
	uint16_t id;

	memset(&cmd, 0, sizeof(cmd));

	/* currently, PHY ID == binding ID */
	if (in) {
		id = in->in_phyctxt->id;
		KASSERT(id < IWM_MAX_BINDINGS);
		colors[id] = in->in_phyctxt->color;

		if (1)
			n_ifs[id] = 1;
	}

	/*
	 * The FW's scheduling session consists of
	 * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
	 * equally between all the bindings that require quota
	 */
	num_active_macs = 0;
	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
		num_active_macs += n_ifs[i];
	}

	quota = 0;
	quota_rem = 0;
	if (num_active_macs) {
		quota = IWM_MVM_MAX_QUOTA / num_active_macs;
		quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
	}

	/* Fill an entry for each binding that has a color assigned. */
	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
		if (colors[i] < 0)
			continue;

		cmd.quotas[idx].id_and_color =
		    htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));

		if (n_ifs[i] <= 0) {
			cmd.quotas[idx].quota = htole32(0);
			cmd.quotas[idx].max_duration = htole32(0);
		} else {
			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
			cmd.quotas[idx].max_duration = htole32(0);
		}
		idx++;
	}

	/* Give the remainder of the session to the first binding */
	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);

	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
	    sizeof(cmd), &cmd);
	if (ret)
		DPRINTF(("%s: Failed to send quota: %d\n", DEVNAME(sc), ret));
	return ret;
}
5141
5142 /*
5143 * END mvm/quota.c
5144 */
5145
5146 /*
5147 * aieee80211 routines
5148 */
5149
5150 /*
5151 * Change to AUTH state in 80211 state machine. Roughly matches what
5152 * Linux does in bss_info_changed().
5153 */
5154 static int
5155 iwm_auth(struct iwm_softc *sc)
5156 {
5157 struct ieee80211com *ic = &sc->sc_ic;
5158 struct iwm_node *in = (void *)ic->ic_bss;
5159 uint32_t duration;
5160 uint32_t min_duration;
5161 int error;
5162
5163 in->in_assoc = 0;
5164
5165 if ((error = iwm_allow_mcast(sc)) != 0)
5166 return error;
5167
5168 if ((error = iwm_mvm_mac_ctxt_add(sc, in)) != 0) {
5169 DPRINTF(("%s: failed to add MAC\n", DEVNAME(sc)));
5170 return error;
5171 }
5172
5173 if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
5174 in->in_ni.ni_chan, 1, 1)) != 0) {
5175 DPRINTF(("%s: failed add phy ctxt\n", DEVNAME(sc)));
5176 return error;
5177 }
5178 in->in_phyctxt = &sc->sc_phyctxt[0];
5179
5180 if ((error = iwm_mvm_binding_add_vif(sc, in)) != 0) {
5181 DPRINTF(("%s: binding cmd\n", DEVNAME(sc)));
5182 return error;
5183 }
5184
5185 if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
5186 DPRINTF(("%s: failed to add MAC\n", DEVNAME(sc)));
5187 return error;
5188 }
5189
5190 /* a bit superfluous? */
5191 while (sc->sc_auth_prot)
5192 tsleep(&sc->sc_auth_prot, 0, "iwmauth", 0);
5193 sc->sc_auth_prot = 1;
5194
5195 duration = min(IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS,
5196 200 + in->in_ni.ni_intval);
5197 min_duration = min(IWM_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS,
5198 100 + in->in_ni.ni_intval);
5199 iwm_mvm_protect_session(sc, in, duration, min_duration, 500);
5200
5201 while (sc->sc_auth_prot != 2) {
5202 /*
5203 * well, meh, but if the kernel is sleeping for half a
5204 * second, we have bigger problems
5205 */
5206 if (sc->sc_auth_prot == 0) {
5207 DPRINTF(("%s: missed auth window!\n", DEVNAME(sc)));
5208 return ETIMEDOUT;
5209 } else if (sc->sc_auth_prot == -1) {
5210 DPRINTF(("%s: no time event, denied!\n", DEVNAME(sc)));
5211 sc->sc_auth_prot = 0;
5212 return EAUTH;
5213 }
5214 tsleep(&sc->sc_auth_prot, 0, "iwmau2", 0);
5215 }
5216
5217 return 0;
5218 }
5219
5220 static int
5221 iwm_assoc(struct iwm_softc *sc)
5222 {
5223 struct ieee80211com *ic = &sc->sc_ic;
5224 struct iwm_node *in = (void *)ic->ic_bss;
5225 int error;
5226
5227 if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
5228 DPRINTF(("%s: failed to update STA\n", DEVNAME(sc)));
5229 return error;
5230 }
5231
5232 in->in_assoc = 1;
5233 if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
5234 DPRINTF(("%s: failed to update MAC\n", DEVNAME(sc)));
5235 return error;
5236 }
5237
5238 return 0;
5239 }
5240
/*
 * Tear down firmware state for a node.  Instead of the incremental
 * teardown sequence, this fully resets and re-initializes the device
 * (see the comment below for why).  Always returns 0.
 */
static int
iwm_release(struct iwm_softc *sc, struct iwm_node *in)
{
	/*
	 * Ok, so *technically* the proper set of calls for going
	 * from RUN back to SCAN is:
	 *
	 * iwm_mvm_power_mac_disable(sc, in);
	 * iwm_mvm_mac_ctxt_changed(sc, in);
	 * iwm_mvm_rm_sta(sc, in);
	 * iwm_mvm_update_quotas(sc, NULL);
	 * iwm_mvm_mac_ctxt_changed(sc, in);
	 * iwm_mvm_binding_remove_vif(sc, in);
	 * iwm_mvm_mac_ctxt_remove(sc, in);
	 *
	 * However, that freezes the device no matter which permutations
	 * and modifications are attempted. Obviously, this driver is missing
	 * something since it works in the Linux driver, but figuring out what
	 * is missing is a little more complicated. Now, since we're going
	 * back to nothing anyway, we'll just do a complete device reset.
	 */
	//iwm_mvm_flush_tx_path(sc, 0xf, 1);
	/* NOTE(review): the iwm_init_hw() return value is ignored here. */
	iwm_stop_device(sc);
	iwm_init_hw(sc);
	if (in)
		in->in_assoc = 0;
	return 0;

#if 0
	/* Disabled incremental teardown path, kept for reference. */
	int error;

	iwm_mvm_power_mac_disable(sc, in);

	if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
		aprint_error_dev(sc->sc_dev, "mac ctxt change fail 1 %d\n",
		    error);
		return error;
	}

	if ((error = iwm_mvm_rm_sta(sc, in)) != 0) {
		aprint_error_dev(sc->sc_dev, "sta remove fail %d\n", error);
		return error;
	}
	/* NOTE(review): duplicate rm_sta call in this disabled path. */
	error = iwm_mvm_rm_sta(sc, in);
	in->in_assoc = 0;
	iwm_mvm_update_quotas(sc, NULL);
	if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
		aprint_error_dev(sc->sc_dev, "mac ctxt change fail 2 %d\n",
		    error);
		return error;
	}
	iwm_mvm_binding_remove_vif(sc, in);

	iwm_mvm_mac_ctxt_remove(sc, in);

	return error;
#endif
}
5300
5301
5302 static struct ieee80211_node *
5303 iwm_node_alloc(struct ieee80211_node_table *nt)
5304 {
5305 return malloc(sizeof(struct iwm_node), M_80211_NODE, M_NOWAIT | M_ZERO);
5306 }
5307
5308 static void
5309 iwm_calib_timeout(void *arg)
5310 {
5311 struct iwm_softc *sc = arg;
5312 struct ieee80211com *ic = &sc->sc_ic;
5313 int s;
5314
5315 s = splnet();
5316 if (ic->ic_fixed_rate == -1
5317 && ic->ic_opmode == IEEE80211_M_STA
5318 && ic->ic_bss) {
5319 struct iwm_node *in = (void *)ic->ic_bss;
5320 ieee80211_amrr_choose(&sc->sc_amrr, &in->in_ni, &in->in_amn);
5321 }
5322 splx(s);
5323
5324 callout_schedule(&sc->sc_calib_to, hz/2);
5325 }
5326
/*
 * Build the firmware link-quality (rate-selection) command for a node
 * and initialize AMRR state.  Maps each of the node's 802.11 rates to
 * a HW rate index in in->in_ridx[], then fills in->in_lq.rs_table
 * highest-rate-first, rotating the TX antenna per entry.
 */
static void
iwm_setrates(struct iwm_node *in)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211com *ic = ni->ni_ic;
	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
	struct iwm_lq_cmd *lq = &in->in_lq;
	int nrates = ni->ni_rates.rs_nrates;
	int i, ridx, tab = 0;
	int txant = 0;

	if (nrates > __arraycount(lq->rs_table) ||
	    nrates > IEEE80211_RATE_MAXSIZE) {
		DPRINTF(("%s: node supports %d rates, driver handles only "
		    "%zu\n", DEVNAME(sc), nrates, __arraycount(lq->rs_table)));
		return;
	}

	/* first figure out which rates we should support */
	memset(&in->in_ridx, -1, sizeof(in->in_ridx));
	for (i = 0; i < nrates; i++) {
		int rate = ni->ni_rates.rs_rates[i] & IEEE80211_RATE_VAL;

		/* Map 802.11 rate to HW rate index. */
		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
			if (iwm_rates[ridx].rate == rate)
				break;
		if (ridx > IWM_RIDX_MAX)
			/*
			 * NOTE(review): the entry stays -1 here; it would
			 * index iwm_rates[] out of bounds in the loop below
			 * if this ever triggers -- confirm it cannot.
			 */
			DPRINTF(("%s: WARNING: device rate for %d not found!\n",
			    DEVNAME(sc), rate));
		else
			in->in_ridx[i] = ridx;
	}

	/* then construct a lq_cmd based on those */
	memset(lq, 0, sizeof(*lq));
	lq->sta_id = IWM_STATION_ID;

	/*
	 * are these used? (we don't do SISO or MIMO)
	 * need to set them to non-zero, though, or we get an error.
	 */
	lq->single_stream_ant_msk = 1;
	lq->dual_stream_ant_msk = 1;

	/*
	 * Build the actual rate selection table.
	 * The lowest bits are the rates. Additionally,
	 * CCK needs bit 9 to be set. The rest of the bits
	 * we add to the table select the tx antenna
	 * Note that we add the rates in the highest rate first
	 * (opposite of ni_rates).
	 */
	for (i = 0; i < nrates; i++) {
		int nextant;

		/* Rotate through the antennas the firmware marked valid. */
		if (txant == 0)
			txant = IWM_FW_VALID_TX_ANT(sc);
		nextant = 1<<(ffs(txant)-1);
		txant &= ~nextant;

		ridx = in->in_ridx[(nrates-1)-i];
		tab = iwm_rates[ridx].plcp;
		tab |= nextant << IWM_RATE_MCS_ANT_POS;
		if (IWM_RIDX_IS_CCK(ridx))
			tab |= IWM_RATE_MCS_CCK_MSK;
		DPRINTFN(2, ("station rate %d %x\n", i, tab));
		lq->rs_table[i] = htole32(tab);
	}
	/* then fill the rest with the lowest possible rate */
	for (i = nrates; i < __arraycount(lq->rs_table); i++) {
		KASSERT(tab != 0);
		lq->rs_table[i] = htole32(tab);
	}

	/* init amrr */
	ieee80211_amrr_node_init(&sc->sc_amrr, &in->in_amn);
	/* Start at lowest available bit-rate, AMRR will raise. */
	ni->ni_txrate = 0;
}
5407
5408 static int
5409 iwm_media_change(struct ifnet *ifp)
5410 {
5411 struct iwm_softc *sc = ifp->if_softc;
5412 struct ieee80211com *ic = &sc->sc_ic;
5413 uint8_t rate, ridx;
5414 int error;
5415
5416 error = ieee80211_media_change(ifp);
5417 if (error != ENETRESET)
5418 return error;
5419
5420 if (ic->ic_fixed_rate != -1) {
5421 rate = ic->ic_sup_rates[ic->ic_curmode].
5422 rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL;
5423 /* Map 802.11 rate to HW rate index. */
5424 for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
5425 if (iwm_rates[ridx].rate == rate)
5426 break;
5427 sc->sc_fixed_ridx = ridx;
5428 }
5429
5430 if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
5431 (IFF_UP | IFF_RUNNING)) {
5432 iwm_stop(ifp, 0);
5433 error = iwm_init(ifp);
5434 }
5435 return error;
5436 }
5437
/*
 * Deferred (workqueue) half of iwm_newstate(): performs the firmware
 * work for an 802.11 state transition, then chains to the stacked
 * net80211 state handler.  Runs in thread context so it may sleep.
 */
static void
iwm_newstate_cb(struct work *wk, void *v)
{
	struct iwm_softc *sc = v;
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_newstate_state *iwmns = (void *)wk;
	enum ieee80211_state nstate = iwmns->ns_nstate;
	int generation = iwmns->ns_generation;
	struct iwm_node *in;
	int arg = iwmns->ns_arg;
	int error;

	kmem_free(iwmns, sizeof(*iwmns));

	DPRINTF(("Prepare to switch state %d->%d\n", ic->ic_state, nstate));
	/* A generation bump means the device was reset since enqueue. */
	if (sc->sc_generation != generation) {
		DPRINTF(("newstate_cb: someone pulled the plug meanwhile\n"));
		if (nstate == IEEE80211_S_INIT) {
			DPRINTF(("newstate_cb: nstate == IEEE80211_S_INIT: calling sc_newstate()\n"));
			sc->sc_newstate(ic, nstate, arg);
		}
		return;
	}

	DPRINTF(("switching state %d->%d\n", ic->ic_state, nstate));

	/* disable beacon filtering if we're hopping out of RUN */
	if (ic->ic_state == IEEE80211_S_RUN && nstate != ic->ic_state) {
		iwm_mvm_disable_beacon_filter(sc, (void *)ic->ic_bss);

		if (((in = (void *)ic->ic_bss) != NULL))
			in->in_assoc = 0;
		iwm_release(sc, NULL);

		/*
		 * It's impossible to directly go RUN->SCAN. If we iwm_release()
		 * above then the card will be completely reinitialized,
		 * so the driver must do everything necessary to bring the card
		 * from INIT to SCAN.
		 *
		 * Additionally, upon receiving deauth frame from AP,
		 * OpenBSD 802.11 stack puts the driver in IEEE80211_S_AUTH
		 * state. This will also fail with this driver, so bring the FSM
		 * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
		 */
		if (nstate == IEEE80211_S_SCAN ||
		    nstate == IEEE80211_S_AUTH ||
		    nstate == IEEE80211_S_ASSOC) {
			DPRINTF(("Force transition to INIT; MGT=%d\n", arg));
			sc->sc_newstate(ic, IEEE80211_S_INIT, arg);
			DPRINTF(("Going INIT->SCAN\n"));
			nstate = IEEE80211_S_SCAN;
		}
	}

	switch (nstate) {
	case IEEE80211_S_INIT:
		sc->sc_scanband = 0;
		break;

	case IEEE80211_S_SCAN:
		/* A scan pass is already in flight; don't start another. */
		if (sc->sc_scanband)
			break;

		if ((error = iwm_mvm_scan_request(sc, IEEE80211_CHAN_2GHZ,
		    ic->ic_des_esslen != 0,
		    ic->ic_des_essid, ic->ic_des_esslen)) != 0) {
			DPRINTF(("%s: could not initiate scan\n", DEVNAME(sc)));
			return;
		}
		/* Scan completion advances the state; skip sc_newstate(). */
		ic->ic_state = nstate;
		return;

	case IEEE80211_S_AUTH:
		if ((error = iwm_auth(sc)) != 0) {
			DPRINTF(("%s: could not move to auth state: %d\n",
			    DEVNAME(sc), error));
			return;
		}

		break;

	case IEEE80211_S_ASSOC:
		if ((error = iwm_assoc(sc)) != 0) {
			DPRINTF(("%s: failed to associate: %d\n", DEVNAME(sc),
			    error));
			return;
		}
		break;

	case IEEE80211_S_RUN: {
		struct iwm_host_cmd cmd = {
			.id = IWM_LQ_CMD,
			.len = { sizeof(in->in_lq), },
			.flags = IWM_CMD_SYNC,
		};

		in = (struct iwm_node *)ic->ic_bss;
		iwm_mvm_power_mac_update_mode(sc, in);
		iwm_mvm_enable_beacon_filter(sc, in);
		iwm_mvm_update_quotas(sc, in);
		iwm_setrates(in);

		/* Push the rate table built by iwm_setrates() to the FW. */
		cmd.data[0] = &in->in_lq;
		if ((error = iwm_send_cmd(sc, &cmd)) != 0) {
			DPRINTF(("%s: IWM_LQ_CMD failed\n", DEVNAME(sc)));
		}

		/* Kick off periodic AMRR rate adaptation. */
		callout_schedule(&sc->sc_calib_to, hz/2);

		break; }

	default:
		DPRINTF(("%s: unsupported state %d\n", DEVNAME(sc), nstate));
		break;
	}

	sc->sc_newstate(ic, nstate, arg);
}
5557
5558 static int
5559 iwm_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
5560 {
5561 struct iwm_newstate_state *iwmns;
5562 struct ifnet *ifp = IC2IFP(ic);
5563 struct iwm_softc *sc = ifp->if_softc;
5564
5565 callout_stop(&sc->sc_calib_to);
5566
5567 iwmns = kmem_intr_alloc(sizeof(*iwmns), KM_NOSLEEP);
5568 if (!iwmns) {
5569 DPRINTF(("%s: allocating state cb mem failed\n", DEVNAME(sc)));
5570 return ENOMEM;
5571 }
5572
5573 iwmns->ns_nstate = nstate;
5574 iwmns->ns_arg = arg;
5575 iwmns->ns_generation = sc->sc_generation;
5576
5577 workqueue_enqueue(sc->sc_nswq, &iwmns->ns_wk, NULL);
5578
5579 return 0;
5580 }
5581
5582 static void
5583 iwm_endscan_cb(struct work *work __unused, void *arg)
5584 {
5585 struct iwm_softc *sc = arg;
5586 struct ieee80211com *ic = &sc->sc_ic;
5587 int done;
5588
5589 DPRINTF(("scan ended\n"));
5590
5591 if (sc->sc_scanband == IEEE80211_CHAN_2GHZ &&
5592 sc->sc_nvm.sku_cap_band_52GHz_enable) {
5593 int error;
5594 done = 0;
5595 if ((error = iwm_mvm_scan_request(sc,
5596 IEEE80211_CHAN_5GHZ, ic->ic_des_esslen != 0,
5597 ic->ic_des_essid, ic->ic_des_esslen)) != 0) {
5598 DPRINTF(("%s: could not initiate scan\n", DEVNAME(sc)));
5599 done = 1;
5600 }
5601 } else {
5602 done = 1;
5603 }
5604
5605 if (done) {
5606 if (!sc->sc_scanband) {
5607 ieee80211_cancel_scan(ic);
5608 } else {
5609 ieee80211_end_scan(ic);
5610 }
5611 sc->sc_scanband = 0;
5612 }
5613 }
5614
/*
 * Bring the hardware fully up: run the INIT firmware image for
 * calibration, restart the HW, load the regular firmware, then
 * configure antennas, PHY contexts, power and TX queues.  On any
 * failure after firmware load the device is stopped again.
 */
static int
iwm_init_hw(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	int error, i, qid;

	if ((error = iwm_preinit(sc)) != 0)
		return error;

	if ((error = iwm_start_hw(sc)) != 0)
		return error;

	if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
		return error;
	}

	/*
	 * should stop and start HW since that INIT
	 * image just loaded
	 */
	iwm_stop_device(sc);
	if ((error = iwm_start_hw(sc)) != 0) {
		aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
		return error;
	}

	/* restart, this time with the regular firmware */
	error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_REGULAR);
	if (error) {
		aprint_error_dev(sc->sc_dev, "could not load firmware\n");
		goto error;
	}

	if ((error = iwm_send_tx_ant_cfg(sc, IWM_FW_VALID_TX_ANT(sc))) != 0)
		goto error;

	/* Send phy db control command and then phy db calibration */
	if ((error = iwm_send_phy_db_data(sc)) != 0)
		goto error;

	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0)
		goto error;

	/* Add auxiliary station for scanning */
	if ((error = iwm_mvm_add_aux_sta(sc)) != 0)
		goto error;

	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
		/*
		 * The channel used here isn't relevant as it's
		 * going to be overwritten in the other flows.
		 * For now use the first channel we have.
		 * NOTE(review): this indexes ic_channels[1], not [0] --
		 * confirm against the ic_channels array layout.
		 */
		if ((error = iwm_mvm_phy_ctxt_add(sc,
		    &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
			goto error;
	}

	error = iwm_mvm_power_update_device(sc);
	if (error)
		goto error;

	/* Mark TX rings as active. */
	for (qid = 0; qid < 4; qid++) {
		iwm_enable_txq(sc, qid, qid);
	}

	return 0;

 error:
	iwm_stop_device(sc);
	return error;
}
5688
5689 /* Allow multicast from our BSSID. */
5690 static int
5691 iwm_allow_mcast(struct iwm_softc *sc)
5692 {
5693 struct ieee80211com *ic = &sc->sc_ic;
5694 struct ieee80211_node *ni = ic->ic_bss;
5695 struct iwm_mcast_filter_cmd *cmd;
5696 size_t size;
5697 int error;
5698
5699 size = roundup(sizeof(*cmd), 4);
5700 cmd = kmem_intr_zalloc(size, KM_NOSLEEP);
5701 if (cmd == NULL)
5702 return ENOMEM;
5703 cmd->filter_own = 1;
5704 cmd->port_id = 0;
5705 cmd->count = 0;
5706 cmd->pass_all = 1;
5707 IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
5708
5709 error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
5710 IWM_CMD_SYNC, size, cmd);
5711 kmem_intr_free(cmd, size);
5712 return error;
5713 }
5714
5715 /*
5716 * ifnet interfaces
5717 */
5718
5719 static int
5720 iwm_init(struct ifnet *ifp)
5721 {
5722 struct iwm_softc *sc = ifp->if_softc;
5723 int error;
5724
5725 if (sc->sc_flags & IWM_FLAG_HW_INITED) {
5726 return 0;
5727 }
5728 sc->sc_generation++;
5729 sc->sc_flags &= ~IWM_FLAG_STOPPED;
5730
5731 if ((error = iwm_init_hw(sc)) != 0) {
5732 iwm_stop(ifp, 1);
5733 return error;
5734 }
5735
5736 /*
5737 * Ok, firmware loaded and we are jogging
5738 */
5739
5740 ifp->if_flags &= ~IFF_OACTIVE;
5741 ifp->if_flags |= IFF_RUNNING;
5742
5743 ieee80211_begin_scan(&sc->sc_ic, 0);
5744 sc->sc_flags |= IWM_FLAG_HW_INITED;
5745
5746 return 0;
5747 }
5748
5749 /*
5750 * Dequeue packets from sendq and call send.
5751 * mostly from iwn
5752 */
5753 static void
5754 iwm_start(struct ifnet *ifp)
5755 {
5756 struct iwm_softc *sc = ifp->if_softc;
5757 struct ieee80211com *ic = &sc->sc_ic;
5758 struct ieee80211_node *ni;
5759 struct ether_header *eh;
5760 struct mbuf *m;
5761 int ac;
5762
5763 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
5764 return;
5765
5766 for (;;) {
5767 /* why isn't this done per-queue? */
5768 if (sc->qfullmsk != 0) {
5769 ifp->if_flags |= IFF_OACTIVE;
5770 break;
5771 }
5772
5773 /* need to send management frames even if we're not RUNning */
5774 IF_DEQUEUE(&ic->ic_mgtq, m);
5775 if (m) {
5776 ni = (void *)m->m_pkthdr.rcvif;
5777 ac = 0;
5778 goto sendit;
5779 }
5780 if (ic->ic_state != IEEE80211_S_RUN) {
5781 break;
5782 }
5783
5784 IFQ_DEQUEUE(&ifp->if_snd, m);
5785 if (!m)
5786 break;
5787 if (m->m_len < sizeof (*eh) &&
5788 (m = m_pullup(m, sizeof (*eh))) == NULL) {
5789 ifp->if_oerrors++;
5790 continue;
5791 }
5792 if (ifp->if_bpf != NULL)
5793 bpf_mtap(ifp, m);
5794
5795 eh = mtod(m, struct ether_header *);
5796 ni = ieee80211_find_txnode(ic, eh->ether_dhost);
5797 if (ni == NULL) {
5798 m_freem(m);
5799 ifp->if_oerrors++;
5800 continue;
5801 }
5802 /* classify mbuf so we can find which tx ring to use */
5803 if (ieee80211_classify(ic, m, ni) != 0) {
5804 m_freem(m);
5805 ieee80211_free_node(ni);
5806 ifp->if_oerrors++;
5807 continue;
5808 }
5809
5810 /* No QoS encapsulation for EAPOL frames. */
5811 ac = (eh->ether_type != htons(ETHERTYPE_PAE)) ?
5812 M_WME_GETAC(m) : WME_AC_BE;
5813
5814 if ((m = ieee80211_encap(ic, m, ni)) == NULL) {
5815 ieee80211_free_node(ni);
5816 ifp->if_oerrors++;
5817 continue;
5818 }
5819
5820 sendit:
5821 if (ic->ic_rawbpf != NULL)
5822 bpf_mtap3(ic->ic_rawbpf, m);
5823 if (iwm_tx(sc, m, ni, ac) != 0) {
5824 ieee80211_free_node(ni);
5825 ifp->if_oerrors++;
5826 continue;
5827 }
5828
5829 if (ifp->if_flags & IFF_UP) {
5830 sc->sc_tx_timer = 15;
5831 ifp->if_timer = 1;
5832 }
5833 }
5834
5835 return;
5836 }
5837
/*
 * ifnet stop routine: invalidate pending deferred work via the
 * generation counter, push net80211 back to INIT, and power the
 * device down.  Statement order matters: software state is torn
 * down before iwm_stop_device().
 */
static void
iwm_stop(struct ifnet *ifp, int disable)
{
	struct iwm_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;

	sc->sc_flags &= ~IWM_FLAG_HW_INITED;
	sc->sc_flags |= IWM_FLAG_STOPPED;
	sc->sc_generation++;	/* invalidates queued newstate callbacks */
	sc->sc_scanband = 0;
	sc->sc_auth_prot = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	if (ic->ic_state != IEEE80211_S_INIT)
		ieee80211_new_state(ic, IEEE80211_S_INIT, -1);

	callout_stop(&sc->sc_calib_to);
	ifp->if_timer = sc->sc_tx_timer = 0;
	iwm_stop_device(sc);
}
5858
5859 static void
5860 iwm_watchdog(struct ifnet *ifp)
5861 {
5862 struct iwm_softc *sc = ifp->if_softc;
5863
5864 ifp->if_timer = 0;
5865 if (sc->sc_tx_timer > 0) {
5866 if (--sc->sc_tx_timer == 0) {
5867 aprint_error_dev(sc->sc_dev, "device timeout\n");
5868 #ifdef IWM_DEBUG
5869 iwm_nic_error(sc);
5870 #endif
5871 ifp->if_flags &= ~IFF_UP;
5872 iwm_stop(ifp, 1);
5873 ifp->if_oerrors++;
5874 return;
5875 }
5876 ifp->if_timer = 1;
5877 }
5878
5879 ieee80211_watchdog(&sc->sc_ic);
5880 }
5881
5882 static int
5883 iwm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
5884 {
5885 struct iwm_softc *sc = ifp->if_softc;
5886 struct ieee80211com *ic = &sc->sc_ic;
5887 const struct sockaddr *sa;
5888 int s, error = 0;
5889
5890 s = splnet();
5891
5892 switch (cmd) {
5893 case SIOCSIFADDR:
5894 ifp->if_flags |= IFF_UP;
5895 /* FALLTHROUGH */
5896 case SIOCSIFFLAGS:
5897 if ((error = ifioctl_common(ifp, cmd, data)) != 0)
5898 break;
5899 if (ifp->if_flags & IFF_UP) {
5900 if (!(ifp->if_flags & IFF_RUNNING)) {
5901 if ((error = iwm_init(ifp)) != 0)
5902 ifp->if_flags &= ~IFF_UP;
5903 }
5904 } else {
5905 if (ifp->if_flags & IFF_RUNNING)
5906 iwm_stop(ifp, 1);
5907 }
5908 break;
5909
5910 case SIOCADDMULTI:
5911 case SIOCDELMULTI:
5912 sa = ifreq_getaddr(SIOCADDMULTI, (struct ifreq *)data);
5913 error = (cmd == SIOCADDMULTI) ?
5914 ether_addmulti(sa, &sc->sc_ec) :
5915 ether_delmulti(sa, &sc->sc_ec);
5916
5917 if (error == ENETRESET)
5918 error = 0;
5919 break;
5920
5921 default:
5922 error = ieee80211_ioctl(ic, cmd, data);
5923 }
5924
5925 if (error == ENETRESET) {
5926 error = 0;
5927 if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
5928 (IFF_UP | IFF_RUNNING)) {
5929 iwm_stop(ifp, 0);
5930 error = iwm_init(ifp);
5931 }
5932 }
5933
5934 splx(s);
5935 return error;
5936 }
5937
5938 /*
5939 * The interrupt side of things
5940 */
5941
5942 /*
5943 * error dumping routines are from iwlwifi/mvm/utils.c
5944 */
5945
5946 /*
5947 * Note: This structure is read from the device with IO accesses,
5948 * and the reading already does the endian conversion. As it is
5949 * read with uint32_t-sized accesses, any members with a different size
5950 * need to be ordered correctly though!
5951 */
5952 struct iwm_error_event_table {
5953 uint32_t valid; /* (nonzero) valid, (0) log is empty */
5954 uint32_t error_id; /* type of error */
5955 uint32_t pc; /* program counter */
5956 uint32_t blink1; /* branch link */
5957 uint32_t blink2; /* branch link */
5958 uint32_t ilink1; /* interrupt link */
5959 uint32_t ilink2; /* interrupt link */
5960 uint32_t data1; /* error-specific data */
5961 uint32_t data2; /* error-specific data */
5962 uint32_t data3; /* error-specific data */
5963 uint32_t bcon_time; /* beacon timer */
5964 uint32_t tsf_low; /* network timestamp function timer */
5965 uint32_t tsf_hi; /* network timestamp function timer */
5966 uint32_t gp1; /* GP1 timer register */
5967 uint32_t gp2; /* GP2 timer register */
5968 uint32_t gp3; /* GP3 timer register */
5969 uint32_t ucode_ver; /* uCode version */
5970 uint32_t hw_ver; /* HW Silicon version */
5971 uint32_t brd_ver; /* HW board version */
5972 uint32_t log_pc; /* log program counter */
5973 uint32_t frame_ptr; /* frame pointer */
5974 uint32_t stack_ptr; /* stack pointer */
5975 uint32_t hcmd; /* last host command header */
5976 uint32_t isr0; /* isr status register LMPM_NIC_ISR0:
5977 * rxtx_flag */
5978 uint32_t isr1; /* isr status register LMPM_NIC_ISR1:
5979 * host_flag */
5980 uint32_t isr2; /* isr status register LMPM_NIC_ISR2:
5981 * enc_flag */
5982 uint32_t isr3; /* isr status register LMPM_NIC_ISR3:
5983 * time_flag */
5984 uint32_t isr4; /* isr status register LMPM_NIC_ISR4:
5985 * wico interrupt */
5986 uint32_t isr_pref; /* isr status register LMPM_NIC_PREF_STAT */
5987 uint32_t wait_event; /* wait event() caller address */
5988 uint32_t l2p_control; /* L2pControlField */
5989 uint32_t l2p_duration; /* L2pDurationField */
5990 uint32_t l2p_mhvalid; /* L2pMhValidBits */
5991 uint32_t l2p_addr_match; /* L2pAddrMatchStat */
5992 uint32_t lmpm_pmg_sel; /* indicate which clocks are turned on
5993 * (LMPM_PMG_SEL) */
5994 uint32_t u_timestamp; /* indicate when the date and time of the
5995 * compilation */
5996 uint32_t flow_handler; /* FH read/write pointers, RX credit */
5997 } __packed;
5998
/* Error log layout: one uint32 'valid' word followed by 7-word records. */
#define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
#define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
6001
#ifdef IWM_DEBUG
/*
 * Firmware SYSASSERT error-id to name map.  The catch-all entry
 * (num == 0, "ADVANCED_SYSASSERT") must remain last: iwm_desc_lookup()
 * falls through to it when no id matches.
 */
static const struct {
	const char *name;
	uint8_t num;
} advanced_lookup[] = {
	{ "NMI_INTERRUPT_WDG", 0x34 },
	{ "SYSASSERT", 0x35 },
	{ "UCODE_VERSION_MISMATCH", 0x37 },
	{ "BAD_COMMAND", 0x38 },
	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
	{ "FATAL_ERROR", 0x3D },
	{ "NMI_TRM_HW_ERR", 0x46 },
	{ "NMI_INTERRUPT_TRM", 0x4C },
	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
	{ "NMI_INTERRUPT_HOST", 0x66 },
	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
	{ "ADVANCED_SYSASSERT", 0 },
};
6024
6025 static const char *
6026 iwm_desc_lookup(uint32_t num)
6027 {
6028 int i;
6029
6030 for (i = 0; i < __arraycount(advanced_lookup) - 1; i++)
6031 if (advanced_lookup[i].num == num)
6032 return advanced_lookup[i].name;
6033
6034 /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
6035 return advanced_lookup[i].name;
6036 }
6037
6038 /*
6039 * Support for dumping the error log seemed like a good idea ...
6040 * but it's mostly hex junk and the only sensible thing is the
6041 * hw/ucode revision (which we know anyway). Since it's here,
6042 * I'll just leave it in, just in case e.g. the Intel guys want to
6043 * help us decipher some "ADVANCED_SYSASSERT" later.
6044 */
6045 static void
6046 iwm_nic_error(struct iwm_softc *sc)
6047 {
6048 struct iwm_error_event_table table;
6049 uint32_t base;
6050
6051 aprint_error_dev(sc->sc_dev, "dumping device error log\n");
6052 base = sc->sc_uc.uc_error_event_table;
6053 if (base < 0x800000 || base >= 0x80C000) {
6054 aprint_error_dev(sc->sc_dev,
6055 "Not valid error log pointer 0x%08x\n", base);
6056 return;
6057 }
6058
6059 if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t)) != 0) {
6060 aprint_error_dev(sc->sc_dev, "reading errlog failed\n");
6061 return;
6062 }
6063
6064 if (!table.valid) {
6065 aprint_error_dev(sc->sc_dev, "errlog not found, skipping\n");
6066 return;
6067 }
6068
6069 if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
6070 aprint_error_dev(sc->sc_dev, "Start IWL Error Log Dump:\n");
6071 aprint_error_dev(sc->sc_dev, "Status: 0x%x, count: %d\n",
6072 sc->sc_flags, table.valid);
6073 }
6074
6075 aprint_error_dev(sc->sc_dev, "%08X | %-28s\n", table.error_id,
6076 iwm_desc_lookup(table.error_id));
6077 aprint_error_dev(sc->sc_dev, "%08X | uPc\n", table.pc);
6078 aprint_error_dev(sc->sc_dev, "%08X | branchlink1\n", table.blink1);
6079 aprint_error_dev(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
6080 aprint_error_dev(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
6081 aprint_error_dev(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
6082 aprint_error_dev(sc->sc_dev, "%08X | data1\n", table.data1);
6083 aprint_error_dev(sc->sc_dev, "%08X | data2\n", table.data2);
6084 aprint_error_dev(sc->sc_dev, "%08X | data3\n", table.data3);
6085 aprint_error_dev(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
6086 aprint_error_dev(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
6087 aprint_error_dev(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
6088 aprint_error_dev(sc->sc_dev, "%08X | time gp1\n", table.gp1);
6089 aprint_error_dev(sc->sc_dev, "%08X | time gp2\n", table.gp2);
6090 aprint_error_dev(sc->sc_dev, "%08X | time gp3\n", table.gp3);
6091 aprint_error_dev(sc->sc_dev, "%08X | uCode version\n", table.ucode_ver);
6092 aprint_error_dev(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
6093 aprint_error_dev(sc->sc_dev, "%08X | board version\n", table.brd_ver);
6094 aprint_error_dev(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
6095 aprint_error_dev(sc->sc_dev, "%08X | isr0\n", table.isr0);
6096 aprint_error_dev(sc->sc_dev, "%08X | isr1\n", table.isr1);
6097 aprint_error_dev(sc->sc_dev, "%08X | isr2\n", table.isr2);
6098 aprint_error_dev(sc->sc_dev, "%08X | isr3\n", table.isr3);
6099 aprint_error_dev(sc->sc_dev, "%08X | isr4\n", table.isr4);
6100 aprint_error_dev(sc->sc_dev, "%08X | isr_pref\n", table.isr_pref);
6101 aprint_error_dev(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
6102 aprint_error_dev(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
6103 aprint_error_dev(sc->sc_dev, "%08X | l2p_duration\n",
6104 table.l2p_duration);
6105 aprint_error_dev(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
6106 aprint_error_dev(sc->sc_dev, "%08X | l2p_addr_match\n",
6107 table.l2p_addr_match);
6108 aprint_error_dev(sc->sc_dev, "%08X | lmpm_pmg_sel\n",
6109 table.lmpm_pmg_sel);
6110 aprint_error_dev(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
6111 aprint_error_dev(sc->sc_dev, "%08X | flow_handler\n",
6112 table.flow_handler);
6113 }
6114 #endif
6115
/*
 * Sync the DMA'd response payload following the packet header before
 * dereferencing it.  SYNC_RESP_STRUCT syncs sizeof(*(_var_)) bytes;
 * SYNC_RESP_PTR syncs the caller-supplied byte count.
 */
#define SYNC_RESP_STRUCT(_var_, _pkt_)					\
do {									\
	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
	    sizeof(*(_var_)), BUS_DMASYNC_POSTREAD);			\
	_var_ = (void *)((_pkt_)+1);					\
} while (/*CONSTCOND*/0)

/*
 * Previously this synced sizeof(len) bytes -- capturing a variable
 * named 'len' from the caller's scope and syncing only a few bytes --
 * instead of the _len_ argument.
 */
#define SYNC_RESP_PTR(_ptr_, _len_, _pkt_)				\
do {									\
	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
	    (_len_), BUS_DMASYNC_POSTREAD);				\
	_ptr_ = (void *)((_pkt_)+1);					\
} while (/*CONSTCOND*/0)

/* Advance the RX ring consumer index, wrapping at the ring size. */
#define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % IWM_RX_RING_COUNT);
6131
6132 /*
6133 * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
6134 * Basic structure from if_iwn
6135 */
6136 static void
6137 iwm_notif_intr(struct iwm_softc *sc)
6138 {
6139 uint16_t hw;
6140
6141 bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
6142 0, sc->rxq.stat_dma.size, BUS_DMASYNC_POSTREAD);
6143
6144 hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
6145 while (sc->rxq.cur != hw) {
6146 struct iwm_rx_data *data = &sc->rxq.data[sc->rxq.cur];
6147 struct iwm_rx_packet *pkt, tmppkt;
6148 struct iwm_cmd_response *cresp;
6149 int qid, idx;
6150
6151 bus_dmamap_sync(sc->sc_dmat, data->map, 0, sizeof(*pkt),
6152 BUS_DMASYNC_POSTREAD);
6153 pkt = mtod(data->m, struct iwm_rx_packet *);
6154
6155 qid = pkt->hdr.qid & ~0x80;
6156 idx = pkt->hdr.idx;
6157
6158 DPRINTFN(12, ("rx packet qid=%d idx=%d flags=%x type=%x %d %d\n",
6159 pkt->hdr.qid & ~0x80, pkt->hdr.idx, pkt->hdr.flags,
6160 pkt->hdr.code, sc->rxq.cur, hw));
6161
6162 /*
6163 * randomly get these from the firmware, no idea why.
6164 * they at least seem harmless, so just ignore them for now
6165 */
6166 if (__predict_false((pkt->hdr.code == 0 && qid == 0 && idx == 0)
6167 || pkt->len_n_flags == htole32(0x55550000))) {
6168 ADVANCE_RXQ(sc);
6169 continue;
6170 }
6171
6172 switch (pkt->hdr.code) {
6173 case IWM_REPLY_RX_PHY_CMD:
6174 iwm_mvm_rx_rx_phy_cmd(sc, pkt, data);
6175 break;
6176
6177 case IWM_REPLY_RX_MPDU_CMD:
6178 tmppkt = *pkt; // XXX m is freed by ieee80211_input()
6179 iwm_mvm_rx_rx_mpdu(sc, pkt, data);
6180 pkt = &tmppkt;
6181 break;
6182
6183 case IWM_TX_CMD:
6184 iwm_mvm_rx_tx_cmd(sc, pkt, data);
6185 break;
6186
6187 case IWM_MISSED_BEACONS_NOTIFICATION:
6188 iwm_mvm_rx_missed_beacons_notif(sc, pkt, data);
6189 break;
6190
6191 case IWM_MVM_ALIVE: {
6192 struct iwm_mvm_alive_resp *resp;
6193 SYNC_RESP_STRUCT(resp, pkt);
6194
6195 sc->sc_uc.uc_error_event_table
6196 = le32toh(resp->error_event_table_ptr);
6197 sc->sc_uc.uc_log_event_table
6198 = le32toh(resp->log_event_table_ptr);
6199 sc->sched_base = le32toh(resp->scd_base_ptr);
6200 sc->sc_uc.uc_ok = resp->status == IWM_ALIVE_STATUS_OK;
6201
6202 sc->sc_uc.uc_intr = 1;
6203 wakeup(&sc->sc_uc);
6204 break; }
6205
6206 case IWM_CALIB_RES_NOTIF_PHY_DB: {
6207 struct iwm_calib_res_notif_phy_db *phy_db_notif;
6208 SYNC_RESP_STRUCT(phy_db_notif, pkt);
6209
6210 uint16_t size = le16toh(phy_db_notif->length);
6211 bus_dmamap_sync(sc->sc_dmat, data->map,
6212 sizeof(*pkt) + sizeof(*phy_db_notif),
6213 size, BUS_DMASYNC_POSTREAD);
6214 iwm_phy_db_set_section(sc, phy_db_notif, size);
6215
6216 break; }
6217
6218 case IWM_STATISTICS_NOTIFICATION: {
6219 struct iwm_notif_statistics *stats;
6220 SYNC_RESP_STRUCT(stats, pkt);
6221 memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
6222 sc->sc_noise = iwm_get_noise(&stats->rx.general);
6223 break; }
6224
6225 case IWM_NVM_ACCESS_CMD:
6226 if (sc->sc_wantresp == ((qid << 16) | idx)) {
6227 bus_dmamap_sync(sc->sc_dmat, data->map, 0,
6228 sizeof(sc->sc_cmd_resp),
6229 BUS_DMASYNC_POSTREAD);
6230 memcpy(sc->sc_cmd_resp,
6231 pkt, sizeof(sc->sc_cmd_resp));
6232 }
6233 break;
6234
6235 case IWM_PHY_CONFIGURATION_CMD:
6236 case IWM_TX_ANT_CONFIGURATION_CMD:
6237 case IWM_ADD_STA:
6238 case IWM_MAC_CONTEXT_CMD:
6239 case IWM_REPLY_SF_CFG_CMD:
6240 case IWM_POWER_TABLE_CMD:
6241 case IWM_PHY_CONTEXT_CMD:
6242 case IWM_BINDING_CONTEXT_CMD:
6243 case IWM_TIME_EVENT_CMD:
6244 case IWM_SCAN_REQUEST_CMD:
6245 case IWM_REPLY_BEACON_FILTERING_CMD:
6246 case IWM_MAC_PM_POWER_TABLE:
6247 case IWM_TIME_QUOTA_CMD:
6248 case IWM_REMOVE_STA:
6249 case IWM_TXPATH_FLUSH:
6250 case IWM_LQ_CMD:
6251 SYNC_RESP_STRUCT(cresp, pkt);
6252 if (sc->sc_wantresp == ((qid << 16) | idx)) {
6253 memcpy(sc->sc_cmd_resp,
6254 pkt, sizeof(*pkt)+sizeof(*cresp));
6255 }
6256 break;
6257
6258 /* ignore */
6259 case 0x6c: /* IWM_PHY_DB_CMD, no idea why it's not in fw-api.h */
6260 break;
6261
6262 case IWM_INIT_COMPLETE_NOTIF:
6263 sc->sc_init_complete = 1;
6264 wakeup(&sc->sc_init_complete);
6265 break;
6266
6267 case IWM_SCAN_COMPLETE_NOTIFICATION: {
6268 struct iwm_scan_complete_notif *notif;
6269 SYNC_RESP_STRUCT(notif, pkt);
6270
6271 workqueue_enqueue(sc->sc_eswq, &sc->sc_eswk, NULL);
6272 break; }
6273
6274 case IWM_REPLY_ERROR: {
6275 struct iwm_error_resp *resp;
6276 SYNC_RESP_STRUCT(resp, pkt);
6277
6278 aprint_error_dev(sc->sc_dev,
6279 "firmware error 0x%x, cmd 0x%x\n",
6280 le32toh(resp->error_type), resp->cmd_id);
6281 break; }
6282
6283 case IWM_TIME_EVENT_NOTIFICATION: {
6284 struct iwm_time_event_notif *notif;
6285 SYNC_RESP_STRUCT(notif, pkt);
6286
6287 if (notif->status) {
6288 if (le32toh(notif->action) &
6289 IWM_TE_V2_NOTIF_HOST_EVENT_START)
6290 sc->sc_auth_prot = 2;
6291 else
6292 sc->sc_auth_prot = 0;
6293 } else {
6294 sc->sc_auth_prot = -1;
6295 }
6296 wakeup(&sc->sc_auth_prot);
6297 break; }
6298
6299 case IWM_MCAST_FILTER_CMD:
6300 break;
6301
6302 default:
6303 aprint_error_dev(sc->sc_dev,
6304 "code %02x frame %d/%d %x UNHANDLED "
6305 "(this should not happen)\n",
6306 pkt->hdr.code, qid, idx, pkt->len_n_flags);
6307 break;
6308 }
6309
6310 /*
6311 * Why test bit 0x80? The Linux driver:
6312 *
6313 * There is one exception: uCode sets bit 15 when it
6314 * originates the response/notification, i.e. when the
6315 * response/notification is not a direct response to a
6316 * command sent by the driver. For example, uCode issues
6317 * IWM_REPLY_RX when it sends a received frame to the driver;
6318 * it is not a direct response to any driver command.
6319 *
6320 * Ok, so since when is 7 == 15? Well, the Linux driver
6321 * uses a slightly different format for pkt->hdr, and "qid"
6322 * is actually the upper byte of a two-byte field.
6323 */
6324 if (!(pkt->hdr.qid & (1 << 7))) {
6325 iwm_cmd_done(sc, pkt);
6326 }
6327
6328 ADVANCE_RXQ(sc);
6329 }
6330
6331 IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
6332 IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
6333
6334 /*
6335 * Tell the firmware what we have processed.
6336 * Seems like the hardware gets upset unless we align
6337 * the write by 8??
6338 */
6339 hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
6340 IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
6341 }
6342
/*
 * Interrupt handler.  Masks chip interrupts, gathers the pending causes
 * (from the ICT table when IWM_FLAG_USE_ICT is set, otherwise straight
 * from the INT/FH_INT_STATUS CSRs), dispatches them, and re-enables
 * interrupts on the way out unless the hardware has disappeared.
 * Returns nonzero if the interrupt was ours.
 */
static int
iwm_intr(void *arg)
{
	struct iwm_softc *sc = arg;
	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
	int handled = 0;
	int r1, r2, rv = 0;
	int isperiodic = 0;

	/* Mask all interrupts while we sort out the cause. */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
		uint32_t *ict = sc->ict_dma.vaddr;
		int tmp;

		/* ICT entries are little-endian in DMA memory. */
		tmp = htole32(ict[sc->ict_cur]);
		if (!tmp)
			goto out_ena;

		/*
		 * ok, there was something. keep plowing until we have all.
		 */
		r1 = r2 = 0;
		while (tmp) {
			r1 |= tmp;
			/* Consume the entry so we don't see it again. */
			ict[sc->ict_cur] = 0;
			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
			tmp = htole32(ict[sc->ict_cur]);
		}

		/* this is where the fun begins. don't ask */
		if (r1 == 0xffffffff)
			r1 = 0;

		/*
		 * i am not expected to understand this
		 *
		 * NOTE(review): this presumably remaps the ICT's packed
		 * cause byte into IWM_CSR_INT bit positions, mirroring the
		 * Linux iwlwifi ICT handling -- confirm against that code
		 * before touching it.
		 */
		if (r1 & 0xc0000)
			r1 |= 0x8000;
		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
	} else {
		r1 = IWM_READ(sc, IWM_CSR_INT);
		/* "hardware gone" (where, fishing?) */
		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
			goto out;
		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
	}
	if (r1 == 0 && r2 == 0) {
		/* Nothing pending: not our interrupt. */
		goto out_ena;
	}

	/* Ack the causes we saw, plus everything we don't listen for. */
	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);

	/* ignored */
	handled |= (r1 & (IWM_CSR_INT_BIT_ALIVE /*| IWM_CSR_INT_BIT_SCD*/));

	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
#ifdef IWM_DEBUG
		int i;

		iwm_nic_error(sc);

		/* Dump driver status (TX and RX rings) while we're here. */
		DPRINTF(("driver status:\n"));
		for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
			struct iwm_tx_ring *ring = &sc->txq[i];
			DPRINTF((" tx ring %2d: qid=%-2d cur=%-3d "
			    "queued=%-3d\n",
			    i, ring->qid, ring->cur, ring->queued));
		}
		DPRINTF((" rx ring: cur=%d\n", sc->rxq.cur));
		DPRINTF((" 802.11 state %d\n", sc->sc_ic.ic_state));
#endif

		/* Firmware is wedged: take the interface down. */
		aprint_error_dev(sc->sc_dev, "fatal firmware error\n");
		ifp->if_flags &= ~IFF_UP;
		iwm_stop(ifp, 1);
		rv = 1;
		goto out;

	}

	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
		handled |= IWM_CSR_INT_BIT_HW_ERR;
		aprint_error_dev(sc->sc_dev,
		    "hardware error, stopping device\n");
		ifp->if_flags &= ~IFF_UP;
		iwm_stop(ifp, 1);
		rv = 1;
		goto out;
	}

	/* firmware chunk loaded */
	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
		handled |= IWM_CSR_INT_BIT_FH_TX;

		/* Mark the DMA chunk done and wake the waiter on sc_fw. */
		sc->sc_fw_chunk_done = 1;
		wakeup(&sc->sc_fw);
	}

	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
		handled |= IWM_CSR_INT_BIT_RF_KILL;
		/* Only react if the kill switch is on and we were up. */
		if (iwm_check_rfkill(sc) && (ifp->if_flags & IFF_UP)) {
			DPRINTF(("%s: rfkill switch, disabling interface\n",
			    DEVNAME(sc)));
			ifp->if_flags &= ~IFF_UP;
			iwm_stop(ifp, 1);
		}
	}

	/*
	 * The Linux driver uses periodic interrupts to avoid races.
	 * We cargo-cult like it's going out of fashion.
	 */
	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
		handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
		/* Only disable periodic ints when no real RX is pending. */
		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
			IWM_WRITE_1(sc,
			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
		isperiodic = 1;
	}

	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
		handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);

		/* Drain everything the firmware put on the RX ring. */
		iwm_notif_intr(sc);

		/* enable periodic interrupt, see above */
		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
			    IWM_CSR_INT_PERIODIC_ENA);
	}

	if (__predict_false(r1 & ~handled))
		DPRINTF(("%s: unhandled interrupts: %x\n", DEVNAME(sc), r1));
	rv = 1;

out_ena:
	/* Undo the mask-all from the top of the handler. */
	iwm_restore_interrupts(sc);
out:
	return rv;
}
6486
6487 /*
6488 * Autoconf glue-sniffing
6489 */
6490
/*
 * PCI product IDs this driver attaches to; all are Intel-vendor parts
 * (the vendor check happens separately in iwm_match()).
 */
static const pci_product_id_t iwm_devices[] = {
	PCI_PRODUCT_INTEL_WIFI_LINK_7260_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_7260_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_3160_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_3160_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_7265_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_7265_2,
};
6499
6500 static int
6501 iwm_match(device_t parent, cfdata_t match __unused, void *aux)
6502 {
6503 struct pci_attach_args *pa = aux;
6504
6505 if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_INTEL)
6506 return 0;
6507
6508 for (size_t i = 0; i < __arraycount(iwm_devices); i++)
6509 if (PCI_PRODUCT(pa->pa_id) == iwm_devices[i])
6510 return 1;
6511
6512 return 0;
6513 }
6514
6515 static int
6516 iwm_preinit(struct iwm_softc *sc)
6517 {
6518 int error;
6519
6520 if ((error = iwm_prepare_card_hw(sc)) != 0) {
6521 aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
6522 return error;
6523 }
6524
6525 if (sc->sc_flags & IWM_FLAG_ATTACHED)
6526 return 0;
6527
6528 if ((error = iwm_start_hw(sc)) != 0) {
6529 aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
6530 return error;
6531 }
6532
6533 error = iwm_run_init_mvm_ucode(sc, 1);
6534 iwm_stop_device(sc);
6535 return error;
6536 }
6537
/*
 * Deferred attach, run from config_mountroot() once the root file
 * system is available (the firmware file must be readable to learn the
 * MAC address).  Finishes 802.11 setup: capabilities, rate sets, ifnet
 * methods, state-machine override, radiotap and power management.
 */
static void
iwm_attach_hook(device_t dev)
{
	struct iwm_softc *sc = device_private(dev);
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &sc->sc_ec.ec_if;

	/* mountroot hooks never run during early autoconf. */
	KASSERT(!cold);

	/* Loads init firmware; on failure we stay half-attached. */
	if (iwm_preinit(sc) != 0)
		return;

	sc->sc_flags |= IWM_FLAG_ATTACHED;

	aprint_normal_dev(sc->sc_dev,
	    "hw rev: 0x%x, fw ver %d.%d (API ver %d), address %s\n",
	    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
	    IWM_UCODE_MAJOR(sc->sc_fwver),
	    IWM_UCODE_MINOR(sc->sc_fwver),
	    IWM_UCODE_API(sc->sc_fwver),
	    ether_sprintf(sc->sc_nvm.hw_addr));

	ic->ic_ifp = ifp;
	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
	ic->ic_state = IEEE80211_S_INIT;

	/* Set device capabilities. */
	ic->ic_caps =
	    IEEE80211_C_WEP |		/* WEP */
	    IEEE80211_C_WPA |		/* 802.11i */
	    IEEE80211_C_SHSLOT |	/* short slot time supported */
	    IEEE80211_C_SHPREAMBLE;	/* short preamble supported */

	/* not all hardware can do 5GHz band */
	if (sc->sc_nvm.sku_cap_band_52GHz_enable)
		ic->ic_sup_rates[IEEE80211_MODE_11A] = ieee80211_std_rateset_11a;
	ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b;
	ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g;

	/* PHY contexts are identified by their index in sc_phyctxt[]. */
	for (int i = 0; i < __arraycount(sc->sc_phyctxt); i++) {
		sc->sc_phyctxt[i].id = i;
	}

	/* AMRR rate-adaptation thresholds. */
	sc->sc_amrr.amrr_min_success_threshold = 1;
	sc->sc_amrr.amrr_max_success_threshold = 15;

	/* IBSS channel undefined for now. */
	ic->ic_ibss_chan = &ic->ic_channels[1];

#if 0
	/* Max RSSI */
	ic->ic_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
#endif

	/* Wire up the ifnet entry points. */
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = iwm_init;
	ifp->if_stop = iwm_stop;
	ifp->if_ioctl = iwm_ioctl;
	ifp->if_start = iwm_start;
	ifp->if_watchdog = iwm_watchdog;
	IFQ_SET_READY(&ifp->if_snd);
	memcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);

	/* Attach order matters: ifnet init, 802.11 attach, then register. */
	if_initialize(ifp);
	ieee80211_ifattach(ic);
	if_register(ifp);

	ic->ic_node_alloc = iwm_node_alloc;

	/* Override 802.11 state transition machine. */
	sc->sc_newstate = ic->ic_newstate;
	ic->ic_newstate = iwm_newstate;
	ieee80211_media_init(ic, iwm_media_change, ieee80211_media_status);
	ieee80211_announce(ic);

	iwm_radiotap_attach(sc);
	callout_init(&sc->sc_calib_to, 0);
	callout_setfunc(&sc->sc_calib_to, iwm_calib_timeout, sc);

	//task_set(&sc->init_task, iwm_init_task, sc);

	if (pmf_device_register(dev, NULL, NULL))
		pmf_class_network_register(dev, ifp);
	else
		aprint_error_dev(dev, "couldn't establish power handler\n");
}
6626
/*
 * Autoconf attach: map PCI resources, establish the interrupt, pick the
 * firmware image for the detected product, and allocate all DMA rings
 * and tables.  The 802.11 half of the attach is deferred to
 * iwm_attach_hook() via config_mountroot() because the firmware file
 * (needed for the MAC address) lives on the root file system.
 */
static void
iwm_attach(device_t parent, device_t self, void *aux)
{
	struct iwm_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
#ifndef __HAVE_PCI_MSI_MSIX
	pci_intr_handle_t ih;
#endif
	pcireg_t reg, memtype;
	const char *intrstr;
	int error;
	int txq_i;

	sc->sc_dev = self;
	sc->sc_pct = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;
	sc->sc_dmat = pa->pa_dmat;
	sc->sc_pciid = pa->pa_id;

	pci_aprint_devinfo(pa, NULL);

	/*
	 * Get the offset of the PCI Express Capability Structure in PCI
	 * Configuration Space.
	 */
	/* pci_get_capability() returns nonzero on success, so 0 == "missing". */
	error = pci_get_capability(sc->sc_pct, sc->sc_pcitag,
	    PCI_CAP_PCIEXPRESS, &sc->sc_cap_off, NULL);
	if (error == 0) {
		aprint_error_dev(self,
		    "PCIe capability structure not found!\n");
		return;
	}

	/* Clear device-specific "PCI retry timeout" register (41h). */
	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);

	/* Enable bus-mastering and hardware bug workaround. */
	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
	reg |= PCI_COMMAND_MASTER_ENABLE;
	/* if !MSI */
	if (reg & PCI_COMMAND_INTERRUPT_DISABLE) {
		reg &= ~PCI_COMMAND_INTERRUPT_DISABLE;
	}
	pci_conf_write(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, reg);

	/* Map the device registers (BAR 0). */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START);
	error = pci_mapreg_map(pa, PCI_MAPREG_START, memtype, 0,
	    &sc->sc_st, &sc->sc_sh, NULL, &sc->sc_sz);
	if (error != 0) {
		aprint_error_dev(self, "can't map mem space\n");
		return;
	}

	/* Install interrupt handler. */
#ifdef __HAVE_PCI_MSI_MSIX
	/* Prefer a single MSI; fall back to INTx. */
	error = ENODEV;
	if (pci_msi_count(pa) > 0)
		error = pci_msi_alloc_exact(pa, &sc->sc_pihp, 1);
	if (error != 0) {
		if (pci_intx_alloc(pa, &sc->sc_pihp)) {
			aprint_error_dev(self, "can't map interrupt\n");
			return;
		}
	}
#else	/* !__HAVE_PCI_MSI_MSIX */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(self, "can't map interrupt\n");
		return;
	}
#endif	/* __HAVE_PCI_MSI_MSIX */

	char intrbuf[PCI_INTRSTR_LEN];
#ifdef __HAVE_PCI_MSI_MSIX
	intrstr = pci_intr_string(sc->sc_pct, sc->sc_pihp[0], intrbuf,
	    sizeof(intrbuf));
	sc->sc_ih = pci_intr_establish(sc->sc_pct, sc->sc_pihp[0], IPL_NET,
	    iwm_intr, sc);
#else	/* !__HAVE_PCI_MSI_MSIX */
	intrstr = pci_intr_string(sc->sc_pct, ih, intrbuf, sizeof(intrbuf));
	sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET, iwm_intr, sc);
#endif	/* __HAVE_PCI_MSI_MSIX */
	if (sc->sc_ih == NULL) {
		aprint_error_dev(self, "can't establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(self, "interrupting at %s\n", intrstr);

	/* -1 == "no command response outstanding". */
	sc->sc_wantresp = -1;

	/* Select firmware image and interrupt quirk per product ID. */
	switch (PCI_PRODUCT(sc->sc_pciid)) {
	case PCI_PRODUCT_INTEL_WIFI_LINK_7260_1:
	case PCI_PRODUCT_INTEL_WIFI_LINK_7260_2:
		sc->sc_fwname = "iwlwifi-7260-9.ucode";
		sc->host_interrupt_operation_mode = 1;
		break;
	case PCI_PRODUCT_INTEL_WIFI_LINK_3160_1:
	case PCI_PRODUCT_INTEL_WIFI_LINK_3160_2:
		sc->sc_fwname = "iwlwifi-3160-9.ucode";
		sc->host_interrupt_operation_mode = 1;
		break;
	case PCI_PRODUCT_INTEL_WIFI_LINK_7265_1:
	case PCI_PRODUCT_INTEL_WIFI_LINK_7265_2:
		sc->sc_fwname = "iwlwifi-7265-9.ucode";
		sc->host_interrupt_operation_mode = 0;
		break;
	default:
		aprint_error_dev(self, "unknown product %#x",
		    PCI_PRODUCT(sc->sc_pciid));
		return;
	}
	DPRINTF(("%s: firmware=%s\n", DEVNAME(sc), sc->sc_fwname));
	sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;

	/*
	 * We now start fiddling with the hardware
	 */

	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
	if (iwm_prepare_card_hw(sc) != 0) {
		aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
		return;
	}

	/* Allocate DMA memory for firmware transfers. */
	if ((error = iwm_alloc_fwmem(sc)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate memory for firmware\n");
		return;
	}

	/* Allocate "Keep Warm" page. */
	if ((error = iwm_alloc_kw(sc)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate keep warm page\n");
		goto fail1;
	}

	/* We use ICT interrupts */
	if ((error = iwm_alloc_ict(sc)) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate ICT table\n");
		goto fail2;
	}

	/* Allocate TX scheduler "rings". */
	if ((error = iwm_alloc_sched(sc)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate TX scheduler rings\n");
		goto fail3;
	}

	/* Allocate TX rings */
	for (txq_i = 0; txq_i < __arraycount(sc->txq); txq_i++) {
		if ((error = iwm_alloc_tx_ring(sc,
		    &sc->txq[txq_i], txq_i)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate TX ring %d\n", txq_i);
			goto fail4;
		}
	}

	/* Allocate RX ring. */
	if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate RX ring\n");
		goto fail4;
	}

	/*
	 * NOTE(review): the return values of workqueue_create() are not
	 * checked; on failure sc_eswq/sc_nswq would be left NULL.  Verify
	 * the consumers tolerate that or add error handling here.
	 */
	workqueue_create(&sc->sc_eswq, "iwmes",
	    iwm_endscan_cb, sc, PRI_NONE, IPL_NET, 0);
	workqueue_create(&sc->sc_nswq, "iwmns",
	    iwm_newstate_cb, sc, PRI_NONE, IPL_NET, 0);

	/* Clear pending interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);

	/*
	 * We can't do normal attach before the file system is mounted
	 * because we cannot read the MAC address without loading the
	 * firmware from disk. So we postpone until mountroot is done.
	 * Notably, this will require a full driver unload/load cycle
	 * (or reboot) in case the firmware is not present when the
	 * hook runs.
	 */
	config_mountroot(self, iwm_attach_hook);

	return;

	/* Free allocated memory if something failed during attachment. */
fail4:	while (--txq_i >= 0)
		iwm_free_tx_ring(sc, &sc->txq[txq_i]);
	iwm_free_sched(sc);
fail3:	if (sc->ict_dma.vaddr != NULL)
		iwm_free_ict(sc);
fail2:	iwm_free_kw(sc);
fail1:	iwm_free_fwmem(sc);
}
6826
6827 /*
6828 * Attach the interface to 802.11 radiotap.
6829 */
6830 void
6831 iwm_radiotap_attach(struct iwm_softc *sc)
6832 {
6833 struct ifnet *ifp = sc->sc_ic.ic_ifp;
6834
6835 bpf_attach2(ifp, DLT_IEEE802_11_RADIO,
6836 sizeof (struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN,
6837 &sc->sc_drvbpf);
6838
6839 sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
6840 sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
6841 sc->sc_rxtap.wr_ihdr.it_present = htole32(IWM_RX_RADIOTAP_PRESENT);
6842
6843 sc->sc_txtap_len = sizeof sc->sc_txtapu;
6844 sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
6845 sc->sc_txtap.wt_ihdr.it_present = htole32(IWM_TX_RADIOTAP_PRESENT);
6846 }
6847
#if 0
/*
 * Currently compiled-out leftovers (OpenBSD heritage): a serialized
 * re-init task, a resume hook, and an autoconf activate handler.
 * Kept for reference only.
 */
static void
iwm_init_task(void *arg1)
{
	struct iwm_softc *sc = arg1;
	struct ifnet *ifp = &sc->sc_ic.ic_if;
	int s;

	s = splnet();
	/* Serialize with other holders of IWM_FLAG_BUSY. */
	while (sc->sc_flags & IWM_FLAG_BUSY)
		tsleep(&sc->sc_flags, 0, "iwmpwr", 0);
	sc->sc_flags |= IWM_FLAG_BUSY;

	iwm_stop(ifp, 0);
	/* Restart only if the interface is up but no longer running. */
	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP)
		iwm_init(ifp);

	sc->sc_flags &= ~IWM_FLAG_BUSY;
	wakeup(&sc->sc_flags);
	splx(s);
}

/* Resume-time helper: redo the PCI quirk and rerun the init task. */
static void
iwm_wakeup(struct iwm_softc *sc)
{
	pcireg_t reg;

	/* Clear device-specific "PCI retry timeout" register (41h). */
	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);

	iwm_init_task(sc);
}

/* Autoconf activate handler: stop the interface on deactivation. */
static int
iwm_activate(device_t self, enum devact act)
{
	struct iwm_softc *sc = device_private(self);
	struct ifnet *ifp = IC2IFP(&sc->sc_ic);

	switch (act) {
	case DVACT_DEACTIVATE:
		if (ifp->if_flags & IFF_RUNNING)
			iwm_stop(ifp, 0);
		return 0;
	default:
		return EOPNOTSUPP;
	}
}
#endif
6898
/* Autoconf glue: no detach or activate handlers are provided. */
CFATTACH_DECL_NEW(iwm, sizeof(struct iwm_softc), iwm_match, iwm_attach,
    NULL, NULL);
6901
#ifdef IWM_DEBUG
/*
 * Create the hw.iwm sysctl subtree with a single read/write "debug"
 * integer knob backed by iwm_debug, which gates the driver's DPRINTF
 * output.
 */
SYSCTL_SETUP(sysctl_iwm, "sysctl iwm(4) subtree setup")
{
	const struct sysctlnode *rnode, *cnode;
	int rc;

	/* hw.iwm parent node */
	if ((rc = sysctl_createv(clog, 0, NULL, &rnode,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "iwm",
	    SYSCTL_DESCR("iwm global controls"),
	    NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
		goto err;

	/* control debugging printfs */
	if ((rc = sysctl_createv(clog, 0, &rnode, &cnode,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT,
	    "debug", SYSCTL_DESCR("Enable debugging output"),
	    NULL, 0, &iwm_debug, 0, CTL_CREATE, CTL_EOL)) != 0)
		goto err;

	return;

err:
	aprint_error("%s: sysctl_createv failed (rc = %d)\n", __func__, rc);
}
#endif /* IWM_DEBUG */
6927