if_iwm.c revision 1.13 1 /* $NetBSD: if_iwm.c,v 1.13 2015/02/24 02:26:15 nonaka Exp $ */
2 /* OpenBSD: if_iwm.c,v 1.18 2015/02/11 01:12:42 brad Exp */
3
4 /*
5 * Copyright (c) 2014 genua mbh <info (at) genua.de>
6 * Copyright (c) 2014 Fixup Software Ltd.
7 *
8 * Permission to use, copy, modify, and distribute this software for any
9 * purpose with or without fee is hereby granted, provided that the above
10 * copyright notice and this permission notice appear in all copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 */
20
21 /*-
22 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
23 * which were used as the reference documentation for this implementation.
24 *
25 * Driver version we are currently based off of is
26 * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
27 *
28 ***********************************************************************
29 *
30 * This file is provided under a dual BSD/GPLv2 license. When using or
31 * redistributing this file, you may do so under either license.
32 *
33 * GPL LICENSE SUMMARY
34 *
35 * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
36 *
37 * This program is free software; you can redistribute it and/or modify
38 * it under the terms of version 2 of the GNU General Public License as
39 * published by the Free Software Foundation.
40 *
41 * This program is distributed in the hope that it will be useful, but
42 * WITHOUT ANY WARRANTY; without even the implied warranty of
43 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
44 * General Public License for more details.
45 *
46 * You should have received a copy of the GNU General Public License
47 * along with this program; if not, write to the Free Software
48 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
49 * USA
50 *
51 * The full GNU General Public License is included in this distribution
52 * in the file called COPYING.
53 *
54 * Contact Information:
55 * Intel Linux Wireless <ilw (at) linux.intel.com>
56 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
57 *
58 *
59 * BSD LICENSE
60 *
61 * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
62 * All rights reserved.
63 *
64 * Redistribution and use in source and binary forms, with or without
65 * modification, are permitted provided that the following conditions
66 * are met:
67 *
68 * * Redistributions of source code must retain the above copyright
69 * notice, this list of conditions and the following disclaimer.
70 * * Redistributions in binary form must reproduce the above copyright
71 * notice, this list of conditions and the following disclaimer in
72 * the documentation and/or other materials provided with the
73 * distribution.
74 * * Neither the name Intel Corporation nor the names of its
75 * contributors may be used to endorse or promote products derived
76 * from this software without specific prior written permission.
77 *
78 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
79 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
80 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
81 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
82 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
83 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
84 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
85 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
86 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
87 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
88 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
89 */
90
91 /*-
92 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini (at) free.fr>
93 *
94 * Permission to use, copy, modify, and distribute this software for any
95 * purpose with or without fee is hereby granted, provided that the above
96 * copyright notice and this permission notice appear in all copies.
97 *
98 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
99 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
100 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
101 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
102 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
103 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
104 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
105 */
106
107 #include <sys/cdefs.h>
108 __KERNEL_RCSID(0, "$NetBSD: if_iwm.c,v 1.13 2015/02/24 02:26:15 nonaka Exp $");
109
110 #include <sys/param.h>
111 #include <sys/conf.h>
112 #include <sys/kernel.h>
113 #include <sys/kmem.h>
114 #include <sys/mbuf.h>
115 #include <sys/mutex.h>
116 #include <sys/proc.h>
117 #include <sys/socket.h>
118 #include <sys/sockio.h>
119 #include <sys/systm.h>
120
121 #include <sys/cpu.h>
122 #include <sys/bus.h>
123 #include <sys/workqueue.h>
124 #include <machine/endian.h>
125 #include <machine/intr.h>
126
127 #include <dev/pci/pcireg.h>
128 #include <dev/pci/pcivar.h>
129 #include <dev/pci/pcidevs.h>
130 #include <dev/firmload.h>
131
132 #include <net/bpf.h>
133 #include <net/if.h>
134 #include <net/if_arp.h>
135 #include <net/if_dl.h>
136 #include <net/if_media.h>
137 #include <net/if_types.h>
138 #include <net/if_ether.h>
139
140 #include <netinet/in.h>
141 #include <netinet/in_systm.h>
142 #include <netinet/ip.h>
143
144 #include <net80211/ieee80211_var.h>
145 #include <net80211/ieee80211_amrr.h>
146 #include <net80211/ieee80211_radiotap.h>
147
148 #define DEVNAME(_s) device_xname((_s)->sc_dev)
149 #define IC2IFP(_ic_) ((_ic_)->ic_ifp)
150
151 #define le16_to_cpup(_a_) (le16toh(*(const uint16_t *)(_a_)))
152 #define le32_to_cpup(_a_) (le32toh(*(const uint32_t *)(_a_)))
153
154 #ifdef IWM_DEBUG
155 #define DPRINTF(x) do { if (iwm_debug > 0) printf x; } while (0)
156 #define DPRINTFN(n, x) do { if (iwm_debug >= (n)) printf x; } while (0)
157 int iwm_debug = 1;
158 #else
159 #define DPRINTF(x) do { ; } while (0)
160 #define DPRINTFN(n, x) do { ; } while (0)
161 #endif
162
163 #include <dev/pci/if_iwmreg.h>
164 #include <dev/pci/if_iwmvar.h>
165
/*
 * Channel numbers corresponding to the entries of the device NVM
 * channel table; the first IWM_NUM_2GHZ_CHANNELS entries are 2.4 GHz,
 * the remainder 5 GHz.  Used by iwm_init_channel_map().
 */
static const uint8_t iwm_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44 , 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};
/* Count of 2.4 GHz entries at the start of iwm_nvm_channels[]. */
#define IWM_NUM_2GHZ_CHANNELS	14

/* It looks like 11a TX is broken, unfortunately. */
#define IWM_NO_5GHZ		1
178
/*
 * Map from net80211 rate (in units of 0.5 Mb/s) to the PLCP value
 * the firmware expects.  Entries [0..3] are the CCK (11b) rates,
 * entries [4..] the OFDM (11a/g) rates -- see the IWM_RIDX_* macros.
 */
static const struct iwm_rate {
	uint8_t rate;	/* rate in 0.5 Mb/s units */
	uint8_t plcp;	/* PLCP code passed to the firmware */
} iwm_rates[] = {
	{ 2, IWM_RATE_1M_PLCP },
	{ 4, IWM_RATE_2M_PLCP },
	{ 11, IWM_RATE_5M_PLCP },
	{ 22, IWM_RATE_11M_PLCP },
	{ 12, IWM_RATE_6M_PLCP },
	{ 18, IWM_RATE_9M_PLCP },
	{ 24, IWM_RATE_12M_PLCP },
	{ 36, IWM_RATE_18M_PLCP },
	{ 48, IWM_RATE_24M_PLCP },
	{ 72, IWM_RATE_36M_PLCP },
	{ 96, IWM_RATE_48M_PLCP },
	{ 108, IWM_RATE_54M_PLCP },
};
/* Indices into iwm_rates[]: CCK rates start at 0, OFDM rates at 4. */
#define IWM_RIDX_CCK	0
#define IWM_RIDX_OFDM	4
#define IWM_RIDX_MAX	(__arraycount(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
201
/*
 * Deferred 802.11 state-transition request, queued on a workqueue
 * (presumably consumed by iwm_newstate_cb() -- see its declaration
 * below).
 */
struct iwm_newstate_state {
	struct work ns_wk;		/* workqueue linkage */
	enum ieee80211_state ns_nstate;	/* requested new 802.11 state */
	int ns_arg;			/* argument for the transition */
	int ns_generation;		/* NOTE(review): generation counter,
					 * presumably used to drop stale
					 * requests -- confirm at the consumer */
};
208
209 static int iwm_store_cscheme(struct iwm_softc *, uint8_t *, size_t);
210 static int iwm_firmware_store_section(struct iwm_softc *,
211 enum iwm_ucode_type, uint8_t *, size_t);
212 static int iwm_set_default_calib(struct iwm_softc *, const void *);
213 static int iwm_read_firmware(struct iwm_softc *);
214 static uint32_t iwm_read_prph(struct iwm_softc *, uint32_t);
215 static void iwm_write_prph(struct iwm_softc *, uint32_t, uint32_t);
216 #ifdef IWM_DEBUG
217 static int iwm_read_mem(struct iwm_softc *, uint32_t, void *, int);
218 #endif
219 static int iwm_write_mem(struct iwm_softc *, uint32_t, const void *, int);
220 static int iwm_write_mem32(struct iwm_softc *, uint32_t, uint32_t);
221 static int iwm_poll_bit(struct iwm_softc *, int, uint32_t, uint32_t, int);
222 static int iwm_nic_lock(struct iwm_softc *);
223 static void iwm_nic_unlock(struct iwm_softc *);
224 static void iwm_set_bits_mask_prph(struct iwm_softc *, uint32_t, uint32_t,
225 uint32_t);
226 static void iwm_set_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
227 static void iwm_clear_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
228 static int iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *,
229 bus_size_t, bus_size_t);
230 static void iwm_dma_contig_free(struct iwm_dma_info *);
231 static int iwm_alloc_fwmem(struct iwm_softc *);
232 static void iwm_free_fwmem(struct iwm_softc *);
233 static int iwm_alloc_sched(struct iwm_softc *);
234 static void iwm_free_sched(struct iwm_softc *);
235 static int iwm_alloc_kw(struct iwm_softc *);
236 static void iwm_free_kw(struct iwm_softc *);
237 static int iwm_alloc_ict(struct iwm_softc *);
238 static void iwm_free_ict(struct iwm_softc *);
239 static int iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
240 static void iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
241 static void iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
242 static int iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
243 int);
244 static void iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
245 static void iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
246 static void iwm_enable_rfkill_int(struct iwm_softc *);
247 static int iwm_check_rfkill(struct iwm_softc *);
248 static void iwm_enable_interrupts(struct iwm_softc *);
249 static void iwm_restore_interrupts(struct iwm_softc *);
250 static void iwm_disable_interrupts(struct iwm_softc *);
251 static void iwm_ict_reset(struct iwm_softc *);
252 static int iwm_set_hw_ready(struct iwm_softc *);
253 static int iwm_prepare_card_hw(struct iwm_softc *);
254 static void iwm_apm_config(struct iwm_softc *);
255 static int iwm_apm_init(struct iwm_softc *);
256 static void iwm_apm_stop(struct iwm_softc *);
257 static int iwm_allow_mcast(struct iwm_softc *);
258 static int iwm_start_hw(struct iwm_softc *);
259 static void iwm_stop_device(struct iwm_softc *);
260 static void iwm_set_pwr(struct iwm_softc *);
261 static void iwm_mvm_nic_config(struct iwm_softc *);
262 static int iwm_nic_rx_init(struct iwm_softc *);
263 static int iwm_nic_tx_init(struct iwm_softc *);
264 static int iwm_nic_init(struct iwm_softc *);
265 static void iwm_enable_txq(struct iwm_softc *, int, int);
266 static int iwm_post_alive(struct iwm_softc *);
267 static int iwm_is_valid_channel(uint16_t);
268 static uint8_t iwm_ch_id_to_ch_index(uint16_t);
269 static uint16_t iwm_channel_id_to_papd(uint16_t);
270 static uint16_t iwm_channel_id_to_txp(struct iwm_softc *, uint16_t);
271 static int iwm_phy_db_get_section_data(struct iwm_softc *, uint32_t,
272 uint8_t **, uint16_t *, uint16_t);
273 static int iwm_send_phy_db_cmd(struct iwm_softc *, uint16_t, uint16_t,
274 void *);
/* (duplicate declaration of iwm_send_phy_db_data removed) */
static int	iwm_send_phy_db_data(struct iwm_softc *);
277 static void iwm_mvm_te_v2_to_v1(const struct iwm_time_event_cmd_v2 *,
278 struct iwm_time_event_cmd_v1 *);
279 static int iwm_mvm_send_time_event_cmd(struct iwm_softc *,
280 const struct iwm_time_event_cmd_v2 *);
281 static int iwm_mvm_time_event_send_add(struct iwm_softc *,
282 struct iwm_node *, void *, struct iwm_time_event_cmd_v2 *);
283 static void iwm_mvm_protect_session(struct iwm_softc *, struct iwm_node *,
284 uint32_t, uint32_t, uint32_t);
285 static int iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
286 uint16_t, uint8_t *, uint16_t *);
287 static int iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
288 uint16_t *);
289 static void iwm_init_channel_map(struct iwm_softc *,
290 const uint16_t * const);
291 static int iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
292 const uint16_t *, const uint16_t *, uint8_t, uint8_t);
293 static int iwm_nvm_init(struct iwm_softc *);
294 static int iwm_firmware_load_chunk(struct iwm_softc *, uint32_t,
295 const uint8_t *, uint32_t);
296 static int iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
297 static int iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
298 static int iwm_fw_alive(struct iwm_softc *, uint32_t);
299 static int iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
300 static int iwm_send_phy_cfg_cmd(struct iwm_softc *);
301 static int iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
302 enum iwm_ucode_type);
303 static int iwm_run_init_mvm_ucode(struct iwm_softc *, int);
304 static int iwm_rx_addbuf(struct iwm_softc *, int, int);
305 static int iwm_mvm_calc_rssi(struct iwm_softc *, struct iwm_rx_phy_info *);
306 static int iwm_mvm_get_signal_strength(struct iwm_softc *,
307 struct iwm_rx_phy_info *);
308 static void iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
309 struct iwm_rx_packet *, struct iwm_rx_data *);
310 static int iwm_get_noise(const struct iwm_mvm_statistics_rx_non_phy *);
311 static void iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct iwm_rx_packet *,
312 struct iwm_rx_data *);
313 static void iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
314 struct iwm_rx_packet *, struct iwm_node *);
315 static void iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
316 struct iwm_rx_data *);
317 static int iwm_mvm_binding_cmd(struct iwm_softc *, struct iwm_node *,
318 uint32_t);
319 static int iwm_mvm_binding_update(struct iwm_softc *, struct iwm_node *,
320 int);
321 static int iwm_mvm_binding_add_vif(struct iwm_softc *, struct iwm_node *);
322 static void iwm_mvm_phy_ctxt_cmd_hdr(struct iwm_softc *,
323 struct iwm_mvm_phy_ctxt *, struct iwm_phy_context_cmd *,
324 uint32_t, uint32_t);
325 static void iwm_mvm_phy_ctxt_cmd_data(struct iwm_softc *,
326 struct iwm_phy_context_cmd *, struct ieee80211_channel *,
327 uint8_t, uint8_t);
328 static int iwm_mvm_phy_ctxt_apply(struct iwm_softc *,
329 struct iwm_mvm_phy_ctxt *, uint8_t, uint8_t, uint32_t,
330 uint32_t);
331 static int iwm_mvm_phy_ctxt_add(struct iwm_softc *,
332 struct iwm_mvm_phy_ctxt *, struct ieee80211_channel *,
333 uint8_t, uint8_t);
334 static int iwm_mvm_phy_ctxt_changed(struct iwm_softc *,
335 struct iwm_mvm_phy_ctxt *, struct ieee80211_channel *,
336 uint8_t, uint8_t);
337 static int iwm_send_cmd(struct iwm_softc *, struct iwm_host_cmd *);
338 static int iwm_mvm_send_cmd_pdu(struct iwm_softc *, uint8_t, uint32_t,
339 uint16_t, const void *);
340 static int iwm_mvm_send_cmd_status(struct iwm_softc *,
341 struct iwm_host_cmd *, uint32_t *);
342 static int iwm_mvm_send_cmd_pdu_status(struct iwm_softc *, uint8_t,
343 uint16_t, const void *, uint32_t *);
344 static void iwm_free_resp(struct iwm_softc *, struct iwm_host_cmd *);
345 static void iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
346 #if 0
347 static void iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
348 uint16_t);
349 #endif
350 static const struct iwm_rate *iwm_tx_fill_cmd(struct iwm_softc *,
351 struct iwm_node *, struct ieee80211_frame *,
352 struct iwm_tx_cmd *);
353 static int iwm_tx(struct iwm_softc *, struct mbuf *,
354 struct ieee80211_node *, int);
355 static int iwm_mvm_beacon_filter_send_cmd(struct iwm_softc *,
356 struct iwm_beacon_filter_cmd *);
357 static void iwm_mvm_beacon_filter_set_cqm_params(struct iwm_softc *,
358 struct iwm_node *, struct iwm_beacon_filter_cmd *);
359 static int iwm_mvm_update_beacon_abort(struct iwm_softc *,
360 struct iwm_node *, int);
361 static void iwm_mvm_power_log(struct iwm_softc *,
362 struct iwm_mac_power_cmd *);
363 static void iwm_mvm_power_build_cmd(struct iwm_softc *, struct iwm_node *,
364 struct iwm_mac_power_cmd *);
365 static int iwm_mvm_power_mac_update_mode(struct iwm_softc *,
366 struct iwm_node *);
367 static int iwm_mvm_power_update_device(struct iwm_softc *);
368 static int iwm_mvm_enable_beacon_filter(struct iwm_softc *,
369 struct iwm_node *);
370 static int iwm_mvm_disable_beacon_filter(struct iwm_softc *,
371 struct iwm_node *);
372 static void iwm_mvm_add_sta_cmd_v6_to_v5(struct iwm_mvm_add_sta_cmd_v6 *,
373 struct iwm_mvm_add_sta_cmd_v5 *);
374 static int iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *,
375 struct iwm_mvm_add_sta_cmd_v6 *, int *);
376 static int iwm_mvm_sta_send_to_fw(struct iwm_softc *, struct iwm_node *,
377 int);
378 static int iwm_mvm_add_sta(struct iwm_softc *, struct iwm_node *);
379 static int iwm_mvm_update_sta(struct iwm_softc *, struct iwm_node *);
380 static int iwm_mvm_add_int_sta_common(struct iwm_softc *,
381 struct iwm_int_sta *, const uint8_t *, uint16_t, uint16_t);
382 static int iwm_mvm_add_aux_sta(struct iwm_softc *);
383 static uint16_t iwm_mvm_scan_rx_chain(struct iwm_softc *);
384 static uint32_t iwm_mvm_scan_max_out_time(struct iwm_softc *, uint32_t, int);
385 static uint32_t iwm_mvm_scan_suspend_time(struct iwm_softc *, int);
386 static uint32_t iwm_mvm_scan_rxon_flags(struct iwm_softc *, int);
387 static uint32_t iwm_mvm_scan_rate_n_flags(struct iwm_softc *, int, int);
388 static uint16_t iwm_mvm_get_active_dwell(struct iwm_softc *, int, int);
389 static uint16_t iwm_mvm_get_passive_dwell(struct iwm_softc *, int);
390 static int iwm_mvm_scan_fill_channels(struct iwm_softc *,
391 struct iwm_scan_cmd *, int, int, int);
392 static uint16_t iwm_mvm_fill_probe_req(struct iwm_softc *,
393 struct ieee80211_frame *, const uint8_t *, int,
394 const uint8_t *, int, const uint8_t *, int, int);
395 static int iwm_mvm_scan_request(struct iwm_softc *, int, int, uint8_t *,
396 int);
397 static void iwm_mvm_ack_rates(struct iwm_softc *, struct iwm_node *, int *,
398 int *);
399 static void iwm_mvm_mac_ctxt_cmd_common(struct iwm_softc *,
400 struct iwm_node *, struct iwm_mac_ctx_cmd *, uint32_t);
401 static int iwm_mvm_mac_ctxt_send_cmd(struct iwm_softc *,
402 struct iwm_mac_ctx_cmd *);
403 static void iwm_mvm_mac_ctxt_cmd_fill_sta(struct iwm_softc *,
404 struct iwm_node *, struct iwm_mac_data_sta *, int);
405 static int iwm_mvm_mac_ctxt_cmd_station(struct iwm_softc *,
406 struct iwm_node *, uint32_t);
407 static int iwm_mvm_mac_ctx_send(struct iwm_softc *, struct iwm_node *,
408 uint32_t);
409 static int iwm_mvm_mac_ctxt_add(struct iwm_softc *, struct iwm_node *);
410 static int iwm_mvm_mac_ctxt_changed(struct iwm_softc *, struct iwm_node *);
411 static int iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_node *);
412 static int iwm_auth(struct iwm_softc *);
413 static int iwm_assoc(struct iwm_softc *);
414 static int iwm_release(struct iwm_softc *, struct iwm_node *);
415 static void iwm_calib_timeout(void *);
416 static void iwm_setrates(struct iwm_node *);
417 static int iwm_media_change(struct ifnet *);
418 static void iwm_newstate_cb(struct work *, void *);
419 static int iwm_newstate(struct ieee80211com *, enum ieee80211_state, int);
420 static void iwm_endscan_cb(struct work *, void *);
421 static int iwm_init_hw(struct iwm_softc *);
422 static int iwm_init(struct ifnet *);
423 static void iwm_start(struct ifnet *);
424 static void iwm_stop(struct ifnet *, int);
425 static void iwm_watchdog(struct ifnet *);
426 static int iwm_ioctl(struct ifnet *, u_long, void *);
427 #ifdef IWM_DEBUG
428 static const char *iwm_desc_lookup(uint32_t);
429 static void iwm_nic_error(struct iwm_softc *);
430 #endif
431 static void iwm_notif_intr(struct iwm_softc *);
432 static int iwm_intr(void *);
433 static int iwm_preinit(struct iwm_softc *);
434 static void iwm_attach_hook(device_t);
435 static void iwm_attach(device_t, device_t, void *);
436 #if 0
437 static void iwm_init_task(void *);
438 static int iwm_activate(device_t, enum devact);
439 static void iwm_wakeup(struct iwm_softc *);
440 #endif
441 static void iwm_radiotap_attach(struct iwm_softc *);
442
443 static int
444 iwm_firmload(struct iwm_softc *sc)
445 {
446 struct iwm_fw_info *fw = &sc->sc_fw;
447 firmware_handle_t fwh;
448 int error;
449
450 /* Open firmware image. */
451 if ((error = firmware_open("if_iwm", sc->sc_fwname, &fwh)) != 0) {
452 aprint_error_dev(sc->sc_dev,
453 "could not get firmware handle %s\n", sc->sc_fwname);
454 return error;
455 }
456
457 fw->fw_rawsize = firmware_get_size(fwh);
458 /*
459 * Well, this is how the Linux driver checks it ....
460 */
461 if (fw->fw_rawsize < sizeof(uint32_t)) {
462 aprint_error_dev(sc->sc_dev,
463 "firmware too short: %zd bytes\n", fw->fw_rawsize);
464 error = EINVAL;
465 goto out;
466 }
467
468 /* some sanity */
469 if (fw->fw_rawsize > IWM_FWMAXSIZE) {
470 aprint_error_dev(sc->sc_dev,
471 "firmware size is ridiculous: %zd bytes\n",
472 fw->fw_rawsize);
473 error = EINVAL;
474 goto out;
475 }
476
477 /* Read the firmware. */
478 fw->fw_rawdata = kmem_alloc(fw->fw_rawsize, KM_SLEEP);
479 if (fw->fw_rawdata == NULL) {
480 aprint_error_dev(sc->sc_dev,
481 "not enough memory to stock firmware %s\n", sc->sc_fwname);
482 error = ENOMEM;
483 goto out;
484 }
485 error = firmware_read(fwh, 0, fw->fw_rawdata, fw->fw_rawsize);
486 if (error) {
487 aprint_error_dev(sc->sc_dev,
488 "could not read firmware %s\n", sc->sc_fwname);
489 goto out;
490 }
491
492 out:
493 /* caller will release memory, if necessary */
494
495 firmware_close(fwh);
496 return error;
497 }
498
499 /*
500 * just maintaining status quo.
501 */
502 static void
503 iwm_fix_channel(struct ieee80211com *ic, struct mbuf *m)
504 {
505 struct ieee80211_frame *wh;
506 uint8_t subtype;
507 uint8_t *frm, *efrm;
508
509 wh = mtod(m, struct ieee80211_frame *);
510
511 if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_MGT)
512 return;
513
514 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
515
516 if (subtype != IEEE80211_FC0_SUBTYPE_BEACON &&
517 subtype != IEEE80211_FC0_SUBTYPE_PROBE_RESP)
518 return;
519
520 frm = (uint8_t *)(wh + 1);
521 efrm = mtod(m, uint8_t *) + m->m_len;
522
523 frm += 12; /* skip tstamp, bintval and capinfo fields */
524 while (frm < efrm) {
525 if (*frm == IEEE80211_ELEMID_DSPARMS) {
526 #if IEEE80211_CHAN_MAX < 255
527 if (frm[2] <= IEEE80211_CHAN_MAX)
528 #endif
529 ic->ic_curchan = &ic->ic_channels[frm[2]];
530 }
531 frm += frm[1] + 2;
532 }
533 }
534
535 /*
536 * Firmware parser.
537 */
538
539 static int
540 iwm_store_cscheme(struct iwm_softc *sc, uint8_t *data, size_t dlen)
541 {
542 struct iwm_fw_cscheme_list *l = (void *)data;
543
544 if (dlen < sizeof(*l) ||
545 dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
546 return EINVAL;
547
548 /* we don't actually store anything for now, always use s/w crypto */
549
550 return 0;
551 }
552
553 static int
554 iwm_firmware_store_section(struct iwm_softc *sc,
555 enum iwm_ucode_type type, uint8_t *data, size_t dlen)
556 {
557 struct iwm_fw_sects *fws;
558 struct iwm_fw_onesect *fwone;
559
560 if (type >= IWM_UCODE_TYPE_MAX)
561 return EINVAL;
562 if (dlen < sizeof(uint32_t))
563 return EINVAL;
564
565 fws = &sc->sc_fw.fw_sects[type];
566 if (fws->fw_count >= IWM_UCODE_SECT_MAX)
567 return EINVAL;
568
569 fwone = &fws->fw_sect[fws->fw_count];
570
571 /* first 32bit are device load offset */
572 memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));
573
574 /* rest is data */
575 fwone->fws_data = data + sizeof(uint32_t);
576 fwone->fws_len = dlen - sizeof(uint32_t);
577
578 /* for freeing the buffer during driver unload */
579 fwone->fws_alloc = data;
580 fwone->fws_allocsize = dlen;
581
582 fws->fw_count++;
583 fws->fw_totlen += fwone->fws_len;
584
585 return 0;
586 }
587
/* iwlwifi: iwl-drv.c */
/*
 * On-the-wire layout of an IWM_UCODE_TLV_DEF_CALIB section:
 * the ucode type the defaults apply to, followed by the default
 * calibration control words (little-endian, as read from firmware).
 */
struct iwm_tlv_calib_data {
	uint32_t ucode_type;		/* enum iwm_ucode_type, LE32 */
	struct iwm_tlv_calib_ctrl calib; /* default calib triggers */
} __packed;
593
594 static int
595 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
596 {
597 const struct iwm_tlv_calib_data *def_calib = data;
598 uint32_t ucode_type = le32toh(def_calib->ucode_type);
599
600 if (ucode_type >= IWM_UCODE_TYPE_MAX) {
601 DPRINTF(("%s: Wrong ucode_type %u for default "
602 "calibration.\n", DEVNAME(sc), ucode_type));
603 return EINVAL;
604 }
605
606 sc->sc_default_calib[ucode_type].flow_trigger =
607 def_calib->calib.flow_trigger;
608 sc->sc_default_calib[ucode_type].event_trigger =
609 def_calib->calib.event_trigger;
610
611 return 0;
612 }
613
/*
 * Load the firmware image into memory (via iwm_firmload()) and parse
 * its TLV sections into sc->sc_fw.
 *
 * Only one thread performs the load: a caller that finds the load
 * IN PROGRESS sleeps on &sc->sc_fw until the first finishes.  On
 * success fw_status becomes IWM_FW_STATUS_DONE and subsequent calls
 * return 0 immediately; on failure fw_status reverts to
 * IWM_FW_STATUS_NONE and the raw image is released.
 *
 * Returns 0 on success, an errno on failure.
 */
static int
iwm_read_firmware(struct iwm_softc *sc)
{
	struct iwm_fw_info *fw = &sc->sc_fw;
	struct iwm_tlv_ucode_header *uhdr;
	struct iwm_ucode_tlv tlv;
	enum iwm_ucode_tlv_type tlv_type;
	uint8_t *data;
	int error, status;
	size_t len;

	/* Single-flight: first caller loads, the rest wait for the result. */
	if (fw->fw_status == IWM_FW_STATUS_NONE) {
		fw->fw_status = IWM_FW_STATUS_INPROGRESS;
	} else {
		while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
			tsleep(&sc->sc_fw, 0, "iwmfwp", 0);
	}
	status = fw->fw_status;

	if (status == IWM_FW_STATUS_DONE)
		return 0;

	/*
	 * Load firmware into driver memory.
	 * fw_rawdata and fw_rawsize will be set.
	 */
	error = iwm_firmload(sc);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not read firmware %s (error %d)\n",
		    sc->sc_fwname, error);
		goto out;
	}

	/*
	 * Parse firmware contents
	 */

	/* TLV images start with a zero word followed by the magic. */
	uhdr = (void *)fw->fw_rawdata;
	if (*(uint32_t *)fw->fw_rawdata != 0
	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
		aprint_error_dev(sc->sc_dev, "invalid firmware %s\n",
		    sc->sc_fwname);
		error = EINVAL;
		goto out;
	}

	sc->sc_fwver = le32toh(uhdr->ver);
	data = uhdr->data;
	len = fw->fw_rawsize - sizeof(*uhdr);

	/*
	 * Walk the TLV records following the header.  Each record is a
	 * (type, length) header plus "length" bytes of payload, padded
	 * to a 4-byte boundary.
	 */
	while (len >= sizeof(tlv)) {
		size_t tlv_len;
		void *tlv_data;

		memcpy(&tlv, data, sizeof(tlv));
		tlv_len = le32toh(tlv.length);
		tlv_type = le32toh(tlv.type);

		len -= sizeof(tlv);
		data += sizeof(tlv);
		tlv_data = data;

		/* Payload must fit in what remains of the image. */
		if (len < tlv_len) {
			aprint_error_dev(sc->sc_dev,
			    "firmware too short: %zu bytes\n", len);
			error = EINVAL;
			goto parse_out;
		}

		switch ((int)tlv_type) {
		case IWM_UCODE_TLV_PROBE_MAX_LEN:
			if (tlv_len < sizeof(uint32_t)) {
				error = EINVAL;
				goto parse_out;
			}
			sc->sc_capa_max_probe_len
			    = le32toh(*(uint32_t *)tlv_data);
			/* limit it to something sensible */
			if (sc->sc_capa_max_probe_len > (1<<16)) {
				DPRINTF(("%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
				    "ridiculous\n", DEVNAME(sc)));
				error = EINVAL;
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_PAN:
			/* PAN is a flag-only TLV: any payload is invalid. */
			if (tlv_len) {
				error = EINVAL;
				goto parse_out;
			}
			sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
			break;
		case IWM_UCODE_TLV_FLAGS:
			if (tlv_len < sizeof(uint32_t)) {
				error = EINVAL;
				goto parse_out;
			}
			/*
			 * Apparently there can be many flags, but Linux driver
			 * parses only the first one, and so do we.
			 *
			 * XXX: why does this override IWM_UCODE_TLV_PAN?
			 * Intentional or a bug?  Observations from
			 * current firmware file:
			 *  1) TLV_PAN is parsed first
			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
			 * ==> this resets TLV_PAN to itself... hnnnk
			 */
			sc->sc_capaflags = le32toh(*(uint32_t *)tlv_data);
			break;
		case IWM_UCODE_TLV_CSCHEME:
			if ((error = iwm_store_cscheme(sc,
			    tlv_data, tlv_len)) != 0)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_NUM_OF_CPU:
			if (tlv_len != sizeof(uint32_t)) {
				error = EINVAL;
				goto parse_out;
			}
			if (le32toh(*(uint32_t*)tlv_data) != 1) {
				DPRINTF(("%s: driver supports "
				    "only TLV_NUM_OF_CPU == 1", DEVNAME(sc)));
				error = EINVAL;
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_SEC_RT:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len)) != 0)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_SEC_INIT:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_INIT, tlv_data, tlv_len)) != 0)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_SEC_WOWLAN:
			if ((error = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_WOW, tlv_data, tlv_len)) != 0)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_DEF_CALIB:
			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
				error = EINVAL;
				goto parse_out;
			}
			if ((error = iwm_set_default_calib(sc, tlv_data)) != 0)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_PHY_SKU:
			if (tlv_len != sizeof(uint32_t)) {
				error = EINVAL;
				goto parse_out;
			}
			sc->sc_fw_phy_config = le32toh(*(uint32_t *)tlv_data);
			break;

		case IWM_UCODE_TLV_API_CHANGES_SET:
		case IWM_UCODE_TLV_ENABLED_CAPABILITIES:
			/* ignore, not used by current driver */
			break;

		default:
			DPRINTF(("%s: unknown firmware section %d, abort\n",
			    DEVNAME(sc), tlv_type));
			error = EINVAL;
			goto parse_out;
		}

		/* TLV payloads are padded to 4-byte alignment. */
		len -= roundup(tlv_len, 4);
		data += roundup(tlv_len, 4);
	}

	/* Falling out of the loop means every TLV parsed cleanly. */
	KASSERT(error == 0);

 parse_out:
	if (error) {
		aprint_error_dev(sc->sc_dev,
		    "firmware parse error, section type %d\n", tlv_type);
	}

	/* Driver requires the new-style power management command set. */
	if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
		aprint_error_dev(sc->sc_dev,
		    "device uses unsupported power ops\n");
		error = ENOTSUP;
	}

 out:
	if (error)
		fw->fw_status = IWM_FW_STATUS_NONE;
	else
		fw->fw_status = IWM_FW_STATUS_DONE;
	/* Release any threads that were waiting in the INPROGRESS loop. */
	wakeup(&sc->sc_fw);

	if (error) {
		kmem_free(fw->fw_rawdata, fw->fw_rawsize);
		fw->fw_rawdata = NULL;
	}
	return error;
}
816
817 /*
818 * basic device access
819 */
820
/*
 * Read a periphery (PRPH) register: latch the target address through
 * IWM_HBUS_TARG_PRPH_RADDR (low 20 bits, plus the (3 << 24) access
 * code used by the Linux iwlwifi PRPH sequence), then read the value
 * from IWM_HBUS_TARG_PRPH_RDAT.  Callers in this file hold the NIC
 * lock (see iwm_set_bits_mask_prph()).
 */
static uint32_t
iwm_read_prph(struct iwm_softc *sc, uint32_t addr)
{
	IWM_WRITE(sc,
	    IWM_HBUS_TARG_PRPH_RADDR, ((addr & 0x000fffff) | (3 << 24)));
	IWM_BARRIER_READ_WRITE(sc);
	return IWM_READ(sc, IWM_HBUS_TARG_PRPH_RDAT);
}
829
/*
 * Write a periphery (PRPH) register: latch the target address through
 * IWM_HBUS_TARG_PRPH_WADDR, then write the value to
 * IWM_HBUS_TARG_PRPH_WDAT.  Counterpart of iwm_read_prph(); callers
 * hold the NIC lock.
 */
static void
iwm_write_prph(struct iwm_softc *sc, uint32_t addr, uint32_t val)
{
	IWM_WRITE(sc,
	    IWM_HBUS_TARG_PRPH_WADDR, ((addr & 0x000fffff) | (3 << 24)));
	IWM_BARRIER_WRITE(sc);
	IWM_WRITE(sc, IWM_HBUS_TARG_PRPH_WDAT, val);
}
838
839 #ifdef IWM_DEBUG
840 /* iwlwifi: pcie/trans.c */
841 static int
842 iwm_read_mem(struct iwm_softc *sc, uint32_t addr, void *buf, int dwords)
843 {
844 int offs, ret = 0;
845 uint32_t *vals = buf;
846
847 if (iwm_nic_lock(sc)) {
848 IWM_WRITE(sc, IWM_HBUS_TARG_MEM_RADDR, addr);
849 for (offs = 0; offs < dwords; offs++)
850 vals[offs] = IWM_READ(sc, IWM_HBUS_TARG_MEM_RDAT);
851 iwm_nic_unlock(sc);
852 } else {
853 ret = EBUSY;
854 }
855 return ret;
856 }
857 #endif
858
859 /* iwlwifi: pcie/trans.c */
860 static int
861 iwm_write_mem(struct iwm_softc *sc, uint32_t addr, const void *buf, int dwords)
862 {
863 int offs;
864 const uint32_t *vals = buf;
865
866 if (iwm_nic_lock(sc)) {
867 IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WADDR, addr);
868 /* WADDR auto-increments */
869 for (offs = 0; offs < dwords; offs++) {
870 uint32_t val = vals ? vals[offs] : 0;
871 IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WDAT, val);
872 }
873 iwm_nic_unlock(sc);
874 } else {
875 DPRINTF(("%s: write_mem failed\n", DEVNAME(sc)));
876 return EBUSY;
877 }
878 return 0;
879 }
880
/* Convenience wrapper: write a single 32-bit word of device memory. */
static int
iwm_write_mem32(struct iwm_softc *sc, uint32_t addr, uint32_t val)
{
	return iwm_write_mem(sc, addr, &val, 1);
}
886
/*
 * Busy-wait until (register & mask) == (bits & mask), polling in
 * 10-microsecond steps for at most "timo" microseconds.  Returns 1
 * when the condition was met, 0 when the time budget ran out.
 */
static int
iwm_poll_bit(struct iwm_softc *sc, int reg,
	uint32_t bits, uint32_t mask, int timo)
{
	for (;;) {
		if ((IWM_READ(sc, reg) & mask) == (bits & mask))
			return 1;
		/* Out of budget: give up without a final delay. */
		if (timo < 10)
			return 0;
		timo -= 10;
		DELAY(10);
	}
}
902
/*
 * Request MAC access (keeps the device awake so host register reads
 * are valid) and wait up to 15ms for the MAC clock to come ready.
 * Returns 1 on success, 0 on failure.  Pairs with iwm_nic_unlock().
 */
static int
iwm_nic_lock(struct iwm_softc *sc)
{
	int rv = 0;

	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Also fail if the device announces it is going to sleep. */
	if (iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
	     | IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP, 15000)) {
		rv = 1;
	} else {
		/* jolt: force an NMI in the hope of waking the device */
		IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_FORCE_NMI);
	}

	return rv;
}
923
/* Drop the MAC access request taken by iwm_nic_lock(). */
static void
iwm_nic_unlock(struct iwm_softc *sc)
{
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}
930
/*
 * Read-modify-write a PRPH register under the NIC lock:
 * new = (old & mask) | bits.  If the NIC lock cannot be taken the
 * update is silently skipped.
 */
static void
iwm_set_bits_mask_prph(struct iwm_softc *sc,
	uint32_t reg, uint32_t bits, uint32_t mask)
{
	uint32_t val;

	/* XXX: no error path? */
	if (iwm_nic_lock(sc)) {
		val = iwm_read_prph(sc, reg) & mask;
		val |= bits;
		iwm_write_prph(sc, reg, val);
		iwm_nic_unlock(sc);
	}
}
945
/* Set 'bits' in a PRPH register, leaving all other bits untouched. */
static void
iwm_set_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
{
	iwm_set_bits_mask_prph(sc, reg, bits, ~0);
}
951
/* Clear 'bits' in a PRPH register, leaving all other bits untouched. */
static void
iwm_clear_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
{
	iwm_set_bits_mask_prph(sc, reg, 0, ~bits);
}
957
958 /*
959 * DMA resource routines
960 */
961
/*
 * Allocate, map and load a single physically-contiguous DMA area of
 * 'size' bytes with the given alignment, recording everything in 'dma'.
 * The memory is zeroed and pre-synced for device reads.  On any failure
 * all partially-acquired resources are released via iwm_dma_contig_free()
 * and the bus_dma error code is returned.
 */
static int
iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
	bus_size_t size, bus_size_t alignment)
{
	int nsegs, error;
	void *va;

	dma->tag = tag;
	dma->size = size;

	/* One map, one segment: the area must be contiguous. */
	error = bus_dmamap_create(tag, size, 1, size, 0, BUS_DMA_NOWAIT,
	    &dma->map);
	if (error != 0)
		goto fail;

	error = bus_dmamem_alloc(tag, size, alignment, 0, &dma->seg, 1, &nsegs,
	    BUS_DMA_NOWAIT);
	if (error != 0)
		goto fail;

	error = bus_dmamem_map(tag, &dma->seg, 1, size, &va,
	    BUS_DMA_NOWAIT);
	if (error != 0)
		goto fail;
	dma->vaddr = va;

	error = bus_dmamap_load(tag, dma->map, dma->vaddr, size, NULL,
	    BUS_DMA_NOWAIT);
	if (error != 0)
		goto fail;

	/* Hand the device a zeroed area and cache its bus address. */
	memset(dma->vaddr, 0, size);
	bus_dmamap_sync(tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE);
	dma->paddr = dma->map->dm_segs[0].ds_addr;

	return 0;

fail:	iwm_dma_contig_free(dma);
	return error;
}
1002
/*
 * Tear down a DMA area created by iwm_dma_contig_alloc().  Safe to call
 * on a partially-initialized 'dma' (the NULL checks mirror the alloc
 * order), which is how the alloc failure path uses it.
 */
static void
iwm_dma_contig_free(struct iwm_dma_info *dma)
{
	if (dma->map != NULL) {
		if (dma->vaddr != NULL) {
			bus_dmamap_sync(dma->tag, dma->map, 0, dma->size,
			    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(dma->tag, dma->map);
			bus_dmamem_unmap(dma->tag, dma->vaddr, dma->size);
			bus_dmamem_free(dma->tag, &dma->seg, 1);
			dma->vaddr = NULL;
		}
		bus_dmamap_destroy(dma->tag, dma->map);
		dma->map = NULL;
	}
}
1019
/* fwmem is used to load firmware onto the card */
static int
iwm_alloc_fwmem(struct iwm_softc *sc)
{
	/* Must be aligned on a 16-byte boundary. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
	    sc->sc_fwdmasegsz, 16);
}
1028
/* Release the firmware-load DMA area. */
static void
iwm_free_fwmem(struct iwm_softc *sc)
{
	iwm_dma_contig_free(&sc->fw_dma);
}
1034
1035 /* tx scheduler rings. not used? */
1036 static int
1037 iwm_alloc_sched(struct iwm_softc *sc)
1038 {
1039 int rv;
1040
1041 /* TX scheduler rings must be aligned on a 1KB boundary. */
1042 rv = iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
1043 __arraycount(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
1044 return rv;
1045 }
1046
/* Release the TX scheduler DMA area. */
static void
iwm_free_sched(struct iwm_softc *sc)
{
	iwm_dma_contig_free(&sc->sched_dma);
}
1052
/* keep-warm page is used internally by the card. see iwl-fh.h for more info */
static int
iwm_alloc_kw(struct iwm_softc *sc)
{
	/* One page, page-aligned. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
}
1059
/* Release the keep-warm page. */
static void
iwm_free_kw(struct iwm_softc *sc)
{
	iwm_dma_contig_free(&sc->kw_dma);
}
1065
/* interrupt cause table */
static int
iwm_alloc_ict(struct iwm_softc *sc)
{
	/* Alignment must match the shift used to program its address. */
	return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
	    IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
}
1073
/* Release the interrupt cause table. */
static void
iwm_free_ict(struct iwm_softc *sc)
{
	iwm_dma_contig_free(&sc->ict_dma);
}
1079
/*
 * Allocate one RX ring: the descriptor array (one 32-bit bus-address
 * pointer per slot), the RX status area, and a DMA map plus an initial
 * receive buffer for every slot.  On failure everything acquired so
 * far is released through iwm_free_rx_ring().
 */
static int
iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	bus_size_t size;
	int i, error;

	ring->cur = 0;

	/* Allocate RX descriptors (256-byte aligned). */
	size = IWM_RX_RING_COUNT * sizeof(uint32_t);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate RX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/* Allocate RX status area (16-byte aligned). */
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
	    sizeof(*ring->stat), 16);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate RX status DMA memory\n");
		goto fail;
	}
	ring->stat = ring->stat_dma.vaddr;

	/*
	 * Allocate and map RX buffers.
	 */
	for (i = 0; i < IWM_RX_RING_COUNT; i++) {
		struct iwm_rx_data *data = &ring->data[i];

		memset(data, 0, sizeof(*data));
		error = bus_dmamap_create(sc->sc_dmat, IWM_RBUF_SIZE, 1,
		    IWM_RBUF_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &data->map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create RX buf DMA map\n");
			goto fail;
		}

		/* Attach an mbuf to the slot and publish its address. */
		if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
			goto fail;
		}
	}
	return 0;

fail:	iwm_free_rx_ring(sc, ring);
	return error;
}
1133
/*
 * Stop the RX DMA channel and wait (up to ~10ms) for it to go idle,
 * then rewind the software ring pointer.  Buffers are kept.
 */
static void
iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
	int ntries;

	if (iwm_nic_lock(sc)) {
		IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
		for (ntries = 0; ntries < 1000; ntries++) {
			if (IWM_READ(sc, IWM_FH_MEM_RSSR_RX_STATUS_REG) &
			    IWM_FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE)
				break;
			DELAY(10);
		}
		iwm_nic_unlock(sc);
	}
	ring->cur = 0;
}
1151
1152 static void
1153 iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1154 {
1155 int i;
1156
1157 iwm_dma_contig_free(&ring->desc_dma);
1158 iwm_dma_contig_free(&ring->stat_dma);
1159
1160 for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1161 struct iwm_rx_data *data = &ring->data[i];
1162
1163 if (data->m != NULL) {
1164 bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1165 data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1166 bus_dmamap_unload(sc->sc_dmat, data->map);
1167 m_freem(data->m);
1168 }
1169 if (data->map != NULL)
1170 bus_dmamap_destroy(sc->sc_dmat, data->map);
1171 }
1172 }
1173
/*
 * Allocate one TX ring: the TFD descriptor array and, for rings up to
 * the command queue, a command buffer per slot and a DMA map per slot.
 * Per-slot cmd/scratch bus addresses are precomputed from the command
 * area base.  On failure everything acquired so far is released via
 * iwm_free_tx_ring().
 */
static int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
	bus_addr_t paddr;
	bus_size_t size;
	int i, error;

	ring->qid = qid;
	ring->queued = 0;
	ring->cur = 0;

	/* Allocate TX descriptors (256-byte aligned). */
	size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate TX ring DMA memory\n");
		goto fail;
	}
	ring->desc = ring->desc_dma.vaddr;

	/*
	 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
	 * to allocate commands space for other rings.
	 */
	if (qid > IWM_MVM_CMD_QUEUE)
		return 0;

	size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
	error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
	if (error != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate TX cmd DMA memory\n");
		goto fail;
	}
	ring->cmd = ring->cmd_dma.vaddr;

	/* Walk the command area, one iwm_device_cmd per slot. */
	paddr = ring->cmd_dma.paddr;
	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		data->cmd_paddr = paddr;
		data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
		    + offsetof(struct iwm_tx_cmd, scratch);
		paddr += sizeof(struct iwm_device_cmd);

		error = bus_dmamap_create(sc->sc_dmat, IWM_RBUF_SIZE,
		    IWM_NUM_OF_TBS, IWM_RBUF_SIZE, 0, BUS_DMA_NOWAIT,
		    &data->map);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not create TX buf DMA map\n");
			goto fail;
		}
	}
	/* The walk must land exactly at the end of the command area. */
	KASSERT(paddr == ring->cmd_dma.paddr + size);
	return 0;

fail:	iwm_free_tx_ring(sc, ring);
	return error;
}
1235
/*
 * Drop every queued mbuf, zero the TFD descriptors and rewind the ring.
 * DMA maps and the command area are kept for reuse.
 */
static void
iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
{
	int i;

	for (i = 0; i < IWM_TX_RING_COUNT; i++) {
		struct iwm_tx_data *data = &ring->data[i];

		if (data->m != NULL) {
			bus_dmamap_sync(sc->sc_dmat, data->map, 0,
			    data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, data->map);
			m_freem(data->m);
			data->m = NULL;
		}
	}
	/* Clear TX descriptors. */
	memset(ring->desc, 0, ring->desc_dma.size);
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0,
	    ring->desc_dma.size, BUS_DMASYNC_PREWRITE);
	/* This queue can no longer be full. */
	sc->qfullmsk &= ~(1 << ring->qid);
	ring->queued = 0;
	ring->cur = 0;
}
1260
1261 static void
1262 iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1263 {
1264 int i;
1265
1266 iwm_dma_contig_free(&ring->desc_dma);
1267 iwm_dma_contig_free(&ring->cmd_dma);
1268
1269 for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1270 struct iwm_tx_data *data = &ring->data[i];
1271
1272 if (data->m != NULL) {
1273 bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1274 data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1275 bus_dmamap_unload(sc->sc_dmat, data->map);
1276 m_freem(data->m);
1277 }
1278 if (data->map != NULL)
1279 bus_dmamap_destroy(sc->sc_dmat, data->map);
1280 }
1281 }
1282
1283 /*
1284 * High-level hardware frobbing routines
1285 */
1286
/* Mask all interrupts except RF-kill and program the mask register. */
static void
iwm_enable_rfkill_int(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INT_BIT_RF_KILL;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1293
1294 static int
1295 iwm_check_rfkill(struct iwm_softc *sc)
1296 {
1297 uint32_t v;
1298 int s;
1299 int rv;
1300
1301 s = splnet();
1302
1303 /*
1304 * "documentation" is not really helpful here:
1305 * 27: HW_RF_KILL_SW
1306 * Indicates state of (platform's) hardware RF-Kill switch
1307 *
1308 * But apparently when it's off, it's on ...
1309 */
1310 v = IWM_READ(sc, IWM_CSR_GP_CNTRL);
1311 rv = (v & IWM_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) == 0;
1312 if (rv) {
1313 sc->sc_flags |= IWM_FLAG_RFKILL;
1314 } else {
1315 sc->sc_flags &= ~IWM_FLAG_RFKILL;
1316 }
1317
1318 splx(s);
1319 return rv;
1320 }
1321
/* Enable the default working interrupt set and program the mask. */
static void
iwm_enable_interrupts(struct iwm_softc *sc)
{
	sc->sc_intmask = IWM_CSR_INI_SET_MASK;
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1328
/* Re-program the hardware mask from the cached sc_intmask. */
static void
iwm_restore_interrupts(struct iwm_softc *sc)
{
	IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1334
/* Mask all interrupts and acknowledge anything already pending. */
static void
iwm_disable_interrupts(struct iwm_softc *sc)
{
	int s = splnet();

	/* disable interrupts */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	/* acknowledge all interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);

	splx(s);
}
1349
/*
 * Clear the in-memory interrupt cause table, point the device at it
 * and switch the driver into ICT interrupt mode.
 */
static void
iwm_ict_reset(struct iwm_softc *sc)
{
	iwm_disable_interrupts(sc);

	/* Reset ICT table. */
	memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
	sc->ict_cur = 0;

	/* Set physical address of ICT table (4KB aligned). */
	IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
	    IWM_CSR_DRAM_INT_TBL_ENABLE
	    | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
	    | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

	/* Switch to ICT interrupt mode in driver. */
	sc->sc_flags |= IWM_FLAG_USE_ICT;

	/* Re-enable interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);
}
1372
#define IWM_HW_READY_TIMEOUT 50
/*
 * Assert NIC_READY and poll (up to IWM_HW_READY_TIMEOUT us) for the
 * hardware to reflect it.  Returns nonzero when the device is ready.
 */
static int
iwm_set_hw_ready(struct iwm_softc *sc)
{
	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	return iwm_poll_bit(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
	    IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
	    IWM_HW_READY_TIMEOUT);
}
#undef IWM_HW_READY_TIMEOUT
1386
/*
 * Ask the device to become ready; if it isn't immediately, assert
 * PREPARE and re-poll for up to 150ms.  Returns 0 on success or
 * ETIMEDOUT.
 */
static int
iwm_prepare_card_hw(struct iwm_softc *sc)
{
	int rv = 0;
	int t = 0;

	if (iwm_set_hw_ready(sc))
		goto out;

	/* If HW is not ready, prepare the conditions to check again */
	IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
	    IWM_CSR_HW_IF_CONFIG_REG_PREPARE);

	do {
		if (iwm_set_hw_ready(sc))
			goto out;
		DELAY(200);
		t += 200;
	} while (t < 150000);

	rv = ETIMEDOUT;

 out:
	return rv;
}
1412
/*
 * Mirror the PCIe ASPM L1 setting into the device's GIO register.
 * When L1 is enabled we set L0S_ENABLED (which the reference driver
 * treats as "L0S disabled" — see the comments below), and vice versa.
 */
static void
iwm_apm_config(struct iwm_softc *sc)
{
	pcireg_t reg;

	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
	    sc->sc_cap_off + PCIE_LCSR);
	if (reg & PCIE_LCSR_ASPM_L1) {
		/* Um, the Linux driver prints "Disabling L0S" for this one... */
		IWM_SETBITS(sc, IWM_CSR_GIO_REG,
		    IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
	} else {
		/* ... and "Enabling" here */
		IWM_CLRBITS(sc, IWM_CSR_GIO_REG,
		    IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
	}
}
1430
1431 /*
1432 * Start up NIC's basic functionality after it has been reset
1433 * (e.g. after platform boot, or shutdown via iwm_pcie_apm_stop())
1434 * NOTE: This does not load uCode nor start the embedded processor
1435 */
1436 static int
1437 iwm_apm_init(struct iwm_softc *sc)
1438 {
1439 int error = 0;
1440
1441 DPRINTF(("iwm apm start\n"));
1442
1443 /* Disable L0S exit timer (platform NMI Work/Around) */
1444 IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
1445 IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
1446
1447 /*
1448 * Disable L0s without affecting L1;
1449 * don't wait for ICH L0s (ICH bug W/A)
1450 */
1451 IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
1452 IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
1453
1454 /* Set FH wait threshold to maximum (HW error during stress W/A) */
1455 IWM_SETBITS(sc, IWM_CSR_DBG_HPET_MEM_REG, IWM_CSR_DBG_HPET_MEM_REG_VAL);
1456
1457 /*
1458 * Enable HAP INTA (interrupt from management bus) to
1459 * wake device's PCI Express link L1a -> L0s
1460 */
1461 IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1462 IWM_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
1463
1464 iwm_apm_config(sc);
1465
1466 #if 0 /* not for 7k */
1467 /* Configure analog phase-lock-loop before activating to D0A */
1468 if (trans->cfg->base_params->pll_cfg_val)
1469 IWM_SETBITS(trans, IWM_CSR_ANA_PLL_CFG,
1470 trans->cfg->base_params->pll_cfg_val);
1471 #endif
1472
1473 /*
1474 * Set "initialization complete" bit to move adapter from
1475 * D0U* --> D0A* (powered-up active) state.
1476 */
1477 IWM_SETBITS(sc, IWM_CSR_GP_CNTRL, IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1478
1479 /*
1480 * Wait for clock stabilization; once stabilized, access to
1481 * device-internal resources is supported, e.g. iwm_write_prph()
1482 * and accesses to uCode SRAM.
1483 */
1484 if (!iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
1485 IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
1486 IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
1487 aprint_error_dev(sc->sc_dev,
1488 "timeout waiting for clock stabilization\n");
1489 goto out;
1490 }
1491
1492 /*
1493 * This is a bit of an abuse - This is needed for 7260 / 3160
1494 * only check host_interrupt_operation_mode even if this is
1495 * not related to host_interrupt_operation_mode.
1496 *
1497 * Enable the oscillator to count wake up time for L1 exit. This
1498 * consumes slightly more power (100uA) - but allows to be sure
1499 * that we wake up from L1 on time.
1500 *
1501 * This looks weird: read twice the same register, discard the
1502 * value, set a bit, and yet again, read that same register
1503 * just to discard the value. But that's the way the hardware
1504 * seems to like it.
1505 */
1506 iwm_read_prph(sc, IWM_OSC_CLK);
1507 iwm_read_prph(sc, IWM_OSC_CLK);
1508 iwm_set_bits_prph(sc, IWM_OSC_CLK, IWM_OSC_CLK_FORCE_CONTROL);
1509 iwm_read_prph(sc, IWM_OSC_CLK);
1510 iwm_read_prph(sc, IWM_OSC_CLK);
1511
1512 /*
1513 * Enable DMA clock and wait for it to stabilize.
1514 *
1515 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
1516 * do not disable clocks. This preserves any hardware bits already
1517 * set by default in "CLK_CTRL_REG" after reset.
1518 */
1519 iwm_write_prph(sc, IWM_APMG_CLK_EN_REG, IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1520 //kpause("iwmapm", 0, mstohz(20), NULL);
1521 DELAY(20);
1522
1523 /* Disable L1-Active */
1524 iwm_set_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1525 IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1526
1527 /* Clear the interrupt in APMG if the NIC is in RFKILL */
1528 iwm_write_prph(sc, IWM_APMG_RTC_INT_STT_REG,
1529 IWM_APMG_RTC_INT_STT_RFKILL);
1530
1531 out:
1532 if (error)
1533 aprint_error_dev(sc->sc_dev, "apm init error %d\n", error);
1534 return error;
1535 }
1536
/* iwlwifi/pcie/trans.c */
/*
 * Request the device to stop bus-master DMA and wait (up to 100us)
 * for the MASTER_DISABLED acknowledgement.
 */
static void
iwm_apm_stop(struct iwm_softc *sc)
{
	/* stop device's busmaster DMA activity */
	IWM_SETBITS(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_STOP_MASTER);

	if (!iwm_poll_bit(sc, IWM_CSR_RESET,
	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED,
	    IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100))
		aprint_error_dev(sc->sc_dev, "timeout waiting for master\n");
	DPRINTF(("iwm apm stop\n"));
}
1550
/* iwlwifi pcie/trans.c */
/*
 * Bring the device from reset to a basic operational state:
 * prepare, full software reset, APM init, then arm the RF-kill
 * interrupt and record the current switch position.
 */
static int
iwm_start_hw(struct iwm_softc *sc)
{
	int error;

	if ((error = iwm_prepare_card_hw(sc)) != 0)
		return error;

	/* Reset the entire device */
	IWM_WRITE(sc, IWM_CSR_RESET,
	    IWM_CSR_RESET_REG_FLAG_SW_RESET |
	    IWM_CSR_RESET_REG_FLAG_NEVO_RESET);
	DELAY(10);

	if ((error = iwm_apm_init(sc)) != 0)
		return error;

	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);

	return 0;
}
1574
1575 /* iwlwifi pcie/trans.c */
1576
/*
 * Full device shutdown: mask interrupts, quiesce the TX scheduler and
 * all FH DMA channels, drain the rings, power down the DMA clocks and
 * put the APM into low power, then reset the on-board processor.
 * The RF-kill interrupt is re-armed at the end so switch changes are
 * still seen while the device is down.
 */
static void
iwm_stop_device(struct iwm_softc *sc)
{
	int chnl, ntries;
	int qid;

	/* tell the device to stop sending interrupts */
	iwm_disable_interrupts(sc);

	/* device going down, Stop using ICT table */
	sc->sc_flags &= ~IWM_FLAG_USE_ICT;

	/* stop tx and rx. tx and rx bits, as usual, are from if_iwn */

	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Stop all DMA channels. */
	if (iwm_nic_lock(sc)) {
		for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
			IWM_WRITE(sc,
			    IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
			/* Wait up to ~4ms for the channel to idle. */
			for (ntries = 0; ntries < 200; ntries++) {
				uint32_t r;

				r = IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG);
				if (r & IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(
				    chnl))
					break;
				DELAY(20);
			}
		}
		iwm_nic_unlock(sc);
	}

	/* Stop RX ring. */
	iwm_reset_rx_ring(sc, &sc->rxq);

	/* Reset all TX rings. */
	for (qid = 0; qid < __arraycount(sc->txq); qid++)
		iwm_reset_tx_ring(sc, &sc->txq[qid]);

	/*
	 * Power-down device's busmaster DMA clocks
	 */
	iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG, IWM_APMG_CLK_VAL_DMA_CLK_RQT);
	DELAY(5);

	/* Make sure (redundant) we've released our request to stay awake */
	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwm_apm_stop(sc);

	/* Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * Clean again the interrupt here
	 */
	iwm_disable_interrupts(sc);
	/* stop and reset the on-board processor */
	IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_NEVO_RESET);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwm_enable_rfkill_int(sc);
	iwm_check_rfkill(sc);
}
1645
/* iwlwifi pcie/trans.c (always main power) */
/* Select VMAIN as the power source in the APMG power-control register. */
static void
iwm_set_pwr(struct iwm_softc *sc)
{
	iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
	    IWM_APMG_PS_CTRL_VAL_PWR_SRC_VMAIN, ~IWM_APMG_PS_CTRL_MSK_PWR_SRC);
}
1653
/* iwlwifi: mvm/ops.c */
/*
 * Program the HW_IF_CONFIG register from the MAC revision (step/dash,
 * taken from sc_hw_rev) and the radio configuration (type/step/dash,
 * taken from the firmware's PHY config), then apply the early-power-off
 * reset workaround.
 */
static void
iwm_mvm_nic_config(struct iwm_softc *sc)
{
	uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
	uint32_t reg_val = 0;

	/* Unpack the radio config fields from the firmware PHY config. */
	radio_cfg_type = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
	    IWM_FW_PHY_CFG_RADIO_TYPE_POS;
	radio_cfg_step = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
	    IWM_FW_PHY_CFG_RADIO_STEP_POS;
	radio_cfg_dash = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
	    IWM_FW_PHY_CFG_RADIO_DASH_POS;

	/* SKU control */
	reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
	reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
	    IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

	/* radio configuration */
	reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
	reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
	reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

	IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);

	DPRINTF(("Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
	    radio_cfg_step, radio_cfg_dash));

	/*
	 * W/A : NIC is stuck in a reset state after Early PCIe power off
	 * (PCIe power is lost before PERST# is asserted), causing ME FW
	 * to lose ownership and not being able to obtain it back.
	 */
	iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
	    IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
	    ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
}
1693
/*
 * Program the RX side of the flow handler: stop the channel, hand it
 * the ring/status bus addresses, enable it with a 4KB buffer size, set
 * interrupt coalescing, and publish the first 8 receive buffers.
 * Returns 0 or EBUSY if the NIC lock cannot be taken.
 */
static int
iwm_nic_rx_init(struct iwm_softc *sc)
{
	if (!iwm_nic_lock(sc))
		return EBUSY;

	/*
	 * Initialize RX ring.  This is from the iwn driver.
	 */
	memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));

	/* stop DMA */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Set physical address of RX ring (256-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);

	/* Set physical address of RX status (16-byte aligned). */
	IWM_WRITE(sc,
	    IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);

	/* Enable RX. */
	/*
	 * Note: Linux driver also sets this:
	 *  (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
	 *
	 * It causes weird behavior.  YMMV.
	 */
	IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
	    IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL		|
	    IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY		|  /* HW bug */
	    IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL	|
	    IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K		|
	    IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);

	IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
	IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);

	/*
	 * Thus sayeth el jefe (iwlwifi) via a comment:
	 *
	 * This value should initially be 0 (before preparing any
	 * RBs), should be 8 after preparing the first 8 RBs (for example)
	 */
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);

	iwm_nic_unlock(sc);

	return 0;
}
1749
/*
 * Program the TX side: deactivate the scheduler, point the device at
 * the keep-warm page, and load every TX ring's descriptor base address.
 * Returns 0 or EBUSY if the NIC lock cannot be taken.
 */
static int
iwm_nic_tx_init(struct iwm_softc *sc)
{
	int qid;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	/* Deactivate TX scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

	/* Set physical address of "keep warm" page (16-byte aligned). */
	IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);

	/* Initialize TX rings. */
	for (qid = 0; qid < __arraycount(sc->txq); qid++) {
		struct iwm_tx_ring *txq = &sc->txq[qid];

		/* Set physical address of TX ring (256-byte aligned). */
		IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
		    txq->desc_dma.paddr >> 8);
		DPRINTF(("loading ring %d descriptors (%p) at %"PRIxMAX"\n",
		    qid, txq->desc, (uintmax_t)(txq->desc_dma.paddr >> 8)));
	}
	iwm_nic_unlock(sc);

	return 0;
}
1778
1779 static int
1780 iwm_nic_init(struct iwm_softc *sc)
1781 {
1782 int error;
1783
1784 iwm_apm_init(sc);
1785 iwm_set_pwr(sc);
1786
1787 iwm_mvm_nic_config(sc);
1788
1789 if ((error = iwm_nic_rx_init(sc)) != 0)
1790 return error;
1791
1792 /*
1793 * Ditto for TX, from iwn
1794 */
1795 if ((error = iwm_nic_tx_init(sc)) != 0)
1796 return error;
1797
1798 DPRINTF(("shadow registers enabled\n"));
1799 IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
1800
1801 return 0;
1802 }
1803
1804 #if 0
1805 enum iwm_mvm_tx_fifo {
1806 IWM_MVM_TX_FIFO_BK = 0,
1807 IWM_MVM_TX_FIFO_BE,
1808 IWM_MVM_TX_FIFO_VI,
1809 IWM_MVM_TX_FIFO_VO,
1810 IWM_MVM_TX_FIFO_MCAST = 5,
1811 };
1812
1813 static const uint8_t iwm_mvm_ac_to_tx_fifo[] = {
1814 IWM_MVM_TX_FIFO_VO,
1815 IWM_MVM_TX_FIFO_VI,
1816 IWM_MVM_TX_FIFO_BE,
1817 IWM_MVM_TX_FIFO_BK,
1818 };
1819 #endif
1820
/*
 * Activate TX queue 'qid' and bind it to hardware FIFO 'fifo':
 * deactivate, configure chaining/aggregation, zero the read/write
 * pointers, program the scheduler context (window size and frame
 * limit) in SRAM, then mark the queue active.  Failure to take the
 * NIC lock is only logged (see XXX below).
 */
static void
iwm_enable_txq(struct iwm_softc *sc, int qid, int fifo)
{
	if (!iwm_nic_lock(sc)) {
		DPRINTF(("%s: cannot enable txq %d\n", DEVNAME(sc), qid));
		return; /* XXX return EBUSY */
	}

	/* deactivate before configuration */
	iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
	    (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
	    | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));

	/* The command queue is never chained. */
	if (qid != IWM_MVM_CMD_QUEUE) {
		iwm_set_bits_prph(sc, IWM_SCD_QUEUECHAIN_SEL, (1 << qid));
	}

	iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));

	/* Reset hardware write pointer and scheduler read pointer. */
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
	iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);

	iwm_write_mem32(sc, sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
	/* Set scheduler window size and frame limit. */
	iwm_write_mem32(sc,
	    sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
	    sizeof(uint32_t),
	    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
	    IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
	    ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
	    IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

	iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
	    (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
	    (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
	    (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
	    IWM_SCD_QUEUE_STTS_REG_MSK);

	iwm_nic_unlock(sc);

	DPRINTF(("enabled txq %d FIFO %d\n", qid, fifo));
}
1863
/*
 * Finish bring-up after the firmware's "alive" notification: verify the
 * scheduler SRAM base, reset the ICT, clear the scheduler context area,
 * program the scheduler DRAM base, enable the command queue and all FH
 * TX DMA channels, and re-enable L1-Active.  Returns 0, EBUSY (no NIC
 * lock), EINVAL (sched base mismatch) or an iwm_write_mem() error.
 */
static int
iwm_post_alive(struct iwm_softc *sc)
{
	int nwords;
	int error, chnl;

	if (!iwm_nic_lock(sc))
		return EBUSY;

	if (sc->sched_base != iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR)) {
		DPRINTF(("%s: sched addr mismatch\n", DEVNAME(sc)));
		error = EINVAL;
		goto out;
	}

	iwm_ict_reset(sc);

	/* Clear TX scheduler state in SRAM. */
	nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
	    IWM_SCD_CONTEXT_MEM_LOWER_BOUND)
	    / sizeof(uint32_t);
	error = iwm_write_mem(sc,
	    sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
	    NULL, nwords);
	if (error)
		goto out;

	/* Set physical address of TX scheduler rings (1KB aligned). */
	iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);

	iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);

	/* enable command channel */
	iwm_enable_txq(sc, IWM_MVM_CMD_QUEUE, 7);

	/* Activate all TX queues in the scheduler. */
	iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);

	/* Enable DMA channels. */
	for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
		IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
	}

	IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
	    IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Enable L1-Active */
	iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
	    IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

 out:
	iwm_nic_unlock(sc);
	return error;
}
1919
1920 /*
1921 * PHY db
1922 * iwlwifi/iwl-phy-db.c
1923 */
1924
1925 /*
1926 * BEGIN iwl-phy-db.c
1927 */
1928
/* PHY database section identifiers, matching the firmware's encoding. */
enum iwm_phy_db_section_type {
	IWM_PHY_DB_CFG = 1,		/* configuration data */
	IWM_PHY_DB_CALIB_NCH,		/* non-channel-specific calibration */
	IWM_PHY_DB_UNUSED,
	IWM_PHY_DB_CALIB_CHG_PAPD,	/* per-channel-group PAPD calibration */
	IWM_PHY_DB_CALIB_CHG_TXP,	/* per-channel-group TX power calibration */
	IWM_PHY_DB_MAX
};
1937
1938 #define IWM_PHY_DB_CMD 0x6c /* TEMP API - The actual is 0x8c */
1939
1940 /*
1941 * phy db - configure operational ucode
1942 */
1943 struct iwm_phy_db_cmd {
1944 uint16_t type;
1945 uint16_t length;
1946 uint8_t data[];
1947 } __packed;
1948
/* for parsing of tx power channel group data that comes from the firmware */
struct iwm_phy_db_chg_txp {
	uint32_t space;			/* padding/reserved */
	uint16_t max_channel_idx;	/* highest channel index in this group */
} __packed;
1954
1955 /*
1956 * phy db - Receive phy db chunk after calibrations
1957 */
1958 struct iwm_calib_res_notif_phy_db {
1959 uint16_t type;
1960 uint16_t length;
1961 uint8_t data[];
1962 } __packed;
1963
1964 /*
1965 * get phy db section: returns a pointer to a phy db section specified by
1966 * type and channel group id.
1967 */
1968 static struct iwm_phy_db_entry *
1969 iwm_phy_db_get_section(struct iwm_softc *sc,
1970 enum iwm_phy_db_section_type type, uint16_t chg_id)
1971 {
1972 struct iwm_phy_db *phy_db = &sc->sc_phy_db;
1973
1974 if (type >= IWM_PHY_DB_MAX)
1975 return NULL;
1976
1977 switch (type) {
1978 case IWM_PHY_DB_CFG:
1979 return &phy_db->cfg;
1980 case IWM_PHY_DB_CALIB_NCH:
1981 return &phy_db->calib_nch;
1982 case IWM_PHY_DB_CALIB_CHG_PAPD:
1983 if (chg_id >= IWM_NUM_PAPD_CH_GROUPS)
1984 return NULL;
1985 return &phy_db->calib_ch_group_papd[chg_id];
1986 case IWM_PHY_DB_CALIB_CHG_TXP:
1987 if (chg_id >= IWM_NUM_TXP_CH_GROUPS)
1988 return NULL;
1989 return &phy_db->calib_ch_group_txp[chg_id];
1990 default:
1991 return NULL;
1992 }
1993 return NULL;
1994 }
1995
/*
 * Store one PHY DB section received from the firmware.  For the
 * per-channel-group types the group id is read from the first 16 bits
 * of the payload.  Any previous data for the section is replaced.
 * Returns 0, EINVAL (unknown section) or ENOMEM.
 * NOTE(review): 'size' is trusted to be >= 2 for the CHG types —
 * confirm the caller validates the notification length.
 */
static int
iwm_phy_db_set_section(struct iwm_softc *sc,
	struct iwm_calib_res_notif_phy_db *phy_db_notif, uint16_t size)
{
	enum iwm_phy_db_section_type type = le16toh(phy_db_notif->type);
	struct iwm_phy_db_entry *entry;
	uint16_t chg_id = 0;

	if (type == IWM_PHY_DB_CALIB_CHG_PAPD ||
	    type == IWM_PHY_DB_CALIB_CHG_TXP)
		chg_id = le16toh(*(uint16_t *)phy_db_notif->data);

	entry = iwm_phy_db_get_section(sc, type, chg_id);
	if (!entry)
		return EINVAL;

	/* Replace any previously stored copy of this section. */
	if (entry->data)
		kmem_intr_free(entry->data, entry->size);
	entry->data = kmem_intr_alloc(size, KM_NOSLEEP);
	if (!entry->data) {
		entry->size = 0;
		return ENOMEM;
	}
	memcpy(entry->data, phy_db_notif->data, size);
	entry->size = size;

	DPRINTFN(10, ("%s(%d): [PHYDB]SET: Type %d, Size: %d, data: %p\n",
	    __func__, __LINE__, type, size, entry->data));

	return 0;
}
2027
2028 static int
2029 iwm_is_valid_channel(uint16_t ch_id)
2030 {
2031 if (ch_id <= 14 ||
2032 (36 <= ch_id && ch_id <= 64 && ch_id % 4 == 0) ||
2033 (100 <= ch_id && ch_id <= 140 && ch_id % 4 == 0) ||
2034 (145 <= ch_id && ch_id <= 165 && ch_id % 4 == 1))
2035 return 1;
2036 return 0;
2037 }
2038
/*
 * Map a channel number to a dense table index (2GHz channels first,
 * then the 5GHz sub-bands contiguously).  Returns 0xff for an invalid
 * channel.
 */
static uint8_t
iwm_ch_id_to_ch_index(uint16_t ch_id)
{
	if (!iwm_is_valid_channel(ch_id))
		return 0xff;

	if (ch_id <= 14)
		return ch_id - 1;
	if (ch_id <= 64)
		return (ch_id + 20) / 4;
	if (ch_id <= 140)
		return (ch_id - 12) / 4;
	return (ch_id - 13) / 4;
}
2053
2054
/*
 * Map a channel number to its PAPD calibration channel group:
 * one group per supported channel range.  Returns 0xff for an
 * invalid channel.
 */
static uint16_t
iwm_channel_id_to_papd(uint16_t ch_id)
{
	if (!iwm_is_valid_channel(ch_id))
		return 0xff;

	if (ch_id >= 1 && ch_id <= 14)
		return 0;
	if (ch_id >= 36 && ch_id <= 64)
		return 1;
	if (ch_id >= 100 && ch_id <= 140)
		return 2;
	/* remaining valid channels: 145-165 */
	return 3;
}
2069
/*
 * Map a channel number to the TX power calibration channel group that
 * covers it, by scanning the stored TXP groups for the first one whose
 * max_channel_idx is at or above the channel's table index.
 * Returns 0xff if the channel is invalid, a group's calibration data
 * has not been received yet, or no group covers the channel.
 */
static uint16_t
iwm_channel_id_to_txp(struct iwm_softc *sc, uint16_t ch_id)
{
	struct iwm_phy_db *phy_db = &sc->sc_phy_db;
	struct iwm_phy_db_chg_txp *txp_chg;
	int i;
	uint8_t ch_index = iwm_ch_id_to_ch_index(ch_id);

	if (ch_index == 0xff)
		return 0xff;

	for (i = 0; i < IWM_NUM_TXP_CH_GROUPS; i++) {
		txp_chg = (void *)phy_db->calib_ch_group_txp[i].data;
		if (!txp_chg)
			return 0xff;
		/*
		 * Looking for the first channel group whose max channel
		 * index is at or above the wanted channel's index.
		 */
		if (le16toh(txp_chg->max_channel_idx) >= ch_index)
			return i;
	}
	return 0xff;
}
2094
2095 static int
2096 iwm_phy_db_get_section_data(struct iwm_softc *sc,
2097 uint32_t type, uint8_t **data, uint16_t *size, uint16_t ch_id)
2098 {
2099 struct iwm_phy_db_entry *entry;
2100 uint16_t ch_group_id = 0;
2101
2102 /* find wanted channel group */
2103 if (type == IWM_PHY_DB_CALIB_CHG_PAPD)
2104 ch_group_id = iwm_channel_id_to_papd(ch_id);
2105 else if (type == IWM_PHY_DB_CALIB_CHG_TXP)
2106 ch_group_id = iwm_channel_id_to_txp(sc, ch_id);
2107
2108 entry = iwm_phy_db_get_section(sc, type, ch_group_id);
2109 if (!entry)
2110 return EINVAL;
2111
2112 *data = entry->data;
2113 *size = entry->size;
2114
2115 DPRINTFN(10, ("%s(%d): [PHYDB] GET: Type %d , Size: %d\n",
2116 __func__, __LINE__, type, *size));
2117
2118 return 0;
2119 }
2120
2121 static int
2122 iwm_send_phy_db_cmd(struct iwm_softc *sc, uint16_t type,
2123 uint16_t length, void *data)
2124 {
2125 struct iwm_phy_db_cmd phy_db_cmd;
2126 struct iwm_host_cmd cmd = {
2127 .id = IWM_PHY_DB_CMD,
2128 .flags = IWM_CMD_SYNC,
2129 };
2130
2131 DPRINTFN(10, ("Sending PHY-DB hcmd of type %d, of length %d\n",
2132 type, length));
2133
2134 /* Set phy db cmd variables */
2135 phy_db_cmd.type = le16toh(type);
2136 phy_db_cmd.length = le16toh(length);
2137
2138 /* Set hcmd variables */
2139 cmd.data[0] = &phy_db_cmd;
2140 cmd.len[0] = sizeof(struct iwm_phy_db_cmd);
2141 cmd.data[1] = data;
2142 cmd.len[1] = length;
2143 cmd.dataflags[1] = IWM_HCMD_DFL_NOCOPY;
2144
2145 return iwm_send_cmd(sc, &cmd);
2146 }
2147
2148 static int
2149 iwm_phy_db_send_all_channel_groups(struct iwm_softc *sc,
2150 enum iwm_phy_db_section_type type, uint8_t max_ch_groups)
2151 {
2152 uint16_t i;
2153 int err;
2154 struct iwm_phy_db_entry *entry;
2155
2156 /* Send all the channel-specific groups to operational fw */
2157 for (i = 0; i < max_ch_groups; i++) {
2158 entry = iwm_phy_db_get_section(sc, type, i);
2159 if (!entry)
2160 return EINVAL;
2161
2162 if (!entry->size)
2163 continue;
2164
2165 /* Send the requested PHY DB section */
2166 err = iwm_send_phy_db_cmd(sc, type, entry->size, entry->data);
2167 if (err) {
2168 DPRINTF(("%s: Can't SEND phy_db section %d (%d), "
2169 "err %d\n", DEVNAME(sc), type, i, err));
2170 return err;
2171 }
2172
2173 DPRINTFN(10, ("Sent PHY_DB HCMD, type = %d num = %d\n", type, i));
2174 }
2175
2176 return 0;
2177 }
2178
/*
 * Push all stored phy db content to the runtime firmware image:
 * the configuration section, the non-channel calibration section,
 * and every PAPD and TXP channel group.  Returns the first error
 * encountered.
 */
static int
iwm_send_phy_db_data(struct iwm_softc *sc)
{
	uint8_t *data = NULL;
	uint16_t size = 0;
	int err;

	DPRINTF(("Sending phy db data and configuration to runtime image\n"));

	/* Send PHY DB CFG section */
	err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CFG, &data, &size, 0);
	if (err) {
		DPRINTF(("%s: Cannot get Phy DB cfg section, %d\n",
		    DEVNAME(sc), err));
		return err;
	}

	err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CFG, size, data);
	if (err) {
		DPRINTF(("%s: Cannot send HCMD of Phy DB cfg section, %d\n",
		    DEVNAME(sc), err));
		return err;
	}

	/* Send the non-channel-specific calibration section. */
	err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CALIB_NCH,
	    &data, &size, 0);
	if (err) {
		DPRINTF(("%s: Cannot get Phy DB non specific channel section, "
		    "%d\n", DEVNAME(sc), err));
		return err;
	}

	err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CALIB_NCH, size, data);
	if (err) {
		DPRINTF(("%s: Cannot send HCMD of Phy DB non specific channel "
		    "sect, %d\n", DEVNAME(sc), err));
		return err;
	}

	/* Send all the PAPD channel specific data */
	err = iwm_phy_db_send_all_channel_groups(sc,
	    IWM_PHY_DB_CALIB_CHG_PAPD, IWM_NUM_PAPD_CH_GROUPS);
	if (err) {
		DPRINTF(("%s: Cannot send channel specific PAPD groups, %d\n",
		    DEVNAME(sc), err));
		return err;
	}

	/* Send all the TXP channel specific data */
	err = iwm_phy_db_send_all_channel_groups(sc,
	    IWM_PHY_DB_CALIB_CHG_TXP, IWM_NUM_TXP_CH_GROUPS);
	if (err) {
		DPRINTF(("%s: Cannot send channel specific TX power groups, "
		    "%d\n", DEVNAME(sc), err));
		return err;
	}

	DPRINTF(("Finished sending phy db non channel data\n"));
	return 0;
}
2239
2240 /*
2241 * END iwl-phy-db.c
2242 */
2243
2244 /*
2245 * BEGIN iwlwifi/mvm/time-event.c
2246 */
2247
2248 /*
2249 * For the high priority TE use a time event type that has similar priority to
2250 * the FW's action scan priority.
2251 */
2252 #define IWM_MVM_ROC_TE_TYPE_NORMAL IWM_TE_P2P_DEVICE_DISCOVERABLE
2253 #define IWM_MVM_ROC_TE_TYPE_MGMT_TX IWM_TE_P2P_CLIENT_ASSOC
2254
2255 /* used to convert from time event API v2 to v1 */
2256 #define IWM_TE_V2_DEP_POLICY_MSK (IWM_TE_V2_DEP_OTHER | IWM_TE_V2_DEP_TSF |\
2257 IWM_TE_V2_EVENT_SOCIOPATHIC)
2258 static inline uint16_t
2259 iwm_te_v2_get_notify(uint16_t policy)
2260 {
2261 return le16toh(policy) & IWM_TE_V2_NOTIF_MSK;
2262 }
2263
2264 static inline uint16_t
2265 iwm_te_v2_get_dep_policy(uint16_t policy)
2266 {
2267 return (le16toh(policy) & IWM_TE_V2_DEP_POLICY_MSK) >>
2268 IWM_TE_V2_PLACEMENT_POS;
2269 }
2270
2271 static inline uint16_t
2272 iwm_te_v2_get_absence(uint16_t policy)
2273 {
2274 return (le16toh(policy) & IWM_TE_V2_ABSENCE) >> IWM_TE_V2_ABSENCE_POS;
2275 }
2276
/*
 * Convert a v2 time event command into the older v1 layout, for
 * firmware images without IWM_UCODE_TLV_FLAGS_TIME_EVENT_API_V2.
 * Common fields are copied through unchanged; the packed v2 policy
 * word is unpacked into the separate v1 dep_policy/is_present/notify
 * fields.
 */
static void
iwm_mvm_te_v2_to_v1(const struct iwm_time_event_cmd_v2 *cmd_v2,
	struct iwm_time_event_cmd_v1 *cmd_v1)
{
	cmd_v1->id_and_color = cmd_v2->id_and_color;
	cmd_v1->action = cmd_v2->action;
	cmd_v1->id = cmd_v2->id;
	cmd_v1->apply_time = cmd_v2->apply_time;
	cmd_v1->max_delay = cmd_v2->max_delay;
	cmd_v1->depends_on = cmd_v2->depends_on;
	cmd_v1->interval = cmd_v2->interval;
	cmd_v1->duration = cmd_v2->duration;
	/* v1 uses its own magic value for an endless repeat. */
	if (cmd_v2->repeat == IWM_TE_V2_REPEAT_ENDLESS)
		cmd_v1->repeat = htole32(IWM_TE_V1_REPEAT_ENDLESS);
	else
		cmd_v1->repeat = htole32(cmd_v2->repeat);
	cmd_v1->max_frags = htole32(cmd_v2->max_frags);
	cmd_v1->interval_reciprocal = 0; /* unused */

	/* Split the packed v2 policy word into the separate v1 fields. */
	cmd_v1->dep_policy = htole32(iwm_te_v2_get_dep_policy(cmd_v2->policy));
	cmd_v1->is_present = htole32(!iwm_te_v2_get_absence(cmd_v2->policy));
	cmd_v1->notify = htole32(iwm_te_v2_get_notify(cmd_v2->policy));
}
2300
2301 static int
2302 iwm_mvm_send_time_event_cmd(struct iwm_softc *sc,
2303 const struct iwm_time_event_cmd_v2 *cmd)
2304 {
2305 struct iwm_time_event_cmd_v1 cmd_v1;
2306
2307 if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_TIME_EVENT_API_V2)
2308 return iwm_mvm_send_cmd_pdu(sc, IWM_TIME_EVENT_CMD,
2309 IWM_CMD_SYNC, sizeof(*cmd), cmd);
2310
2311 iwm_mvm_te_v2_to_v1(cmd, &cmd_v1);
2312 return iwm_mvm_send_cmd_pdu(sc, IWM_TIME_EVENT_CMD, IWM_CMD_SYNC,
2313 sizeof(cmd_v1), &cmd_v1);
2314 }
2315
2316 static int
2317 iwm_mvm_time_event_send_add(struct iwm_softc *sc, struct iwm_node *in,
2318 void *te_data, struct iwm_time_event_cmd_v2 *te_cmd)
2319 {
2320 int ret;
2321
2322 DPRINTF(("Add new TE, duration %d TU\n", le32toh(te_cmd->duration)));
2323
2324 ret = iwm_mvm_send_time_event_cmd(sc, te_cmd);
2325 if (ret) {
2326 DPRINTF(("%s: Couldn't send IWM_TIME_EVENT_CMD: %d\n",
2327 DEVNAME(sc), ret));
2328 }
2329
2330 return ret;
2331 }
2332
/*
 * Ask the firmware to protect a time interval (used around
 * association) with an AGGRESSIVE_ASSOC time event anchored at the
 * device's current timestamp.
 * NOTE(review): the min_duration parameter is currently unused --
 * confirm against the reference driver whether it should bound
 * 'duration'.
 */
static void
iwm_mvm_protect_session(struct iwm_softc *sc, struct iwm_node *in,
	uint32_t duration, uint32_t min_duration, uint32_t max_delay)
{
	struct iwm_time_event_cmd_v2 time_cmd;

	memset(&time_cmd, 0, sizeof(time_cmd));

	time_cmd.action = htole32(IWM_FW_CTXT_ACTION_ADD);
	time_cmd.id_and_color =
	    htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
	time_cmd.id = htole32(IWM_TE_BSS_STA_AGGRESSIVE_ASSOC);

	/* Anchor the event at the current device system time. */
	time_cmd.apply_time = htole32(iwm_read_prph(sc,
	    IWM_DEVICE_SYSTEM_TIME_REG));

	time_cmd.max_frags = IWM_TE_V2_FRAG_NONE;
	time_cmd.max_delay = htole32(max_delay);
	/* TODO: why do we need to interval = bi if it is not periodic? */
	time_cmd.interval = htole32(1);
	time_cmd.duration = htole32(duration);
	time_cmd.repeat = 1;
	/* Request notifications at both the start and end of the event. */
	time_cmd.policy
	    = htole32(IWM_TE_V2_NOTIF_HOST_EVENT_START |
		IWM_TE_V2_NOTIF_HOST_EVENT_END);

	iwm_mvm_time_event_send_add(sc, in, /*te_data*/NULL, &time_cmd);
}
2361
2362 /*
2363 * END iwlwifi/mvm/time-event.c
2364 */
2365
2366 /*
2367 * NVM read access and content parsing. We do not support
2368 * external NVM or writing NVM.
2369 * iwlwifi/mvm/nvm.c
2370 */
2371
2372 /* list of NVM sections we are allowed/need to read */
2373 static const int nvm_to_read[] = {
2374 IWM_NVM_SECTION_TYPE_HW,
2375 IWM_NVM_SECTION_TYPE_SW,
2376 IWM_NVM_SECTION_TYPE_CALIBRATION,
2377 IWM_NVM_SECTION_TYPE_PRODUCTION,
2378 };
2379
2380 /* Default NVM size to read */
2381 #define IWM_NVM_DEFAULT_CHUNK_SIZE (2*1024)
2382 #define IWM_MAX_NVM_SECTION_SIZE 7000
2383
2384 #define IWM_NVM_WRITE_OPCODE 1
2385 #define IWM_NVM_READ_OPCODE 0
2386
2387 static int
2388 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
2389 uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
2390 {
2391 offset = 0;
2392 struct iwm_nvm_access_cmd nvm_access_cmd = {
2393 .offset = htole16(offset),
2394 .length = htole16(length),
2395 .type = htole16(section),
2396 .op_code = IWM_NVM_READ_OPCODE,
2397 };
2398 struct iwm_nvm_access_resp *nvm_resp;
2399 struct iwm_rx_packet *pkt;
2400 struct iwm_host_cmd cmd = {
2401 .id = IWM_NVM_ACCESS_CMD,
2402 .flags = IWM_CMD_SYNC | IWM_CMD_WANT_SKB |
2403 IWM_CMD_SEND_IN_RFKILL,
2404 .data = { &nvm_access_cmd, },
2405 };
2406 int ret, bytes_read, offset_read;
2407 uint8_t *resp_data;
2408
2409 cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
2410
2411 ret = iwm_send_cmd(sc, &cmd);
2412 if (ret)
2413 return ret;
2414
2415 pkt = cmd.resp_pkt;
2416 if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
2417 DPRINTF(("%s: Bad return from IWM_NVM_ACCES_COMMAND (0x%08X)\n",
2418 DEVNAME(sc), pkt->hdr.flags));
2419 ret = EIO;
2420 goto exit;
2421 }
2422
2423 /* Extract NVM response */
2424 nvm_resp = (void *)pkt->data;
2425
2426 ret = le16toh(nvm_resp->status);
2427 bytes_read = le16toh(nvm_resp->length);
2428 offset_read = le16toh(nvm_resp->offset);
2429 resp_data = nvm_resp->data;
2430 if (ret) {
2431 DPRINTF(("%s: NVM access command failed with status %d\n",
2432 DEVNAME(sc), ret));
2433 ret = EINVAL;
2434 goto exit;
2435 }
2436
2437 if (offset_read != offset) {
2438 DPRINTF(("%s: NVM ACCESS response with invalid offset %d\n",
2439 DEVNAME(sc), offset_read));
2440 ret = EINVAL;
2441 goto exit;
2442 }
2443
2444 memcpy(data + offset, resp_data, bytes_read);
2445 *len = bytes_read;
2446
2447 exit:
2448 iwm_free_resp(sc, &cmd);
2449 return ret;
2450 }
2451
2452 /*
2453 * Reads an NVM section completely.
2454 * NICs prior to 7000 family doesn't have a real NVM, but just read
2455 * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
2456 * by uCode, we need to manually check in this case that we don't
2457 * overflow and try to read more than the EEPROM size.
2458 * For 7000 family NICs, we supply the maximal size we can read, and
2459 * the uCode fills the response with as much data as we can,
2460 * without overflowing, so no check is needed.
2461 */
/*
 * Read one NVM section in chunk-sized pieces, accumulating into the
 * caller's buffer, until a read returns fewer bytes than requested.
 * On return *len holds the total section length read.
 */
static int
iwm_nvm_read_section(struct iwm_softc *sc,
	uint16_t section, uint8_t *data, uint16_t *len)
{
	uint16_t length, seglen;
	int error;

	/* Set nvm section read length */
	length = seglen = IWM_NVM_DEFAULT_CHUNK_SIZE;
	*len = 0;

	/*
	 * Read the NVM until exhausted (reading less than requested).
	 * NOTE(review): *len grows without being checked against the
	 * caller's buffer size (IWM_OTP_LOW_IMAGE_SIZE in iwm_nvm_init)
	 * -- confirm that no section can exceed it.
	 */
	while (seglen == length) {
		error = iwm_nvm_read_chunk(sc,
		    section, *len, length, data, &seglen);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "Cannot read NVM from section %d offset %d, "
			    "length %d\n", section, *len, length);
			return error;
		}
		*len += seglen;
	}

	DPRINTFN(4, ("NVM section %d read completed\n", section));
	return 0;
}
2489
2490 /*
2491 * BEGIN IWM_NVM_PARSE
2492 */
2493
2494 /* iwlwifi/iwl-nvm-parse.c */
2495
2496 /* NVM offsets (in words) definitions */
enum wkp_nvm_offsets {
	/* NVM HW-Section offset (in words) definitions */
	IWM_HW_ADDR = 0x15,	/* MAC address (bytes stored pairwise swapped) */

	/* NVM SW-Section offset (in words) definitions */
	IWM_NVM_SW_SECTION = 0x1C0,
	/* the following offsets are relative to the start of the SW section */
	IWM_NVM_VERSION = 0,
	IWM_RADIO_CFG = 1,
	IWM_SKU = 2,
	IWM_N_HW_ADDRS = 3,
	IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,

	/* NVM calibration section offset (in words) definitions */
	IWM_NVM_CALIB_SECTION = 0x2B8,
	/* relative to the start of the calibration section */
	IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
};
2513
2514 /* SKU Capabilities (actual values from NVM definition) */
enum nvm_sku_bits {
	IWM_NVM_SKU_CAP_BAND_24GHZ = (1 << 0),	/* 2.4GHz band enabled */
	IWM_NVM_SKU_CAP_BAND_52GHZ = (1 << 1),	/* 5.2GHz band enabled */
	IWM_NVM_SKU_CAP_11N_ENABLE = (1 << 2),	/* 802.11n allowed */
	IWM_NVM_SKU_CAP_11AC_ENABLE = (1 << 3),	/* 802.11ac allowed */
};
2521
2522 /* radio config bits (actual values from NVM definition) */
2523 #define IWM_NVM_RF_CFG_DASH_MSK(x) (x & 0x3) /* bits 0-1 */
2524 #define IWM_NVM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */
2525 #define IWM_NVM_RF_CFG_TYPE_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */
2526 #define IWM_NVM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */
2527 #define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */
2528 #define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
2529
2530 #define DEFAULT_MAX_TX_POWER 16
2531
2532 /**
2533 * enum iwm_nvm_channel_flags - channel flags in NVM
2534 * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
2535 * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
2536 * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
2537 * @IWM_NVM_CHANNEL_RADAR: radar detection required
2538 * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
2539 * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
2540 * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
2541 * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
2542 * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
2543 */
enum iwm_nvm_channel_flags {
	IWM_NVM_CHANNEL_VALID = (1 << 0),	/* usable for this SKU/geo */
	IWM_NVM_CHANNEL_IBSS = (1 << 1),	/* usable as an IBSS channel */
	IWM_NVM_CHANNEL_ACTIVE = (1 << 3),	/* active scanning allowed */
	IWM_NVM_CHANNEL_RADAR = (1 << 4),	/* radar detection required */
	IWM_NVM_CHANNEL_DFS = (1 << 7),		/* dynamic freq selection candidate */
	IWM_NVM_CHANNEL_WIDE = (1 << 8),	/* 20 MHz channel okay */
	IWM_NVM_CHANNEL_40MHZ = (1 << 9),	/* 40 MHz channel okay */
	IWM_NVM_CHANNEL_80MHZ = (1 << 10),	/* 80 MHz channel okay */
	IWM_NVM_CHANNEL_160MHZ = (1 << 11),	/* 160 MHz channel okay */
};
2555
/*
 * Build the net80211 channel list from the NVM channel flag words.
 * Channels marked invalid (or 5GHz entries when the SKU disables
 * that band) are skipped; channels without the ACTIVE flag are
 * restricted to passive scanning.
 */
static void
iwm_init_channel_map(struct iwm_softc *sc, const uint16_t * const nvm_ch_flags)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_nvm_data *data = &sc->sc_nvm;
	int ch_idx;
	struct ieee80211_channel *channel;
	uint16_t ch_flags;
	int is_5ghz;
	int flags, hw_value;

	for (ch_idx = 0; ch_idx < __arraycount(iwm_nvm_channels); ch_idx++) {
		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);

		/* Drop 5GHz channels when the SKU disables the band. */
		if (ch_idx >= IWM_NUM_2GHZ_CHANNELS &&
		    !data->sku_cap_band_52GHz_enable)
			ch_flags &= ~IWM_NVM_CHANNEL_VALID;

		if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
			DPRINTF(("Ch. %d Flags %x [%sGHz] - No traffic\n",
			    iwm_nvm_channels[ch_idx],
			    ch_flags,
			    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
			    "5.2" : "2.4"));
			continue;
		}

		/* Channel number doubles as the index into ic_channels. */
		hw_value = iwm_nvm_channels[ch_idx];
		channel = &ic->ic_channels[hw_value];

		is_5ghz = ch_idx >= IWM_NUM_2GHZ_CHANNELS;
		if (!is_5ghz) {
			flags = IEEE80211_CHAN_2GHZ;
			channel->ic_flags
			    = IEEE80211_CHAN_CCK
			    | IEEE80211_CHAN_OFDM
			    | IEEE80211_CHAN_DYN
			    | IEEE80211_CHAN_2GHZ;
		} else {
			flags = IEEE80211_CHAN_5GHZ;
			channel->ic_flags =
			    IEEE80211_CHAN_A;
		}
		channel->ic_freq = ieee80211_ieee2mhz(hw_value, flags);

		/* Active scanning not permitted: mark passive-only. */
		if (!(ch_flags & IWM_NVM_CHANNEL_ACTIVE))
			channel->ic_flags |= IEEE80211_CHAN_PASSIVE;
	}
}
2605
/*
 * Parse the raw NVM sections into sc->sc_nvm: NVM version, radio
 * configuration, SKU capabilities, antenna masks, crystal calibration
 * words, MAC address and the channel map.  Returns EINVAL if the
 * radio configuration reports no valid TX or RX antennas.
 */
static int
iwm_parse_nvm_data(struct iwm_softc *sc,
	const uint16_t *nvm_hw, const uint16_t *nvm_sw,
	const uint16_t *nvm_calib, uint8_t tx_chains, uint8_t rx_chains)
{
	struct iwm_nvm_data *data = &sc->sc_nvm;
	uint8_t hw_addr[ETHER_ADDR_LEN];
	uint16_t radio_cfg, sku;

	data->nvm_version = le16_to_cpup(nvm_sw + IWM_NVM_VERSION);

	/* Unpack the radio configuration word into its bit fields. */
	radio_cfg = le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
	data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
	data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
	data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
	data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
	data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK(radio_cfg);
	data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK(radio_cfg);

	sku = le16_to_cpup(nvm_sw + IWM_SKU);
	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
#ifndef IWM_NO_5GHZ
	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
#else
	data->sku_cap_band_52GHz_enable = 0;
#endif
	/* 11n is force-disabled here regardless of the SKU bit. */
	data->sku_cap_11n_enable = 0;

	if (!data->valid_tx_ant || !data->valid_rx_ant) {
		DPRINTF(("%s: invalid antennas (0x%x, 0x%x)\n", DEVNAME(sc),
		    data->valid_tx_ant, data->valid_rx_ant));
		return EINVAL;
	}

	data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);

	/*
	 * NOTE(review): stored without a byte-order conversion, unlike
	 * the other NVM words read above -- confirm the consumer of
	 * xtal_calib expects raw little-endian words.
	 */
	data->xtal_calib[0] = *(nvm_calib + IWM_XTAL_CALIB);
	data->xtal_calib[1] = *(nvm_calib + IWM_XTAL_CALIB + 1);

	/* The byte order is little endian 16 bit, meaning 214365 */
	memcpy(hw_addr, nvm_hw + IWM_HW_ADDR, ETHER_ADDR_LEN);
	data->hw_addr[0] = hw_addr[1];
	data->hw_addr[1] = hw_addr[0];
	data->hw_addr[2] = hw_addr[3];
	data->hw_addr[3] = hw_addr[2];
	data->hw_addr[4] = hw_addr[5];
	data->hw_addr[5] = hw_addr[4];

	iwm_init_channel_map(sc, &nvm_sw[IWM_NVM_CHANNELS]);
	data->calib_version = 255; /* TODO:
					this value will prevent some checks from
					failing, we need to check if this
					field is still needed, and if it does,
					where is it in the NVM */

	return 0;
}
2663
2664 /*
2665 * END NVM PARSE
2666 */
2667
/* One NVM section image as copied out of the device. */
struct iwm_nvm_section {
	uint16_t length;	/* number of valid bytes in data */
	const uint8_t *data;	/* kmem-allocated copy of the section */
};
2672
2673 #define IWM_FW_VALID_TX_ANT(sc) \
2674 ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_TX_CHAIN) \
2675 >> IWM_FW_PHY_CFG_TX_CHAIN_POS)
2676 #define IWM_FW_VALID_RX_ANT(sc) \
2677 ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RX_CHAIN) \
2678 >> IWM_FW_PHY_CFG_RX_CHAIN_POS)
2679
2680 static int
2681 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
2682 {
2683 const uint16_t *hw, *sw, *calib;
2684
2685 /* Checking for required sections */
2686 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2687 !sections[IWM_NVM_SECTION_TYPE_HW].data) {
2688 DPRINTF(("%s: Can't parse empty NVM sections\n", DEVNAME(sc)));
2689 return ENOENT;
2690 }
2691
2692 hw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_HW].data;
2693 sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
2694 calib = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
2695 return iwm_parse_nvm_data(sc, hw, sw, calib,
2696 IWM_FW_VALID_TX_ANT(sc), IWM_FW_VALID_RX_ANT(sc));
2697 }
2698
2699 static int
2700 iwm_nvm_init(struct iwm_softc *sc)
2701 {
2702 struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
2703 int i, section, error;
2704 uint16_t len;
2705 uint8_t *nvm_buffer, *temp;
2706
2707 /* Read From FW NVM */
2708 DPRINTF(("Read NVM\n"));
2709
2710 /* TODO: find correct NVM max size for a section */
2711 nvm_buffer = kmem_alloc(IWM_OTP_LOW_IMAGE_SIZE, KM_SLEEP);
2712 for (i = 0; i < __arraycount(nvm_to_read); i++) {
2713 section = nvm_to_read[i];
2714 KASSERT(section <= __arraycount(nvm_sections));
2715
2716 error = iwm_nvm_read_section(sc, section, nvm_buffer, &len);
2717 if (error)
2718 break;
2719
2720 temp = kmem_alloc(len, KM_SLEEP);
2721 memcpy(temp, nvm_buffer, len);
2722 nvm_sections[section].data = temp;
2723 nvm_sections[section].length = len;
2724 }
2725 kmem_free(nvm_buffer, IWM_OTP_LOW_IMAGE_SIZE);
2726 if (error)
2727 return error;
2728
2729 return iwm_parse_nvm_sections(sc, nvm_sections);
2730 }
2731
2732 /*
2733 * Firmware loading gunk. This is kind of a weird hybrid between the
2734 * iwn driver and the Linux iwlwifi driver.
2735 */
2736
2737 static int
2738 iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
2739 const uint8_t *section, uint32_t byte_cnt)
2740 {
2741 struct iwm_dma_info *dma = &sc->fw_dma;
2742 int error;
2743
2744 /* Copy firmware section into pre-allocated DMA-safe memory. */
2745 memcpy(dma->vaddr, section, byte_cnt);
2746 bus_dmamap_sync(sc->sc_dmat,
2747 dma->map, 0, byte_cnt, BUS_DMASYNC_PREWRITE);
2748
2749 if (!iwm_nic_lock(sc))
2750 return EBUSY;
2751
2752 sc->sc_fw_chunk_done = 0;
2753
2754 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2755 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
2756 IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
2757 dst_addr);
2758 IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
2759 dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
2760 IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
2761 (iwm_get_dma_hi_addr(dma->paddr)
2762 << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
2763 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
2764 1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
2765 1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
2766 IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
2767 IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
2768 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
2769 IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
2770 IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
2771
2772 iwm_nic_unlock(sc);
2773
2774 /* wait 1s for this segment to load */
2775 while (!sc->sc_fw_chunk_done)
2776 if ((error = tsleep(&sc->sc_fw, 0, "iwmfw", hz)) != 0)
2777 break;
2778
2779 return error;
2780 }
2781
2782 static int
2783 iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2784 {
2785 struct iwm_fw_sects *fws;
2786 int error, i, w;
2787 void *data;
2788 uint32_t dlen;
2789 uint32_t offset;
2790
2791 sc->sc_uc.uc_intr = 0;
2792
2793 fws = &sc->sc_fw.fw_sects[ucode_type];
2794 for (i = 0; i < fws->fw_count; i++) {
2795 data = fws->fw_sect[i].fws_data;
2796 dlen = fws->fw_sect[i].fws_len;
2797 offset = fws->fw_sect[i].fws_devoff;
2798 DPRINTF(("LOAD FIRMWARE type %d offset %u len %d\n",
2799 ucode_type, offset, dlen));
2800 error = iwm_firmware_load_chunk(sc, offset, data, dlen);
2801 if (error) {
2802 DPRINTF(("iwm_firmware_load_chunk() chunk %u of %u "
2803 "returned error %02d\n", i, fws->fw_count, error));
2804 return error;
2805 }
2806 }
2807
2808 /* wait for the firmware to load */
2809 IWM_WRITE(sc, IWM_CSR_RESET, 0);
2810
2811 for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++) {
2812 error = tsleep(&sc->sc_uc, 0, "iwmuc", hz/10);
2813 }
2814
2815 return error;
2816 }
2817
2818 /* iwlwifi: pcie/trans.c */
/*
 * Initialize the NIC, clear the rfkill handshake bits, enable host
 * interrupts and load the requested ucode image into the device.
 */
static int
iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
{
	int error;

	/* Ack/clear any pending interrupts before touching the NIC. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);

	if ((error = iwm_nic_init(sc)) != 0) {
		aprint_error_dev(sc->sc_dev, "Unable to init nic\n");
		return error;
	}

	/* make sure rfkill handshake bits are cleared */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);

	/* really make sure rfkill handshake bits are cleared */
	/* maybe we should write a few times more? just to make sure */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);

	/* Load the given image to the HW */
	error = iwm_load_firmware(sc, ucode_type);
	if (error) {
		aprint_error_dev(sc->sc_dev, "failed to load firmware: %d\n",
		    error);
	}
	return error;
}
2853
/*
 * Called once the firmware has reported itself alive.  The scheduler
 * base address is currently unused here; iwm_post_alive() performs
 * the remaining setup.
 */
static int
iwm_fw_alive(struct iwm_softc *sc, uint32_t sched_base)
{
	return iwm_post_alive(sc);
}
2859
2860 static int
2861 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2862 {
2863 struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2864 .valid = htole32(valid_tx_ant),
2865 };
2866
2867 return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2868 IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2869 }
2870
2871 /* iwlwifi: mvm/fw.c */
/*
 * Send the PHY configuration word and the per-image default
 * calibration triggers to the firmware.
 */
static int
iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
{
	struct iwm_phy_cfg_cmd phy_cfg_cmd;
	enum iwm_ucode_type ucode_type = sc->sc_uc_current;

	/* Set parameters */
	phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
	/* Calibration triggers depend on which ucode image is running. */
	phy_cfg_cmd.calib_control.event_trigger =
	    sc->sc_default_calib[ucode_type].event_trigger;
	phy_cfg_cmd.calib_control.flow_trigger =
	    sc->sc_default_calib[ucode_type].flow_trigger;

	DPRINTFN(10, ("Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg));
	return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
}
2889
2890 static int
2891 iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
2892 enum iwm_ucode_type ucode_type)
2893 {
2894 enum iwm_ucode_type old_type = sc->sc_uc_current;
2895 int error;
2896
2897 if ((error = iwm_read_firmware(sc)) != 0)
2898 return error;
2899
2900 sc->sc_uc_current = ucode_type;
2901 error = iwm_start_fw(sc, ucode_type);
2902 if (error) {
2903 sc->sc_uc_current = old_type;
2904 return error;
2905 }
2906
2907 return iwm_fw_alive(sc, sc->sched_base);
2908 }
2909
2910 /*
2911 * mvm misc bits
2912 */
2913
2914 /*
2915 * follows iwlwifi/fw.c
2916 */
/*
 * Run the INIT firmware image.  With justnvm set, only go far enough
 * to read the NVM (MAC address, channel data) and allocate the scan
 * command buffer; otherwise also send the antenna and phy
 * configuration and wait for the init-complete notification.
 */
static int
iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
{
	int error;

	/* do not operate with rfkill switch turned on */
	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
		aprint_error_dev(sc->sc_dev,
		    "radio is disabled by hardware switch\n");
		return EPERM;
	}

	sc->sc_init_complete = 0;
	if ((error = iwm_mvm_load_ucode_wait_alive(sc,
	    IWM_UCODE_TYPE_INIT)) != 0)
		return error;

	if (justnvm) {
		if ((error = iwm_nvm_init(sc)) != 0) {
			aprint_error_dev(sc->sc_dev, "failed to read nvm\n");
			return error;
		}
		memcpy(&sc->sc_ic.ic_myaddr,
		    &sc->sc_nvm.hw_addr, ETHER_ADDR_LEN);

		/*
		 * Size the scan command for the maximum probe request
		 * length and channel count the firmware supports.
		 */
		sc->sc_scan_cmd_len = sizeof(struct iwm_scan_cmd)
		    + sc->sc_capa_max_probe_len
		    + IWM_MAX_NUM_SCAN_CHANNELS
		    * sizeof(struct iwm_scan_channel);
		sc->sc_scan_cmd = kmem_alloc(sc->sc_scan_cmd_len, KM_SLEEP);

		return 0;
	}

	/* Send TX valid antennas before triggering calibrations */
	if ((error = iwm_send_tx_ant_cfg(sc, IWM_FW_VALID_TX_ANT(sc))) != 0)
		return error;

	/*
	 * Send phy configurations command to init uCode
	 * to start the 16.0 uCode init image internal calibrations.
	 */
	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0 ) {
		DPRINTF(("%s: failed to run internal calibration: %d\n",
		    DEVNAME(sc), error));
		return error;
	}

	/*
	 * Nothing to do but wait for the init complete notification
	 * from the firmware
	 */
	while (!sc->sc_init_complete)
		if ((error = tsleep(&sc->sc_init_complete,
		    0, "iwminit", 2*hz)) != 0)
			break;

	return error;
}
2976
2977 /*
2978 * receive side
2979 */
2980
2981 /* (re)stock rx ring, called at init-time and at runtime */
/*
 * Allocate an mbuf (cluster or external buffer, depending on size),
 * DMA-map it and install it in RX ring slot idx, updating the ring's
 * hardware descriptor.  If the slot already held a buffer, its old
 * mapping is torn down first; a mapping failure for the replacement
 * is then fatal, since the hardware descriptor no longer references a
 * valid buffer.
 */
static int
iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
{
	struct iwm_rx_ring *ring = &sc->rxq;
	struct iwm_rx_data *data = &ring->data[idx];
	struct mbuf *m;
	int error;
	int fatal = 0;

	m = m_gethdr(M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;

	/* Small buffers fit a cluster; larger ones need MEXTMALLOC. */
	if (size <= MCLBYTES) {
		MCLGET(m, M_DONTWAIT);
	} else {
		MEXTMALLOC(m, IWM_RBUF_SIZE, M_DONTWAIT);
	}
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return ENOBUFS;
	}

	if (data->m != NULL) {
		bus_dmamap_unload(sc->sc_dmat, data->map);
		fatal = 1;	/* old buffer is gone; we must succeed now */
	}

	m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
	if ((error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT)) != 0) {
		/* XXX */
		if (fatal)
			panic("iwm: could not load RX mbuf");
		m_freem(m);
		return error;
	}
	data->m = m;
	bus_dmamap_sync(sc->sc_dmat, data->map, 0, size, BUS_DMASYNC_PREREAD);

	/* Update RX descriptor. */
	/* The hardware takes the physical address shifted right 8 bits. */
	ring->desc[idx] = htole32(data->map->dm_segs[0].ds_addr >> 8);
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
	    idx * sizeof(uint32_t), sizeof(uint32_t), BUS_DMASYNC_PREWRITE);

	return 0;
}
3029
3030 /* iwlwifi: mvm/rx.c */
3031 #define IWM_RSSI_OFFSET 50
/*
 * Compute the receive signal strength (dBm) from the legacy AGC and
 * RSSI words of the PHY info, returning the stronger of the A and B
 * chains.
 */
static int
iwm_mvm_calc_rssi(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
{
	int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
	uint32_t agc_a, agc_b;
	uint32_t val;

	/* Both chains' AGC values are packed into one word. */
	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_AGC_IDX]);
	agc_a = (val & IWM_OFDM_AGC_A_MSK) >> IWM_OFDM_AGC_A_POS;
	agc_b = (val & IWM_OFDM_AGC_B_MSK) >> IWM_OFDM_AGC_B_POS;

	/* Likewise for the in-band RSSI values. */
	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_RSSI_AB_IDX]);
	rssi_a = (val & IWM_OFDM_RSSI_INBAND_A_MSK) >> IWM_OFDM_RSSI_A_POS;
	rssi_b = (val & IWM_OFDM_RSSI_INBAND_B_MSK) >> IWM_OFDM_RSSI_B_POS;

	/*
	 * dBm = rssi dB - agc dB - constant.
	 * Higher AGC (higher radio gain) means lower signal.
	 */
	rssi_a_dbm = rssi_a - IWM_RSSI_OFFSET - agc_a;
	rssi_b_dbm = rssi_b - IWM_RSSI_OFFSET - agc_b;
	max_rssi_dbm = MAX(rssi_a_dbm, rssi_b_dbm);

	DPRINTF(("Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
	    rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b));

	return max_rssi_dbm;
}
3060
3061 /* iwlwifi: mvm/rx.c */
3062 /*
3063 * iwm_mvm_get_signal_strength - use new rx PHY INFO API
3064 * values are reported by the fw as positive values - need to negate
3065 * to obtain their dBM. Account for missing antennas by replacing 0
3066 * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
3067 */
static int
iwm_mvm_get_signal_strength(struct iwm_softc *sc,
	struct iwm_rx_phy_info *phy_info)
{
	int energy_a, energy_b, energy_c, max_energy;
	uint32_t val;

	/* The three per-antenna energy fields share one 32-bit word. */
	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
	energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
	    IWM_RX_INFO_ENERGY_ANT_A_POS;
	energy_a = energy_a ? -energy_a : -256;	/* 0 == missing antenna */
	energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
	    IWM_RX_INFO_ENERGY_ANT_B_POS;
	energy_b = energy_b ? -energy_b : -256;
	energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
	    IWM_RX_INFO_ENERGY_ANT_C_POS;
	energy_c = energy_c ? -energy_c : -256;
	/* Report the strongest antenna. */
	max_energy = MAX(energy_a, energy_b);
	max_energy = MAX(max_energy, energy_c);

	DPRINTFN(12, ("energy In A %d B %d C %d, and max %d\n",
	    energy_a, energy_b, energy_c, max_energy));

	return max_energy;
}
3093
/*
 * Handle an IWM_RX_PHY_INFO notification: cache the PHY metadata of
 * the frame that will arrive in the following IWM_REPLY_RX_MPDU_CMD.
 * iwm_mvm_rx_rx_mpdu() reads it back from sc_last_phy_info.
 */
static void
iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc,
	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
{
	struct iwm_rx_phy_info *phy_info = (void *)pkt->data;

	DPRINTFN(20, ("received PHY stats\n"));
	/* Make the payload following the packet header CPU-visible. */
	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
	    sizeof(*phy_info), BUS_DMASYNC_POSTREAD);

	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
}
3106
3107 /*
3108 * Retrieve the average noise (in dBm) among receivers.
3109 */
3110 static int
3111 iwm_get_noise(const struct iwm_mvm_statistics_rx_non_phy *stats)
3112 {
3113 int i, total, nbant, noise;
3114
3115 total = nbant = noise = 0;
3116 for (i = 0; i < 3; i++) {
3117 noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
3118 if (noise) {
3119 total += noise;
3120 nbant++;
3121 }
3122 }
3123
3124 /* There should be at least one antenna but check anyway. */
3125 return (nbant == 0) ? -127 : (total / nbant) - 107;
3126 }
3127
3128 /*
3129 * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
3130 *
3131 * Handles the actual data of the Rx packet from the fw
3132 */
3133 static void
3134 iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc,
3135 struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
3136 {
3137 struct ieee80211com *ic = &sc->sc_ic;
3138 struct ieee80211_frame *wh;
3139 struct ieee80211_node *ni;
3140 struct ieee80211_channel *c = NULL;
3141 struct mbuf *m;
3142 struct iwm_rx_phy_info *phy_info;
3143 struct iwm_rx_mpdu_res_start *rx_res;
3144 int device_timestamp;
3145 uint32_t len;
3146 uint32_t rx_pkt_status;
3147 int rssi;
3148
3149 bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
3150 BUS_DMASYNC_POSTREAD);
3151
3152 phy_info = &sc->sc_last_phy_info;
3153 rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
3154 wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
3155 len = le16toh(rx_res->byte_count);
3156 rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
3157
3158 m = data->m;
3159 m->m_data = pkt->data + sizeof(*rx_res);
3160 m->m_pkthdr.len = m->m_len = len;
3161
3162 if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
3163 DPRINTF(("dsp size out of range [0,20]: %d\n",
3164 phy_info->cfg_phy_cnt));
3165 return;
3166 }
3167
3168 if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
3169 !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
3170 DPRINTF(("Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status));
3171 return; /* drop */
3172 }
3173
3174 device_timestamp = le32toh(phy_info->system_timestamp);
3175
3176 if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_RX_ENERGY_API) {
3177 rssi = iwm_mvm_get_signal_strength(sc, phy_info);
3178 } else {
3179 rssi = iwm_mvm_calc_rssi(sc, phy_info);
3180 }
3181 rssi = -rssi;
3182
3183 if (ic->ic_state == IEEE80211_S_SCAN)
3184 iwm_fix_channel(ic, m);
3185
3186 /* replenish ring for the buffer we're going to feed to the sharks */
3187 if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0)
3188 return;
3189
3190 m->m_pkthdr.rcvif = IC2IFP(ic);
3191
3192 if (sc->sc_scanband == IEEE80211_CHAN_5GHZ) {
3193 if (le32toh(phy_info->channel) < __arraycount(ic->ic_channels))
3194 c = &ic->ic_channels[le32toh(phy_info->channel)];
3195 }
3196
3197 ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3198 if (c)
3199 ni->ni_chan = c;
3200
3201 if (sc->sc_drvbpf != NULL) {
3202 struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3203
3204 tap->wr_flags = 0;
3205 if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3206 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3207 tap->wr_chan_freq =
3208 htole16(ic->ic_channels[phy_info->channel].ic_freq);
3209 tap->wr_chan_flags =
3210 htole16(ic->ic_channels[phy_info->channel].ic_flags);
3211 tap->wr_dbm_antsignal = (int8_t)rssi;
3212 tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3213 tap->wr_tsft = phy_info->system_timestamp;
3214 switch (phy_info->rate) {
3215 /* CCK rates. */
3216 case 10: tap->wr_rate = 2; break;
3217 case 20: tap->wr_rate = 4; break;
3218 case 55: tap->wr_rate = 11; break;
3219 case 110: tap->wr_rate = 22; break;
3220 /* OFDM rates. */
3221 case 0xd: tap->wr_rate = 12; break;
3222 case 0xf: tap->wr_rate = 18; break;
3223 case 0x5: tap->wr_rate = 24; break;
3224 case 0x7: tap->wr_rate = 36; break;
3225 case 0x9: tap->wr_rate = 48; break;
3226 case 0xb: tap->wr_rate = 72; break;
3227 case 0x1: tap->wr_rate = 96; break;
3228 case 0x3: tap->wr_rate = 108; break;
3229 /* Unknown rate: should not happen. */
3230 default: tap->wr_rate = 0;
3231 }
3232
3233 bpf_mtap2(sc->sc_drvbpf, tap, sc->sc_rxtap_len, m);
3234 }
3235 ieee80211_input(ic, m, ni, rssi, device_timestamp);
3236 ieee80211_free_node(ni);
3237 }
3238
3239 static void
3240 iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3241 struct iwm_node *in)
3242 {
3243 struct ieee80211com *ic = &sc->sc_ic;
3244 struct ifnet *ifp = IC2IFP(ic);
3245 struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
3246 int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3247 int failack = tx_resp->failure_frame;
3248
3249 KASSERT(tx_resp->frame_count == 1);
3250
3251 /* Update rate control statistics. */
3252 in->in_amn.amn_txcnt++;
3253 if (failack > 0) {
3254 in->in_amn.amn_retrycnt++;
3255 }
3256
3257 if (status != IWM_TX_STATUS_SUCCESS &&
3258 status != IWM_TX_STATUS_DIRECT_DONE)
3259 ifp->if_oerrors++;
3260 else
3261 ifp->if_opackets++;
3262 }
3263
/*
 * IWM_TX_CMD response handler: reclaim the TX ring slot named by the
 * response's (qid, idx), free the transmitted mbuf, release the node
 * reference taken at transmit time, and restart output if the ring
 * drains below the low watermark.
 */
static void
iwm_mvm_rx_tx_cmd(struct iwm_softc *sc,
	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = IC2IFP(ic);
	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
	int idx = cmd_hdr->idx;
	int qid = cmd_hdr->qid;
	struct iwm_tx_ring *ring = &sc->txq[qid];
	struct iwm_tx_data *txd = &ring->data[idx];
	struct iwm_node *in = txd->in;

	/* Guard against duplicate completions for the same slot. */
	if (txd->done) {
		DPRINTF(("%s: got tx interrupt that's already been handled!\n",
		    DEVNAME(sc)));
		return;
	}

	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
	    BUS_DMASYNC_POSTREAD);

	/* The hardware answered; cancel the watchdog countdown. */
	sc->sc_tx_timer = 0;

	iwm_mvm_rx_tx_cmd_single(sc, pkt, in);

	/* Unmap and free mbuf. */
	bus_dmamap_sync(sc->sc_dmat, txd->map, 0, txd->map->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, txd->map);
	m_freem(txd->m);

	DPRINTFN(8, ("free txd %p, in %p\n", txd, txd->in));
	KASSERT(txd->done == 0);
	txd->done = 1;
	KASSERT(txd->in);

	txd->m = NULL;
	txd->in = NULL;
	/* Drop the node reference held since iwm_tx(). */
	ieee80211_free_node(&in->in_ni);

	if (--ring->queued < IWM_TX_RING_LOMARK) {
		sc->qfullmsk &= ~(1 << ring->qid);
		if (sc->qfullmsk == 0 && (ifp->if_flags & IFF_OACTIVE)) {
			ifp->if_flags &= ~IFF_OACTIVE;
			/*
			 * Well, we're in interrupt context, but then again
			 * I guess net80211 does all sorts of stunts in
			 * interrupt context, so maybe this is no biggie.
			 */
			(*ifp->if_start)(ifp);
		}
	}
}
3318
3319 /*
3320 * BEGIN iwlwifi/mvm/binding.c
3321 */
3322
3323 static int
3324 iwm_mvm_binding_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action)
3325 {
3326 struct iwm_binding_cmd cmd;
3327 struct iwm_mvm_phy_ctxt *phyctxt = in->in_phyctxt;
3328 int i, ret;
3329 uint32_t status;
3330
3331 memset(&cmd, 0, sizeof(cmd));
3332
3333 cmd.id_and_color
3334 = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
3335 cmd.action = htole32(action);
3336 cmd.phy = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
3337
3338 cmd.macs[0] = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
3339 for (i = 1; i < IWM_MAX_MACS_IN_BINDING; i++)
3340 cmd.macs[i] = htole32(IWM_FW_CTXT_INVALID);
3341
3342 status = 0;
3343 ret = iwm_mvm_send_cmd_pdu_status(sc, IWM_BINDING_CONTEXT_CMD,
3344 sizeof(cmd), &cmd, &status);
3345 if (ret) {
3346 DPRINTF(("%s: Failed to send binding (action:%d): %d\n",
3347 DEVNAME(sc), action, ret));
3348 return ret;
3349 }
3350
3351 if (status) {
3352 DPRINTF(("%s: Binding command failed: %u\n", DEVNAME(sc),
3353 status));
3354 ret = EIO;
3355 }
3356
3357 return ret;
3358 }
3359
3360 static int
3361 iwm_mvm_binding_update(struct iwm_softc *sc, struct iwm_node *in, int add)
3362 {
3363 return iwm_mvm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD);
3364 }
3365
3366 static int
3367 iwm_mvm_binding_add_vif(struct iwm_softc *sc, struct iwm_node *in)
3368 {
3369 return iwm_mvm_binding_update(sc, in, IWM_FW_CTXT_ACTION_ADD);
3370 }
3371
3372 /*
3373 * END iwlwifi/mvm/binding.c
3374 */
3375
3376 /*
3377 * BEGIN iwlwifi/mvm/phy-ctxt.c
3378 */
3379
3380 /*
3381 * Construct the generic fields of the PHY context command
3382 */
3383 static void
3384 iwm_mvm_phy_ctxt_cmd_hdr(struct iwm_softc *sc, struct iwm_mvm_phy_ctxt *ctxt,
3385 struct iwm_phy_context_cmd *cmd, uint32_t action, uint32_t apply_time)
3386 {
3387 memset(cmd, 0, sizeof(struct iwm_phy_context_cmd));
3388
3389 cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(ctxt->id,
3390 ctxt->color));
3391 cmd->action = htole32(action);
3392 cmd->apply_time = htole32(apply_time);
3393 }
3394
3395 /*
3396 * Add the phy configuration to the PHY context command
3397 */
3398 static void
3399 iwm_mvm_phy_ctxt_cmd_data(struct iwm_softc *sc,
3400 struct iwm_phy_context_cmd *cmd, struct ieee80211_channel *chan,
3401 uint8_t chains_static, uint8_t chains_dynamic)
3402 {
3403 struct ieee80211com *ic = &sc->sc_ic;
3404 uint8_t active_cnt, idle_cnt;
3405
3406 cmd->ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
3407 IWM_PHY_BAND_24 : IWM_PHY_BAND_5;
3408
3409 cmd->ci.channel = ieee80211_chan2ieee(ic, chan);
3410 cmd->ci.width = IWM_PHY_VHT_CHANNEL_MODE20;
3411 cmd->ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
3412
3413 /* Set rx the chains */
3414 idle_cnt = chains_static;
3415 active_cnt = chains_dynamic;
3416
3417 cmd->rxchain_info = htole32(IWM_FW_VALID_RX_ANT(sc) <<
3418 IWM_PHY_RX_CHAIN_VALID_POS);
3419 cmd->rxchain_info |= htole32(idle_cnt << IWM_PHY_RX_CHAIN_CNT_POS);
3420 cmd->rxchain_info |= htole32(active_cnt <<
3421 IWM_PHY_RX_CHAIN_MIMO_CNT_POS);
3422
3423 cmd->txchain_info = htole32(IWM_FW_VALID_TX_ANT(sc));
3424 }
3425
3426 /*
3427 * Send a command
3428 * only if something in the configuration changed: in case that this is the
3429 * first time that the phy configuration is applied or in case that the phy
3430 * configuration changed from the previous apply.
3431 */
3432 static int
3433 iwm_mvm_phy_ctxt_apply(struct iwm_softc *sc,
3434 struct iwm_mvm_phy_ctxt *ctxt,
3435 uint8_t chains_static, uint8_t chains_dynamic,
3436 uint32_t action, uint32_t apply_time)
3437 {
3438 struct iwm_phy_context_cmd cmd;
3439 int ret;
3440
3441 /* Set the command header fields */
3442 iwm_mvm_phy_ctxt_cmd_hdr(sc, ctxt, &cmd, action, apply_time);
3443
3444 /* Set the command data */
3445 iwm_mvm_phy_ctxt_cmd_data(sc, &cmd, ctxt->channel,
3446 chains_static, chains_dynamic);
3447
3448 ret = iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONTEXT_CMD, IWM_CMD_SYNC,
3449 sizeof(struct iwm_phy_context_cmd), &cmd);
3450 if (ret) {
3451 DPRINTF(("PHY ctxt cmd error. ret=%d\n", ret));
3452 }
3453 return ret;
3454 }
3455
3456 /*
3457 * Send a command to add a PHY context based on the current HW configuration.
3458 */
3459 static int
3460 iwm_mvm_phy_ctxt_add(struct iwm_softc *sc, struct iwm_mvm_phy_ctxt *ctxt,
3461 struct ieee80211_channel *chan,
3462 uint8_t chains_static, uint8_t chains_dynamic)
3463 {
3464 ctxt->channel = chan;
3465 return iwm_mvm_phy_ctxt_apply(sc, ctxt,
3466 chains_static, chains_dynamic, IWM_FW_CTXT_ACTION_ADD, 0);
3467 }
3468
3469 /*
3470 * Send a command to modify the PHY context based on the current HW
3471 * configuration. Note that the function does not check that the configuration
3472 * changed.
3473 */
3474 static int
3475 iwm_mvm_phy_ctxt_changed(struct iwm_softc *sc,
3476 struct iwm_mvm_phy_ctxt *ctxt, struct ieee80211_channel *chan,
3477 uint8_t chains_static, uint8_t chains_dynamic)
3478 {
3479 ctxt->channel = chan;
3480 return iwm_mvm_phy_ctxt_apply(sc, ctxt,
3481 chains_static, chains_dynamic, IWM_FW_CTXT_ACTION_MODIFY, 0);
3482 }
3483
3484 /*
3485 * END iwlwifi/mvm/phy-ctxt.c
3486 */
3487
3488 /*
3489 * transmit side
3490 */
3491
3492 /*
3493 * Send a command to the firmware. We try to implement the Linux
3494 * driver interface for the routine.
3495 * mostly from if_iwn (iwn_cmd()).
3496 *
3497 * For now, we always copy the first part and map the second one (if it exists).
3498 */
3499 static int
3500 iwm_send_cmd(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
3501 {
3502 struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
3503 struct iwm_tfd *desc;
3504 struct iwm_tx_data *data;
3505 struct iwm_device_cmd *cmd;
3506 struct mbuf *m;
3507 bus_addr_t paddr;
3508 uint32_t addr_lo;
3509 int error, i, paylen, off, s;
3510 int code;
3511 int async, wantresp;
3512
3513 code = hcmd->id;
3514 async = hcmd->flags & IWM_CMD_ASYNC;
3515 wantresp = hcmd->flags & IWM_CMD_WANT_SKB;
3516
3517 for (i = 0, paylen = 0; i < __arraycount(hcmd->len); i++) {
3518 paylen += hcmd->len[i];
3519 }
3520
3521 /* if the command wants an answer, busy sc_cmd_resp */
3522 if (wantresp) {
3523 KASSERT(!async);
3524 while (sc->sc_wantresp != -1)
3525 tsleep(&sc->sc_wantresp, 0, "iwmcmdsl", 0);
3526 sc->sc_wantresp = ring->qid << 16 | ring->cur;
3527 DPRINTFN(12, ("wantresp is %x\n", sc->sc_wantresp));
3528 }
3529
3530 /*
3531 * Is the hardware still available? (after e.g. above wait).
3532 */
3533 s = splnet();
3534 if (sc->sc_flags & IWM_FLAG_STOPPED) {
3535 error = ENXIO;
3536 goto out;
3537 }
3538
3539 desc = &ring->desc[ring->cur];
3540 data = &ring->data[ring->cur];
3541
3542 if (paylen > sizeof(cmd->data)) {
3543 /* Command is too large */
3544 if (sizeof(cmd->hdr) + paylen > IWM_RBUF_SIZE) {
3545 error = EINVAL;
3546 goto out;
3547 }
3548 m = m_gethdr(M_DONTWAIT, MT_DATA);
3549 if (m == NULL) {
3550 error = ENOMEM;
3551 goto out;
3552 }
3553 MEXTMALLOC(m, IWM_RBUF_SIZE, M_DONTWAIT);
3554 if (!(m->m_flags & M_EXT)) {
3555 m_freem(m);
3556 error = ENOMEM;
3557 goto out;
3558 }
3559 cmd = mtod(m, struct iwm_device_cmd *);
3560 error = bus_dmamap_load(sc->sc_dmat, data->map, cmd,
3561 IWM_RBUF_SIZE, NULL, BUS_DMA_NOWAIT | BUS_DMA_WRITE);
3562 if (error != 0) {
3563 m_freem(m);
3564 goto out;
3565 }
3566 data->m = m;
3567 paddr = data->map->dm_segs[0].ds_addr;
3568 } else {
3569 cmd = &ring->cmd[ring->cur];
3570 paddr = data->cmd_paddr;
3571 }
3572
3573 cmd->hdr.code = code;
3574 cmd->hdr.flags = 0;
3575 cmd->hdr.qid = ring->qid;
3576 cmd->hdr.idx = ring->cur;
3577
3578 for (i = 0, off = 0; i < __arraycount(hcmd->data); i++) {
3579 if (hcmd->len[i] == 0)
3580 continue;
3581 memcpy(cmd->data + off, hcmd->data[i], hcmd->len[i]);
3582 off += hcmd->len[i];
3583 }
3584 KASSERT(off == paylen);
3585
3586 /* lo field is not aligned */
3587 addr_lo = htole32((uint32_t)paddr);
3588 memcpy(&desc->tbs[0].lo, &addr_lo, sizeof(uint32_t));
3589 desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(paddr)
3590 | ((sizeof(cmd->hdr) + paylen) << 4));
3591 desc->num_tbs = 1;
3592
3593 DPRINTFN(8, ("iwm_send_cmd 0x%x size=%zu %s\n",
3594 code, sizeof(cmd->hdr) + paylen, async ? " (async)" : ""));
3595
3596 if (paylen > sizeof(cmd->data)) {
3597 bus_dmamap_sync(sc->sc_dmat, data->map, 0,
3598 sizeof(cmd->hdr) + paylen, BUS_DMASYNC_PREWRITE);
3599 } else {
3600 bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
3601 (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
3602 sizeof(cmd->hdr) + paylen, BUS_DMASYNC_PREWRITE);
3603 }
3604 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
3605 (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
3606 sizeof (*desc), BUS_DMASYNC_PREWRITE);
3607
3608 IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
3609 IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
3610 if (!iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
3611 IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
3612 (IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
3613 IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000)) {
3614 DPRINTF(("%s: acquiring device failed\n", DEVNAME(sc)));
3615 error = EBUSY;
3616 goto out;
3617 }
3618
3619 #if 0
3620 iwm_update_sched(sc, ring->qid, ring->cur, 0, 0);
3621 #endif
3622 DPRINTF(("sending command 0x%x qid %d, idx %d\n",
3623 code, ring->qid, ring->cur));
3624
3625 /* Kick command ring. */
3626 ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
3627 IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3628
3629 if (!async) {
3630 /* m..m-mmyy-mmyyyy-mym-ym m-my generation */
3631 int generation = sc->sc_generation;
3632 error = tsleep(desc, PCATCH, "iwmcmd", hz);
3633 if (error == 0) {
3634 /* if hardware is no longer up, return error */
3635 if (generation != sc->sc_generation) {
3636 error = ENXIO;
3637 } else {
3638 hcmd->resp_pkt = (void *)sc->sc_cmd_resp;
3639 }
3640 }
3641 }
3642 out:
3643 if (wantresp && error != 0) {
3644 iwm_free_resp(sc, hcmd);
3645 }
3646 splx(s);
3647
3648 return error;
3649 }
3650
3651 /* iwlwifi: mvm/utils.c */
3652 static int
3653 iwm_mvm_send_cmd_pdu(struct iwm_softc *sc, uint8_t id,
3654 uint32_t flags, uint16_t len, const void *data)
3655 {
3656 struct iwm_host_cmd cmd = {
3657 .id = id,
3658 .len = { len, },
3659 .data = { data, },
3660 .flags = flags,
3661 };
3662
3663 return iwm_send_cmd(sc, &cmd);
3664 }
3665
/* iwlwifi: mvm/utils.c */
/*
 * Send a command synchronously and extract the 32-bit status word from
 * the firmware's response into *status.  Returns 0 on success or an
 * errno; the response buffer is always released before returning.
 */
static int
iwm_mvm_send_cmd_status(struct iwm_softc *sc,
	struct iwm_host_cmd *cmd, uint32_t *status)
{
	struct iwm_rx_packet *pkt;
	struct iwm_cmd_response *resp;
	int error, resp_len;

	//lockdep_assert_held(&mvm->mutex);

	/* Force synchronous operation; we parse the response below. */
	KASSERT((cmd->flags & IWM_CMD_WANT_SKB) == 0);
	cmd->flags |= IWM_CMD_SYNC | IWM_CMD_WANT_SKB;

	if ((error = iwm_send_cmd(sc, cmd)) != 0)
		return error;
	pkt = cmd->resp_pkt;

	/* Can happen if RFKILL is asserted */
	if (!pkt) {
		error = 0;
		goto out_free_resp;
	}

	if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
		error = EIO;
		goto out_free_resp;
	}

	/* The payload must be exactly one iwm_cmd_response. */
	resp_len = iwm_rx_packet_payload_len(pkt);
	if (resp_len != sizeof(*resp)) {
		error = EIO;
		goto out_free_resp;
	}

	resp = (void *)pkt->data;
	*status = le32toh(resp->status);
 out_free_resp:
	iwm_free_resp(sc, cmd);
	return error;
}
3707
3708 /* iwlwifi/mvm/utils.c */
3709 static int
3710 iwm_mvm_send_cmd_pdu_status(struct iwm_softc *sc, uint8_t id,
3711 uint16_t len, const void *data, uint32_t *status)
3712 {
3713 struct iwm_host_cmd cmd = {
3714 .id = id,
3715 .len = { len, },
3716 .data = { data, },
3717 };
3718
3719 return iwm_mvm_send_cmd_status(sc, &cmd, status);
3720 }
3721
3722 static void
3723 iwm_free_resp(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
3724 {
3725 KASSERT(sc->sc_wantresp != -1);
3726 KASSERT((hcmd->flags & (IWM_CMD_WANT_SKB|IWM_CMD_SYNC))
3727 == (IWM_CMD_WANT_SKB|IWM_CMD_SYNC));
3728 sc->sc_wantresp = -1;
3729 wakeup(&sc->sc_wantresp);
3730 }
3731
3732 /*
3733 * Process a "command done" firmware notification. This is where we wakeup
3734 * processes waiting for a synchronous command completion.
3735 * from if_iwn
3736 */
3737 static void
3738 iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3739 {
3740 struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
3741 struct iwm_tx_data *data;
3742
3743 if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
3744 return; /* Not a command ack. */
3745 }
3746
3747 data = &ring->data[pkt->hdr.idx];
3748
3749 /* If the command was mapped in an mbuf, free it. */
3750 if (data->m != NULL) {
3751 bus_dmamap_sync(sc->sc_dmat, data->map, 0,
3752 data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
3753 bus_dmamap_unload(sc->sc_dmat, data->map);
3754 m_freem(data->m);
3755 data->m = NULL;
3756 }
3757 wakeup(&ring->desc[pkt->hdr.idx]);
3758 }
3759
#if 0
/*
 * Update the TX scheduler byte-count table entry for a queue slot.
 * necessary only for block ack mode
 *
 * Fix: the bus_dmamap_sync() offset computations referenced an
 * undeclared variable "w"; compute the offsets from the addresses of
 * the table entries that were actually written, so this compiles if
 * the #if 0 is ever enabled.
 */
void
iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
    uint16_t len)
{
	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
	uint16_t w_val;

	scd_bc_tbl = sc->sched_dma.vaddr;

	len += 8; /* magic numbers came naturally from paris */
	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
		len = roundup(len, 4) / 4;

	w_val = htole16(sta_id << 12 | len);

	/* Update TX scheduler. */
	scd_bc_tbl[qid].tfd_offset[idx] = w_val;
	bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
	    (char *)(void *)&scd_bc_tbl[qid].tfd_offset[idx] -
	    (char *)(void *)sc->sched_dma.vaddr,
	    sizeof(uint16_t), BUS_DMASYNC_PREWRITE);

	/* I really wonder what this is ?!? */
	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
		scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
		bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
		    (char *)(void *)&scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] -
		    (char *)(void *)sc->sched_dma.vaddr,
		    sizeof (uint16_t), BUS_DMASYNC_PREWRITE);
	}
}
#endif
3795
3796 /*
3797 * Fill in various bit for management frames, and leave them
3798 * unfilled for data frames (firmware takes care of that).
3799 * Return the selected TX rate.
3800 */
3801 static const struct iwm_rate *
3802 iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
3803 struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
3804 {
3805 const struct iwm_rate *rinfo;
3806 int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3807 int ridx, rate_flags;
3808 int nrates = in->in_ni.ni_rates.rs_nrates;
3809
3810 tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
3811 tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
3812
3813 /* for data frames, use RS table */
3814 if (type == IEEE80211_FC0_TYPE_DATA) {
3815 if (sc->sc_fixed_ridx != -1) {
3816 tx->initial_rate_index = sc->sc_fixed_ridx;
3817 } else {
3818 tx->initial_rate_index = (nrates-1) - in->in_ni.ni_txrate;
3819 }
3820 tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
3821 DPRINTFN(12, ("start with txrate %d\n", tx->initial_rate_index));
3822 return &iwm_rates[tx->initial_rate_index];
3823 }
3824
3825 /* for non-data, use the lowest supported rate */
3826 ridx = in->in_ridx[0];
3827 rinfo = &iwm_rates[ridx];
3828
3829 rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
3830 if (IWM_RIDX_IS_CCK(ridx))
3831 rate_flags |= IWM_RATE_MCS_CCK_MSK;
3832 tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
3833
3834 return rinfo;
3835 }
3836
/* First TFD transfer block covers only this many bytes of the command. */
#define TB0_SIZE 16
/*
 * Queue an 802.11 frame for transmission on the TX ring for access
 * category "ac".  Builds the in-ring iwm_tx_cmd, maps the (header-
 * stripped) mbuf for DMA, fills the TFD, and advances the write
 * pointer.  On success the mbuf and a node reference are owned by the
 * ring until iwm_mvm_rx_tx_cmd() completes the frame; on error the
 * mbuf is freed and an errno is returned.
 */
static int
iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (void *)ni;
	struct iwm_tx_ring *ring;
	struct iwm_tx_data *data;
	struct iwm_tfd *desc;
	struct iwm_device_cmd *cmd;
	struct iwm_tx_cmd *tx;
	struct ieee80211_frame *wh;
	struct ieee80211_key *k = NULL;
	struct mbuf *m1;
	const struct iwm_rate *rinfo;
	uint32_t flags;
	u_int hdrlen;
	bus_dma_segment_t *seg;
	uint8_t tid, type;
	int i, totlen, error, pad;
	int hdrlen2;

	wh = mtod(m, struct ieee80211_frame *);
	hdrlen = ieee80211_anyhdrsize(wh);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;

	/* Cross-check the generic header-size computation. */
	hdrlen2 = (ieee80211_has_qos(wh)) ?
	    sizeof (struct ieee80211_qosframe) :
	    sizeof (struct ieee80211_frame);

	if (hdrlen != hdrlen2)
		DPRINTF(("%s: hdrlen error (%d != %d)\n",
		    DEVNAME(sc), hdrlen, hdrlen2));

	/* QoS TIDs are not used yet; everything goes to TID 0. */
	tid = 0;

	ring = &sc->txq[ac];
	desc = &ring->desc[ring->cur];
	memset(desc, 0, sizeof(*desc));
	data = &ring->data[ring->cur];

	/* Fill out iwm_tx_cmd to send to the firmware */
	cmd = &ring->cmd[ring->cur];
	cmd->hdr.code = IWM_TX_CMD;
	cmd->hdr.flags = 0;
	cmd->hdr.qid = ring->qid;
	cmd->hdr.idx = ring->cur;

	tx = (void *)cmd->data;
	memset(tx, 0, sizeof(*tx));

	rinfo = iwm_tx_fill_cmd(sc, in, wh, tx);

	if (sc->sc_drvbpf != NULL) {
		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;

		tap->wt_flags = 0;
		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
		tap->wt_rate = rinfo->rate;
		tap->wt_hwqueue = ac;
		if (wh->i_fc[1] & IEEE80211_FC1_WEP)
			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;

		bpf_mtap2(sc->sc_drvbpf, tap, sc->sc_txtap_len, m);
	}

	/* Encrypt the frame if need be. */
	if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
		k = ieee80211_crypto_encap(ic, ni, m);
		if (k == NULL) {
			m_freem(m);
			return ENOBUFS;
		}
		/* Packet header may have moved, reset our local pointer. */
		wh = mtod(m, struct ieee80211_frame *);
	}
	totlen = m->m_pkthdr.len;

	flags = 0;
	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_ACK;
	}

	/*
	 * NOTE(review): RTS/CTS protection is requested here only for
	 * non-data frames above the RTS threshold -- matches the
	 * OpenBSD code this was ported from; confirm intent.
	 */
	if (type != IEEE80211_FC0_TYPE_DATA
	    && (totlen + IEEE80211_CRC_LEN > ic->ic_rtsthreshold)
	    && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
	}

	/* Multicast and non-data frames go via the auxiliary station. */
	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
	    type != IEEE80211_FC0_TYPE_DATA)
		tx->sta_id = sc->sc_aux_sta.sta_id;
	else
		tx->sta_id = IWM_STATION_ID;

	if (type == IEEE80211_FC0_TYPE_MGT) {
		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

		/* Association frames get a slightly longer PM timeout. */
		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
			tx->pm_frame_timeout = htole16(3);
		else
			tx->pm_frame_timeout = htole16(2);
	} else {
		tx->pm_frame_timeout = htole16(0);
	}

	if (hdrlen & 3) {
		/* First segment length must be a multiple of 4. */
		flags |= IWM_TX_CMD_FLG_MH_PAD;
		pad = 4 - (hdrlen & 3);
	} else
		pad = 0;

	tx->driver_txop = 0;
	tx->next_frame_len = 0;

	tx->len = htole16(totlen);
	tx->tid_tspec = tid;
	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);

	/* Set physical address of "scratch area". */
	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);

	/* Copy 802.11 header in TX command. */
	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);

	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;

	tx->sec_ctl = 0;
	tx->tx_flags |= htole32(flags);

	/* Trim 802.11 header. */
	m_adj(m, hdrlen);

	error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
	    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
	if (error != 0) {
		if (error != EFBIG) {
			aprint_error_dev(sc->sc_dev,
			    "can't map mbuf (error %d)\n", error);
			m_freem(m);
			return error;
		}
		/* Too many DMA segments, linearize mbuf. */
		MGETHDR(m1, M_DONTWAIT, MT_DATA);
		if (m1 == NULL) {
			m_freem(m);
			return ENOBUFS;
		}
		if (m->m_pkthdr.len > MHLEN) {
			MCLGET(m1, M_DONTWAIT);
			if (!(m1->m_flags & M_EXT)) {
				m_freem(m);
				m_freem(m1);
				return ENOBUFS;
			}
		}
		m_copydata(m, 0, m->m_pkthdr.len, mtod(m1, void *));
		m1->m_pkthdr.len = m1->m_len = m->m_pkthdr.len;
		m_freem(m);
		m = m1;

		/* Retry the mapping with the single contiguous mbuf. */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
		    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "can't map mbuf (error %d)\n", error);
			m_freem(m);
			return error;
		}
	}
	data->m = m;
	data->in = in;
	data->done = 0;

	DPRINTFN(8, ("sending txd %p, in %p\n", data, data->in));
	KASSERT(data->in != NULL);

	DPRINTFN(8, ("sending data: qid=%d idx=%d len=%d nsegs=%d\n",
	    ring->qid, ring->cur, totlen, data->map->dm_nsegs));

	/* Fill TX descriptor. */
	desc->num_tbs = 2 + data->map->dm_nsegs;

	/* TB 0/1 split the command+header across the TB0_SIZE boundary. */
	desc->tbs[0].lo = htole32(data->cmd_paddr);
	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    (TB0_SIZE << 4);
	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
	      + hdrlen + pad - TB0_SIZE) << 4);

	/* Other DMA segments are for data payload. */
	seg = data->map->dm_segs;
	for (i = 0; i < data->map->dm_nsegs; i++, seg++) {
		desc->tbs[i+2].lo = htole32(seg->ds_addr);
		desc->tbs[i+2].hi_n_len = \
		    htole16(iwm_get_dma_hi_addr(seg->ds_addr))
		    | ((seg->ds_len) << 4);
	}

	/* Flush payload, command slot and descriptor before the doorbell. */
	bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
	    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
	    sizeof (*cmd), BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
	    sizeof (*desc), BUS_DMASYNC_PREWRITE);

#if 0
	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
#endif

	/* Kick TX ring. */
	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	/* Mark TX ring as full if we reach a certain threshold. */
	if (++ring->queued > IWM_TX_RING_HIMARK) {
		sc->qfullmsk |= 1 << ring->qid;
	}

	return 0;
}
4065
#if 0
/* not necessary? */
/*
 * Ask the firmware to flush all frames pending on the TX queues in
 * tfd_msk; "sync" selects a synchronous vs. asynchronous command.
 * Currently compiled out.
 */
static int
iwm_mvm_flush_tx_path(struct iwm_softc *sc, int tfd_msk, int sync)
{
	struct iwm_tx_path_flush_cmd flush_cmd = {
		.queues_ctl = htole32(tfd_msk),
		.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
	};
	int ret;

	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH,
	    sync ? IWM_CMD_SYNC : IWM_CMD_ASYNC,
	    sizeof(flush_cmd), &flush_cmd);
	if (ret)
		aprint_error_dev(sc->sc_dev, "Flushing tx queue failed: %d\n",
		    ret);
	return ret;
}
#endif
4086
4087
4088 /*
4089 * BEGIN mvm/power.c
4090 */
4091
4092 #define IWM_POWER_KEEP_ALIVE_PERIOD_SEC 25
4093
4094 static int
4095 iwm_mvm_beacon_filter_send_cmd(struct iwm_softc *sc,
4096 struct iwm_beacon_filter_cmd *cmd)
4097 {
4098 int ret;
4099
4100 ret = iwm_mvm_send_cmd_pdu(sc, IWM_REPLY_BEACON_FILTERING_CMD,
4101 IWM_CMD_SYNC, sizeof(struct iwm_beacon_filter_cmd), cmd);
4102
4103 if (!ret) {
4104 DPRINTF(("ba_enable_beacon_abort is: %d\n",
4105 le32toh(cmd->ba_enable_beacon_abort)));
4106 DPRINTF(("ba_escape_timer is: %d\n",
4107 le32toh(cmd->ba_escape_timer)));
4108 DPRINTF(("bf_debug_flag is: %d\n",
4109 le32toh(cmd->bf_debug_flag)));
4110 DPRINTF(("bf_enable_beacon_filter is: %d\n",
4111 le32toh(cmd->bf_enable_beacon_filter)));
4112 DPRINTF(("bf_energy_delta is: %d\n",
4113 le32toh(cmd->bf_energy_delta)));
4114 DPRINTF(("bf_escape_timer is: %d\n",
4115 le32toh(cmd->bf_escape_timer)));
4116 DPRINTF(("bf_roaming_energy_delta is: %d\n",
4117 le32toh(cmd->bf_roaming_energy_delta)));
4118 DPRINTF(("bf_roaming_state is: %d\n",
4119 le32toh(cmd->bf_roaming_state)));
4120 DPRINTF(("bf_temp_threshold is: %d\n",
4121 le32toh(cmd->bf_temp_threshold)));
4122 DPRINTF(("bf_temp_fast_filter is: %d\n",
4123 le32toh(cmd->bf_temp_fast_filter)));
4124 DPRINTF(("bf_temp_slow_filter is: %d\n",
4125 le32toh(cmd->bf_temp_slow_filter)));
4126 }
4127 return ret;
4128 }
4129
/*
 * Fill the connection-quality-monitoring related fields of a beacon
 * filter command; currently this only propagates the driver's cached
 * beacon-abort enable flag.
 */
static void
iwm_mvm_beacon_filter_set_cqm_params(struct iwm_softc *sc,
	struct iwm_node *in, struct iwm_beacon_filter_cmd *cmd)
{
	cmd->ba_enable_beacon_abort = htole32(sc->sc_bf.ba_enabled);
}
4136
/*
 * Toggle the firmware's beacon-abort flag for this node.  A no-op
 * (success) unless beacon filtering is currently enabled; otherwise
 * records the new state and re-sends the beacon filter command.
 */
static int
iwm_mvm_update_beacon_abort(struct iwm_softc *sc, struct iwm_node *in,
	int enable)
{
	struct iwm_beacon_filter_cmd cmd = {
		IWM_BF_CMD_CONFIG_DEFAULTS,
		.bf_enable_beacon_filter = htole32(1),
		.ba_enable_beacon_abort = htole32(enable),
	};

	/* Beacon abort only makes sense while filtering is active. */
	if (!sc->sc_bf.bf_enabled)
		return 0;

	sc->sc_bf.ba_enabled = enable;
	iwm_mvm_beacon_filter_set_cqm_params(sc, in, &cmd);
	return iwm_mvm_beacon_filter_send_cmd(sc, &cmd);
}
4154
/*
 * Log the contents of a per-MAC power table command before it is sent.
 *
 * NOTE(review): the driver currently only operates in CAM (continuously
 * active mode); if the power-management enable flag is ever set, the
 * KASSERT(0) below will fire on DIAGNOSTIC kernels.  The detailed
 * power-save logging from the Linux driver is kept under "#if 0" until
 * that path is supported.
 */
static void
iwm_mvm_power_log(struct iwm_softc *sc, struct iwm_mac_power_cmd *cmd)
{
	DPRINTF(("Sending power table command on mac id 0x%X for "
	    "power level %d, flags = 0x%X\n",
	    cmd->id_and_color, IWM_POWER_SCHEME_CAM, le16toh(cmd->flags)));
	DPRINTF(("Keep alive = %u sec\n", le16toh(cmd->keep_alive_seconds)));

	if (!(cmd->flags & htole16(IWM_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK))) {
		DPRINTF(("Disable power management\n"));
		return;
	}
	/* Power management is not implemented; should be unreachable. */
	KASSERT(0);

#if 0
	DPRINTF(mvm, "Rx timeout = %u usec\n",
	    le32_to_cpu(cmd->rx_data_timeout));
	DPRINTF(mvm, "Tx timeout = %u usec\n",
	    le32_to_cpu(cmd->tx_data_timeout));
	if (cmd->flags & cpu_to_le16(IWM_POWER_FLAGS_SKIP_OVER_DTIM_MSK))
		DPRINTF(mvm, "DTIM periods to skip = %u\n",
		    cmd->skip_dtim_periods);
	if (cmd->flags & cpu_to_le16(IWM_POWER_FLAGS_LPRX_ENA_MSK))
		DPRINTF(mvm, "LP RX RSSI threshold = %u\n",
		    cmd->lprx_rssi_threshold);
	if (cmd->flags & cpu_to_le16(IWM_POWER_FLAGS_ADVANCE_PM_ENA_MSK)) {
		DPRINTF(mvm, "uAPSD enabled\n");
		DPRINTF(mvm, "Rx timeout (uAPSD) = %u usec\n",
		    le32_to_cpu(cmd->rx_data_timeout_uapsd));
		DPRINTF(mvm, "Tx timeout (uAPSD) = %u usec\n",
		    le32_to_cpu(cmd->tx_data_timeout_uapsd));
		DPRINTF(mvm, "QNDP TID = %d\n", cmd->qndp_tid);
		DPRINTF(mvm, "ACs flags = 0x%x\n", cmd->uapsd_ac_flags);
		DPRINTF(mvm, "Max SP = %d\n", cmd->uapsd_max_sp);
	}
#endif
}
4192
4193 static void
4194 iwm_mvm_power_build_cmd(struct iwm_softc *sc, struct iwm_node *in,
4195 struct iwm_mac_power_cmd *cmd)
4196 {
4197 struct ieee80211com *ic = &sc->sc_ic;
4198 struct ieee80211_node *ni = &in->in_ni;
4199 int dtimper, dtimper_msec;
4200 int keep_alive;
4201
4202 cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
4203 in->in_color));
4204 dtimper = ic->ic_dtim_period ?: 1;
4205
4206 /*
4207 * Regardless of power management state the driver must set
4208 * keep alive period. FW will use it for sending keep alive NDPs
4209 * immediately after association. Check that keep alive period
4210 * is at least 3 * DTIM
4211 */
4212 dtimper_msec = dtimper * ni->ni_intval;
4213 keep_alive
4214 = MAX(3 * dtimper_msec, 1000 * IWM_POWER_KEEP_ALIVE_PERIOD_SEC);
4215 keep_alive = roundup(keep_alive, 1000) / 1000;
4216 cmd->keep_alive_seconds = htole16(keep_alive);
4217 }
4218
4219 static int
4220 iwm_mvm_power_mac_update_mode(struct iwm_softc *sc, struct iwm_node *in)
4221 {
4222 int ret;
4223 int ba_enable;
4224 struct iwm_mac_power_cmd cmd;
4225
4226 memset(&cmd, 0, sizeof(cmd));
4227
4228 iwm_mvm_power_build_cmd(sc, in, &cmd);
4229 iwm_mvm_power_log(sc, &cmd);
4230
4231 if ((ret = iwm_mvm_send_cmd_pdu(sc, IWM_MAC_PM_POWER_TABLE,
4232 IWM_CMD_SYNC, sizeof(cmd), &cmd)) != 0)
4233 return ret;
4234
4235 ba_enable = !!(cmd.flags &
4236 htole16(IWM_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
4237 return iwm_mvm_update_beacon_abort(sc, in, ba_enable);
4238 }
4239
4240 static int
4241 iwm_mvm_power_update_device(struct iwm_softc *sc)
4242 {
4243 struct iwm_device_power_cmd cmd = {
4244 .flags = htole16(IWM_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK),
4245 };
4246
4247 if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
4248 return 0;
4249
4250 cmd.flags |= htole16(IWM_DEVICE_POWER_FLAGS_CAM_MSK);
4251 DPRINTF(("Sending device power command with flags = 0x%X\n", cmd.flags));
4252
4253 return iwm_mvm_send_cmd_pdu(sc,
4254 IWM_POWER_TABLE_CMD, IWM_CMD_SYNC, sizeof(cmd), &cmd);
4255 }
4256
4257 static int
4258 iwm_mvm_enable_beacon_filter(struct iwm_softc *sc, struct iwm_node *in)
4259 {
4260 struct iwm_beacon_filter_cmd cmd = {
4261 IWM_BF_CMD_CONFIG_DEFAULTS,
4262 .bf_enable_beacon_filter = htole32(1),
4263 };
4264 int ret;
4265
4266 iwm_mvm_beacon_filter_set_cqm_params(sc, in, &cmd);
4267 ret = iwm_mvm_beacon_filter_send_cmd(sc, &cmd);
4268
4269 if (ret == 0)
4270 sc->sc_bf.bf_enabled = 1;
4271
4272 return ret;
4273 }
4274
4275 static int
4276 iwm_mvm_disable_beacon_filter(struct iwm_softc *sc, struct iwm_node *in)
4277 {
4278 struct iwm_beacon_filter_cmd cmd;
4279 int ret;
4280
4281 memset(&cmd, 0, sizeof(cmd));
4282 if ((sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_BF_UPDATED) == 0)
4283 return 0;
4284
4285 ret = iwm_mvm_beacon_filter_send_cmd(sc, &cmd);
4286 if (ret == 0)
4287 sc->sc_bf.bf_enabled = 0;
4288
4289 return ret;
4290 }
4291
#if 0
/*
 * Re-send the beacon filter configuration if filtering is currently
 * enabled (e.g. after CQM parameters changed).  Currently compiled out:
 * no caller exists in this port yet.
 */
static int
iwm_mvm_update_beacon_filter(struct iwm_softc *sc, struct iwm_node *in)
{
	if (!sc->sc_bf.bf_enabled)
		return 0;

	return iwm_mvm_enable_beacon_filter(sc, in);
}
#endif
4302
4303 /*
4304 * END mvm/power.c
4305 */
4306
4307 /*
4308 * BEGIN mvm/sta.c
4309 */
4310
/*
 * Convert a v6 ADD_STA command into the older v5 layout, for firmware
 * that does not advertise IWM_UCODE_TLV_FLAGS_STA_KEY_CMD (see
 * iwm_mvm_send_add_sta_cmd_status()).  Fields are copied one-for-one;
 * anything v5 lacks is simply dropped, and the rest of cmd_v5 is zeroed.
 */
static void
iwm_mvm_add_sta_cmd_v6_to_v5(struct iwm_mvm_add_sta_cmd_v6 *cmd_v6,
	struct iwm_mvm_add_sta_cmd_v5 *cmd_v5)
{
	memset(cmd_v5, 0, sizeof(*cmd_v5));

	cmd_v5->add_modify = cmd_v6->add_modify;
	cmd_v5->tid_disable_tx = cmd_v6->tid_disable_tx;
	cmd_v5->mac_id_n_color = cmd_v6->mac_id_n_color;
	memcpy(cmd_v5->addr, cmd_v6->addr, ETHER_ADDR_LEN);
	cmd_v5->sta_id = cmd_v6->sta_id;
	cmd_v5->modify_mask = cmd_v6->modify_mask;
	cmd_v5->station_flags = cmd_v6->station_flags;
	cmd_v5->station_flags_msk = cmd_v6->station_flags_msk;
	cmd_v5->add_immediate_ba_tid = cmd_v6->add_immediate_ba_tid;
	cmd_v5->remove_immediate_ba_tid = cmd_v6->remove_immediate_ba_tid;
	cmd_v5->add_immediate_ba_ssn = cmd_v6->add_immediate_ba_ssn;
	cmd_v5->sleep_tx_count = cmd_v6->sleep_tx_count;
	cmd_v5->sleep_state_flags = cmd_v6->sleep_state_flags;
	cmd_v5->assoc_id = cmd_v6->assoc_id;
	cmd_v5->beamform_flags = cmd_v6->beamform_flags;
	cmd_v5->tfd_queue_msk = cmd_v6->tfd_queue_msk;
}
4334
4335 static int
4336 iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *sc,
4337 struct iwm_mvm_add_sta_cmd_v6 *cmd, int *status)
4338 {
4339 struct iwm_mvm_add_sta_cmd_v5 cmd_v5;
4340
4341 if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_STA_KEY_CMD) {
4342 return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA,
4343 sizeof(*cmd), cmd, status);
4344 }
4345
4346 iwm_mvm_add_sta_cmd_v6_to_v5(cmd, &cmd_v5);
4347
4348 return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(cmd_v5),
4349 &cmd_v5, status);
4350 }
4351
4352 /* send station add/update command to firmware */
4353 static int
4354 iwm_mvm_sta_send_to_fw(struct iwm_softc *sc, struct iwm_node *in, int update)
4355 {
4356 struct iwm_mvm_add_sta_cmd_v6 add_sta_cmd;
4357 int ret;
4358 uint32_t status;
4359
4360 memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
4361
4362 add_sta_cmd.sta_id = IWM_STATION_ID;
4363 add_sta_cmd.mac_id_n_color
4364 = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
4365 if (!update) {
4366 add_sta_cmd.tfd_queue_msk = htole32(0xf);
4367 IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
4368 }
4369 add_sta_cmd.add_modify = update ? 1 : 0;
4370 add_sta_cmd.station_flags_msk
4371 |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
4372
4373 status = IWM_ADD_STA_SUCCESS;
4374 ret = iwm_mvm_send_add_sta_cmd_status(sc, &add_sta_cmd, &status);
4375 if (ret)
4376 return ret;
4377
4378 switch (status) {
4379 case IWM_ADD_STA_SUCCESS:
4380 break;
4381 default:
4382 ret = EIO;
4383 DPRINTF(("IWM_ADD_STA failed\n"));
4384 break;
4385 }
4386
4387 return ret;
4388 }
4389
/*
 * Add a new station record (the BSS we are joining) to the firmware.
 */
static int
iwm_mvm_add_sta(struct iwm_softc *sc, struct iwm_node *in)
{
	return iwm_mvm_sta_send_to_fw(sc, in, 0);
}
4401
/*
 * Update the existing firmware station record (add_modify = 1).
 */
static int
iwm_mvm_update_sta(struct iwm_softc *sc, struct iwm_node *in)
{
	return iwm_mvm_sta_send_to_fw(sc, in, 1);
}
4407
4408 static int
4409 iwm_mvm_add_int_sta_common(struct iwm_softc *sc, struct iwm_int_sta *sta,
4410 const uint8_t *addr, uint16_t mac_id, uint16_t color)
4411 {
4412 struct iwm_mvm_add_sta_cmd_v6 cmd;
4413 int ret;
4414 uint32_t status;
4415
4416 memset(&cmd, 0, sizeof(cmd));
4417 cmd.sta_id = sta->sta_id;
4418 cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(mac_id, color));
4419
4420 cmd.tfd_queue_msk = htole32(sta->tfd_queue_msk);
4421
4422 if (addr)
4423 memcpy(cmd.addr, addr, ETHER_ADDR_LEN);
4424
4425 ret = iwm_mvm_send_add_sta_cmd_status(sc, &cmd, &status);
4426 if (ret)
4427 return ret;
4428
4429 switch (status) {
4430 case IWM_ADD_STA_SUCCESS:
4431 DPRINTF(("Internal station added.\n"));
4432 return 0;
4433 default:
4434 DPRINTF(("%s: Add internal station failed, status=0x%x\n",
4435 DEVNAME(sc), status));
4436 ret = EIO;
4437 break;
4438 }
4439 return ret;
4440 }
4441
4442 static int
4443 iwm_mvm_add_aux_sta(struct iwm_softc *sc)
4444 {
4445 int ret;
4446
4447 sc->sc_aux_sta.sta_id = 3;
4448 sc->sc_aux_sta.tfd_queue_msk = 0;
4449
4450 ret = iwm_mvm_add_int_sta_common(sc,
4451 &sc->sc_aux_sta, NULL, IWM_MAC_INDEX_AUX, 0);
4452
4453 if (ret)
4454 memset(&sc->sc_aux_sta, 0, sizeof(sc->sc_aux_sta));
4455 return ret;
4456 }
4457
4458 /*
4459 * END mvm/sta.c
4460 */
4461
4462 /*
4463 * BEGIN mvm/scan.c
4464 */
4465
/*
 * Scan tuning parameters used by iwm_mvm_scan_request() and helpers.
 * The *_TIME_PERIOD values are in TU and are converted to microseconds
 * with ieee80211_tu_to_usec() before being handed to the firmware.
 */
#define IWM_PLCP_QUIET_THRESH 1
#define IWM_ACTIVE_QUIET_TIME 10
#define LONG_OUT_TIME_PERIOD 600
#define SHORT_OUT_TIME_PERIOD 200
#define SUSPEND_TIME_PERIOD 100
4471
4472 static uint16_t
4473 iwm_mvm_scan_rx_chain(struct iwm_softc *sc)
4474 {
4475 uint16_t rx_chain;
4476 uint8_t rx_ant;
4477
4478 rx_ant = IWM_FW_VALID_RX_ANT(sc);
4479 rx_chain = rx_ant << IWM_PHY_RX_CHAIN_VALID_POS;
4480 rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
4481 rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_SEL_POS;
4482 rx_chain |= 0x1 << IWM_PHY_RX_CHAIN_DRIVER_FORCE_POS;
4483 return htole16(rx_chain);
4484 }
4485
#define ieee80211_tu_to_usec(a) (1024*(a))	/* one TU is 1024 microseconds */
4487
4488 static uint32_t
4489 iwm_mvm_scan_max_out_time(struct iwm_softc *sc, uint32_t flags, int is_assoc)
4490 {
4491 if (!is_assoc)
4492 return 0;
4493 if (flags & 0x1)
4494 return htole32(ieee80211_tu_to_usec(SHORT_OUT_TIME_PERIOD));
4495 return htole32(ieee80211_tu_to_usec(LONG_OUT_TIME_PERIOD));
4496 }
4497
4498 static uint32_t
4499 iwm_mvm_scan_suspend_time(struct iwm_softc *sc, int is_assoc)
4500 {
4501 if (!is_assoc)
4502 return 0;
4503 return htole32(ieee80211_tu_to_usec(SUSPEND_TIME_PERIOD));
4504 }
4505
4506 static uint32_t
4507 iwm_mvm_scan_rxon_flags(struct iwm_softc *sc, int flags)
4508 {
4509 if (flags & IEEE80211_CHAN_2GHZ)
4510 return htole32(IWM_PHY_BAND_24);
4511 else
4512 return htole32(IWM_PHY_BAND_5);
4513 }
4514
/*
 * Choose rate and antenna for scan probe requests: rotate through the
 * valid TX antennas across calls (state kept in sc_scan_last_antenna),
 * and use 1 Mbps CCK on 2 GHz unless CCK is suppressed, else 6 Mbps OFDM.
 * Result is in firmware rate_n_flags format, little-endian.
 */
static uint32_t
iwm_mvm_scan_rate_n_flags(struct iwm_softc *sc, int flags, int no_cck)
{
	uint32_t tx_ant;
	int i, ind;

	/* Advance to the next firmware-valid antenna after the last used. */
	for (i = 0, ind = sc->sc_scan_last_antenna;
	    i < IWM_RATE_MCS_ANT_NUM; i++) {
		ind = (ind + 1) % IWM_RATE_MCS_ANT_NUM;
		if (IWM_FW_VALID_TX_ANT(sc) & (1 << ind)) {
			sc->sc_scan_last_antenna = ind;
			break;
		}
	}
	tx_ant = (1 << sc->sc_scan_last_antenna) << IWM_RATE_MCS_ANT_POS;

	if ((flags & IEEE80211_CHAN_2GHZ) && !no_cck)
		return htole32(IWM_RATE_1M_PLCP | IWM_RATE_MCS_CCK_MSK |
		    tx_ant);
	else
		return htole32(IWM_RATE_6M_PLCP | tx_ant);
}
4537
4538 /*
4539 * If req->n_ssids > 0, it means we should do an active scan.
4540 * In case of active scan w/o directed scan, we receive a zero-length SSID
4541 * just to notify that this scan is active and not passive.
4542 * In order to notify the FW of the number of SSIDs we wish to scan (including
4543 * the zero-length one), we need to set the corresponding bits in chan->type,
 * one for each SSID, and set the active bit (first).  Since the first SSID
 * is already included in the probe template, we need to set only
 * req->n_ssids - 1 bits in addition to the first bit.
4547 */
4548 static uint16_t
4549 iwm_mvm_get_active_dwell(struct iwm_softc *sc, int flags, int n_ssids)
4550 {
4551 if (flags & IEEE80211_CHAN_2GHZ)
4552 return 30 + 3 * (n_ssids + 1);
4553 return 20 + 2 * (n_ssids + 1);
4554 }
4555
4556 static uint16_t
4557 iwm_mvm_get_passive_dwell(struct iwm_softc *sc, int flags)
4558 {
4559 return (flags & IEEE80211_CHAN_2GHZ) ? 100 + 20 : 100 + 10;
4560 }
4561
/*
 * Append one iwm_scan_channel descriptor per channel matching 'flags'
 * to the scan command, directly after the probe-request template
 * (i.e. at cmd->data + tx_cmd.len).  Returns the number of channels
 * written; that count sizes the command in iwm_mvm_scan_request().
 */
static int
iwm_mvm_scan_fill_channels(struct iwm_softc *sc, struct iwm_scan_cmd *cmd,
	int flags, int n_ssids, int basic_ssid)
{
	struct ieee80211com *ic = &sc->sc_ic;
	uint16_t passive_dwell = iwm_mvm_get_passive_dwell(sc, flags);
	uint16_t active_dwell = iwm_mvm_get_active_dwell(sc, flags, n_ssids);
	/* Channel descriptors start right after the probe template. */
	struct iwm_scan_channel *chan = (struct iwm_scan_channel *)
		(cmd->data + le16toh(cmd->tx_cmd.len));
	/* One type bit per SSID (see the block comment above). */
	int type = (1 << n_ssids) - 1;
	struct ieee80211_channel *c;
	int nchan;

	if (!basic_ssid)
		type |= (1 << n_ssids);

	/* Entry 0 of ic_channels is not a real channel; start at 1. */
	for (nchan = 0, c = &ic->ic_channels[1];
	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX];
	    c++) {
		/* Only channels of the requested band. */
		if ((c->ic_flags & flags) != flags)
			continue;

		chan->channel = htole16(ieee80211_mhz2ieee(c->ic_freq, flags));
		chan->type = htole32(type);
		/* Never actively probe on passive-only channels. */
		if (c->ic_flags & IEEE80211_CHAN_PASSIVE)
			chan->type &= htole32(~IWM_SCAN_CHANNEL_TYPE_ACTIVE);
		chan->active_dwell = htole16(active_dwell);
		chan->passive_dwell = htole16(passive_dwell);
		chan->iteration_count = htole16(1);
		chan++;
		nchan++;
	}
	if (nchan == 0)
		DPRINTF(("%s: NO CHANNEL!\n", DEVNAME(sc)));
	return nchan;
}
4598
4599 /*
4600 * Fill in probe request with the following parameters:
4601 * TA is our vif HW address, which mac80211 ensures we have.
4602 * Packet is broadcasted, so this is both SA and DA.
4603 * The probe request IE is made out of two: first comes the most prioritized
4604 * SSID if a directed scan is requested. Second comes whatever extra
4605 * information was given to us as the scan request IE.
4606 */
4607 static uint16_t
4608 iwm_mvm_fill_probe_req(struct iwm_softc *sc, struct ieee80211_frame *frame,
4609 const uint8_t *ta, int n_ssids, const uint8_t *ssid, int ssid_len,
4610 const uint8_t *ie, int ie_len, int left)
4611 {
4612 int len = 0;
4613 uint8_t *pos = NULL;
4614
4615 /* Make sure there is enough space for the probe request,
4616 * two mandatory IEs and the data */
4617 left -= sizeof(*frame);
4618 if (left < 0)
4619 return 0;
4620
4621 frame->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
4622 IEEE80211_FC0_SUBTYPE_PROBE_REQ;
4623 frame->i_fc[1] = IEEE80211_FC1_DIR_NODS;
4624 IEEE80211_ADDR_COPY(frame->i_addr1, etherbroadcastaddr);
4625 memcpy(frame->i_addr2, ta, ETHER_ADDR_LEN);
4626 IEEE80211_ADDR_COPY(frame->i_addr3, etherbroadcastaddr);
4627
4628 len += sizeof(*frame);
4629 CTASSERT(sizeof(*frame) == 24);
4630
4631 /* for passive scans, no need to fill anything */
4632 if (n_ssids == 0)
4633 return (uint16_t)len;
4634
4635 /* points to the payload of the request */
4636 pos = (uint8_t *)frame + sizeof(*frame);
4637
4638 /* fill in our SSID IE */
4639 left -= ssid_len + 2;
4640 if (left < 0)
4641 return 0;
4642 *pos++ = IEEE80211_ELEMID_SSID;
4643 *pos++ = ssid_len;
4644 if (ssid && ssid_len) { /* ssid_len may be == 0 even if ssid is valid */
4645 memcpy(pos, ssid, ssid_len);
4646 pos += ssid_len;
4647 }
4648
4649 len += ssid_len + 2;
4650
4651 if (left < ie_len)
4652 return len;
4653
4654 if (ie && ie_len) {
4655 memcpy(pos, ie, ie_len);
4656 len += ie_len;
4657 }
4658
4659 return (uint16_t)len;
4660 }
4661
/*
 * Build and synchronously send a one-shot scan command covering every
 * channel of the band selected by 'flags'.  With n_ssids > 0 the scan
 * is active, probing for the supplied SSID; otherwise it is passive.
 * The command lives in the preallocated sc->sc_scan_cmd buffer:
 * header, then the probe-request template, then channel descriptors.
 */
static int
iwm_mvm_scan_request(struct iwm_softc *sc, int flags,
	int n_ssids, uint8_t *ssid, int ssid_len)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_host_cmd hcmd = {
		.id = IWM_SCAN_REQUEST_CMD,
		.len = { 0, },
		.data = { sc->sc_scan_cmd, },
		.flags = IWM_CMD_SYNC,
		.dataflags = { IWM_HCMD_DFL_NOCOPY, },
	};
	struct iwm_scan_cmd *cmd = sc->sc_scan_cmd;
	int is_assoc = 0;
	int ret;
	uint32_t status;
	int basic_ssid = !(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_NO_BASIC_SSID);

	//lockdep_assert_held(&mvm->mutex);

	/* Remember the band scanned; cleared again if the send fails. */
	sc->sc_scanband = flags & (IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_5GHZ);

	DPRINTF(("Handling ieee80211 scan request\n"));
	memset(cmd, 0, sc->sc_scan_cmd_len);

	cmd->quiet_time = htole16(IWM_ACTIVE_QUIET_TIME);
	cmd->quiet_plcp_th = htole16(IWM_PLCP_QUIET_THRESH);
	cmd->rxchain_sel_flags = iwm_mvm_scan_rx_chain(sc);
	/* is_assoc is always 0 here, so these both come out as zero. */
	cmd->max_out_time = iwm_mvm_scan_max_out_time(sc, 0, is_assoc);
	cmd->suspend_time = iwm_mvm_scan_suspend_time(sc, is_assoc);
	cmd->rxon_flags = iwm_mvm_scan_rxon_flags(sc, flags);
	cmd->filter_flags = htole32(IWM_MAC_FILTER_ACCEPT_GRP |
	    IWM_MAC_FILTER_IN_BEACON);

	cmd->type = htole32(IWM_SCAN_TYPE_FORCED);
	cmd->repeats = htole32(1);

	/*
	 * If the user asked for passive scan, don't change to active scan if
	 * you see any activity on the channel - remain passive.
	 */
	if (n_ssids > 0) {
		cmd->passive2active = htole16(1);
		cmd->scan_flags |= IWM_SCAN_FLAGS_PASSIVE2ACTIVE;
#if 0
		if (basic_ssid) {
			ssid = req->ssids[0].ssid;
			ssid_len = req->ssids[0].ssid_len;
		}
#endif
	} else {
		cmd->passive2active = 0;
		cmd->scan_flags &= ~IWM_SCAN_FLAGS_PASSIVE2ACTIVE;
	}

	/* Probe requests are sent via the auxiliary station. */
	cmd->tx_cmd.tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
	    IWM_TX_CMD_FLG_BT_DIS);
	cmd->tx_cmd.sta_id = sc->sc_aux_sta.sta_id;
	cmd->tx_cmd.life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
	cmd->tx_cmd.rate_n_flags = iwm_mvm_scan_rate_n_flags(sc, flags, 1/*XXX*/);

	/* Probe-request template follows the command header... */
	cmd->tx_cmd.len = htole16(iwm_mvm_fill_probe_req(sc,
	    (struct ieee80211_frame *)cmd->data,
	    ic->ic_myaddr, n_ssids, ssid, ssid_len,
	    NULL, 0, sc->sc_capa_max_probe_len));

	/* ...and the channel descriptors follow the template. */
	cmd->channel_count
	    = iwm_mvm_scan_fill_channels(sc, cmd, flags, n_ssids, basic_ssid);

	cmd->len = htole16(sizeof(struct iwm_scan_cmd) +
	    le16toh(cmd->tx_cmd.len) +
	    (cmd->channel_count * sizeof(struct iwm_scan_channel)));
	hcmd.len[0] = le16toh(cmd->len);

	status = IWM_SCAN_RESPONSE_OK;
	ret = iwm_mvm_send_cmd_status(sc, &hcmd, &status);
	if (!ret && status == IWM_SCAN_RESPONSE_OK) {
		DPRINTF(("Scan request was sent successfully\n"));
	} else {
		/*
		 * If the scan failed, it usually means that the FW was unable
		 * to allocate the time events. Warn on it, but maybe we
		 * should try to send the command again with different params.
		 */
		sc->sc_scanband = 0;
		ret = EIO;
	}
	return ret;
}
4751
4752 /*
4753 * END mvm/scan.c
4754 */
4755
4756 /*
4757 * BEGIN mvm/mac-ctxt.c
4758 */
4759
4760 static void
4761 iwm_mvm_ack_rates(struct iwm_softc *sc, struct iwm_node *in,
4762 int *cck_rates, int *ofdm_rates)
4763 {
4764 int lowest_present_ofdm = 100;
4765 int lowest_present_cck = 100;
4766 uint8_t cck = 0;
4767 uint8_t ofdm = 0;
4768 int i;
4769
4770 for (i = 0; i <= IWM_LAST_CCK_RATE; i++) {
4771 cck |= (1 << i);
4772 if (lowest_present_cck > i)
4773 lowest_present_cck = i;
4774 }
4775 for (i = IWM_FIRST_OFDM_RATE; i <= IWM_LAST_NON_HT_RATE; i++) {
4776 int adj = i - IWM_FIRST_OFDM_RATE;
4777 ofdm |= (1 << adj);
4778 if (lowest_present_cck > adj)
4779 lowest_present_cck = adj;
4780 }
4781
4782 /*
4783 * Now we've got the basic rates as bitmaps in the ofdm and cck
4784 * variables. This isn't sufficient though, as there might not
4785 * be all the right rates in the bitmap. E.g. if the only basic
4786 * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
4787 * and 6 Mbps because the 802.11-2007 standard says in 9.6:
4788 *
4789 * [...] a STA responding to a received frame shall transmit
4790 * its Control Response frame [...] at the highest rate in the
4791 * BSSBasicRateSet parameter that is less than or equal to the
4792 * rate of the immediately previous frame in the frame exchange
4793 * sequence ([...]) and that is of the same modulation class
4794 * ([...]) as the received frame. If no rate contained in the
4795 * BSSBasicRateSet parameter meets these conditions, then the
4796 * control frame sent in response to a received frame shall be
4797 * transmitted at the highest mandatory rate of the PHY that is
4798 * less than or equal to the rate of the received frame, and
4799 * that is of the same modulation class as the received frame.
4800 *
4801 * As a consequence, we need to add all mandatory rates that are
4802 * lower than all of the basic rates to these bitmaps.
4803 */
4804
4805 if (IWM_RATE_24M_INDEX < lowest_present_ofdm)
4806 ofdm |= IWM_RATE_BIT_MSK(24) >> IWM_FIRST_OFDM_RATE;
4807 if (IWM_RATE_12M_INDEX < lowest_present_ofdm)
4808 ofdm |= IWM_RATE_BIT_MSK(12) >> IWM_FIRST_OFDM_RATE;
4809 /* 6M already there or needed so always add */
4810 ofdm |= IWM_RATE_BIT_MSK(6) >> IWM_FIRST_OFDM_RATE;
4811
4812 /*
4813 * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
4814 * Note, however:
4815 * - if no CCK rates are basic, it must be ERP since there must
4816 * be some basic rates at all, so they're OFDM => ERP PHY
4817 * (or we're in 5 GHz, and the cck bitmap will never be used)
4818 * - if 11M is a basic rate, it must be ERP as well, so add 5.5M
4819 * - if 5.5M is basic, 1M and 2M are mandatory
4820 * - if 2M is basic, 1M is mandatory
4821 * - if 1M is basic, that's the only valid ACK rate.
4822 * As a consequence, it's not as complicated as it sounds, just add
4823 * any lower rates to the ACK rate bitmap.
4824 */
4825 if (IWM_RATE_11M_INDEX < lowest_present_cck)
4826 cck |= IWM_RATE_BIT_MSK(11) >> IWM_FIRST_CCK_RATE;
4827 if (IWM_RATE_5M_INDEX < lowest_present_cck)
4828 cck |= IWM_RATE_BIT_MSK(5) >> IWM_FIRST_CCK_RATE;
4829 if (IWM_RATE_2M_INDEX < lowest_present_cck)
4830 cck |= IWM_RATE_BIT_MSK(2) >> IWM_FIRST_CCK_RATE;
4831 /* 1M already there or needed so always add */
4832 cck |= IWM_RATE_BIT_MSK(1) >> IWM_FIRST_CCK_RATE;
4833
4834 *cck_rates = cck;
4835 *ofdm_rates = ofdm;
4836 }
4837
/*
 * Fill the fields of a MAC context command that are common to all MAC
 * types: id/color, action, addresses, ACK rate bitmaps, slot/preamble
 * flags, per-AC EDCA parameters and protection/filter flags.
 */
static void
iwm_mvm_mac_ctxt_cmd_common(struct iwm_softc *sc, struct iwm_node *in,
	struct iwm_mac_ctx_cmd *cmd, uint32_t action)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = ic->ic_bss;
	int cck_ack_rates, ofdm_ack_rates;
	int i;

	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
	    in->in_color));
	cmd->action = htole32(action);

	/* Only station mode is supported by this driver. */
	cmd->mac_type = htole32(IWM_FW_MAC_TYPE_BSS_STA);
	cmd->tsf_id = htole32(in->in_tsfid);

	IEEE80211_ADDR_COPY(cmd->node_addr, ic->ic_myaddr);
	/* The BSSID is only meaningful once associated. */
	if (in->in_assoc) {
		IEEE80211_ADDR_COPY(cmd->bssid_addr, ni->ni_bssid);
	} else {
		memset(cmd->bssid_addr, 0, sizeof(cmd->bssid_addr));
	}
	iwm_mvm_ack_rates(sc, in, &cck_ack_rates, &ofdm_ack_rates);
	cmd->cck_rates = htole32(cck_ack_rates);
	cmd->ofdm_rates = htole32(ofdm_ack_rates);

	cmd->cck_short_preamble
	    = htole32((ic->ic_flags & IEEE80211_F_SHPREAMBLE)
	      ? IWM_MAC_FLG_SHORT_PREAMBLE : 0);
	cmd->short_slot
	    = htole32((ic->ic_flags & IEEE80211_F_SHSLOT)
	      ? IWM_MAC_FLG_SHORT_SLOT : 0);

	/*
	 * Fixed default EDCA parameters for every AC slot in the command
	 * (IWM_AC_NUM+1 entries; assumes the fw struct has that many --
	 * TODO confirm against the firmware API header).
	 */
	for (i = 0; i < IWM_AC_NUM+1; i++) {
		int txf = i;

		cmd->ac[txf].cw_min = htole16(0x0f);
		cmd->ac[txf].cw_max = htole16(0x3f);
		cmd->ac[txf].aifsn = 1;
		cmd->ac[txf].fifos_mask = (1 << txf);
		cmd->ac[txf].edca_txop = 0;
	}

	cmd->protection_flags |= htole32(IWM_MAC_PROT_FLG_TGG_PROTECT);
	cmd->protection_flags |= htole32(IWM_MAC_PROT_FLG_SELF_CTS_EN);

	cmd->filter_flags = htole32(IWM_MAC_FILTER_ACCEPT_GRP);
}
4886
4887 static int
4888 iwm_mvm_mac_ctxt_send_cmd(struct iwm_softc *sc, struct iwm_mac_ctx_cmd *cmd)
4889 {
4890 int ret = iwm_mvm_send_cmd_pdu(sc, IWM_MAC_CONTEXT_CMD, IWM_CMD_SYNC,
4891 sizeof(*cmd), cmd);
4892 if (ret)
4893 DPRINTF(("%s: Failed to send MAC context (action:%d): %d\n",
4894 DEVNAME(sc), le32toh(cmd->action), ret));
4895 return ret;
4896 }
4897
4898 /*
4899 * Fill the specific data for mac context of type station or p2p client
4900 */
4901 static void
4902 iwm_mvm_mac_ctxt_cmd_fill_sta(struct iwm_softc *sc, struct iwm_node *in,
4903 struct iwm_mac_data_sta *ctxt_sta, int force_assoc_off)
4904 {
4905 struct ieee80211_node *ni = &in->in_ni;
4906 unsigned dtim_period, dtim_count;
4907
4908 dtim_period = ni->ni_dtim_period;
4909 dtim_count = ni->ni_dtim_count;
4910
4911 /* We need the dtim_period to set the MAC as associated */
4912 if (in->in_assoc && dtim_period && !force_assoc_off) {
4913 uint64_t tsf;
4914 uint32_t dtim_offs;
4915
4916 /*
4917 * The DTIM count counts down, so when it is N that means N
4918 * more beacon intervals happen until the DTIM TBTT. Therefore
4919 * add this to the current time. If that ends up being in the
4920 * future, the firmware will handle it.
4921 *
4922 * Also note that the system_timestamp (which we get here as
4923 * "sync_device_ts") and TSF timestamp aren't at exactly the
4924 * same offset in the frame -- the TSF is at the first symbol
4925 * of the TSF, the system timestamp is at signal acquisition
4926 * time. This means there's an offset between them of at most
4927 * a few hundred microseconds (24 * 8 bits + PLCP time gives
4928 * 384us in the longest case), this is currently not relevant
4929 * as the firmware wakes up around 2ms before the TBTT.
4930 */
4931 dtim_offs = dtim_count * ni->ni_intval;
4932 /* convert TU to usecs */
4933 dtim_offs *= 1024;
4934
4935 tsf = ni->ni_tstamp.tsf;
4936
4937 ctxt_sta->dtim_tsf = htole64(tsf + dtim_offs);
4938 ctxt_sta->dtim_time = htole64(ni->ni_rstamp + dtim_offs);
4939
4940 DPRINTF(("DTIM TBTT is 0x%llx/0x%x, offset %d\n",
4941 (long long)le64toh(ctxt_sta->dtim_tsf),
4942 le32toh(ctxt_sta->dtim_time), dtim_offs));
4943
4944 ctxt_sta->is_assoc = htole32(1);
4945 } else {
4946 ctxt_sta->is_assoc = htole32(0);
4947 }
4948
4949 ctxt_sta->bi = htole32(ni->ni_intval);
4950 ctxt_sta->bi_reciprocal = htole32(iwm_mvm_reciprocal(ni->ni_intval));
4951 ctxt_sta->dtim_interval = htole32(ni->ni_intval * dtim_period);
4952 ctxt_sta->dtim_reciprocal =
4953 htole32(iwm_mvm_reciprocal(ni->ni_intval * dtim_period));
4954
4955 /* 10 = CONN_MAX_LISTEN_INTERVAL */
4956 ctxt_sta->listen_interval = htole32(10);
4957 ctxt_sta->assoc_id = htole32(ni->ni_associd);
4958 }
4959
4960 static int
4961 iwm_mvm_mac_ctxt_cmd_station(struct iwm_softc *sc, struct iwm_node *in,
4962 uint32_t action)
4963 {
4964 struct iwm_mac_ctx_cmd cmd;
4965
4966 memset(&cmd, 0, sizeof(cmd));
4967
4968 /* Fill the common data for all mac context types */
4969 iwm_mvm_mac_ctxt_cmd_common(sc, in, &cmd, action);
4970
4971 if (in->in_assoc)
4972 cmd.filter_flags |= htole32(IWM_MAC_FILTER_IN_BEACON);
4973 else
4974 cmd.filter_flags &= ~htole32(IWM_MAC_FILTER_IN_BEACON);
4975
4976 /* Fill the data specific for station mode */
4977 iwm_mvm_mac_ctxt_cmd_fill_sta(sc, in,
4978 &cmd.sta, action == IWM_FW_CTXT_ACTION_ADD);
4979
4980 return iwm_mvm_mac_ctxt_send_cmd(sc, &cmd);
4981 }
4982
/*
 * Dispatch a MAC context action; only station mode exists in this port.
 */
static int
iwm_mvm_mac_ctx_send(struct iwm_softc *sc, struct iwm_node *in, uint32_t action)
{
	return iwm_mvm_mac_ctxt_cmd_station(sc, in, action);
}
4988
4989 static int
4990 iwm_mvm_mac_ctxt_add(struct iwm_softc *sc, struct iwm_node *in)
4991 {
4992 int ret;
4993
4994 ret = iwm_mvm_mac_ctx_send(sc, in, IWM_FW_CTXT_ACTION_ADD);
4995 if (ret)
4996 return ret;
4997
4998 return 0;
4999 }
5000
/*
 * Push an updated MAC context for an already-uploaded node.
 */
static int
iwm_mvm_mac_ctxt_changed(struct iwm_softc *sc, struct iwm_node *in)
{
	return iwm_mvm_mac_ctx_send(sc, in, IWM_FW_CTXT_ACTION_MODIFY);
}
5006
#if 0
/*
 * Remove this node's MAC context from the firmware.  Currently compiled
 * out (no caller), but kept compilable: the original used a nonexistent
 * "print()" function and lacked a trailing newline.
 */
static int
iwm_mvm_mac_ctxt_remove(struct iwm_softc *sc, struct iwm_node *in)
{
	struct iwm_mac_ctx_cmd cmd;
	int ret;

	if (!in->in_uploaded) {
		printf("%s: attempt to remove !uploaded node %p\n",
		    DEVNAME(sc), in);
		return EIO;
	}

	memset(&cmd, 0, sizeof(cmd));

	cmd.id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
	    in->in_color));
	cmd.action = htole32(IWM_FW_CTXT_ACTION_REMOVE);

	ret = iwm_mvm_send_cmd_pdu(sc,
	    IWM_MAC_CONTEXT_CMD, IWM_CMD_SYNC, sizeof(cmd), &cmd);
	if (ret) {
		aprint_error_dev(sc->sc_dev,
		    "Failed to remove MAC context: %d\n", ret);
		return ret;
	}
	in->in_uploaded = 0;

	return 0;
}
#endif
5037
/* Consecutive missed beacons (since last RX) before declaring a miss. */
#define IWM_MVM_MISSED_BEACONS_THRESHOLD 8

/*
 * Handle a missed-beacons notification from the firmware; once too many
 * beacons have been missed since the last received frame, kick the net80211
 * beacon-miss machinery (which will typically trigger a roam/rescan).
 */
static void
iwm_mvm_rx_missed_beacons_notif(struct iwm_softc *sc,
	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
{
	struct iwm_missed_beacons_notif *mb = (void *)pkt->data;

	DPRINTF(("missed bcn mac_id=%u, consecutive=%u (%u, %u, %u)\n",
	    le32toh(mb->mac_id),
	    le32toh(mb->consec_missed_beacons),
	    le32toh(mb->consec_missed_beacons_since_last_rx),
	    le32toh(mb->num_recvd_beacons),
	    le32toh(mb->num_expected_beacons)));

	/*
	 * TODO: the threshold should be adjusted based on latency conditions,
	 * and/or in case of a CS flow on one of the other AP vifs.
	 */
	if (le32toh(mb->consec_missed_beacons_since_last_rx) >
	    IWM_MVM_MISSED_BEACONS_THRESHOLD)
		ieee80211_beacon_miss(&sc->sc_ic);
}
5061
5062 /*
5063 * END mvm/mac-ctxt.c
5064 */
5065
5066 /*
5067 * BEGIN mvm/quota.c
5068 */
5069
5070 static int
5071 iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_node *in)
5072 {
5073 struct iwm_time_quota_cmd cmd;
5074 int i, idx, ret, num_active_macs, quota, quota_rem;
5075 int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
5076 int n_ifs[IWM_MAX_BINDINGS] = {0, };
5077 uint16_t id;
5078
5079 memset(&cmd, 0, sizeof(cmd));
5080
5081 /* currently, PHY ID == binding ID */
5082 if (in) {
5083 id = in->in_phyctxt->id;
5084 KASSERT(id < IWM_MAX_BINDINGS);
5085 colors[id] = in->in_phyctxt->color;
5086
5087 if (1)
5088 n_ifs[id] = 1;
5089 }
5090
5091 /*
5092 * The FW's scheduling session consists of
5093 * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
5094 * equally between all the bindings that require quota
5095 */
5096 num_active_macs = 0;
5097 for (i = 0; i < IWM_MAX_BINDINGS; i++) {
5098 cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
5099 num_active_macs += n_ifs[i];
5100 }
5101
5102 quota = 0;
5103 quota_rem = 0;
5104 if (num_active_macs) {
5105 quota = IWM_MVM_MAX_QUOTA / num_active_macs;
5106 quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
5107 }
5108
5109 for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
5110 if (colors[i] < 0)
5111 continue;
5112
5113 cmd.quotas[idx].id_and_color =
5114 htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
5115
5116 if (n_ifs[i] <= 0) {
5117 cmd.quotas[idx].quota = htole32(0);
5118 cmd.quotas[idx].max_duration = htole32(0);
5119 } else {
5120 cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
5121 cmd.quotas[idx].max_duration = htole32(0);
5122 }
5123 idx++;
5124 }
5125
5126 /* Give the remainder of the session to the first binding */
5127 cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
5128
5129 ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
5130 sizeof(cmd), &cmd);
5131 if (ret)
5132 DPRINTF(("%s: Failed to send quota: %d\n", DEVNAME(sc), ret));
5133 return ret;
5134 }
5135
5136 /*
5137 * END mvm/quota.c
5138 */
5139
5140 /*
 * ieee80211 routines
5142 */
5143
5144 /*
5145 * Change to AUTH state in 80211 state machine. Roughly matches what
5146 * Linux does in bss_info_changed().
5147 */
5148 static int
5149 iwm_auth(struct iwm_softc *sc)
5150 {
5151 struct ieee80211com *ic = &sc->sc_ic;
5152 struct iwm_node *in = (void *)ic->ic_bss;
5153 uint32_t duration;
5154 uint32_t min_duration;
5155 int error;
5156
5157 in->in_assoc = 0;
5158
5159 if ((error = iwm_allow_mcast(sc)) != 0)
5160 return error;
5161
5162 if ((error = iwm_mvm_mac_ctxt_add(sc, in)) != 0) {
5163 DPRINTF(("%s: failed to add MAC\n", DEVNAME(sc)));
5164 return error;
5165 }
5166
5167 if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
5168 in->in_ni.ni_chan, 1, 1)) != 0) {
5169 DPRINTF(("%s: failed add phy ctxt\n", DEVNAME(sc)));
5170 return error;
5171 }
5172 in->in_phyctxt = &sc->sc_phyctxt[0];
5173
5174 if ((error = iwm_mvm_binding_add_vif(sc, in)) != 0) {
5175 DPRINTF(("%s: binding cmd\n", DEVNAME(sc)));
5176 return error;
5177 }
5178
5179 if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
5180 DPRINTF(("%s: failed to add MAC\n", DEVNAME(sc)));
5181 return error;
5182 }
5183
5184 /* a bit superfluous? */
5185 while (sc->sc_auth_prot)
5186 tsleep(&sc->sc_auth_prot, 0, "iwmauth", 0);
5187 sc->sc_auth_prot = 1;
5188
5189 duration = min(IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS,
5190 200 + in->in_ni.ni_intval);
5191 min_duration = min(IWM_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS,
5192 100 + in->in_ni.ni_intval);
5193 iwm_mvm_protect_session(sc, in, duration, min_duration, 500);
5194
5195 while (sc->sc_auth_prot != 2) {
5196 /*
5197 * well, meh, but if the kernel is sleeping for half a
5198 * second, we have bigger problems
5199 */
5200 if (sc->sc_auth_prot == 0) {
5201 DPRINTF(("%s: missed auth window!\n", DEVNAME(sc)));
5202 return ETIMEDOUT;
5203 } else if (sc->sc_auth_prot == -1) {
5204 DPRINTF(("%s: no time event, denied!\n", DEVNAME(sc)));
5205 sc->sc_auth_prot = 0;
5206 return EAUTH;
5207 }
5208 tsleep(&sc->sc_auth_prot, 0, "iwmau2", 0);
5209 }
5210
5211 return 0;
5212 }
5213
5214 static int
5215 iwm_assoc(struct iwm_softc *sc)
5216 {
5217 struct ieee80211com *ic = &sc->sc_ic;
5218 struct iwm_node *in = (void *)ic->ic_bss;
5219 int error;
5220
5221 if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
5222 DPRINTF(("%s: failed to update STA\n", DEVNAME(sc)));
5223 return error;
5224 }
5225
5226 in->in_assoc = 1;
5227 if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
5228 DPRINTF(("%s: failed to update MAC\n", DEVNAME(sc)));
5229 return error;
5230 }
5231
5232 return 0;
5233 }
5234
/*
 * Undo the association state when dropping out of RUN.  Instead of the
 * command-by-command teardown (which hangs the device, see below), the
 * whole device is reset and reinitialized.  "in" may be NULL; when
 * given, only its association flag is cleared.  Always returns 0.
 */
static int
iwm_release(struct iwm_softc *sc, struct iwm_node *in)
{
	/*
	 * Ok, so *technically* the proper set of calls for going
	 * from RUN back to SCAN is:
	 *
	 * iwm_mvm_power_mac_disable(sc, in);
	 * iwm_mvm_mac_ctxt_changed(sc, in);
	 * iwm_mvm_rm_sta(sc, in);
	 * iwm_mvm_update_quotas(sc, NULL);
	 * iwm_mvm_mac_ctxt_changed(sc, in);
	 * iwm_mvm_binding_remove_vif(sc, in);
	 * iwm_mvm_mac_ctxt_remove(sc, in);
	 *
	 * However, that freezes the device no matter which permutations
	 * and modifications are attempted.  Obviously, this driver is missing
	 * something since it works in the Linux driver, but figuring out what
	 * is missing is a little more complicated.  Now, since we're going
	 * back to nothing anyway, we'll just do a complete device reset.
	 * Up yours, device!
	 */
	/* XXX disabled: iwm_mvm_flush_tx_path(sc, 0xf, 1); */
	iwm_stop_device(sc);
	iwm_init_hw(sc);
	if (in)
		in->in_assoc = 0;
	return 0;

#if 0
	/* Dead code: the "proper" teardown sequence, kept for reference. */
	int error;

	iwm_mvm_power_mac_disable(sc, in);

	if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
		aprint_error_dev(sc->sc_dev, "mac ctxt change fail 1 %d\n",
		    error);
		return error;
	}

	if ((error = iwm_mvm_rm_sta(sc, in)) != 0) {
		aprint_error_dev(sc->sc_dev, "sta remove fail %d\n", error);
		return error;
	}
	/* NOTE(review): rm_sta is called a second time here -- looks like
	 * a leftover; harmless while this block stays under #if 0. */
	error = iwm_mvm_rm_sta(sc, in);
	in->in_assoc = 0;
	iwm_mvm_update_quotas(sc, NULL);
	if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
		aprint_error_dev(sc->sc_dev, "mac ctxt change fail 2 %d\n",
		    error);
		return error;
	}
	iwm_mvm_binding_remove_vif(sc, in);

	iwm_mvm_mac_ctxt_remove(sc, in);

	return error;
#endif
}
5294
5295
5296 static struct ieee80211_node *
5297 iwm_node_alloc(struct ieee80211_node_table *nt)
5298 {
5299 return malloc(sizeof(struct iwm_node), M_80211_NODE, M_NOWAIT | M_ZERO);
5300 }
5301
5302 static void
5303 iwm_calib_timeout(void *arg)
5304 {
5305 struct iwm_softc *sc = arg;
5306 struct ieee80211com *ic = &sc->sc_ic;
5307 int s;
5308
5309 s = splnet();
5310 if (ic->ic_fixed_rate == -1
5311 && ic->ic_opmode == IEEE80211_M_STA
5312 && ic->ic_bss) {
5313 struct iwm_node *in = (void *)ic->ic_bss;
5314 ieee80211_amrr_choose(&sc->sc_amrr, &in->in_ni, &in->in_amn);
5315 }
5316 splx(s);
5317
5318 callout_schedule(&sc->sc_calib_to, hz/2);
5319 }
5320
/*
 * Build the link-quality (rate selection) command for a node: map its
 * 802.11 rate set to hardware rate indices (in->in_ridx), fill the
 * firmware rate table in in->in_lq, and initialize AMRR state.  The
 * command itself is sent later by the caller (RUN transition).
 */
static void
iwm_setrates(struct iwm_node *in)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211com *ic = ni->ni_ic;
	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
	struct iwm_lq_cmd *lq = &in->in_lq;
	int nrates = ni->ni_rates.rs_nrates;
	int i, ridx, tab = 0;
	int txant = 0;

	/* The firmware table has a fixed size; refuse oversized rate sets. */
	if (nrates > __arraycount(lq->rs_table)) {
		DPRINTF(("%s: node supports %d rates, driver handles only "
		    "%zu\n", DEVNAME(sc), nrates, __arraycount(lq->rs_table)));
		return;
	}

	/* first figure out which rates we should support */
	memset(&in->in_ridx, -1, sizeof(in->in_ridx));
	for (i = 0; i < nrates; i++) {
		int rate = ni->ni_rates.rs_rates[i] & IEEE80211_RATE_VAL;

		/* Map 802.11 rate to HW rate index. */
		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
			if (iwm_rates[ridx].rate == rate)
				break;
		if (ridx > IWM_RIDX_MAX)
			DPRINTF(("%s: WARNING: device rate for %d not found!\n",
			    DEVNAME(sc), rate));
		else
			in->in_ridx[i] = ridx;
	}

	/* then construct a lq_cmd based on those */
	memset(lq, 0, sizeof(*lq));
	lq->sta_id = IWM_STATION_ID;

	/*
	 * are these used? (we don't do SISO or MIMO)
	 * need to set them to non-zero, though, or we get an error.
	 */
	lq->single_stream_ant_msk = 1;
	lq->dual_stream_ant_msk = 1;

	/*
	 * Build the actual rate selection table.
	 * The lowest bits are the rates.  Additionally,
	 * CCK needs bit 9 to be set.  The rest of the bits
	 * we add to the table select the tx antenna.
	 * Note that we add the rates in the highest rate first
	 * (opposite of ni_rates).
	 */
	for (i = 0; i < nrates; i++) {
		int nextant;

		/* Rotate through the valid TX antennas, lowest bit first. */
		if (txant == 0)
			txant = IWM_FW_VALID_TX_ANT(sc);
		nextant = 1<<(ffs(txant)-1);
		txant &= ~nextant;

		ridx = in->in_ridx[(nrates-1)-i];
		tab = iwm_rates[ridx].plcp;
		tab |= nextant << IWM_RATE_MCS_ANT_POS;
		if (IWM_RIDX_IS_CCK(ridx))
			tab |= IWM_RATE_MCS_CCK_MSK;
		DPRINTFN(2, ("station rate %d %x\n", i, tab));
		lq->rs_table[i] = htole32(tab);
	}
	/* then fill the rest with the lowest possible rate */
	for (i = nrates; i < __arraycount(lq->rs_table); i++) {
		/* tab still holds the last (lowest) rate from the loop above */
		KASSERT(tab != 0);
		lq->rs_table[i] = htole32(tab);
	}

	/* init amrr */
	ieee80211_amrr_node_init(&sc->sc_amrr, &in->in_amn);
	/* Start at the highest supported rate; AMRR adjusts from there. */
	ni->ni_txrate = nrates-1;
}
5399
5400 static int
5401 iwm_media_change(struct ifnet *ifp)
5402 {
5403 struct iwm_softc *sc = ifp->if_softc;
5404 struct ieee80211com *ic = &sc->sc_ic;
5405 uint8_t rate, ridx;
5406 int error;
5407
5408 error = ieee80211_media_change(ifp);
5409 if (error != ENETRESET)
5410 return error;
5411
5412 if (ic->ic_fixed_rate != -1) {
5413 rate = ic->ic_sup_rates[ic->ic_curmode].
5414 rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL;
5415 /* Map 802.11 rate to HW rate index. */
5416 for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
5417 if (iwm_rates[ridx].rate == rate)
5418 break;
5419 sc->sc_fixed_ridx = ridx;
5420 }
5421
5422 if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
5423 (IFF_UP | IFF_RUNNING)) {
5424 iwm_stop(ifp, 0);
5425 error = iwm_init(ifp);
5426 }
5427 return error;
5428 }
5429
/*
 * Deferred (workqueue) half of the 802.11 state transition, queued by
 * iwm_newstate().  Runs in thread context so it may sleep while talking
 * to the firmware.  Frees the request, performs the device-side work
 * for the target state, then chains to the original net80211 handler.
 */
static void
iwm_newstate_cb(struct work *wk, void *v)
{
	struct iwm_softc *sc = v;
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_newstate_state *iwmns = (void *)wk;
	enum ieee80211_state nstate = iwmns->ns_nstate;
	int generation = iwmns->ns_generation;
	struct iwm_node *in;
	int arg = iwmns->ns_arg;
	int error;

	kmem_free(iwmns, sizeof(*iwmns));

	DPRINTF(("Prepare to switch state %d->%d\n", ic->ic_state, nstate));
	/* Drop stale requests: the device was reset after this was queued. */
	if (sc->sc_generation != generation) {
		DPRINTF(("newstate_cb: someone pulled the plug meanwhile\n"));
		if (nstate == IEEE80211_S_INIT) {
			DPRINTF(("newstate_cb: nstate == IEEE80211_S_INIT: calling sc_newstate()\n"));
			sc->sc_newstate(ic, nstate, arg);
		}
		return;
	}

	DPRINTF(("switching state %d->%d\n", ic->ic_state, nstate));

	/* disable beacon filtering if we're hopping out of RUN */
	if (ic->ic_state == IEEE80211_S_RUN && nstate != ic->ic_state) {
		iwm_mvm_disable_beacon_filter(sc, (void *)ic->ic_bss);

		if (((in = (void *)ic->ic_bss) != NULL))
			in->in_assoc = 0;
		/* Full device reset; see iwm_release(). */
		iwm_release(sc, NULL);

		/*
		 * It's impossible to directly go RUN->SCAN. If we iwm_release()
		 * above then the card will be completely reinitialized,
		 * so the driver must do everything necessary to bring the card
		 * from INIT to SCAN.
		 *
		 * Additionally, upon receiving deauth frame from AP,
		 * OpenBSD 802.11 stack puts the driver in IEEE80211_S_AUTH
		 * state. This will also fail with this driver, so bring the FSM
		 * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
		 */
		if (nstate == IEEE80211_S_SCAN ||
		    nstate == IEEE80211_S_AUTH ||
		    nstate == IEEE80211_S_ASSOC) {
			DPRINTF(("Force transition to INIT; MGT=%d\n", arg));
			sc->sc_newstate(ic, IEEE80211_S_INIT, arg);
			DPRINTF(("Going INIT->SCAN\n"));
			nstate = IEEE80211_S_SCAN;
		}
	}

	switch (nstate) {
	case IEEE80211_S_INIT:
		sc->sc_scanband = 0;
		break;

	case IEEE80211_S_SCAN:
		/* Scan already in progress; nothing to do. */
		if (sc->sc_scanband)
			break;

		/* Start with the 2GHz pass; iwm_endscan_cb() chains 5GHz. */
		if ((error = iwm_mvm_scan_request(sc, IEEE80211_CHAN_2GHZ,
		    ic->ic_des_esslen != 0,
		    ic->ic_des_essid, ic->ic_des_esslen)) != 0) {
			DPRINTF(("%s: could not initiate scan\n", DEVNAME(sc)));
			return;
		}
		/* Skip sc_newstate(): scan completion drives the FSM. */
		ic->ic_state = nstate;
		return;

	case IEEE80211_S_AUTH:
		if ((error = iwm_auth(sc)) != 0) {
			DPRINTF(("%s: could not move to auth state: %d\n",
			    DEVNAME(sc), error));
			return;
		}

		break;

	case IEEE80211_S_ASSOC:
		if ((error = iwm_assoc(sc)) != 0) {
			DPRINTF(("%s: failed to associate: %d\n", DEVNAME(sc),
			    error));
			return;
		}
		break;

	case IEEE80211_S_RUN: {
		struct iwm_host_cmd cmd = {
			.id = IWM_LQ_CMD,
			.len = { sizeof(in->in_lq), },
			.flags = IWM_CMD_SYNC,
		};

		in = (struct iwm_node *)ic->ic_bss;
		iwm_mvm_power_mac_update_mode(sc, in);
		iwm_mvm_enable_beacon_filter(sc, in);
		iwm_mvm_update_quotas(sc, in);
		/* Fill in->in_lq, then push it to the firmware. */
		iwm_setrates(in);

		cmd.data[0] = &in->in_lq;
		if ((error = iwm_send_cmd(sc, &cmd)) != 0) {
			DPRINTF(("%s: IWM_LQ_CMD failed\n", DEVNAME(sc)));
		}

		/* Start the periodic AMRR calibration tick. */
		callout_schedule(&sc->sc_calib_to, hz/2);

		break; }

	default:
		DPRINTF(("%s: unsupported state %d\n", DEVNAME(sc), nstate));
		break;
	}

	sc->sc_newstate(ic, nstate, arg);
}
5549
5550 static int
5551 iwm_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
5552 {
5553 struct iwm_newstate_state *iwmns;
5554 struct ifnet *ifp = IC2IFP(ic);
5555 struct iwm_softc *sc = ifp->if_softc;
5556
5557 callout_stop(&sc->sc_calib_to);
5558
5559 iwmns = kmem_intr_alloc(sizeof(*iwmns), KM_NOSLEEP);
5560 if (!iwmns) {
5561 DPRINTF(("%s: allocating state cb mem failed\n", DEVNAME(sc)));
5562 return ENOMEM;
5563 }
5564
5565 iwmns->ns_nstate = nstate;
5566 iwmns->ns_arg = arg;
5567 iwmns->ns_generation = sc->sc_generation;
5568
5569 workqueue_enqueue(sc->sc_nswq, &iwmns->ns_wk, NULL);
5570
5571 return 0;
5572 }
5573
/*
 * Deferred scan-completion handler: after the 2GHz pass finishes,
 * start a 5GHz pass (unless built with IWM_NO_5GHZ); once all passes
 * are done, report the result to net80211.
 */
static void
iwm_endscan_cb(struct work *work __unused, void *arg)
{
	struct iwm_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	int done;

	DPRINTF(("scan ended\n"));

	if (sc->sc_scanband == IEEE80211_CHAN_2GHZ) {
#ifndef IWM_NO_5GHZ
		int error;
		done = 0;
		/* Chain the 5GHz pass; on failure treat the scan as done. */
		if ((error = iwm_mvm_scan_request(sc,
		    IEEE80211_CHAN_5GHZ, ic->ic_des_esslen != 0,
		    ic->ic_des_essid, ic->ic_des_esslen)) != 0) {
			DPRINTF(("%s: could not initiate scan\n", DEVNAME(sc)));
			done = 1;
		}
#else
		done = 1;
#endif
	} else {
		done = 1;
	}

	if (done) {
		/*
		 * sc_scanband == 0 means the scan was already torn down
		 * (e.g. by iwm_stop); cancel rather than complete it.
		 */
		if (!sc->sc_scanband) {
			ieee80211_cancel_scan(ic);
		} else {
			ieee80211_end_scan(ic);
		}
		sc->sc_scanband = 0;
	}
}
5609
/*
 * Bring the device fully up: run the INIT firmware image (for
 * calibration), restart the hardware, load the regular firmware, and
 * configure antennas, PHY DB, PHY contexts, power and TX queues.
 * On any failure after the restart, the device is stopped again.
 */
static int
iwm_init_hw(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	int error, i, qid;

	if ((error = iwm_preinit(sc)) != 0)
		return error;

	if ((error = iwm_start_hw(sc)) != 0)
		return error;

	/* Run the INIT image first to obtain calibration results. */
	if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
		return error;
	}

	/*
	 * should stop and start HW since that INIT
	 * image just loaded
	 */
	iwm_stop_device(sc);
	if ((error = iwm_start_hw(sc)) != 0) {
		aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
		return error;
	}

	/* omstart, this time with the regular firmware */
	error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_REGULAR);
	if (error) {
		aprint_error_dev(sc->sc_dev, "could not load firmware\n");
		goto error;
	}

	if ((error = iwm_send_tx_ant_cfg(sc, IWM_FW_VALID_TX_ANT(sc))) != 0)
		goto error;

	/* Send phy db control command and then phy db calibration */
	if ((error = iwm_send_phy_db_data(sc)) != 0)
		goto error;

	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0)
		goto error;

	/* Add auxiliary station for scanning */
	if ((error = iwm_mvm_add_aux_sta(sc)) != 0)
		goto error;

	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
		/*
		 * The channel used here isn't relevant as it's
		 * going to be overwritten in the other flows.
		 * For now use the first channel we have.
		 */
		if ((error = iwm_mvm_phy_ctxt_add(sc,
		    &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
			goto error;
	}

	error = iwm_mvm_power_update_device(sc);
	if (error)
		goto error;

	/* Mark TX rings as active. */
	for (qid = 0; qid < 4; qid++) {
		iwm_enable_txq(sc, qid, qid);
	}

	return 0;

 error:
	/* Any failure after firmware load: power the device back down. */
	iwm_stop_device(sc);
	return error;
}
5683
5684 /* Allow multicast from our BSSID. */
5685 static int
5686 iwm_allow_mcast(struct iwm_softc *sc)
5687 {
5688 struct ieee80211com *ic = &sc->sc_ic;
5689 struct ieee80211_node *ni = ic->ic_bss;
5690 struct iwm_mcast_filter_cmd *cmd;
5691 size_t size;
5692 int error;
5693
5694 size = roundup(sizeof(*cmd), 4);
5695 cmd = kmem_intr_zalloc(size, KM_NOSLEEP);
5696 if (cmd == NULL)
5697 return ENOMEM;
5698 cmd->filter_own = 1;
5699 cmd->port_id = 0;
5700 cmd->count = 0;
5701 cmd->pass_all = 1;
5702 IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
5703
5704 error = iwm_mvm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
5705 IWM_CMD_SYNC, size, cmd);
5706 kmem_intr_free(cmd, size);
5707 return error;
5708 }
5709
5710 /*
5711 * ifnet interfaces
5712 */
5713
5714 static int
5715 iwm_init(struct ifnet *ifp)
5716 {
5717 struct iwm_softc *sc = ifp->if_softc;
5718 int error;
5719
5720 if (sc->sc_flags & IWM_FLAG_HW_INITED) {
5721 return 0;
5722 }
5723 sc->sc_generation++;
5724 sc->sc_flags &= ~IWM_FLAG_STOPPED;
5725
5726 if ((error = iwm_init_hw(sc)) != 0) {
5727 iwm_stop(ifp, 1);
5728 return error;
5729 }
5730
5731 /*
5732 * Ok, firmware loaded and we are jogging
5733 */
5734
5735 ifp->if_flags &= ~IFF_OACTIVE;
5736 ifp->if_flags |= IFF_RUNNING;
5737
5738 ieee80211_begin_scan(&sc->sc_ic, 0);
5739 sc->sc_flags |= IWM_FLAG_HW_INITED;
5740
5741 return 0;
5742 }
5743
/*
 * Dequeue packets from sendq and call send.
 * mostly from iwn
 *
 * Management frames (ic_mgtq) are sent in any state; data frames only
 * in RUN.  Stops and sets IFF_OACTIVE when any TX ring is full.
 */
static void
iwm_start(struct ifnet *ifp)
{
	struct iwm_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni;
	struct ether_header *eh;
	struct mbuf *m;
	int ac;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		/* why isn't this done per-queue? */
		if (sc->qfullmsk != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* need to send management frames even if we're not RUNning */
		IF_DEQUEUE(&ic->ic_mgtq, m);
		if (m) {
			/* Management frames carry their node in rcvif. */
			ni = (void *)m->m_pkthdr.rcvif;
			ac = 0;
			goto sendit;
		}
		if (ic->ic_state != IEEE80211_S_RUN) {
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (!m)
			break;
		/* Need a contiguous Ethernet header to classify below. */
		if (m->m_len < sizeof (*eh) &&
		    (m = m_pullup(m, sizeof (*eh))) == NULL) {
			ifp->if_oerrors++;
			continue;
		}
		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp, m);

		eh = mtod(m, struct ether_header *);
		ni = ieee80211_find_txnode(ic, eh->ether_dhost);
		if (ni == NULL) {
			m_freem(m);
			ifp->if_oerrors++;
			continue;
		}
		/* classify mbuf so we can find which tx ring to use */
		if (ieee80211_classify(ic, m, ni) != 0) {
			m_freem(m);
			ieee80211_free_node(ni);
			ifp->if_oerrors++;
			continue;
		}

		/* No QoS encapsulation for EAPOL frames. */
		ac = (eh->ether_type != htons(ETHERTYPE_PAE)) ?
		    M_WME_GETAC(m) : WME_AC_BE;

		if ((m = ieee80211_encap(ic, m, ni)) == NULL) {
			ieee80211_free_node(ni);
			ifp->if_oerrors++;
			continue;
		}

 sendit:
		if (ic->ic_rawbpf != NULL)
			bpf_mtap3(ic->ic_rawbpf, m);
		if (iwm_tx(sc, m, ni, ac) != 0) {
			ieee80211_free_node(ni);
			ifp->if_oerrors++;
			continue;
		}

		/* Arm the TX watchdog for the frame we just queued. */
		if (ifp->if_flags & IFF_UP) {
			sc->sc_tx_timer = 15;
			ifp->if_timer = 1;
		}
	}

	return;
}
5832
/*
 * Stop the interface and power the device down.  Bumping
 * sc_generation invalidates queued newstate callbacks; clearing
 * sc_scanband/sc_auth_prot resets scan and auth handshake state.
 * The "disable" argument is currently unused.
 */
static void
iwm_stop(struct ifnet *ifp, int disable)
{
	struct iwm_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;

	sc->sc_flags &= ~IWM_FLAG_HW_INITED;
	sc->sc_flags |= IWM_FLAG_STOPPED;
	sc->sc_generation++;
	sc->sc_scanband = 0;
	sc->sc_auth_prot = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	/* Drop net80211 back to INIT before touching the hardware. */
	if (ic->ic_state != IEEE80211_S_INIT)
		ieee80211_new_state(ic, IEEE80211_S_INIT, -1);

	/* Cancel the TX watchdog, then power the device down. */
	ifp->if_timer = sc->sc_tx_timer = 0;
	iwm_stop_device(sc);
}
5852
5853 static void
5854 iwm_watchdog(struct ifnet *ifp)
5855 {
5856 struct iwm_softc *sc = ifp->if_softc;
5857
5858 ifp->if_timer = 0;
5859 if (sc->sc_tx_timer > 0) {
5860 if (--sc->sc_tx_timer == 0) {
5861 aprint_error_dev(sc->sc_dev, "device timeout\n");
5862 #ifdef IWM_DEBUG
5863 iwm_nic_error(sc);
5864 #endif
5865 ifp->if_flags &= ~IFF_UP;
5866 iwm_stop(ifp, 1);
5867 ifp->if_oerrors++;
5868 return;
5869 }
5870 ifp->if_timer = 1;
5871 }
5872
5873 ieee80211_watchdog(&sc->sc_ic);
5874 }
5875
5876 static int
5877 iwm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
5878 {
5879 struct iwm_softc *sc = ifp->if_softc;
5880 struct ieee80211com *ic = &sc->sc_ic;
5881 const struct sockaddr *sa;
5882 int s, error = 0;
5883
5884 s = splnet();
5885
5886 switch (cmd) {
5887 case SIOCSIFADDR:
5888 ifp->if_flags |= IFF_UP;
5889 /* FALLTHROUGH */
5890 case SIOCSIFFLAGS:
5891 if ((error = ifioctl_common(ifp, cmd, data)) != 0)
5892 break;
5893 if (ifp->if_flags & IFF_UP) {
5894 if (!(ifp->if_flags & IFF_RUNNING)) {
5895 if ((error = iwm_init(ifp)) != 0)
5896 ifp->if_flags &= ~IFF_UP;
5897 }
5898 } else {
5899 if (ifp->if_flags & IFF_RUNNING)
5900 iwm_stop(ifp, 1);
5901 }
5902 break;
5903
5904 case SIOCADDMULTI:
5905 case SIOCDELMULTI:
5906 sa = ifreq_getaddr(SIOCADDMULTI, (struct ifreq *)data);
5907 error = (cmd == SIOCADDMULTI) ?
5908 ether_addmulti(sa, &sc->sc_ec) :
5909 ether_delmulti(sa, &sc->sc_ec);
5910
5911 if (error == ENETRESET)
5912 error = 0;
5913 break;
5914
5915 default:
5916 error = ieee80211_ioctl(ic, cmd, data);
5917 }
5918
5919 if (error == ENETRESET) {
5920 error = 0;
5921 if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
5922 (IFF_UP | IFF_RUNNING)) {
5923 iwm_stop(ifp, 0);
5924 error = iwm_init(ifp);
5925 }
5926 }
5927
5928 splx(s);
5929 return error;
5930 }
5931
5932 /*
5933 * The interrupt side of things
5934 */
5935
5936 /*
5937 * error dumping routines are from iwlwifi/mvm/utils.c
5938 */
5939
/*
 * Note: This structure is read from the device with IO accesses,
 * and the reading already does the endian conversion. As it is
 * read with uint32_t-sized accesses, any members with a different size
 * need to be ordered correctly though!
 *
 * Layout mirrors the firmware's error event table; do not reorder
 * or resize members.
 */
struct iwm_error_event_table {
	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
	uint32_t error_id;	/* type of error */
	uint32_t pc;		/* program counter */
	uint32_t blink1;	/* branch link */
	uint32_t blink2;	/* branch link */
	uint32_t ilink1;	/* interrupt link */
	uint32_t ilink2;	/* interrupt link */
	uint32_t data1;		/* error-specific data */
	uint32_t data2;		/* error-specific data */
	uint32_t data3;		/* error-specific data */
	uint32_t bcon_time;	/* beacon timer */
	uint32_t tsf_low;	/* network timestamp function timer */
	uint32_t tsf_hi;	/* network timestamp function timer */
	uint32_t gp1;		/* GP1 timer register */
	uint32_t gp2;		/* GP2 timer register */
	uint32_t gp3;		/* GP3 timer register */
	uint32_t ucode_ver;	/* uCode version */
	uint32_t hw_ver;	/* HW Silicon version */
	uint32_t brd_ver;	/* HW board version */
	uint32_t log_pc;	/* log program counter */
	uint32_t frame_ptr;	/* frame pointer */
	uint32_t stack_ptr;	/* stack pointer */
	uint32_t hcmd;		/* last host command header */
	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
				 * rxtx_flag */
	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
				 * host_flag */
	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
				 * enc_flag */
	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
				 * time_flag */
	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
				 * wico interrupt */
	uint32_t isr_pref;	/* isr status register LMPM_NIC_PREF_STAT */
	uint32_t wait_event;	/* wait event() caller address */
	uint32_t l2p_control;	/* L2pControlField */
	uint32_t l2p_duration;	/* L2pDurationField */
	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
	uint32_t l2p_addr_match; /* L2pAddrMatchStat */
	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
				 * (LMPM_PMG_SEL) */
	uint32_t u_timestamp;	/* date and time of the firmware
				 * compilation */
	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
} __packed;
5992
/* Error log layout: entries start one dword in; each entry is 7 dwords. */
#define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
#define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
5995
#ifdef IWM_DEBUG
/*
 * Known firmware assert/error identifiers, used to pretty-print the
 * error_id field of the error event table.  The final entry (num == 0,
 * "ADVANCED_SYSASSERT") is the catch-all for unknown codes; see
 * iwm_desc_lookup().
 */
static const struct {
	const char *name;
	uint8_t num;
} advanced_lookup[] = {
	{ "NMI_INTERRUPT_WDG", 0x34 },
	{ "SYSASSERT", 0x35 },
	{ "UCODE_VERSION_MISMATCH", 0x37 },
	{ "BAD_COMMAND", 0x38 },
	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
	{ "FATAL_ERROR", 0x3D },
	{ "NMI_TRM_HW_ERR", 0x46 },
	{ "NMI_INTERRUPT_TRM", 0x4C },
	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
	{ "NMI_INTERRUPT_HOST", 0x66 },
	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
	{ "ADVANCED_SYSASSERT", 0 },
};
6018
6019 static const char *
6020 iwm_desc_lookup(uint32_t num)
6021 {
6022 int i;
6023
6024 for (i = 0; i < __arraycount(advanced_lookup) - 1; i++)
6025 if (advanced_lookup[i].num == num)
6026 return advanced_lookup[i].name;
6027
6028 /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
6029 return advanced_lookup[i].name;
6030 }
6031
/*
 * Support for dumping the error log seemed like a good idea ...
 * but it's mostly hex junk and the only sensible thing is the
 * hw/ucode revision (which we know anyway). Since it's here,
 * I'll just leave it in, just in case e.g. the Intel guys want to
 * help us decipher some "ADVANCED_SYSASSERT" later.
 */
static void
iwm_nic_error(struct iwm_softc *sc)
{
	struct iwm_error_event_table table;
	uint32_t base;

	aprint_error_dev(sc->sc_dev, "dumping device error log\n");
	/* The firmware reported the table address in the ALIVE response. */
	base = sc->sc_uc.uc_error_event_table;
	if (base < 0x800000 || base >= 0x80C000) {
		aprint_error_dev(sc->sc_dev,
		    "Not valid error log pointer 0x%08x\n", base);
		return;
	}

	/* iwm_read_mem() takes a dword count, hence the division. */
	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t)) != 0) {
		aprint_error_dev(sc->sc_dev, "reading errlog failed\n");
		return;
	}

	if (!table.valid) {
		aprint_error_dev(sc->sc_dev, "errlog not found, skipping\n");
		return;
	}

	/*
	 * NOTE(review): this gate compares the fixed start offset against
	 * valid * elem-size, which looks odd; it mirrors the upstream
	 * iwlwifi code -- confirm before changing.
	 */
	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		aprint_error_dev(sc->sc_dev, "Start IWL Error Log Dump:\n");
		aprint_error_dev(sc->sc_dev, "Status: 0x%x, count: %d\n",
		    sc->sc_flags, table.valid);
	}

	aprint_error_dev(sc->sc_dev, "%08X | %-28s\n", table.error_id,
	    iwm_desc_lookup(table.error_id));
	aprint_error_dev(sc->sc_dev, "%08X | uPc\n", table.pc);
	aprint_error_dev(sc->sc_dev, "%08X | branchlink1\n", table.blink1);
	aprint_error_dev(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
	aprint_error_dev(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
	aprint_error_dev(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
	aprint_error_dev(sc->sc_dev, "%08X | data1\n", table.data1);
	aprint_error_dev(sc->sc_dev, "%08X | data2\n", table.data2);
	aprint_error_dev(sc->sc_dev, "%08X | data3\n", table.data3);
	aprint_error_dev(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
	aprint_error_dev(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
	aprint_error_dev(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
	aprint_error_dev(sc->sc_dev, "%08X | time gp1\n", table.gp1);
	aprint_error_dev(sc->sc_dev, "%08X | time gp2\n", table.gp2);
	aprint_error_dev(sc->sc_dev, "%08X | time gp3\n", table.gp3);
	aprint_error_dev(sc->sc_dev, "%08X | uCode version\n", table.ucode_ver);
	aprint_error_dev(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
	aprint_error_dev(sc->sc_dev, "%08X | board version\n", table.brd_ver);
	aprint_error_dev(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
	aprint_error_dev(sc->sc_dev, "%08X | isr0\n", table.isr0);
	aprint_error_dev(sc->sc_dev, "%08X | isr1\n", table.isr1);
	aprint_error_dev(sc->sc_dev, "%08X | isr2\n", table.isr2);
	aprint_error_dev(sc->sc_dev, "%08X | isr3\n", table.isr3);
	aprint_error_dev(sc->sc_dev, "%08X | isr4\n", table.isr4);
	aprint_error_dev(sc->sc_dev, "%08X | isr_pref\n", table.isr_pref);
	aprint_error_dev(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
	aprint_error_dev(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
	aprint_error_dev(sc->sc_dev, "%08X | l2p_duration\n",
	    table.l2p_duration);
	aprint_error_dev(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
	aprint_error_dev(sc->sc_dev, "%08X | l2p_addr_match\n",
	    table.l2p_addr_match);
	aprint_error_dev(sc->sc_dev, "%08X | lmpm_pmg_sel\n",
	    table.lmpm_pmg_sel);
	aprint_error_dev(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
	aprint_error_dev(sc->sc_dev, "%08X | flow_handler\n",
	    table.flow_handler);
}
6108 #endif
6109
/*
 * Sync the DMA region that follows the RX packet header and point
 * _var_ at the response structure living there.
 */
#define SYNC_RESP_STRUCT(_var_, _pkt_)					\
do {									\
	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
	    sizeof(*(_var_)), BUS_DMASYNC_POSTREAD);			\
	_var_ = (void *)((_pkt_)+1);					\
} while (/*CONSTCOND*/0)

/*
 * As above, but for a variable-length payload of _len_ bytes.
 * Fixed: previously synced sizeof(len) -- the size of whatever "len"
 * variable existed at the expansion site -- instead of the _len_
 * argument, so only a handful of bytes were synced.
 */
#define SYNC_RESP_PTR(_ptr_, _len_, _pkt_)				\
do {									\
	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
	    (_len_), BUS_DMASYNC_POSTREAD);				\
	_ptr_ = (void *)((_pkt_)+1);					\
} while (/*CONSTCOND*/0)

/* Advance the RX ring read index, wrapping at IWM_RX_RING_COUNT. */
#define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % IWM_RX_RING_COUNT);
6125
6126 /*
6127 * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
6128 * Basic structure from if_iwn
6129 */
6130 static void
6131 iwm_notif_intr(struct iwm_softc *sc)
6132 {
6133 uint16_t hw;
6134
6135 bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
6136 0, sc->rxq.stat_dma.size, BUS_DMASYNC_POSTREAD);
6137
6138 hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
6139 while (sc->rxq.cur != hw) {
6140 struct iwm_rx_data *data = &sc->rxq.data[sc->rxq.cur];
6141 struct iwm_rx_packet *pkt, tmppkt;
6142 struct iwm_cmd_response *cresp;
6143 int qid, idx;
6144
6145 bus_dmamap_sync(sc->sc_dmat, data->map, 0, sizeof(*pkt),
6146 BUS_DMASYNC_POSTREAD);
6147 pkt = mtod(data->m, struct iwm_rx_packet *);
6148
6149 qid = pkt->hdr.qid & ~0x80;
6150 idx = pkt->hdr.idx;
6151
6152 DPRINTFN(12, ("rx packet qid=%d idx=%d flags=%x type=%x %d %d\n",
6153 pkt->hdr.qid & ~0x80, pkt->hdr.idx, pkt->hdr.flags,
6154 pkt->hdr.code, sc->rxq.cur, hw));
6155
6156 /*
6157 * randomly get these from the firmware, no idea why.
6158 * they at least seem harmless, so just ignore them for now
6159 */
6160 if (__predict_false((pkt->hdr.code == 0 && qid == 0 && idx == 0)
6161 || pkt->len_n_flags == htole32(0x55550000))) {
6162 ADVANCE_RXQ(sc);
6163 continue;
6164 }
6165
6166 switch (pkt->hdr.code) {
6167 case IWM_REPLY_RX_PHY_CMD:
6168 iwm_mvm_rx_rx_phy_cmd(sc, pkt, data);
6169 break;
6170
6171 case IWM_REPLY_RX_MPDU_CMD:
6172 tmppkt = *pkt; // XXX m is freed by ieee80211_input()
6173 iwm_mvm_rx_rx_mpdu(sc, pkt, data);
6174 pkt = &tmppkt;
6175 break;
6176
6177 case IWM_TX_CMD:
6178 iwm_mvm_rx_tx_cmd(sc, pkt, data);
6179 break;
6180
6181 case IWM_MISSED_BEACONS_NOTIFICATION:
6182 iwm_mvm_rx_missed_beacons_notif(sc, pkt, data);
6183 break;
6184
6185 case IWM_MVM_ALIVE: {
6186 struct iwm_mvm_alive_resp *resp;
6187 SYNC_RESP_STRUCT(resp, pkt);
6188
6189 sc->sc_uc.uc_error_event_table
6190 = le32toh(resp->error_event_table_ptr);
6191 sc->sc_uc.uc_log_event_table
6192 = le32toh(resp->log_event_table_ptr);
6193 sc->sched_base = le32toh(resp->scd_base_ptr);
6194 sc->sc_uc.uc_ok = resp->status == IWM_ALIVE_STATUS_OK;
6195
6196 sc->sc_uc.uc_intr = 1;
6197 wakeup(&sc->sc_uc);
6198 break; }
6199
6200 case IWM_CALIB_RES_NOTIF_PHY_DB: {
6201 struct iwm_calib_res_notif_phy_db *phy_db_notif;
6202 SYNC_RESP_STRUCT(phy_db_notif, pkt);
6203
6204 uint16_t size = le16toh(phy_db_notif->length);
6205 bus_dmamap_sync(sc->sc_dmat, data->map,
6206 sizeof(*pkt) + sizeof(*phy_db_notif),
6207 size, BUS_DMASYNC_POSTREAD);
6208 iwm_phy_db_set_section(sc, phy_db_notif, size);
6209
6210 break; }
6211
6212 case IWM_STATISTICS_NOTIFICATION: {
6213 struct iwm_notif_statistics *stats;
6214 SYNC_RESP_STRUCT(stats, pkt);
6215 memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
6216 sc->sc_noise = iwm_get_noise(&stats->rx.general);
6217 break; }
6218
6219 case IWM_NVM_ACCESS_CMD:
6220 if (sc->sc_wantresp == ((qid << 16) | idx)) {
6221 bus_dmamap_sync(sc->sc_dmat, data->map, 0,
6222 sizeof(sc->sc_cmd_resp),
6223 BUS_DMASYNC_POSTREAD);
6224 memcpy(sc->sc_cmd_resp,
6225 pkt, sizeof(sc->sc_cmd_resp));
6226 }
6227 break;
6228
6229 case IWM_PHY_CONFIGURATION_CMD:
6230 case IWM_TX_ANT_CONFIGURATION_CMD:
6231 case IWM_ADD_STA:
6232 case IWM_MAC_CONTEXT_CMD:
6233 case IWM_REPLY_SF_CFG_CMD:
6234 case IWM_POWER_TABLE_CMD:
6235 case IWM_PHY_CONTEXT_CMD:
6236 case IWM_BINDING_CONTEXT_CMD:
6237 case IWM_TIME_EVENT_CMD:
6238 case IWM_SCAN_REQUEST_CMD:
6239 case IWM_REPLY_BEACON_FILTERING_CMD:
6240 case IWM_MAC_PM_POWER_TABLE:
6241 case IWM_TIME_QUOTA_CMD:
6242 case IWM_REMOVE_STA:
6243 case IWM_TXPATH_FLUSH:
6244 case IWM_LQ_CMD:
6245 SYNC_RESP_STRUCT(cresp, pkt);
6246 if (sc->sc_wantresp == ((qid << 16) | idx)) {
6247 memcpy(sc->sc_cmd_resp,
6248 pkt, sizeof(*pkt)+sizeof(*cresp));
6249 }
6250 break;
6251
6252 /* ignore */
6253 case 0x6c: /* IWM_PHY_DB_CMD, no idea why it's not in fw-api.h */
6254 break;
6255
6256 case IWM_INIT_COMPLETE_NOTIF:
6257 sc->sc_init_complete = 1;
6258 wakeup(&sc->sc_init_complete);
6259 break;
6260
6261 case IWM_SCAN_COMPLETE_NOTIFICATION: {
6262 struct iwm_scan_complete_notif *notif;
6263 SYNC_RESP_STRUCT(notif, pkt);
6264
6265 workqueue_enqueue(sc->sc_eswq, &sc->sc_eswk, NULL);
6266 break; }
6267
6268 case IWM_REPLY_ERROR: {
6269 struct iwm_error_resp *resp;
6270 SYNC_RESP_STRUCT(resp, pkt);
6271
6272 aprint_error_dev(sc->sc_dev,
6273 "firmware error 0x%x, cmd 0x%x\n",
6274 le32toh(resp->error_type), resp->cmd_id);
6275 break; }
6276
6277 case IWM_TIME_EVENT_NOTIFICATION: {
6278 struct iwm_time_event_notif *notif;
6279 SYNC_RESP_STRUCT(notif, pkt);
6280
6281 if (notif->status) {
6282 if (le32toh(notif->action) &
6283 IWM_TE_V2_NOTIF_HOST_EVENT_START)
6284 sc->sc_auth_prot = 2;
6285 else
6286 sc->sc_auth_prot = 0;
6287 } else {
6288 sc->sc_auth_prot = -1;
6289 }
6290 wakeup(&sc->sc_auth_prot);
6291 break; }
6292
6293 case IWM_MCAST_FILTER_CMD:
6294 break;
6295
6296 default:
6297 aprint_error_dev(sc->sc_dev,
6298 "frame %d/%d %x UNHANDLED (this should "
6299 "not happen)\n", qid, idx, pkt->len_n_flags);
6300 break;
6301 }
6302
6303 /*
6304 * Why test bit 0x80? The Linux driver:
6305 *
6306 * There is one exception: uCode sets bit 15 when it
6307 * originates the response/notification, i.e. when the
6308 * response/notification is not a direct response to a
6309 * command sent by the driver. For example, uCode issues
6310 * IWM_REPLY_RX when it sends a received frame to the driver;
6311 * it is not a direct response to any driver command.
6312 *
6313 * Ok, so since when is 7 == 15? Well, the Linux driver
6314 * uses a slightly different format for pkt->hdr, and "qid"
6315 * is actually the upper byte of a two-byte field.
6316 */
6317 if (!(pkt->hdr.qid & (1 << 7))) {
6318 iwm_cmd_done(sc, pkt);
6319 }
6320
6321 ADVANCE_RXQ(sc);
6322 }
6323
6324 IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
6325 IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
6326
6327 /*
6328 * Tell the firmware what we have processed.
6329 * Seems like the hardware gets upset unless we align
6330 * the write by 8??
6331 */
6332 hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
6333 IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
6334 }
6335
/*
 * Interrupt handler.  Interrupt causes are gathered either from the
 * in-memory ICT table (when IWM_FLAG_USE_ICT is set) or directly from
 * the IWM_CSR_INT register.  Handles fatal firmware/hardware errors,
 * firmware-chunk-load completion, rfkill toggling, and the RX
 * notification path (via iwm_notif_intr()).  Returns nonzero iff the
 * interrupt was ours.
 */
static int
iwm_intr(void *arg)
{
	struct iwm_softc *sc = arg;
	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
	int handled = 0;	/* cause bits we have processed */
	int r1, r2, rv = 0;	/* r1: CSR_INT causes, r2: FH_INT causes */
	int isperiodic = 0;

	/* Mask all interrupts while we service this one. */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
		uint32_t *ict = sc->ict_dma.vaddr;
		int tmp;

		/*
		 * ICT entries are little-endian.  NOTE(review): le32toh
		 * would be the conventional direction here; it is
		 * numerically identical to htole32 on all ports.
		 */
		tmp = htole32(ict[sc->ict_cur]);
		if (!tmp)
			goto out_ena;	/* nothing pending; not ours */

		/*
		 * ok, there was something. keep plowing until we have all.
		 */
		r1 = r2 = 0;
		while (tmp) {
			r1 |= tmp;
			/* Clear the slot so we don't see it again. */
			ict[sc->ict_cur] = 0;
			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
			tmp = htole32(ict[sc->ict_cur]);
		}

		/* this is where the fun begins. don't ask */
		if (r1 == 0xffffffff)
			r1 = 0;

		/* i am not expected to understand this */
		if (r1 & 0xc0000)
			r1 |= 0x8000;
		/* Expand the packed ICT layout back into CSR_INT bit positions. */
		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
	} else {
		r1 = IWM_READ(sc, IWM_CSR_INT);
		/* "hardware gone" (where, fishing?) */
		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
			goto out;
		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
	}
	if (r1 == 0 && r2 == 0) {
		goto out_ena;	/* spurious; re-enable and bail */
	}

	/* Acknowledge the causes we are about to handle. */
	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);

	/* ignored */
	handled |= (r1 & (IWM_CSR_INT_BIT_ALIVE /*| IWM_CSR_INT_BIT_SCD*/));

	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
#ifdef IWM_DEBUG
		int i;

		iwm_nic_error(sc);

		/* Dump driver status (TX and RX rings) while we're here. */
		DPRINTF(("driver status:\n"));
		for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
			struct iwm_tx_ring *ring = &sc->txq[i];
			DPRINTF((" tx ring %2d: qid=%-2d cur=%-3d "
			    "queued=%-3d\n",
			    i, ring->qid, ring->cur, ring->queued));
		}
		DPRINTF((" rx ring: cur=%d\n", sc->rxq.cur));
		DPRINTF((" 802.11 state %d\n", sc->sc_ic.ic_state));
#endif

		aprint_error_dev(sc->sc_dev, "fatal firmware error\n");
		/* Firmware is dead; take the interface down. */
		ifp->if_flags &= ~IFF_UP;
		iwm_stop(ifp, 1);
		rv = 1;
		goto out;

	}

	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
		handled |= IWM_CSR_INT_BIT_HW_ERR;
		aprint_error_dev(sc->sc_dev,
		    "hardware error, stopping device\n");
		ifp->if_flags &= ~IFF_UP;
		iwm_stop(ifp, 1);
		rv = 1;
		goto out;
	}

	/* firmware chunk loaded */
	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
		handled |= IWM_CSR_INT_BIT_FH_TX;

		/* Wake whoever is sleeping on the firmware DMA transfer. */
		sc->sc_fw_chunk_done = 1;
		wakeup(&sc->sc_fw);
	}

	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
		handled |= IWM_CSR_INT_BIT_RF_KILL;
		/* If the kill switch is now on, shut the interface down. */
		if (iwm_check_rfkill(sc) && (ifp->if_flags & IFF_UP)) {
			DPRINTF(("%s: rfkill switch, disabling interface\n",
			    DEVNAME(sc)));
			ifp->if_flags &= ~IFF_UP;
			iwm_stop(ifp, 1);
		}
	}

	/*
	 * The Linux driver uses periodic interrupts to avoid races.
	 * We cargo-cult like it's going out of fashion.
	 */
	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
		handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
		/* No real RX cause pending: turn the periodic tick off. */
		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
			IWM_WRITE_1(sc,
			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
		isperiodic = 1;
	}

	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
		handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);

		/* Drain the RX ring and process firmware notifications. */
		iwm_notif_intr(sc);

		/* enable periodic interrupt, see above */
		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
			    IWM_CSR_INT_PERIODIC_ENA);
	}

	if (__predict_false(r1 & ~handled))
		DPRINTF(("%s: unhandled interrupts: %x\n", DEVNAME(sc), r1));
	rv = 1;

 out_ena:
	/* Re-enable the interrupt causes we normally listen for. */
	iwm_restore_interrupts(sc);
 out:
	return rv;
}
6479
6480 /*
6481 * Autoconf glue-sniffing
6482 */
6483
/* PCI product IDs of the Intel 7260/3160/7265 adapters this driver supports. */
static const pci_product_id_t iwm_devices[] = {
	PCI_PRODUCT_INTEL_WIFI_LINK_7260_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_7260_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_3160_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_3160_2,
	PCI_PRODUCT_INTEL_WIFI_LINK_7265_1,
	PCI_PRODUCT_INTEL_WIFI_LINK_7265_2,
};
6492
6493 static int
6494 iwm_match(device_t parent, cfdata_t match __unused, void *aux)
6495 {
6496 struct pci_attach_args *pa = aux;
6497
6498 if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_INTEL)
6499 return 0;
6500
6501 for (size_t i = 0; i < __arraycount(iwm_devices); i++)
6502 if (PCI_PRODUCT(pa->pa_id) == iwm_devices[i])
6503 return 1;
6504
6505 return 0;
6506 }
6507
6508 static int
6509 iwm_preinit(struct iwm_softc *sc)
6510 {
6511 int error;
6512
6513 if ((error = iwm_prepare_card_hw(sc)) != 0) {
6514 aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
6515 return error;
6516 }
6517
6518 if (sc->sc_flags & IWM_FLAG_ATTACHED)
6519 return 0;
6520
6521 if ((error = iwm_start_hw(sc)) != 0) {
6522 aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
6523 return error;
6524 }
6525
6526 error = iwm_run_init_mvm_ucode(sc, 1);
6527 iwm_stop_device(sc);
6528 return error;
6529 }
6530
/*
 * Deferred half of attach, run from config_mountroot() once the root
 * file system is mounted (the firmware must be readable from disk).
 * Runs iwm_preinit() to load the init firmware and read the NVM, then
 * completes the net80211/ifnet attachment.
 */
static void
iwm_attach_hook(device_t dev)
{
	struct iwm_softc *sc = device_private(dev);
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = &sc->sc_ec.ec_if;

	/* Mountroot hooks never run during early (cold) autoconf. */
	KASSERT(!cold);

	/* Firmware load/NVM read failed: leave the device unattached. */
	if (iwm_preinit(sc) != 0)
		return;

	sc->sc_flags |= IWM_FLAG_ATTACHED;

	aprint_normal_dev(sc->sc_dev,
	    "hw rev: 0x%x, fw ver %d.%d (API ver %d), address %s\n",
	    sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
	    IWM_UCODE_MAJOR(sc->sc_fwver),
	    IWM_UCODE_MINOR(sc->sc_fwver),
	    IWM_UCODE_API(sc->sc_fwver),
	    ether_sprintf(sc->sc_nvm.hw_addr));

	ic->ic_ifp = ifp;
	ic->ic_phytype = IEEE80211_T_OFDM;	/* not only, but not used */
	ic->ic_opmode = IEEE80211_M_STA;	/* default to BSS mode */
	ic->ic_state = IEEE80211_S_INIT;

	/* Set device capabilities. */
	ic->ic_caps =
	    IEEE80211_C_WEP |		/* WEP */
	    IEEE80211_C_WPA |		/* 802.11i */
	    IEEE80211_C_SHSLOT |	/* short slot time supported */
	    IEEE80211_C_SHPREAMBLE;	/* short preamble supported */

	/* Supported rate sets; 5 GHz (11a) can be compiled out. */
#ifndef IWM_NO_5GHZ
	ic->ic_sup_rates[IEEE80211_MODE_11A] = ieee80211_std_rateset_11a;
#endif
	ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b;
	ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g;

	/* Give each PHY context its slot index as its ID. */
	for (int i = 0; i < __arraycount(sc->sc_phyctxt); i++) {
		sc->sc_phyctxt[i].id = i;
	}

	/* AMRR rate-control success thresholds. */
	sc->sc_amrr.amrr_min_success_threshold = 1;
	sc->sc_amrr.amrr_max_success_threshold = 15;

	/* IBSS channel undefined for now. */
	ic->ic_ibss_chan = &ic->ic_channels[1];

#if 0
	/* Max RSSI */
	ic->ic_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
#endif

	/* Wire up the ifnet entry points. */
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = iwm_init;
	ifp->if_stop = iwm_stop;
	ifp->if_ioctl = iwm_ioctl;
	ifp->if_start = iwm_start;
	ifp->if_watchdog = iwm_watchdog;
	IFQ_SET_READY(&ifp->if_snd);
	memcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);

	if_initialize(ifp);
	ieee80211_ifattach(ic);
	if_register(ifp);

	ic->ic_node_alloc = iwm_node_alloc;

	/* Override 802.11 state transition machine. */
	sc->sc_newstate = ic->ic_newstate;
	ic->ic_newstate = iwm_newstate;
	ieee80211_media_init(ic, iwm_media_change, ieee80211_media_status);
	ieee80211_announce(ic);

	iwm_radiotap_attach(sc);
	/* Calibration timeout callout; set up here, not armed here. */
	callout_init(&sc->sc_calib_to, 0);
	callout_setfunc(&sc->sc_calib_to, iwm_calib_timeout, sc);

	//task_set(&sc->init_task, iwm_init_task, sc);
}
6614
/*
 * Autoconf attach: map the PCI device, install the interrupt handler,
 * select the firmware image for the detected chip, and allocate all
 * DMA resources (firmware buffers, keep-warm page, ICT table, TX
 * scheduler, TX/RX rings).  The firmware-dependent part of attachment
 * is deferred to iwm_attach_hook() via config_mountroot().
 */
static void
iwm_attach(device_t parent, device_t self, void *aux)
{
	struct iwm_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	pci_intr_handle_t ih;
	pcireg_t reg, memtype;
	const char *intrstr;
	int error;
	int txq_i;

	sc->sc_dev = self;
	sc->sc_pct = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;
	sc->sc_dmat = pa->pa_dmat;
	sc->sc_pciid = pa->pa_id;

	pci_aprint_devinfo(pa, NULL);

	/*
	 * Get the offset of the PCI Express Capability Structure in PCI
	 * Configuration Space.
	 */
	error = pci_get_capability(sc->sc_pct, sc->sc_pcitag,
	    PCI_CAP_PCIEXPRESS, &sc->sc_cap_off, NULL);
	/* pci_get_capability() returns nonzero iff the capability exists. */
	if (error == 0) {
		aprint_error_dev(self,
		    "PCIe capability structure not found!\n");
		return;
	}

	/* Clear device-specific "PCI retry timeout" register (41h). */
	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);

	/* Enable bus-mastering and hardware bug workaround. */
	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
	reg |= PCI_COMMAND_MASTER_ENABLE;
	/* if !MSI */
	if (reg & PCI_COMMAND_INTERRUPT_DISABLE) {
		reg &= ~PCI_COMMAND_INTERRUPT_DISABLE;
	}
	pci_conf_write(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, reg);

	/* Map the device registers (BAR 0). */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START);
	error = pci_mapreg_map(pa, PCI_MAPREG_START, memtype, 0,
	    &sc->sc_st, &sc->sc_sh, NULL, &sc->sc_sz);
	if (error != 0) {
		aprint_error_dev(self, "can't map mem space\n");
		return;
	}

	/* Install interrupt handler. */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(self, "can't map interrupt\n");
		return;
	}

	char intrbuf[PCI_INTRSTR_LEN];
	intrstr = pci_intr_string(sc->sc_pct, ih, intrbuf, sizeof(intrbuf));
	sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET, iwm_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(self, "can't establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(self, "interrupting at %s\n", intrstr);

	/* No synchronous command response is expected yet. */
	sc->sc_wantresp = -1;

	/* Pick the firmware image matching the chip variant. */
	switch (PCI_PRODUCT(sc->sc_pciid)) {
	case PCI_PRODUCT_INTEL_WIFI_LINK_7260_1:
	case PCI_PRODUCT_INTEL_WIFI_LINK_7260_2:
		sc->sc_fwname = "iwlwifi-7260-9.ucode";
		break;
	case PCI_PRODUCT_INTEL_WIFI_LINK_3160_1:
	case PCI_PRODUCT_INTEL_WIFI_LINK_3160_2:
		sc->sc_fwname = "iwlwifi-3160-9.ucode";
		break;
	case PCI_PRODUCT_INTEL_WIFI_LINK_7265_1:
	case PCI_PRODUCT_INTEL_WIFI_LINK_7265_2:
		sc->sc_fwname = "iwlwifi-7265-9.ucode";
		break;
	default:
		aprint_error_dev(self, "unknown product %#x",
		    PCI_PRODUCT(sc->sc_pciid));
		return;
	}
	DPRINTF(("%s: firmware=%s\n", DEVNAME(sc), sc->sc_fwname));
	sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;

	/*
	 * We now start fiddling with the hardware
	 */

	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
	if (iwm_prepare_card_hw(sc) != 0) {
		aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
		return;
	}

	/* Allocate DMA memory for firmware transfers. */
	if ((error = iwm_alloc_fwmem(sc)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate memory for firmware\n");
		return;
	}

	/* Allocate "Keep Warm" page. */
	if ((error = iwm_alloc_kw(sc)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate keep warm page\n");
		goto fail1;
	}

	/* We use ICT interrupts */
	if ((error = iwm_alloc_ict(sc)) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate ICT table\n");
		goto fail2;
	}

	/* Allocate TX scheduler "rings". */
	if ((error = iwm_alloc_sched(sc)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "could not allocate TX scheduler rings\n");
		goto fail3;
	}

	/* Allocate TX rings */
	for (txq_i = 0; txq_i < __arraycount(sc->txq); txq_i++) {
		if ((error = iwm_alloc_tx_ring(sc,
		    &sc->txq[txq_i], txq_i)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "could not allocate TX ring %d\n", txq_i);
			goto fail4;
		}
	}

	/* Allocate RX ring. */
	if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
		aprint_error_dev(sc->sc_dev, "could not allocate RX ring\n");
		goto fail4;
	}

	/*
	 * Workqueues for end-of-scan and newstate processing.
	 * NOTE(review): workqueue_create() return values are not checked;
	 * a failure here would leave sc_eswq/sc_nswq NULL.
	 */
	workqueue_create(&sc->sc_eswq, "iwmes",
	    iwm_endscan_cb, sc, PRI_NONE, IPL_NET, 0);
	workqueue_create(&sc->sc_nswq, "iwmns",
	    iwm_newstate_cb, sc, PRI_NONE, IPL_NET, 0);

	/* Clear pending interrupts. */
	IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);

	/*
	 * We can't do normal attach before the file system is mounted
	 * because we cannot read the MAC address without loading the
	 * firmware from disk. So we postpone until mountroot is done.
	 * Notably, this will require a full driver unload/load cycle
	 * (or reboot) in case the firmware is not present when the
	 * hook runs.
	 */
	config_mountroot(self, iwm_attach_hook);

	return;

	/* Free allocated memory if something failed during attachment. */
fail4:	while (--txq_i >= 0)
		iwm_free_tx_ring(sc, &sc->txq[txq_i]);
	iwm_free_sched(sc);
fail3:	if (sc->ict_dma.vaddr != NULL)
		iwm_free_ict(sc);
fail2:	iwm_free_kw(sc);
fail1:	iwm_free_fwmem(sc);
}
6790
6791 /*
6792 * Attach the interface to 802.11 radiotap.
6793 */
6794 void
6795 iwm_radiotap_attach(struct iwm_softc *sc)
6796 {
6797 struct ifnet *ifp = sc->sc_ic.ic_ifp;
6798
6799 bpf_attach2(ifp, DLT_IEEE802_11_RADIO,
6800 sizeof (struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN,
6801 &sc->sc_drvbpf);
6802
6803 sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
6804 sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
6805 sc->sc_rxtap.wr_ihdr.it_present = htole32(IWM_RX_RADIOTAP_PRESENT);
6806
6807 sc->sc_txtap_len = sizeof sc->sc_txtapu;
6808 sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
6809 sc->sc_txtap.wt_ihdr.it_present = htole32(IWM_TX_RADIOTAP_PRESENT);
6810 }
6811
#if 0
/*
 * (Disabled.)  Restart helper: waits for IWM_FLAG_BUSY to clear, then
 * stops the interface and, if it is administratively up but not
 * running, reinitializes it.
 */
static void
iwm_init_task(void *arg1)
{
	struct iwm_softc *sc = arg1;
	struct ifnet *ifp = &sc->sc_ic.ic_if;
	int s;

	s = splnet();
	/* Serialize against other users of the device via IWM_FLAG_BUSY. */
	while (sc->sc_flags & IWM_FLAG_BUSY)
		tsleep(&sc->sc_flags, 0, "iwmpwr", 0);
	sc->sc_flags |= IWM_FLAG_BUSY;

	iwm_stop(ifp, 0);
	/* Restart only when the interface is up but no longer running. */
	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP)
		iwm_init(ifp);

	sc->sc_flags &= ~IWM_FLAG_BUSY;
	wakeup(&sc->sc_flags);
	splx(s);
}

/*
 * (Disabled.)  Redo the PCI retry-timeout workaround and restart the
 * device; presumably intended for power-management resume — confirm
 * before enabling.
 */
static void
iwm_wakeup(struct iwm_softc *sc)
{
	pcireg_t reg;

	/* Clear device-specific "PCI retry timeout" register (41h). */
	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);

	iwm_init_task(sc);
}

/*
 * (Disabled.)  Autoconf activation hook: stop the interface on
 * deactivation; all other actions are unsupported.
 */
static int
iwm_activate(device_t self, enum devact act)
{
	struct iwm_softc *sc = device_private(self);
	struct ifnet *ifp = IC2IFP(&sc->sc_ic);

	switch (act) {
	case DVACT_DEACTIVATE:
		if (ifp->if_flags & IFF_RUNNING)
			iwm_stop(ifp, 0);
		return 0;
	default:
		return EOPNOTSUPP;
	}
}
#endif
6862
/*
 * Register the "iwm" driver with autoconf: match/attach entry points
 * only; no detach or activate hooks are provided.
 */
CFATTACH_DECL_NEW(iwm, sizeof(struct iwm_softc), iwm_match, iwm_attach,
    NULL, NULL);
6865