if_iwm.c revision 1.4 1 /* $NetBSD: if_iwm.c,v 1.4 2015/02/13 18:02:32 nonaka Exp $ */
2 /* OpenBSD: if_iwm.c,v 1.18 2015/02/11 01:12:42 brad Exp */
3
4 /*
5 * Copyright (c) 2014 genua mbh <info (at) genua.de>
6 * Copyright (c) 2014 Fixup Software Ltd.
7 *
8 * Permission to use, copy, modify, and distribute this software for any
9 * purpose with or without fee is hereby granted, provided that the above
10 * copyright notice and this permission notice appear in all copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 */
20
21 /*-
22 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
23 * which were used as the reference documentation for this implementation.
24 *
25 * Driver version we are currently based off of is
26 * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
27 *
28 ***********************************************************************
29 *
30 * This file is provided under a dual BSD/GPLv2 license. When using or
31 * redistributing this file, you may do so under either license.
32 *
33 * GPL LICENSE SUMMARY
34 *
35 * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
36 *
37 * This program is free software; you can redistribute it and/or modify
38 * it under the terms of version 2 of the GNU General Public License as
39 * published by the Free Software Foundation.
40 *
41 * This program is distributed in the hope that it will be useful, but
42 * WITHOUT ANY WARRANTY; without even the implied warranty of
43 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
44 * General Public License for more details.
45 *
46 * You should have received a copy of the GNU General Public License
47 * along with this program; if not, write to the Free Software
48 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
49 * USA
50 *
51 * The full GNU General Public License is included in this distribution
52 * in the file called COPYING.
53 *
54 * Contact Information:
55 * Intel Linux Wireless <ilw (at) linux.intel.com>
56 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
57 *
58 *
59 * BSD LICENSE
60 *
61 * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
62 * All rights reserved.
63 *
64 * Redistribution and use in source and binary forms, with or without
65 * modification, are permitted provided that the following conditions
66 * are met:
67 *
68 * * Redistributions of source code must retain the above copyright
69 * notice, this list of conditions and the following disclaimer.
70 * * Redistributions in binary form must reproduce the above copyright
71 * notice, this list of conditions and the following disclaimer in
72 * the documentation and/or other materials provided with the
73 * distribution.
74 * * Neither the name Intel Corporation nor the names of its
75 * contributors may be used to endorse or promote products derived
76 * from this software without specific prior written permission.
77 *
78 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
79 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
80 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
81 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
82 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
83 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
84 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
85 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
86 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
87 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
88 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
89 */
90
91 /*-
92 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini (at) free.fr>
93 *
94 * Permission to use, copy, modify, and distribute this software for any
95 * purpose with or without fee is hereby granted, provided that the above
96 * copyright notice and this permission notice appear in all copies.
97 *
98 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
99 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
100 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
101 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
102 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
103 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
104 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
105 */
106
107 #include <sys/cdefs.h>
108 __KERNEL_RCSID(0, "$NetBSD: if_iwm.c,v 1.4 2015/02/13 18:02:32 nonaka Exp $");
109
110 #include <sys/param.h>
111 #include <sys/conf.h>
112 #include <sys/kernel.h>
113 #include <sys/kmem.h>
114 #include <sys/mbuf.h>
115 #include <sys/mutex.h>
116 #include <sys/proc.h>
117 #include <sys/socket.h>
118 #include <sys/sockio.h>
119 #include <sys/systm.h>
120
121 #include <sys/cpu.h>
122 #include <sys/bus.h>
123 #include <sys/workqueue.h>
124 #include <machine/endian.h>
125 #include <machine/intr.h>
126
127 #include <dev/pci/pcireg.h>
128 #include <dev/pci/pcivar.h>
129 #include <dev/pci/pcidevs.h>
130 #include <dev/firmload.h>
131
132 #include <net/bpf.h>
133 #include <net/if.h>
134 #include <net/if_arp.h>
135 #include <net/if_dl.h>
136 #include <net/if_media.h>
137 #include <net/if_types.h>
138 #include <net/if_ether.h>
139
140 #include <netinet/in.h>
141 #include <netinet/in_systm.h>
142 #include <netinet/ip.h>
143
144 #include <net80211/ieee80211_var.h>
145 #include <net80211/ieee80211_amrr.h>
146 #include <net80211/ieee80211_radiotap.h>
147
/* Convenience shorthands for the device name and the ifp behind an ic. */
#define DEVNAME(_s)	device_xname((_s)->sc_dev)
#define IC2IFP(_ic_)	((_ic_)->ic_ifp)

/* Load a little-endian 16/32-bit quantity through a (possibly packed) pointer. */
#define le16_to_cpup(_a_) (le16toh(*(const uint16_t *)(_a_)))
#define le32_to_cpup(_a_) (le32toh(*(const uint32_t *)(_a_)))

#ifdef IWM_DEBUG
/* Debug printfs, gated on the runtime-patchable iwm_debug level. */
#define DPRINTF(x)	do { if (iwm_debug > 0) printf x; } while (0)
#define DPRINTFN(n, x)	do { if (iwm_debug >= (n)) printf x; } while (0)
int iwm_debug = 1;
#else
/* Compiled out when IWM_DEBUG is not defined. */
#define DPRINTF(x)	do { ; } while (0)
#define DPRINTFN(n, x)	do { ; } while (0)
#endif
162
163 #include <dev/pci/if_iwmreg.h>
164 #include <dev/pci/if_iwmvar.h>
165
/*
 * Channel numbers the device NVM can reference, indexed by NVM channel
 * slot.  The first IWM_NUM_2GHZ_CHANNELS entries are 2.4 GHz channels,
 * the rest are 5 GHz.
 */
static const uint8_t iwm_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44 , 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};
/* Count of 2.4 GHz entries at the start of iwm_nvm_channels[]. */
#define IWM_NUM_2GHZ_CHANNELS	14

/* It looks like 11a TX is broken, unfortunately. */
#define IWM_NO_5GHZ		1
178
/*
 * Rate table mapping net80211 rates (units of 500 kbit/s) to the PLCP
 * signal values the firmware expects.  CCK rates come first, OFDM
 * rates start at index IWM_RIDX_OFDM.
 */
static const struct iwm_rate {
	uint8_t rate;	/* net80211 rate, in 500 kbit/s units */
	uint8_t plcp;	/* PLCP value for the firmware */
} iwm_rates[] = {
	{ 2, IWM_RATE_1M_PLCP },
	{ 4, IWM_RATE_2M_PLCP },
	{ 11, IWM_RATE_5M_PLCP },
	{ 22, IWM_RATE_11M_PLCP },
	{ 12, IWM_RATE_6M_PLCP },
	{ 18, IWM_RATE_9M_PLCP },
	{ 24, IWM_RATE_12M_PLCP },
	{ 36, IWM_RATE_18M_PLCP },
	{ 48, IWM_RATE_24M_PLCP },
	{ 72, IWM_RATE_36M_PLCP },
	{ 96, IWM_RATE_48M_PLCP },
	{ 108, IWM_RATE_54M_PLCP },
};
/* Index of the first CCK rate / first OFDM rate in iwm_rates[]. */
#define IWM_RIDX_CCK	0
#define IWM_RIDX_OFDM	4
#define IWM_RIDX_MAX	(__arraycount(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_)	((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_)	((_i_) >= IWM_RIDX_OFDM)
201
/*
 * Deferred 802.11 state-change request, handed to the driver's
 * workqueue (see iwm_newstate()/iwm_newstate_cb()).
 */
struct iwm_newstate_state {
	struct work ns_wk;		/* workqueue linkage */
	struct ieee80211com *ns_ic;	/* interface the change applies to */
	enum ieee80211_state ns_nstate;	/* requested target state */
	int ns_arg;			/* argument passed along with the state change */
	int ns_generation;		/* NOTE(review): presumably used to drop stale
					 * requests after a reinit — confirm in
					 * iwm_newstate_cb() */
};
209
210 static int iwm_store_cscheme(struct iwm_softc *, uint8_t *, size_t);
211 static int iwm_firmware_store_section(struct iwm_softc *,
212 enum iwm_ucode_type, uint8_t *, size_t);
213 static int iwm_set_default_calib(struct iwm_softc *, const void *);
214 static int iwm_read_firmware(struct iwm_softc *);
215 static uint32_t iwm_read_prph(struct iwm_softc *, uint32_t);
216 static void iwm_write_prph(struct iwm_softc *, uint32_t, uint32_t);
217 #ifdef IWM_DEBUG
218 static int iwm_read_mem(struct iwm_softc *, uint32_t, void *, int);
219 #endif
220 static int iwm_write_mem(struct iwm_softc *, uint32_t, const void *, int);
221 static int iwm_write_mem32(struct iwm_softc *, uint32_t, uint32_t);
222 static int iwm_poll_bit(struct iwm_softc *, int, uint32_t, uint32_t, int);
223 static int iwm_nic_lock(struct iwm_softc *);
224 static void iwm_nic_unlock(struct iwm_softc *);
225 static void iwm_set_bits_mask_prph(struct iwm_softc *, uint32_t, uint32_t,
226 uint32_t);
227 static void iwm_set_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
228 static void iwm_clear_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
229 static int iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *,
230 bus_size_t, bus_size_t);
231 static void iwm_dma_contig_free(struct iwm_dma_info *);
232 static int iwm_alloc_fwmem(struct iwm_softc *);
233 static void iwm_free_fwmem(struct iwm_softc *);
234 static int iwm_alloc_sched(struct iwm_softc *);
235 static void iwm_free_sched(struct iwm_softc *);
236 static int iwm_alloc_kw(struct iwm_softc *);
237 static void iwm_free_kw(struct iwm_softc *);
238 static int iwm_alloc_ict(struct iwm_softc *);
239 static void iwm_free_ict(struct iwm_softc *);
240 static int iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
241 static void iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
242 static void iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
243 static int iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
244 int);
245 static void iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
246 static void iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
247 static void iwm_enable_rfkill_int(struct iwm_softc *);
248 static int iwm_check_rfkill(struct iwm_softc *);
249 static void iwm_enable_interrupts(struct iwm_softc *);
250 static void iwm_restore_interrupts(struct iwm_softc *);
251 static void iwm_disable_interrupts(struct iwm_softc *);
252 static void iwm_ict_reset(struct iwm_softc *);
253 static int iwm_set_hw_ready(struct iwm_softc *);
254 static int iwm_prepare_card_hw(struct iwm_softc *);
255 static void iwm_apm_config(struct iwm_softc *);
256 static int iwm_apm_init(struct iwm_softc *);
257 static void iwm_apm_stop(struct iwm_softc *);
258 static int iwm_start_hw(struct iwm_softc *);
259 static void iwm_stop_device(struct iwm_softc *);
260 static void iwm_set_pwr(struct iwm_softc *);
261 static void iwm_mvm_nic_config(struct iwm_softc *);
262 static int iwm_nic_rx_init(struct iwm_softc *);
263 static int iwm_nic_tx_init(struct iwm_softc *);
264 static int iwm_nic_init(struct iwm_softc *);
265 static void iwm_enable_txq(struct iwm_softc *, int, int);
266 static int iwm_post_alive(struct iwm_softc *);
267 static int iwm_is_valid_channel(uint16_t);
268 static uint8_t iwm_ch_id_to_ch_index(uint16_t);
269 static uint16_t iwm_channel_id_to_papd(uint16_t);
270 static uint16_t iwm_channel_id_to_txp(struct iwm_softc *, uint16_t);
271 static int iwm_phy_db_get_section_data(struct iwm_softc *, uint32_t,
272 uint8_t **, uint16_t *, uint16_t);
273 static int iwm_send_phy_db_cmd(struct iwm_softc *, uint16_t, uint16_t,
274 void *);
/* Duplicate declaration removed: iwm_send_phy_db_data was declared twice. */
static int	iwm_send_phy_db_data(struct iwm_softc *);
277 static void iwm_mvm_te_v2_to_v1(const struct iwm_time_event_cmd_v2 *,
278 struct iwm_time_event_cmd_v1 *);
279 static int iwm_mvm_send_time_event_cmd(struct iwm_softc *,
280 const struct iwm_time_event_cmd_v2 *);
281 static int iwm_mvm_time_event_send_add(struct iwm_softc *,
282 struct iwm_node *, void *, struct iwm_time_event_cmd_v2 *);
283 static void iwm_mvm_protect_session(struct iwm_softc *, struct iwm_node *,
284 uint32_t, uint32_t, uint32_t);
285 static int iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
286 uint16_t, uint8_t *, uint16_t *);
287 static int iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
288 uint16_t *);
289 static void iwm_init_channel_map(struct iwm_softc *,
290 const uint16_t * const);
291 static int iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
292 const uint16_t *, const uint16_t *, uint8_t, uint8_t);
293 static int iwm_nvm_init(struct iwm_softc *);
294 static int iwm_firmware_load_chunk(struct iwm_softc *, uint32_t,
295 const uint8_t *, uint32_t);
296 static int iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
297 static int iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
298 static int iwm_fw_alive(struct iwm_softc *, uint32_t);
299 static int iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
300 static int iwm_send_phy_cfg_cmd(struct iwm_softc *);
301 static int iwm_mvm_load_ucode_wait_alive(struct iwm_softc *,
302 enum iwm_ucode_type);
303 static int iwm_run_init_mvm_ucode(struct iwm_softc *, int);
304 static int iwm_rx_addbuf(struct iwm_softc *, int, int);
305 static int iwm_mvm_calc_rssi(struct iwm_softc *, struct iwm_rx_phy_info *);
306 static int iwm_mvm_get_signal_strength(struct iwm_softc *,
307 struct iwm_rx_phy_info *);
308 static void iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *,
309 struct iwm_rx_packet *, struct iwm_rx_data *);
310 static int iwm_get_noise(const struct iwm_mvm_statistics_rx_non_phy *);
311 static void iwm_mvm_rx_rx_mpdu(struct iwm_softc *, struct iwm_rx_packet *,
312 struct iwm_rx_data *);
313 static void iwm_mvm_rx_tx_cmd_single(struct iwm_softc *,
314 struct iwm_rx_packet *, struct iwm_node *);
315 static void iwm_mvm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
316 struct iwm_rx_data *);
317 static int iwm_mvm_binding_cmd(struct iwm_softc *, struct iwm_node *,
318 uint32_t);
319 static int iwm_mvm_binding_update(struct iwm_softc *, struct iwm_node *,
320 int);
321 static int iwm_mvm_binding_add_vif(struct iwm_softc *, struct iwm_node *);
322 static void iwm_mvm_phy_ctxt_cmd_hdr(struct iwm_softc *,
323 struct iwm_mvm_phy_ctxt *, struct iwm_phy_context_cmd *,
324 uint32_t, uint32_t);
325 static void iwm_mvm_phy_ctxt_cmd_data(struct iwm_softc *,
326 struct iwm_phy_context_cmd *, struct ieee80211_channel *,
327 uint8_t, uint8_t);
328 static int iwm_mvm_phy_ctxt_apply(struct iwm_softc *,
329 struct iwm_mvm_phy_ctxt *, uint8_t, uint8_t, uint32_t,
330 uint32_t);
331 static int iwm_mvm_phy_ctxt_add(struct iwm_softc *,
332 struct iwm_mvm_phy_ctxt *, struct ieee80211_channel *,
333 uint8_t, uint8_t);
334 static int iwm_mvm_phy_ctxt_changed(struct iwm_softc *,
335 struct iwm_mvm_phy_ctxt *, struct ieee80211_channel *,
336 uint8_t, uint8_t);
337 static int iwm_send_cmd(struct iwm_softc *, struct iwm_host_cmd *);
338 static int iwm_mvm_send_cmd_pdu(struct iwm_softc *, uint8_t, uint32_t,
339 uint16_t, const void *);
340 static int iwm_mvm_send_cmd_status(struct iwm_softc *,
341 struct iwm_host_cmd *, uint32_t *);
342 static int iwm_mvm_send_cmd_pdu_status(struct iwm_softc *, uint8_t,
343 uint16_t, const void *, uint32_t *);
344 static void iwm_free_resp(struct iwm_softc *, struct iwm_host_cmd *);
345 static void iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
346 #if 0
347 static void iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
348 uint16_t);
349 #endif
350 static const struct iwm_rate *iwm_tx_fill_cmd(struct iwm_softc *,
351 struct iwm_node *, struct ieee80211_frame *,
352 struct iwm_tx_cmd *);
353 static int iwm_tx(struct iwm_softc *, struct mbuf *,
354 struct ieee80211_node *, int);
355 static int iwm_mvm_beacon_filter_send_cmd(struct iwm_softc *,
356 struct iwm_beacon_filter_cmd *);
357 static void iwm_mvm_beacon_filter_set_cqm_params(struct iwm_softc *,
358 struct iwm_node *, struct iwm_beacon_filter_cmd *);
359 static int iwm_mvm_update_beacon_abort(struct iwm_softc *,
360 struct iwm_node *, int);
361 static void iwm_mvm_power_log(struct iwm_softc *,
362 struct iwm_mac_power_cmd *);
363 static void iwm_mvm_power_build_cmd(struct iwm_softc *, struct iwm_node *,
364 struct iwm_mac_power_cmd *);
365 static int iwm_mvm_power_mac_update_mode(struct iwm_softc *,
366 struct iwm_node *);
367 static int iwm_mvm_power_update_device(struct iwm_softc *);
368 static int iwm_mvm_enable_beacon_filter(struct iwm_softc *,
369 struct iwm_node *);
370 static int iwm_mvm_disable_beacon_filter(struct iwm_softc *,
371 struct iwm_node *);
372 static void iwm_mvm_add_sta_cmd_v6_to_v5(struct iwm_mvm_add_sta_cmd_v6 *,
373 struct iwm_mvm_add_sta_cmd_v5 *);
374 static int iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *,
375 struct iwm_mvm_add_sta_cmd_v6 *, int *);
376 static int iwm_mvm_sta_send_to_fw(struct iwm_softc *, struct iwm_node *,
377 int);
378 static int iwm_mvm_add_sta(struct iwm_softc *, struct iwm_node *);
379 static int iwm_mvm_update_sta(struct iwm_softc *, struct iwm_node *);
380 static int iwm_mvm_add_int_sta_common(struct iwm_softc *,
381 struct iwm_int_sta *, const uint8_t *, uint16_t, uint16_t);
382 static int iwm_mvm_add_aux_sta(struct iwm_softc *);
383 static uint16_t iwm_mvm_scan_rx_chain(struct iwm_softc *);
384 static uint32_t iwm_mvm_scan_max_out_time(struct iwm_softc *, uint32_t, int);
385 static uint32_t iwm_mvm_scan_suspend_time(struct iwm_softc *, int);
386 static uint32_t iwm_mvm_scan_rxon_flags(struct iwm_softc *, int);
387 static uint32_t iwm_mvm_scan_rate_n_flags(struct iwm_softc *, int, int);
388 static uint16_t iwm_mvm_get_active_dwell(struct iwm_softc *, int, int);
389 static uint16_t iwm_mvm_get_passive_dwell(struct iwm_softc *, int);
390 static int iwm_mvm_scan_fill_channels(struct iwm_softc *,
391 struct iwm_scan_cmd *, int, int, int);
392 static uint16_t iwm_mvm_fill_probe_req(struct iwm_softc *,
393 struct ieee80211_frame *, const uint8_t *, int,
394 const uint8_t *, int, const uint8_t *, int, int);
395 static int iwm_mvm_scan_request(struct iwm_softc *, int, int, uint8_t *,
396 int);
397 static void iwm_mvm_ack_rates(struct iwm_softc *, struct iwm_node *, int *,
398 int *);
399 static void iwm_mvm_mac_ctxt_cmd_common(struct iwm_softc *,
400 struct iwm_node *, struct iwm_mac_ctx_cmd *, uint32_t);
401 static int iwm_mvm_mac_ctxt_send_cmd(struct iwm_softc *,
402 struct iwm_mac_ctx_cmd *);
403 static void iwm_mvm_mac_ctxt_cmd_fill_sta(struct iwm_softc *,
404 struct iwm_node *, struct iwm_mac_data_sta *, int);
405 static int iwm_mvm_mac_ctxt_cmd_station(struct iwm_softc *,
406 struct iwm_node *, uint32_t);
407 static int iwm_mvm_mac_ctx_send(struct iwm_softc *, struct iwm_node *,
408 uint32_t);
409 static int iwm_mvm_mac_ctxt_add(struct iwm_softc *, struct iwm_node *);
410 static int iwm_mvm_mac_ctxt_changed(struct iwm_softc *, struct iwm_node *);
411 static int iwm_mvm_update_quotas(struct iwm_softc *, struct iwm_node *);
412 static int iwm_auth(struct iwm_softc *);
413 static int iwm_assoc(struct iwm_softc *);
414 static int iwm_release(struct iwm_softc *, struct iwm_node *);
415 static void iwm_calib_timeout(void *);
416 static void iwm_setrates(struct iwm_node *);
417 static int iwm_media_change(struct ifnet *);
418 static void iwm_newstate_cb(void *);
419 static int iwm_newstate(struct ieee80211com *, enum ieee80211_state, int);
420 static void iwm_endscan_cb(void *);
421 static int iwm_init_hw(struct iwm_softc *);
422 static int iwm_init(struct ifnet *);
423 static void iwm_start(struct ifnet *);
424 static void iwm_stop(struct ifnet *, int);
425 static void iwm_watchdog(struct ifnet *);
426 static int iwm_ioctl(struct ifnet *, u_long, void *);
427 #ifdef IWM_DEBUG
428 static const char *iwm_desc_lookup(uint32_t);
429 static void iwm_nic_error(struct iwm_softc *);
430 #endif
431 static void iwm_notif_intr(struct iwm_softc *);
432 static int iwm_intr(void *);
433 static int iwm_preinit(struct iwm_softc *);
434 static void iwm_attach_hook(device_t);
435 static void iwm_attach(device_t, device_t, void *);
436 #if 0
437 static void iwm_init_task(void *);
438 static int iwm_activate(device_t, enum devact);
439 static void iwm_wakeup(struct iwm_softc *);
440 #endif
441 static void iwm_radiotap_attach(struct iwm_softc *);
442
443 static int
444 iwm_firmload(struct iwm_softc *sc)
445 {
446 struct iwm_fw_info *fw = &sc->sc_fw;
447 firmware_handle_t fwh;
448 int error;
449
450 /* Open firmware image. */
451 if ((error = firmware_open("if_iwm", sc->sc_fwname, &fwh)) != 0) {
452 aprint_error_dev(sc->sc_dev,
453 "could not get firmware handle %s\n", sc->sc_fwname);
454 return error;
455 }
456
457 fw->fw_rawsize = firmware_get_size(fwh);
458 /*
459 * Well, this is how the Linux driver checks it ....
460 */
461 if (fw->fw_rawsize < sizeof(uint32_t)) {
462 aprint_error_dev(sc->sc_dev,
463 "firmware too short: %zd bytes\n", fw->fw_rawsize);
464 error = EINVAL;
465 goto out;
466 }
467
468 /* some sanity */
469 if (fw->fw_rawsize > IWM_FWMAXSIZE) {
470 aprint_error_dev(sc->sc_dev,
471 "firmware size is ridiculous: %zd bytes\n",
472 fw->fw_rawsize);
473 error = EINVAL;
474 goto out;
475 }
476
477 /* Read the firmware. */
478 fw->fw_rawdata = kmem_alloc(fw->fw_rawsize, KM_SLEEP);
479 if (fw->fw_rawdata == NULL) {
480 aprint_error_dev(sc->sc_dev,
481 "not enough memory to stock firmware %s\n", sc->sc_fwname);
482 error = ENOMEM;
483 goto out;
484 }
485 error = firmware_read(fwh, 0, fw->fw_rawdata, fw->fw_rawsize);
486 if (error) {
487 aprint_error_dev(sc->sc_dev,
488 "could not read firmware %s\n", sc->sc_fwname);
489 goto out;
490 }
491
492 out:
493 /* caller will release memory, if necessary */
494
495 firmware_close(fwh);
496 return error;
497 }
498
499 /*
500 * just maintaining status quo.
501 */
502 static void
503 iwm_fix_channel(struct ieee80211com *ic, struct mbuf *m)
504 {
505 struct ieee80211_frame *wh;
506 uint8_t subtype;
507 uint8_t *frm, *efrm;
508
509 wh = mtod(m, struct ieee80211_frame *);
510
511 if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_MGT)
512 return;
513
514 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
515
516 if (subtype != IEEE80211_FC0_SUBTYPE_BEACON &&
517 subtype != IEEE80211_FC0_SUBTYPE_PROBE_RESP)
518 return;
519
520 frm = (uint8_t *)(wh + 1);
521 efrm = mtod(m, uint8_t *) + m->m_len;
522
523 frm += 12; /* skip tstamp, bintval and capinfo fields */
524 while (frm < efrm) {
525 if (*frm == IEEE80211_ELEMID_DSPARMS) {
526 #if IEEE80211_CHAN_MAX < 255
527 if (frm[2] <= IEEE80211_CHAN_MAX)
528 #endif
529 ic->ic_curchan = &ic->ic_channels[frm[2]];
530 }
531 frm += frm[1] + 2;
532 }
533 }
534
535 /*
536 * Firmware parser.
537 */
538
539 static int
540 iwm_store_cscheme(struct iwm_softc *sc, uint8_t *data, size_t dlen)
541 {
542 struct iwm_fw_cscheme_list *l = (void *)data;
543
544 if (dlen < sizeof(*l) ||
545 dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
546 return EINVAL;
547
548 /* we don't actually store anything for now, always use s/w crypto */
549
550 return 0;
551 }
552
553 static int
554 iwm_firmware_store_section(struct iwm_softc *sc,
555 enum iwm_ucode_type type, uint8_t *data, size_t dlen)
556 {
557 struct iwm_fw_sects *fws;
558 struct iwm_fw_onesect *fwone;
559
560 if (type >= IWM_UCODE_TYPE_MAX)
561 return EINVAL;
562 if (dlen < sizeof(uint32_t))
563 return EINVAL;
564
565 fws = &sc->sc_fw.fw_sects[type];
566 if (fws->fw_count >= IWM_UCODE_SECT_MAX)
567 return EINVAL;
568
569 fwone = &fws->fw_sect[fws->fw_count];
570
571 /* first 32bit are device load offset */
572 memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));
573
574 /* rest is data */
575 fwone->fws_data = data + sizeof(uint32_t);
576 fwone->fws_len = dlen - sizeof(uint32_t);
577
578 /* for freeing the buffer during driver unload */
579 fwone->fws_alloc = data;
580 fwone->fws_allocsize = dlen;
581
582 fws->fw_count++;
583 fws->fw_totlen += fwone->fws_len;
584
585 return 0;
586 }
587
/* iwlwifi: iwl-drv.c */
/*
 * Wire layout of an IWM_UCODE_TLV_DEF_CALIB section: the ucode image
 * type the default calibration applies to, followed by the calibration
 * control words.  Fields are little-endian on the wire.
 */
struct iwm_tlv_calib_data {
	uint32_t ucode_type;		/* enum iwm_ucode_type, LE32 */
	struct iwm_tlv_calib_ctrl calib;
} __packed;
593
594 static int
595 iwm_set_default_calib(struct iwm_softc *sc, const void *data)
596 {
597 const struct iwm_tlv_calib_data *def_calib = data;
598 uint32_t ucode_type = le32toh(def_calib->ucode_type);
599
600 if (ucode_type >= IWM_UCODE_TYPE_MAX) {
601 DPRINTF(("%s: Wrong ucode_type %u for default "
602 "calibration.\n", DEVNAME(sc), ucode_type));
603 return EINVAL;
604 }
605
606 sc->sc_default_calib[ucode_type].flow_trigger =
607 def_calib->calib.flow_trigger;
608 sc->sc_default_calib[ucode_type].event_trigger =
609 def_calib->calib.event_trigger;
610
611 return 0;
612 }
613
614 static int
615 iwm_read_firmware(struct iwm_softc *sc)
616 {
617 struct iwm_fw_info *fw = &sc->sc_fw;
618 struct iwm_tlv_ucode_header *uhdr;
619 struct iwm_ucode_tlv tlv;
620 enum iwm_ucode_tlv_type tlv_type;
621 uint8_t *data;
622 int error, status;
623 size_t len;
624
625 if (fw->fw_status == IWM_FW_STATUS_NONE) {
626 fw->fw_status = IWM_FW_STATUS_INPROGRESS;
627 } else {
628 while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
629 tsleep(&sc->sc_fw, 0, "iwmfwp", 0);
630 }
631 status = fw->fw_status;
632
633 if (status == IWM_FW_STATUS_DONE)
634 return 0;
635
636 /*
637 * Load firmware into driver memory.
638 * fw_rawdata and fw_rawsize will be set.
639 */
640 error = iwm_firmload(sc);
641 if (error != 0) {
642 aprint_error_dev(sc->sc_dev,
643 "could not read firmware %s (error %d)\n",
644 sc->sc_fwname, error);
645 goto out;
646 }
647
648 /*
649 * Parse firmware contents
650 */
651
652 uhdr = (void *)fw->fw_rawdata;
653 if (*(uint32_t *)fw->fw_rawdata != 0
654 || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
655 aprint_error_dev(sc->sc_dev, "invalid firmware %s\n",
656 sc->sc_fwname);
657 error = EINVAL;
658 goto out;
659 }
660
661 sc->sc_fwver = le32toh(uhdr->ver);
662 data = uhdr->data;
663 len = fw->fw_rawsize - sizeof(*uhdr);
664
665 while (len >= sizeof(tlv)) {
666 size_t tlv_len;
667 void *tlv_data;
668
669 memcpy(&tlv, data, sizeof(tlv));
670 tlv_len = le32toh(tlv.length);
671 tlv_type = le32toh(tlv.type);
672
673 len -= sizeof(tlv);
674 data += sizeof(tlv);
675 tlv_data = data;
676
677 if (len < tlv_len) {
678 aprint_error_dev(sc->sc_dev,
679 "firmware too short: %zu bytes\n", len);
680 error = EINVAL;
681 goto parse_out;
682 }
683
684 switch ((int)tlv_type) {
685 case IWM_UCODE_TLV_PROBE_MAX_LEN:
686 if (tlv_len < sizeof(uint32_t)) {
687 error = EINVAL;
688 goto parse_out;
689 }
690 sc->sc_capa_max_probe_len
691 = le32toh(*(uint32_t *)tlv_data);
692 /* limit it to something sensible */
693 if (sc->sc_capa_max_probe_len > (1<<16)) {
694 DPRINTF(("%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
695 "ridiculous\n", DEVNAME(sc)));
696 error = EINVAL;
697 goto parse_out;
698 }
699 break;
700 case IWM_UCODE_TLV_PAN:
701 if (tlv_len) {
702 error = EINVAL;
703 goto parse_out;
704 }
705 sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
706 break;
707 case IWM_UCODE_TLV_FLAGS:
708 if (tlv_len < sizeof(uint32_t)) {
709 error = EINVAL;
710 goto parse_out;
711 }
712 /*
713 * Apparently there can be many flags, but Linux driver
714 * parses only the first one, and so do we.
715 *
716 * XXX: why does this override IWM_UCODE_TLV_PAN?
717 * Intentional or a bug? Observations from
718 * current firmware file:
719 * 1) TLV_PAN is parsed first
720 * 2) TLV_FLAGS contains TLV_FLAGS_PAN
721 * ==> this resets TLV_PAN to itself... hnnnk
722 */
723 sc->sc_capaflags = le32toh(*(uint32_t *)tlv_data);
724 break;
725 case IWM_UCODE_TLV_CSCHEME:
726 if ((error = iwm_store_cscheme(sc,
727 tlv_data, tlv_len)) != 0)
728 goto parse_out;
729 break;
730 case IWM_UCODE_TLV_NUM_OF_CPU:
731 if (tlv_len != sizeof(uint32_t)) {
732 error = EINVAL;
733 goto parse_out;
734 }
735 if (le32toh(*(uint32_t*)tlv_data) != 1) {
736 DPRINTF(("%s: driver supports "
737 "only TLV_NUM_OF_CPU == 1", DEVNAME(sc)));
738 error = EINVAL;
739 goto parse_out;
740 }
741 break;
742 case IWM_UCODE_TLV_SEC_RT:
743 if ((error = iwm_firmware_store_section(sc,
744 IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len)) != 0)
745 goto parse_out;
746 break;
747 case IWM_UCODE_TLV_SEC_INIT:
748 if ((error = iwm_firmware_store_section(sc,
749 IWM_UCODE_TYPE_INIT, tlv_data, tlv_len)) != 0)
750 goto parse_out;
751 break;
752 case IWM_UCODE_TLV_SEC_WOWLAN:
753 if ((error = iwm_firmware_store_section(sc,
754 IWM_UCODE_TYPE_WOW, tlv_data, tlv_len)) != 0)
755 goto parse_out;
756 break;
757 case IWM_UCODE_TLV_DEF_CALIB:
758 if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
759 error = EINVAL;
760 goto parse_out;
761 }
762 if ((error = iwm_set_default_calib(sc, tlv_data)) != 0)
763 goto parse_out;
764 break;
765 case IWM_UCODE_TLV_PHY_SKU:
766 if (tlv_len != sizeof(uint32_t)) {
767 error = EINVAL;
768 goto parse_out;
769 }
770 sc->sc_fw_phy_config = le32toh(*(uint32_t *)tlv_data);
771 break;
772
773 case IWM_UCODE_TLV_API_CHANGES_SET:
774 case IWM_UCODE_TLV_ENABLED_CAPABILITIES:
775 /* ignore, not used by current driver */
776 break;
777
778 default:
779 DPRINTF(("%s: unknown firmware section %d, abort\n",
780 DEVNAME(sc), tlv_type));
781 error = EINVAL;
782 goto parse_out;
783 }
784
785 len -= roundup(tlv_len, 4);
786 data += roundup(tlv_len, 4);
787 }
788
789 KASSERT(error == 0);
790
791 parse_out:
792 if (error) {
793 aprint_error_dev(sc->sc_dev,
794 "firmware parse error, section type %d\n", tlv_type);
795 }
796
797 if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
798 aprint_error_dev(sc->sc_dev,
799 "device uses unsupported power ops\n");
800 error = ENOTSUP;
801 }
802
803 out:
804 if (error)
805 fw->fw_status = IWM_FW_STATUS_NONE;
806 else
807 fw->fw_status = IWM_FW_STATUS_DONE;
808 wakeup(&sc->sc_fw);
809
810 if (error) {
811 kmem_free(fw->fw_rawdata, fw->fw_rawsize);
812 fw->fw_rawdata = NULL;
813 }
814 return error;
815 }
816
817 /*
818 * basic device access
819 */
820
821 static uint32_t
822 iwm_read_prph(struct iwm_softc *sc, uint32_t addr)
823 {
824 IWM_WRITE(sc,
825 IWM_HBUS_TARG_PRPH_RADDR, ((addr & 0x000fffff) | (3 << 24)));
826 IWM_BARRIER_READ_WRITE(sc);
827 return IWM_READ(sc, IWM_HBUS_TARG_PRPH_RDAT);
828 }
829
830 static void
831 iwm_write_prph(struct iwm_softc *sc, uint32_t addr, uint32_t val)
832 {
833 IWM_WRITE(sc,
834 IWM_HBUS_TARG_PRPH_WADDR, ((addr & 0x000fffff) | (3 << 24)));
835 IWM_BARRIER_WRITE(sc);
836 IWM_WRITE(sc, IWM_HBUS_TARG_PRPH_WDAT, val);
837 }
838
839 #ifdef IWM_DEBUG
840 /* iwlwifi: pcie/trans.c */
841 static int
842 iwm_read_mem(struct iwm_softc *sc, uint32_t addr, void *buf, int dwords)
843 {
844 int offs, ret = 0;
845 uint32_t *vals = buf;
846
847 if (iwm_nic_lock(sc)) {
848 IWM_WRITE(sc, IWM_HBUS_TARG_MEM_RADDR, addr);
849 for (offs = 0; offs < dwords; offs++)
850 vals[offs] = IWM_READ(sc, IWM_HBUS_TARG_MEM_RDAT);
851 iwm_nic_unlock(sc);
852 } else {
853 ret = EBUSY;
854 }
855 return ret;
856 }
857 #endif
858
859 /* iwlwifi: pcie/trans.c */
860 static int
861 iwm_write_mem(struct iwm_softc *sc, uint32_t addr, const void *buf, int dwords)
862 {
863 int offs;
864 const uint32_t *vals = buf;
865
866 if (iwm_nic_lock(sc)) {
867 IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WADDR, addr);
868 /* WADDR auto-increments */
869 for (offs = 0; offs < dwords; offs++) {
870 uint32_t val = vals ? vals[offs] : 0;
871 IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WDAT, val);
872 }
873 iwm_nic_unlock(sc);
874 } else {
875 DPRINTF(("%s: write_mem failed\n", DEVNAME(sc)));
876 return EBUSY;
877 }
878 return 0;
879 }
880
/* Write a single 32-bit word of device memory at 'addr'. */
static int
iwm_write_mem32(struct iwm_softc *sc, uint32_t addr, uint32_t val)
{
	return iwm_write_mem(sc, addr, &val, 1);
}
886
/*
 * Poll register 'reg' until (value & mask) == (bits & mask), delaying
 * 10 us between reads, for at most 'timo' microseconds.  Returns 1 on
 * success, 0 on timeout.
 */
static int
iwm_poll_bit(struct iwm_softc *sc, int reg,
	uint32_t bits, uint32_t mask, int timo)
{
	while ((IWM_READ(sc, reg) & mask) != (bits & mask)) {
		if (timo < 10)
			return 0;	/* out of time */
		timo -= 10;
		DELAY(10);
	}
	return 1;
}
902
903 static int
904 iwm_nic_lock(struct iwm_softc *sc)
905 {
906 int rv = 0;
907
908 IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
909 IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
910
911 if (iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
912 IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
913 IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
914 | IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP, 15000)) {
915 rv = 1;
916 } else {
917 /* jolt */
918 IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_FORCE_NMI);
919 }
920
921 return rv;
922 }
923
/* Release the MAC access request taken by iwm_nic_lock(). */
static void
iwm_nic_unlock(struct iwm_softc *sc)
{
        IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
            IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}
930
931 static void
932 iwm_set_bits_mask_prph(struct iwm_softc *sc,
933 uint32_t reg, uint32_t bits, uint32_t mask)
934 {
935 uint32_t val;
936
937 /* XXX: no error path? */
938 if (iwm_nic_lock(sc)) {
939 val = iwm_read_prph(sc, reg) & mask;
940 val |= bits;
941 iwm_write_prph(sc, reg, val);
942 iwm_nic_unlock(sc);
943 }
944 }
945
/* Set bits in a PRPH register, preserving all other bits. */
static void
iwm_set_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
{
        iwm_set_bits_mask_prph(sc, reg, bits, ~0);
}
951
/* Clear bits in a PRPH register, preserving all other bits. */
static void
iwm_clear_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
{
        iwm_set_bits_mask_prph(sc, reg, 0, ~bits);
}
957
958 /*
959 * DMA resource routines
960 */
961
962 static int
963 iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
964 bus_size_t size, bus_size_t alignment)
965 {
966 int nsegs, error;
967 void *va;
968
969 dma->tag = tag;
970 dma->size = size;
971
972 error = bus_dmamap_create(tag, size, 1, size, 0, BUS_DMA_NOWAIT,
973 &dma->map);
974 if (error != 0)
975 goto fail;
976
977 error = bus_dmamem_alloc(tag, size, alignment, 0, &dma->seg, 1, &nsegs,
978 BUS_DMA_NOWAIT);
979 if (error != 0)
980 goto fail;
981
982 error = bus_dmamem_map(tag, &dma->seg, 1, size, &va,
983 BUS_DMA_NOWAIT);
984 if (error != 0)
985 goto fail;
986 dma->vaddr = va;
987
988 error = bus_dmamap_load(tag, dma->map, dma->vaddr, size, NULL,
989 BUS_DMA_NOWAIT);
990 if (error != 0)
991 goto fail;
992
993 memset(dma->vaddr, 0, size);
994 bus_dmamap_sync(tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE);
995 dma->paddr = dma->map->dm_segs[0].ds_addr;
996
997 return 0;
998
999 fail: iwm_dma_contig_free(dma);
1000 return error;
1001 }
1002
1003 static void
1004 iwm_dma_contig_free(struct iwm_dma_info *dma)
1005 {
1006 if (dma->map != NULL) {
1007 if (dma->vaddr != NULL) {
1008 bus_dmamap_sync(dma->tag, dma->map, 0, dma->size,
1009 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1010 bus_dmamap_unload(dma->tag, dma->map);
1011 bus_dmamem_unmap(dma->tag, dma->vaddr, dma->size);
1012 bus_dmamem_free(dma->tag, &dma->seg, 1);
1013 dma->vaddr = NULL;
1014 }
1015 bus_dmamap_destroy(dma->tag, dma->map);
1016 dma->map = NULL;
1017 }
1018 }
1019
1020 /* fwmem is used to load firmware onto the card */
/*
 * Allocate the DMA-safe staging buffer used to upload firmware
 * sections to the card (sc_fwdmasegsz bytes).
 */
static int
iwm_alloc_fwmem(struct iwm_softc *sc)
{
        /* Must be aligned on a 16-byte boundary. */
        return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
            sc->sc_fwdmasegsz, 16);
}
1028
/* Release the firmware staging buffer. */
static void
iwm_free_fwmem(struct iwm_softc *sc)
{
        iwm_dma_contig_free(&sc->fw_dma);
}
1034
1035 /* tx scheduler rings. not used? */
1036 static int
1037 iwm_alloc_sched(struct iwm_softc *sc)
1038 {
1039 int rv;
1040
1041 /* TX scheduler rings must be aligned on a 1KB boundary. */
1042 rv = iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
1043 __arraycount(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
1044 return rv;
1045 }
1046
/* Release the TX scheduler byte-count tables. */
static void
iwm_free_sched(struct iwm_softc *sc)
{
        iwm_dma_contig_free(&sc->sched_dma);
}
1052
1053 /* keep-warm page is used internally by the card. see iwl-fh.h for more info */
/* Allocate the 4KB "keep warm" page, 4KB aligned. */
static int
iwm_alloc_kw(struct iwm_softc *sc)
{
        return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
}
1059
/* Release the "keep warm" page. */
static void
iwm_free_kw(struct iwm_softc *sc)
{
        iwm_dma_contig_free(&sc->kw_dma);
}
1065
1066 /* interrupt cause table */
/*
 * Allocate the interrupt cause table; the alignment matches the
 * shifted address field programmed into IWM_CSR_DRAM_INT_TBL_REG
 * in iwm_ict_reset().
 */
static int
iwm_alloc_ict(struct iwm_softc *sc)
{
        return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
            IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
}
1073
/* Release the interrupt cause table. */
static void
iwm_free_ict(struct iwm_softc *sc)
{
        iwm_dma_contig_free(&sc->ict_dma);
}
1079
1080 static int
1081 iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1082 {
1083 bus_size_t size;
1084 int i, error;
1085
1086 ring->cur = 0;
1087
1088 /* Allocate RX descriptors (256-byte aligned). */
1089 size = IWM_RX_RING_COUNT * sizeof(uint32_t);
1090 error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
1091 if (error != 0) {
1092 aprint_error_dev(sc->sc_dev,
1093 "could not allocate RX ring DMA memory\n");
1094 goto fail;
1095 }
1096 ring->desc = ring->desc_dma.vaddr;
1097
1098 /* Allocate RX status area (16-byte aligned). */
1099 error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
1100 sizeof(*ring->stat), 16);
1101 if (error != 0) {
1102 aprint_error_dev(sc->sc_dev,
1103 "could not allocate RX status DMA memory\n");
1104 goto fail;
1105 }
1106 ring->stat = ring->stat_dma.vaddr;
1107
1108 /*
1109 * Allocate and map RX buffers.
1110 */
1111 for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1112 struct iwm_rx_data *data = &ring->data[i];
1113
1114 memset(data, 0, sizeof(*data));
1115 error = bus_dmamap_create(sc->sc_dmat, IWM_RBUF_SIZE, 1,
1116 IWM_RBUF_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1117 &data->map);
1118 if (error != 0) {
1119 aprint_error_dev(sc->sc_dev,
1120 "could not create RX buf DMA map\n");
1121 goto fail;
1122 }
1123
1124 if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
1125 goto fail;
1126 }
1127 }
1128 return 0;
1129
1130 fail: iwm_free_rx_ring(sc, ring);
1131 return error;
1132 }
1133
/*
 * Quiesce the RX DMA channel: disable it, then poll (up to ~10ms)
 * for the channel-idle bit before rewinding the software ring index.
 * If the NIC cannot be locked the register writes are skipped.
 */
static void
iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
        int ntries;

        if (iwm_nic_lock(sc)) {
                /* Writing 0 disables the channel. */
                IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
                for (ntries = 0; ntries < 1000; ntries++) {
                        if (IWM_READ(sc, IWM_FH_MEM_RSSR_RX_STATUS_REG) &
                            IWM_FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE)
                                break;
                        DELAY(10);
                }
                iwm_nic_unlock(sc);
        }
        ring->cur = 0;
}
1151
1152 static void
1153 iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1154 {
1155 int i;
1156
1157 iwm_dma_contig_free(&ring->desc_dma);
1158 iwm_dma_contig_free(&ring->stat_dma);
1159
1160 for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1161 struct iwm_rx_data *data = &ring->data[i];
1162
1163 if (data->m != NULL) {
1164 bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1165 data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1166 bus_dmamap_unload(sc->sc_dmat, data->map);
1167 m_freem(data->m);
1168 }
1169 if (data->map != NULL)
1170 bus_dmamap_destroy(sc->sc_dmat, data->map);
1171 }
1172 }
1173
/*
 * Set up TX ring `qid': the TFD descriptor array, and -- for rings up
 * to and including the command queue -- the command buffer area plus
 * one DMA map per slot with precomputed per-slot command/scratch bus
 * addresses.  On failure everything allocated so far is released via
 * iwm_free_tx_ring().
 */
static int
iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
{
        bus_addr_t paddr;
        bus_size_t size;
        int i, error;

        ring->qid = qid;
        ring->queued = 0;
        ring->cur = 0;

        /* Allocate TX descriptors (256-byte aligned). */
        size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
        error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
        if (error != 0) {
                aprint_error_dev(sc->sc_dev,
                    "could not allocate TX ring DMA memory\n");
                goto fail;
        }
        ring->desc = ring->desc_dma.vaddr;

        /*
         * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
         * to allocate commands space for other rings.
         */
        if (qid > IWM_MVM_CMD_QUEUE)
                return 0;

        size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
        error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
        if (error != 0) {
                aprint_error_dev(sc->sc_dev,
                    "could not allocate TX cmd DMA memory\n");
                goto fail;
        }
        ring->cmd = ring->cmd_dma.vaddr;

        /* Walk the command area, recording each slot's bus addresses. */
        paddr = ring->cmd_dma.paddr;
        for (i = 0; i < IWM_TX_RING_COUNT; i++) {
                struct iwm_tx_data *data = &ring->data[i];

                data->cmd_paddr = paddr;
                /* scratch lives inside the slot's iwm_tx_cmd payload */
                data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
                    + offsetof(struct iwm_tx_cmd, scratch);
                paddr += sizeof(struct iwm_device_cmd);

                error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
                    IWM_NUM_OF_TBS, MCLBYTES, 0, BUS_DMA_NOWAIT, &data->map);
                if (error != 0) {
                        aprint_error_dev(sc->sc_dev,
                            "could not create TX buf DMA map\n");
                        goto fail;
                }
        }
        /* paddr must have walked over exactly the whole cmd area. */
        KASSERT(paddr == ring->cmd_dma.paddr + size);
        return 0;

fail:   iwm_free_tx_ring(sc, ring);
        return error;
}
1234
1235 static void
1236 iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1237 {
1238 int i;
1239
1240 for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1241 struct iwm_tx_data *data = &ring->data[i];
1242
1243 if (data->m != NULL) {
1244 bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1245 data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1246 bus_dmamap_unload(sc->sc_dmat, data->map);
1247 m_freem(data->m);
1248 data->m = NULL;
1249 }
1250 }
1251 /* Clear TX descriptors. */
1252 memset(ring->desc, 0, ring->desc_dma.size);
1253 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0,
1254 ring->desc_dma.size, BUS_DMASYNC_PREWRITE);
1255 sc->qfullmsk &= ~(1 << ring->qid);
1256 ring->queued = 0;
1257 ring->cur = 0;
1258 }
1259
1260 static void
1261 iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1262 {
1263 int i;
1264
1265 iwm_dma_contig_free(&ring->desc_dma);
1266 iwm_dma_contig_free(&ring->cmd_dma);
1267
1268 for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1269 struct iwm_tx_data *data = &ring->data[i];
1270
1271 if (data->m != NULL) {
1272 bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1273 data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1274 bus_dmamap_unload(sc->sc_dmat, data->map);
1275 m_freem(data->m);
1276 }
1277 if (data->map != NULL)
1278 bus_dmamap_destroy(sc->sc_dmat, data->map);
1279 }
1280 }
1281
1282 /*
1283 * High-level hardware frobbing routines
1284 */
1285
/*
 * Narrow the interrupt mask so only the RF-kill interrupt is
 * delivered (used while the radio is off or the HW is down).
 */
static void
iwm_enable_rfkill_int(struct iwm_softc *sc)
{
        sc->sc_intmask = IWM_CSR_INT_BIT_RF_KILL;
        IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1292
1293 static int
1294 iwm_check_rfkill(struct iwm_softc *sc)
1295 {
1296 uint32_t v;
1297 int s;
1298 int rv;
1299
1300 s = splnet();
1301
1302 /*
1303 * "documentation" is not really helpful here:
1304 * 27: HW_RF_KILL_SW
1305 * Indicates state of (platform's) hardware RF-Kill switch
1306 *
1307 * But apparently when it's off, it's on ...
1308 */
1309 v = IWM_READ(sc, IWM_CSR_GP_CNTRL);
1310 rv = (v & IWM_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) == 0;
1311 if (rv) {
1312 sc->sc_flags |= IWM_FLAG_RFKILL;
1313 } else {
1314 sc->sc_flags &= ~IWM_FLAG_RFKILL;
1315 }
1316
1317 splx(s);
1318 return rv;
1319 }
1320
/* Enable the full default interrupt set and cache the mask. */
static void
iwm_enable_interrupts(struct iwm_softc *sc)
{
        sc->sc_intmask = IWM_CSR_INI_SET_MASK;
        IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1327
/* Re-program the interrupt mask from the cached sc_intmask. */
static void
iwm_restore_interrupts(struct iwm_softc *sc)
{
        IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
}
1333
/*
 * Mask all interrupt sources and acknowledge anything pending (both
 * the general and the flow-handler status registers), so nothing
 * fires until interrupts are re-enabled.
 */
static void
iwm_disable_interrupts(struct iwm_softc *sc)
{
        int s = splnet();

        /* disable interrupts */
        IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

        /* acknowledge all interrupts */
        IWM_WRITE(sc, IWM_CSR_INT, ~0);
        IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);

        splx(s);
}
1348
/*
 * Wipe and re-program the ICT (interrupt cause table) and switch the
 * driver to ICT interrupt handling.  Interrupts are disabled while
 * the table is replaced and re-enabled afterwards.
 */
static void
iwm_ict_reset(struct iwm_softc *sc)
{
        iwm_disable_interrupts(sc);

        /* Reset ICT table. */
        memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
        sc->ict_cur = 0;

        /* Set physical address of ICT table (4KB aligned). */
        IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
            IWM_CSR_DRAM_INT_TBL_ENABLE
            | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
            | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);

        /* Switch to ICT interrupt mode in driver. */
        sc->sc_flags |= IWM_FLAG_USE_ICT;

        /* Re-enable interrupts. */
        IWM_WRITE(sc, IWM_CSR_INT, ~0);
        iwm_enable_interrupts(sc);
}
1371
#define IWM_HW_READY_TIMEOUT 50
/*
 * Set the NIC_READY bit and poll (IWM_HW_READY_TIMEOUT microsecond
 * budget, see iwm_poll_bit()) until the device reflects it back.
 * Returns nonzero when the hardware reports ready.
 */
static int
iwm_set_hw_ready(struct iwm_softc *sc)
{
        IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
            IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

        return iwm_poll_bit(sc, IWM_CSR_HW_IF_CONFIG_REG,
            IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
            IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
            IWM_HW_READY_TIMEOUT);
}
#undef IWM_HW_READY_TIMEOUT
1385
1386 static int
1387 iwm_prepare_card_hw(struct iwm_softc *sc)
1388 {
1389 int rv = 0;
1390 int t = 0;
1391
1392 if (!iwm_set_hw_ready(sc))
1393 goto out;
1394
1395 /* If HW is not ready, prepare the conditions to check again */
1396 IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1397 IWM_CSR_HW_IF_CONFIG_REG_PREPARE);
1398
1399 do {
1400 if (iwm_set_hw_ready(sc))
1401 goto out;
1402 DELAY(200);
1403 t += 200;
1404 } while (t < 150000);
1405
1406 rv = ETIMEDOUT;
1407
1408 out:
1409 return rv;
1410 }
1411
1412 static void
1413 iwm_apm_config(struct iwm_softc *sc)
1414 {
1415 pcireg_t reg;
1416
1417 reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
1418 sc->sc_cap_off + PCIE_LCSR);
1419 if (reg & PCIE_LCSR_ASPM_L1) {
1420 /* Um the Linux driver prints "Disabling L0S for this one ... */
1421 IWM_SETBITS(sc, IWM_CSR_GIO_REG,
1422 IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
1423 } else {
1424 /* ... and "Enabling" here */
1425 IWM_CLRBITS(sc, IWM_CSR_GIO_REG,
1426 IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
1427 }
1428 }
1429
1430 /*
1431 * Start up NIC's basic functionality after it has been reset
1432 * (e.g. after platform boot, or shutdown via iwm_pcie_apm_stop())
1433 * NOTE: This does not load uCode nor start the embedded processor
1434 */
1435 static int
1436 iwm_apm_init(struct iwm_softc *sc)
1437 {
1438 int error = 0;
1439
1440 DPRINTF(("iwm apm start\n"));
1441
1442 /* Disable L0S exit timer (platform NMI Work/Around) */
1443 IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
1444 IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
1445
1446 /*
1447 * Disable L0s without affecting L1;
1448 * don't wait for ICH L0s (ICH bug W/A)
1449 */
1450 IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
1451 IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
1452
1453 /* Set FH wait threshold to maximum (HW error during stress W/A) */
1454 IWM_SETBITS(sc, IWM_CSR_DBG_HPET_MEM_REG, IWM_CSR_DBG_HPET_MEM_REG_VAL);
1455
1456 /*
1457 * Enable HAP INTA (interrupt from management bus) to
1458 * wake device's PCI Express link L1a -> L0s
1459 */
1460 IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1461 IWM_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
1462
1463 iwm_apm_config(sc);
1464
1465 #if 0 /* not for 7k */
1466 /* Configure analog phase-lock-loop before activating to D0A */
1467 if (trans->cfg->base_params->pll_cfg_val)
1468 IWM_SETBITS(trans, IWM_CSR_ANA_PLL_CFG,
1469 trans->cfg->base_params->pll_cfg_val);
1470 #endif
1471
1472 /*
1473 * Set "initialization complete" bit to move adapter from
1474 * D0U* --> D0A* (powered-up active) state.
1475 */
1476 IWM_SETBITS(sc, IWM_CSR_GP_CNTRL, IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1477
1478 /*
1479 * Wait for clock stabilization; once stabilized, access to
1480 * device-internal resources is supported, e.g. iwm_write_prph()
1481 * and accesses to uCode SRAM.
1482 */
1483 if (!iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
1484 IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
1485 IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
1486 aprint_error_dev(sc->sc_dev,
1487 "timeout waiting for clock stabilization\n");
1488 goto out;
1489 }
1490
1491 /*
1492 * This is a bit of an abuse - This is needed for 7260 / 3160
1493 * only check host_interrupt_operation_mode even if this is
1494 * not related to host_interrupt_operation_mode.
1495 *
1496 * Enable the oscillator to count wake up time for L1 exit. This
1497 * consumes slightly more power (100uA) - but allows to be sure
1498 * that we wake up from L1 on time.
1499 *
1500 * This looks weird: read twice the same register, discard the
1501 * value, set a bit, and yet again, read that same register
1502 * just to discard the value. But that's the way the hardware
1503 * seems to like it.
1504 */
1505 iwm_read_prph(sc, IWM_OSC_CLK);
1506 iwm_read_prph(sc, IWM_OSC_CLK);
1507 iwm_set_bits_prph(sc, IWM_OSC_CLK, IWM_OSC_CLK_FORCE_CONTROL);
1508 iwm_read_prph(sc, IWM_OSC_CLK);
1509 iwm_read_prph(sc, IWM_OSC_CLK);
1510
1511 /*
1512 * Enable DMA clock and wait for it to stabilize.
1513 *
1514 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
1515 * do not disable clocks. This preserves any hardware bits already
1516 * set by default in "CLK_CTRL_REG" after reset.
1517 */
1518 iwm_write_prph(sc, IWM_APMG_CLK_EN_REG, IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1519 //kpause("iwmapm", 0, mstohz(20), NULL);
1520 DELAY(20);
1521
1522 /* Disable L1-Active */
1523 iwm_set_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1524 IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1525
1526 /* Clear the interrupt in APMG if the NIC is in RFKILL */
1527 iwm_write_prph(sc, IWM_APMG_RTC_INT_STT_REG,
1528 IWM_APMG_RTC_INT_STT_RFKILL);
1529
1530 out:
1531 if (error)
1532 aprint_error_dev(sc->sc_dev, "apm init error %d\n", error);
1533 return error;
1534 }
1535
1536 /* iwlwifi/pcie/trans.c */
/*
 * Stop the device's busmaster DMA and poll (100us budget) for the
 * master-disabled acknowledgement; logs on timeout but returns void.
 */
static void
iwm_apm_stop(struct iwm_softc *sc)
{
        /* stop device's busmaster DMA activity */
        IWM_SETBITS(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_STOP_MASTER);

        if (!iwm_poll_bit(sc, IWM_CSR_RESET,
            IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED,
            IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100))
                aprint_error_dev(sc->sc_dev, "timeout waiting for master\n");
        DPRINTF(("iwm apm stop\n"));
}
1549
1550 /* iwlwifi pcie/trans.c */
1551 static int
1552 iwm_start_hw(struct iwm_softc *sc)
1553 {
1554 int error;
1555
1556 if ((error = iwm_prepare_card_hw(sc)) != 0)
1557 return error;
1558
1559 /* Reset the entire device */
1560 IWM_WRITE(sc, IWM_CSR_RESET,
1561 IWM_CSR_RESET_REG_FLAG_SW_RESET |
1562 IWM_CSR_RESET_REG_FLAG_NEVO_RESET);
1563 DELAY(10);
1564
1565 if ((error = iwm_apm_init(sc)) != 0)
1566 return error;
1567
1568 iwm_enable_rfkill_int(sc);
1569 iwm_check_rfkill(sc);
1570
1571 return 0;
1572 }
1573
1574 /* iwlwifi pcie/trans.c */
1575
/*
 * Full device shutdown: silence interrupts, stop the TX scheduler and
 * all DMA channels, drain the rings, power down the busmaster clocks,
 * stop the APM and reset the on-board processor.  The RF-kill
 * interrupt is re-armed at the end so switch changes are still seen.
 */
static void
iwm_stop_device(struct iwm_softc *sc)
{
        int chnl, ntries;
        int qid;

        /* tell the device to stop sending interrupts */
        iwm_disable_interrupts(sc);

        /* device going down, Stop using ICT table */
        sc->sc_flags &= ~IWM_FLAG_USE_ICT;

        /* stop tx and rx. tx and rx bits, as usual, are from if_iwn */

        iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

        /* Stop all DMA channels. */
        if (iwm_nic_lock(sc)) {
                for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
                        IWM_WRITE(sc,
                            IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
                        /* Poll (up to ~4ms) for the channel-idle bit. */
                        for (ntries = 0; ntries < 200; ntries++) {
                                uint32_t r;

                                r = IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG);
                                if (r & IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(
                                    chnl))
                                        break;
                                DELAY(20);
                        }
                }
                iwm_nic_unlock(sc);
        }

        /* Stop RX ring. */
        iwm_reset_rx_ring(sc, &sc->rxq);

        /* Reset all TX rings. */
        for (qid = 0; qid < __arraycount(sc->txq); qid++)
                iwm_reset_tx_ring(sc, &sc->txq[qid]);

        /*
         * Power-down device's busmaster DMA clocks
         */
        iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG, IWM_APMG_CLK_VAL_DMA_CLK_RQT);
        DELAY(5);

        /* Make sure (redundant) we've released our request to stay awake */
        IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
            IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

        /* Stop the device, and put it in low power state */
        iwm_apm_stop(sc);

        /* Upon stop, the APM issues an interrupt if HW RF kill is set.
         * Clean again the interrupt here
         */
        iwm_disable_interrupts(sc);
        /* stop and reset the on-board processor */
        IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_NEVO_RESET);

        /*
         * Even if we stop the HW, we still want the RF kill
         * interrupt
         */
        iwm_enable_rfkill_int(sc);
        iwm_check_rfkill(sc);
}
1644
1645 /* iwlwifi pcie/trans.c (always main power) */
/* Select VMAIN as the device's power source in the APMG PS control. */
static void
iwm_set_pwr(struct iwm_softc *sc)
{
        iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
            IWM_APMG_PS_CTRL_VAL_PWR_SRC_VMAIN, ~IWM_APMG_PS_CTRL_MSK_PWR_SRC);
}
1652
1653 /* iwlwifi: mvm/ops.c */
/*
 * Program IWM_CSR_HW_IF_CONFIG_REG with the MAC step/dash (taken from
 * the hardware revision) and the radio type/step/dash (taken from the
 * firmware PHY config word), then apply the early-PCIe-power-off
 * workaround in the APMG power-state control register.
 */
static void
iwm_mvm_nic_config(struct iwm_softc *sc)
{
        uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
        uint32_t reg_val = 0;

        /* Unpack the radio configuration fields from sc_fw_phy_config. */
        radio_cfg_type = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
            IWM_FW_PHY_CFG_RADIO_TYPE_POS;
        radio_cfg_step = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
            IWM_FW_PHY_CFG_RADIO_STEP_POS;
        radio_cfg_dash = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
            IWM_FW_PHY_CFG_RADIO_DASH_POS;

        /* SKU control */
        reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
            IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
        reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
            IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;

        /* radio configuration */
        reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
        reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
        reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;

        IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);

        DPRINTF(("Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
            radio_cfg_step, radio_cfg_dash));

        /*
         * W/A : NIC is stuck in a reset state after Early PCIe power off
         * (PCIe power is lost before PERST# is asserted), causing ME FW
         * to lose ownership and not being able to obtain it back.
         */
        iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
            IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
            ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
}
1692
/*
 * Program the RX DMA engine: stop the channel, point it at the
 * descriptor ring and status area, then re-enable it with 4KB
 * receive buffers.  Requires the NIC lock; returns 0 or EBUSY.
 */
static int
iwm_nic_rx_init(struct iwm_softc *sc)
{
        if (!iwm_nic_lock(sc))
                return EBUSY;

        /*
         * Initialize RX ring. This is from the iwn driver.
         */
        memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));

        /* stop DMA */
        IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
        IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
        IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
        IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
        IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

        /* Set physical address of RX ring (256-byte aligned). */
        IWM_WRITE(sc,
            IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);

        /* Set physical address of RX status (16-byte aligned). */
        IWM_WRITE(sc,
            IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);

        /* Enable RX. */
        /*
         * Note: Linux driver also sets this:
         *  (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
         *
         * It causes weird behavior.  YMMV.
         */
        IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
            IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL            |
            IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY               |  /* HW bug */
            IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL   |
            IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K            |
            IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);

        IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
        IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);

        /*
         * Thus sayeth el jefe (iwlwifi) via a comment:
         *
         * This value should initially be 0 (before preparing any
         * RBs), should be 8 after preparing the first 8 RBs (for example)
         */
        IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);

        iwm_nic_unlock(sc);

        return 0;
}
1748
/*
 * Program the TX side: deactivate the scheduler, hand the device the
 * "keep warm" page, and point each hardware queue at its descriptor
 * ring.  Requires the NIC lock; returns 0 or EBUSY.
 */
static int
iwm_nic_tx_init(struct iwm_softc *sc)
{
        int qid;

        if (!iwm_nic_lock(sc))
                return EBUSY;

        /* Deactivate TX scheduler. */
        iwm_write_prph(sc, IWM_SCD_TXFACT, 0);

        /* Set physical address of "keep warm" page (16-byte aligned). */
        IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);

        /* Initialize TX rings. */
        for (qid = 0; qid < __arraycount(sc->txq); qid++) {
                struct iwm_tx_ring *txq = &sc->txq[qid];

                /* Set physical address of TX ring (256-byte aligned). */
                IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
                    txq->desc_dma.paddr >> 8);
                DPRINTF(("loading ring %d descriptors (%p) at %lx\n",
                    qid, txq->desc, txq->desc_dma.paddr >> 8));
        }
        iwm_nic_unlock(sc);

        return 0;
}
1777
1778 static int
1779 iwm_nic_init(struct iwm_softc *sc)
1780 {
1781 int error;
1782
1783 iwm_apm_init(sc);
1784 iwm_set_pwr(sc);
1785
1786 iwm_mvm_nic_config(sc);
1787
1788 if ((error = iwm_nic_rx_init(sc)) != 0)
1789 return error;
1790
1791 /*
1792 * Ditto for TX, from iwn
1793 */
1794 if ((error = iwm_nic_tx_init(sc)) != 0)
1795 return error;
1796
1797 DPRINTF(("shadow registers enabled\n"));
1798 IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
1799
1800 return 0;
1801 }
1802
/* Firmware TX FIFO numbers; FIFO 5 is the multicast FIFO. */
enum iwm_mvm_tx_fifo {
        IWM_MVM_TX_FIFO_BK = 0,
        IWM_MVM_TX_FIFO_BE,
        IWM_MVM_TX_FIFO_VI,
        IWM_MVM_TX_FIFO_VO,
        IWM_MVM_TX_FIFO_MCAST = 5,
};
1810
/*
 * Map an access category index to a firmware TX FIFO.
 * NOTE(review): listed VO-first, i.e. index 0 is the highest-priority
 * AC -- confirm this matches the AC numbering used by the callers.
 */
static const uint8_t iwm_mvm_ac_to_tx_fifo[] = {
        IWM_MVM_TX_FIFO_VO,
        IWM_MVM_TX_FIFO_VI,
        IWM_MVM_TX_FIFO_BE,
        IWM_MVM_TX_FIFO_BK,
};
1817
/*
 * Activate TX queue `qid' and bind it to firmware FIFO `fifo'.
 * Programs the scheduler (SCD) via PRPH registers and the per-queue
 * context in SRAM, in the order the hardware expects: deactivate,
 * configure chaining/aggregation, reset the read/write pointers,
 * write the SRAM context, then mark the queue active.
 */
static void
iwm_enable_txq(struct iwm_softc *sc, int qid, int fifo)
{
        if (!iwm_nic_lock(sc)) {
                DPRINTF(("%s: cannot enable txq %d\n", DEVNAME(sc), qid));
                return; /* XXX return EBUSY */
        }

        /* unactivate before configuration */
        iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
            (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
            | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));

        /* chaining is only selected for non-command queues */
        if (qid != IWM_MVM_CMD_QUEUE) {
                iwm_set_bits_prph(sc, IWM_SCD_QUEUECHAIN_SEL, (1 << qid));
        }

        /* no aggregation on this queue */
        iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));

        /* reset the HW write pointer and the SCD read pointer to slot 0 */
        IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
        iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);

        /* clear the first word of the queue's SRAM context */
        iwm_write_mem32(sc, sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
        /* Set scheduler window size and frame limit. */
        iwm_write_mem32(sc,
            sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
            sizeof(uint32_t),
            ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
            IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
            ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
            IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));

        /* finally mark the queue active on the requested FIFO */
        iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
            (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
            (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
            (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
            IWM_SCD_QUEUE_STTS_REG_MSK);

        iwm_nic_unlock(sc);

        DPRINTF(("enabled txq %d FIFO %d\n", qid, fifo));
}
1860
/*
 * Final bring-up after the firmware reports ALIVE: verify the
 * scheduler SRAM base, reset the ICT table, wipe the scheduler
 * context area, enable the command queue (FIFO 7) and all TX DMA
 * channels, and re-enable L1-Active.  Returns 0 or an errno.
 */
static int
iwm_post_alive(struct iwm_softc *sc)
{
        int nwords;
        int error, chnl;

        if (!iwm_nic_lock(sc))
                return EBUSY;

        /* The firmware's idea of the scheduler base must match ours. */
        if (sc->sched_base != iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR)) {
                DPRINTF(("%s: sched addr mismatch", DEVNAME(sc)));
                error = EINVAL;
                goto out;
        }

        iwm_ict_reset(sc);

        /* Clear TX scheduler state in SRAM. */
        nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
            IWM_SCD_CONTEXT_MEM_LOWER_BOUND)
            / sizeof(uint32_t);
        error = iwm_write_mem(sc,
            sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND,
            NULL, nwords);
        if (error)
                goto out;

        /* Set physical address of TX scheduler rings (1KB aligned). */
        iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);

        iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);

        /* enable command channel */
        iwm_enable_txq(sc, IWM_MVM_CMD_QUEUE, 7);

        iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);

        /* Enable DMA channels. */
        for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
                IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
                    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
                    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
        }

        IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
            IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

        /* Enable L1-Active */
        iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
            IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

 out:
        iwm_nic_unlock(sc);
        return error;
}
1916
1917 /*
1918 * PHY db
1919 * iwlwifi/iwl-phy-db.c
1920 */
1921
1922 /*
1923 * BEGIN iwl-phy-db.c
1924 */
1925
/*
 * Sections of the PHY calibration database, as delivered by the
 * firmware after the init image has run its calibrations.
 */
enum iwm_phy_db_section_type {
        IWM_PHY_DB_CFG = 1,
        IWM_PHY_DB_CALIB_NCH,
        IWM_PHY_DB_UNUSED,
        IWM_PHY_DB_CALIB_CHG_PAPD,
        IWM_PHY_DB_CALIB_CHG_TXP,
        IWM_PHY_DB_MAX
};

#define IWM_PHY_DB_CMD 0x6c /* TEMP API - The actual is 0x8c */
1936
1937 /*
1938 * phy db - configure operational ucode
1939 */
/*
 * Wire format for IWM_PHY_DB_CMD: section type and payload length,
 * followed by `length' bytes of section data (flexible array member).
 */
struct iwm_phy_db_cmd {
        uint16_t type;
        uint16_t length;
        uint8_t data[];
} __packed;
1945
/* for parsing of tx power channel group data that comes from the firmware*/
struct iwm_phy_db_chg_txp {
        uint32_t space;           /* not referenced by this driver */
        uint16_t max_channel_idx; /* highest channel index in group (LE) */
} __packed;
1951
1952 /*
1953 * phy db - Receive phy db chunk after calibrations
1954 */
/*
 * Firmware notification carrying one phy db chunk; same layout as
 * iwm_phy_db_cmd (type, length, then `length' bytes of data).
 */
struct iwm_calib_res_notif_phy_db {
        uint16_t type;
        uint16_t length;
        uint8_t data[];
} __packed;
1960
1961 /*
1962 * get phy db section: returns a pointer to a phy db section specified by
1963 * type and channel group id.
1964 */
1965 static struct iwm_phy_db_entry *
1966 iwm_phy_db_get_section(struct iwm_softc *sc,
1967 enum iwm_phy_db_section_type type, uint16_t chg_id)
1968 {
1969 struct iwm_phy_db *phy_db = &sc->sc_phy_db;
1970
1971 if (type >= IWM_PHY_DB_MAX)
1972 return NULL;
1973
1974 switch (type) {
1975 case IWM_PHY_DB_CFG:
1976 return &phy_db->cfg;
1977 case IWM_PHY_DB_CALIB_NCH:
1978 return &phy_db->calib_nch;
1979 case IWM_PHY_DB_CALIB_CHG_PAPD:
1980 if (chg_id >= IWM_NUM_PAPD_CH_GROUPS)
1981 return NULL;
1982 return &phy_db->calib_ch_group_papd[chg_id];
1983 case IWM_PHY_DB_CALIB_CHG_TXP:
1984 if (chg_id >= IWM_NUM_TXP_CH_GROUPS)
1985 return NULL;
1986 return &phy_db->calib_ch_group_txp[chg_id];
1987 default:
1988 return NULL;
1989 }
1990 return NULL;
1991 }
1992
1993 static int
1994 iwm_phy_db_set_section(struct iwm_softc *sc,
1995 struct iwm_calib_res_notif_phy_db *phy_db_notif)
1996 {
1997 enum iwm_phy_db_section_type type = le16toh(phy_db_notif->type);
1998 uint16_t size = le16toh(phy_db_notif->length);
1999 struct iwm_phy_db_entry *entry;
2000 uint16_t chg_id = 0;
2001
2002 if (type == IWM_PHY_DB_CALIB_CHG_PAPD ||
2003 type == IWM_PHY_DB_CALIB_CHG_TXP)
2004 chg_id = le16toh(*(uint16_t *)phy_db_notif->data);
2005
2006 entry = iwm_phy_db_get_section(sc, type, chg_id);
2007 if (!entry)
2008 return EINVAL;
2009
2010 if (entry->data)
2011 kmem_free(entry->data, entry->size);
2012 entry->data = kmem_alloc(size, KM_NOSLEEP);
2013 if (!entry->data) {
2014 entry->size = 0;
2015 return ENOMEM;
2016 }
2017 memcpy(entry->data, phy_db_notif->data, size);
2018 entry->size = size;
2019
2020 DPRINTFN(10, ("%s(%d): [PHYDB]SET: Type %d , Size: %d, data: %p\n",
2021 __func__, __LINE__, type, size, entry->data));
2022
2023 return 0;
2024 }
2025
2026 static int
2027 iwm_is_valid_channel(uint16_t ch_id)
2028 {
2029 if (ch_id <= 14 ||
2030 (36 <= ch_id && ch_id <= 64 && ch_id % 4 == 0) ||
2031 (100 <= ch_id && ch_id <= 140 && ch_id % 4 == 0) ||
2032 (145 <= ch_id && ch_id <= 165 && ch_id % 4 == 1))
2033 return 1;
2034 return 0;
2035 }
2036
/*
 * Map a channel number to its index in the firmware's channel table
 * (2GHz channels 1-14 map to 0-13, the valid 5GHz channels follow).
 * Returns 0xff for channels iwm_is_valid_channel() rejects.
 */
static uint8_t
iwm_ch_id_to_ch_index(uint16_t ch_id)
{
        uint8_t index;

        if (!iwm_is_valid_channel(ch_id))
                return 0xff;

        if (ch_id <= 14)
                index = ch_id - 1;
        else if (ch_id <= 64)
                index = (ch_id + 20) / 4;
        else if (ch_id <= 140)
                index = (ch_id - 12) / 4;
        else
                index = (ch_id - 13) / 4;
        return index;
}
2051
2052
2053 static uint16_t
2054 iwm_channel_id_to_papd(uint16_t ch_id)
2055 {
2056 if (!iwm_is_valid_channel(ch_id))
2057 return 0xff;
2058
2059 if (1 <= ch_id && ch_id <= 14)
2060 return 0;
2061 if (36 <= ch_id && ch_id <= 64)
2062 return 1;
2063 if (100 <= ch_id && ch_id <= 140)
2064 return 2;
2065 return 3;
2066 }
2067
/*
 * Map a channel number to its TX-power calibration channel group by
 * scanning the cached TXP sections, or 0xff when the channel is
 * invalid or the needed section has not been received yet.
 */
static uint16_t
iwm_channel_id_to_txp(struct iwm_softc *sc, uint16_t ch_id)
{
	struct iwm_phy_db *phy_db = &sc->sc_phy_db;
	struct iwm_phy_db_chg_txp *txp_chg;
	int i;
	uint8_t ch_index = iwm_ch_id_to_ch_index(ch_id);

	if (ch_index == 0xff)
		return 0xff;

	for (i = 0; i < IWM_NUM_TXP_CH_GROUPS; i++) {
		/* Group data is filled in by iwm_phy_db_set_section(). */
		txp_chg = (void *)phy_db->calib_ch_group_txp[i].data;
		if (!txp_chg)
			return 0xff;
		/*
		 * Looking for the first channel group whose max channel
		 * is higher than the wanted channel.
		 */
		if (le16toh(txp_chg->max_channel_idx) >= ch_index)
			return i;
	}
	return 0xff;
}
2092
2093 static int
2094 iwm_phy_db_get_section_data(struct iwm_softc *sc,
2095 uint32_t type, uint8_t **data, uint16_t *size, uint16_t ch_id)
2096 {
2097 struct iwm_phy_db_entry *entry;
2098 uint16_t ch_group_id = 0;
2099
2100 /* find wanted channel group */
2101 if (type == IWM_PHY_DB_CALIB_CHG_PAPD)
2102 ch_group_id = iwm_channel_id_to_papd(ch_id);
2103 else if (type == IWM_PHY_DB_CALIB_CHG_TXP)
2104 ch_group_id = iwm_channel_id_to_txp(sc, ch_id);
2105
2106 entry = iwm_phy_db_get_section(sc, type, ch_group_id);
2107 if (!entry)
2108 return EINVAL;
2109
2110 *data = entry->data;
2111 *size = entry->size;
2112
2113 DPRINTFN(10, ("%s(%d): [PHYDB] GET: Type %d , Size: %d\n",
2114 __func__, __LINE__, type, *size));
2115
2116 return 0;
2117 }
2118
2119 static int
2120 iwm_send_phy_db_cmd(struct iwm_softc *sc, uint16_t type,
2121 uint16_t length, void *data)
2122 {
2123 struct iwm_phy_db_cmd phy_db_cmd;
2124 struct iwm_host_cmd cmd = {
2125 .id = IWM_PHY_DB_CMD,
2126 .flags = IWM_CMD_SYNC,
2127 };
2128
2129 DPRINTFN(10, ("Sending PHY-DB hcmd of type %d, of length %d\n", type, length));
2130
2131 /* Set phy db cmd variables */
2132 phy_db_cmd.type = le16toh(type);
2133 phy_db_cmd.length = le16toh(length);
2134
2135 /* Set hcmd variables */
2136 cmd.data[0] = &phy_db_cmd;
2137 cmd.len[0] = sizeof(struct iwm_phy_db_cmd);
2138 cmd.data[1] = data;
2139 cmd.len[1] = length;
2140 cmd.dataflags[1] = IWM_HCMD_DFL_NOCOPY;
2141
2142 return iwm_send_cmd(sc, &cmd);
2143 }
2144
2145 static int
2146 iwm_phy_db_send_all_channel_groups(struct iwm_softc *sc,
2147 enum iwm_phy_db_section_type type, uint8_t max_ch_groups)
2148 {
2149 uint16_t i;
2150 int err;
2151 struct iwm_phy_db_entry *entry;
2152
2153 /* Send all the channel-specific groups to operational fw */
2154 for (i = 0; i < max_ch_groups; i++) {
2155 entry = iwm_phy_db_get_section(sc, type, i);
2156 if (!entry)
2157 return EINVAL;
2158
2159 if (!entry->size)
2160 continue;
2161
2162 /* Send the requested PHY DB section */
2163 err = iwm_send_phy_db_cmd(sc, type, entry->size, entry->data);
2164 if (err) {
2165 DPRINTF(("%s: Can't SEND phy_db section %d (%d), "
2166 "err %d\n", DEVNAME(sc), type, i, err));
2167 return err;
2168 }
2169
2170 DPRINTFN(10, ("Sent PHY_DB HCMD, type = %d num = %d\n", type, i));
2171 }
2172
2173 return 0;
2174 }
2175
/*
 * Replay the whole cached PHY DB (config, non-channel calibration,
 * then the PAPD and TXP channel groups) to the runtime firmware
 * image.  Returns 0 or the first error encountered.
 */
static int
iwm_send_phy_db_data(struct iwm_softc *sc)
{
	uint8_t *data = NULL;
	uint16_t size = 0;
	int err;

	DPRINTF(("Sending phy db data and configuration to runtime image\n"));

	/* Send PHY DB CFG section */
	err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CFG, &data, &size, 0);
	if (err) {
		DPRINTF(("%s: Cannot get Phy DB cfg section, %d\n",
		    DEVNAME(sc), err));
		return err;
	}

	err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CFG, size, data);
	if (err) {
		DPRINTF(("%s: Cannot send HCMD of Phy DB cfg section, %d\n",
		    DEVNAME(sc), err));
		return err;
	}

	/* Send the non-channel-specific calibration section. */
	err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CALIB_NCH,
	    &data, &size, 0);
	if (err) {
		DPRINTF(("%s: Cannot get Phy DB non specific channel section, "
		    "%d\n", DEVNAME(sc), err));
		return err;
	}

	err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CALIB_NCH, size, data);
	if (err) {
		DPRINTF(("%s: Cannot send HCMD of Phy DB non specific channel "
		    "sect, %d\n", DEVNAME(sc), err));
		return err;
	}

	/* Send all the PAPD channel specific data */
	err = iwm_phy_db_send_all_channel_groups(sc,
	    IWM_PHY_DB_CALIB_CHG_PAPD, IWM_NUM_PAPD_CH_GROUPS);
	if (err) {
		DPRINTF(("%s: Cannot send channel specific PAPD groups, %d\n",
		    DEVNAME(sc), err));
		return err;
	}

	/* Send all the TXP channel specific data */
	err = iwm_phy_db_send_all_channel_groups(sc,
	    IWM_PHY_DB_CALIB_CHG_TXP, IWM_NUM_TXP_CH_GROUPS);
	if (err) {
		DPRINTF(("%s: Cannot send channel specific TX power groups, "
		    "%d\n", DEVNAME(sc), err));
		return err;
	}

	DPRINTF(("Finished sending phy db non channel data\n"));
	return 0;
}
2236
2237 /*
2238 * END iwl-phy-db.c
2239 */
2240
2241 /*
2242 * BEGIN iwlwifi/mvm/time-event.c
2243 */
2244
2245 /*
2246 * For the high priority TE use a time event type that has similar priority to
2247 * the FW's action scan priority.
2248 */
2249 #define IWM_MVM_ROC_TE_TYPE_NORMAL IWM_TE_P2P_DEVICE_DISCOVERABLE
2250 #define IWM_MVM_ROC_TE_TYPE_MGMT_TX IWM_TE_P2P_CLIENT_ASSOC
2251
2252 /* used to convert from time event API v2 to v1 */
2253 #define IWM_TE_V2_DEP_POLICY_MSK (IWM_TE_V2_DEP_OTHER | IWM_TE_V2_DEP_TSF |\
2254 IWM_TE_V2_EVENT_SOCIOPATHIC)
/* Extract the notification bits from a little-endian v2 TE policy. */
static inline uint16_t
iwm_te_v2_get_notify(uint16_t policy)
{
	return le16toh(policy) & IWM_TE_V2_NOTIF_MSK;
}
2260
/*
 * Extract the dependency-policy bits from a little-endian v2 TE
 * policy, shifted down for the v1 dep_policy field.
 */
static inline uint16_t
iwm_te_v2_get_dep_policy(uint16_t policy)
{
	return (le16toh(policy) & IWM_TE_V2_DEP_POLICY_MSK) >>
		IWM_TE_V2_PLACEMENT_POS;
}
2267
/* Extract the absence bit from a little-endian v2 TE policy. */
static inline uint16_t
iwm_te_v2_get_absence(uint16_t policy)
{
	return (le16toh(policy) & IWM_TE_V2_ABSENCE) >> IWM_TE_V2_ABSENCE_POS;
}
2273
/*
 * Translate a v2 time-event command into the v1 layout for firmware
 * that lacks IWM_UCODE_TLV_FLAGS_TIME_EVENT_API_V2.  Fields common to
 * both layouts are copied as-is (already little-endian); the packed
 * v2 policy word is unpacked into the separate v1 fields.
 */
static void
iwm_mvm_te_v2_to_v1(const struct iwm_time_event_cmd_v2 *cmd_v2,
	struct iwm_time_event_cmd_v1 *cmd_v1)
{
	cmd_v1->id_and_color = cmd_v2->id_and_color;
	cmd_v1->action = cmd_v2->action;
	cmd_v1->id = cmd_v2->id;
	cmd_v1->apply_time = cmd_v2->apply_time;
	cmd_v1->max_delay = cmd_v2->max_delay;
	cmd_v1->depends_on = cmd_v2->depends_on;
	cmd_v1->interval = cmd_v2->interval;
	cmd_v1->duration = cmd_v2->duration;
	/* v1 uses a 32-bit sentinel for "repeat forever". */
	if (cmd_v2->repeat == IWM_TE_V2_REPEAT_ENDLESS)
		cmd_v1->repeat = htole32(IWM_TE_V1_REPEAT_ENDLESS);
	else
		cmd_v1->repeat = htole32(cmd_v2->repeat);
	cmd_v1->max_frags = htole32(cmd_v2->max_frags);
	cmd_v1->interval_reciprocal = 0; /* unused */

	/* Unpack the v2 policy bit-field into the discrete v1 fields. */
	cmd_v1->dep_policy = htole32(iwm_te_v2_get_dep_policy(cmd_v2->policy));
	cmd_v1->is_present = htole32(!iwm_te_v2_get_absence(cmd_v2->policy));
	cmd_v1->notify = htole32(iwm_te_v2_get_notify(cmd_v2->policy));
}
2297
2298 static int
2299 iwm_mvm_send_time_event_cmd(struct iwm_softc *sc,
2300 const struct iwm_time_event_cmd_v2 *cmd)
2301 {
2302 struct iwm_time_event_cmd_v1 cmd_v1;
2303
2304 if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_TIME_EVENT_API_V2)
2305 return iwm_mvm_send_cmd_pdu(sc, IWM_TIME_EVENT_CMD,
2306 IWM_CMD_SYNC, sizeof(*cmd), cmd);
2307
2308 iwm_mvm_te_v2_to_v1(cmd, &cmd_v1);
2309 return iwm_mvm_send_cmd_pdu(sc, IWM_TIME_EVENT_CMD, IWM_CMD_SYNC,
2310 sizeof(cmd_v1), &cmd_v1);
2311 }
2312
2313 static int
2314 iwm_mvm_time_event_send_add(struct iwm_softc *sc, struct iwm_node *in,
2315 void *te_data, struct iwm_time_event_cmd_v2 *te_cmd)
2316 {
2317 int ret;
2318
2319 DPRINTF(("Add new TE, duration %d TU\n", le32toh(te_cmd->duration)));
2320
2321 ret = iwm_mvm_send_time_event_cmd(sc, te_cmd);
2322 if (ret) {
2323 DPRINTF(("%s: Couldn't send IWM_TIME_EVENT_CMD: %d\n",
2324 DEVNAME(sc), ret));
2325 }
2326
2327 return ret;
2328 }
2329
/*
 * Schedule a one-shot session-protection time event around an
 * association attempt, so the firmware stays on the channel for up to
 * `duration` TU (starting at the current device time, at most
 * `max_delay` TU late).
 *
 * NOTE(review): min_duration is accepted but never used here —
 * presumably a leftover from the reference driver; confirm before
 * relying on it.
 */
static void
iwm_mvm_protect_session(struct iwm_softc *sc, struct iwm_node *in,
	uint32_t duration, uint32_t min_duration, uint32_t max_delay)
{
	struct iwm_time_event_cmd_v2 time_cmd;

	memset(&time_cmd, 0, sizeof(time_cmd));

	time_cmd.action = htole32(IWM_FW_CTXT_ACTION_ADD);
	time_cmd.id_and_color =
	    htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
	time_cmd.id = htole32(IWM_TE_BSS_STA_AGGRESSIVE_ASSOC);

	/* Anchor the event at the device's current timestamp. */
	time_cmd.apply_time = htole32(iwm_read_prph(sc,
	    IWM_DEVICE_SYSTEM_TIME_REG));

	time_cmd.max_frags = IWM_TE_V2_FRAG_NONE;
	time_cmd.max_delay = htole32(max_delay);
	/* TODO: why do we need to interval = bi if it is not periodic? */
	time_cmd.interval = htole32(1);
	time_cmd.duration = htole32(duration);
	time_cmd.repeat = 1;
	time_cmd.policy
	    = htole32(IWM_TE_V2_NOTIF_HOST_EVENT_START |
	        IWM_TE_V2_NOTIF_HOST_EVENT_END);

	iwm_mvm_time_event_send_add(sc, in, /*te_data*/NULL, &time_cmd);
}
2358
2359 /*
2360 * END iwlwifi/mvm/time-event.c
2361 */
2362
2363 /*
2364 * NVM read access and content parsing. We do not support
2365 * external NVM or writing NVM.
2366 * iwlwifi/mvm/nvm.c
2367 */
2368
2369 /* list of NVM sections we are allowed/need to read */
/* Section ids double as indices into nvm_sections[] in iwm_nvm_init(). */
static const int nvm_to_read[] = {
	IWM_NVM_SECTION_TYPE_HW,
	IWM_NVM_SECTION_TYPE_SW,
	IWM_NVM_SECTION_TYPE_CALIBRATION,
	IWM_NVM_SECTION_TYPE_PRODUCTION,
};
2376
2377 /* Default NVM size to read */
2378 #define IWM_NVM_DEFAULT_CHUNK_SIZE (2*1024)
2379 #define IWM_MAX_NVM_SECTION_SIZE 7000
2380
2381 #define IWM_NVM_WRITE_OPCODE 1
2382 #define IWM_NVM_READ_OPCODE 0
2383
2384 static int
2385 iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
2386 uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
2387 {
2388 offset = 0;
2389 struct iwm_nvm_access_cmd nvm_access_cmd = {
2390 .offset = htole16(offset),
2391 .length = htole16(length),
2392 .type = htole16(section),
2393 .op_code = IWM_NVM_READ_OPCODE,
2394 };
2395 struct iwm_nvm_access_resp *nvm_resp;
2396 struct iwm_rx_packet *pkt;
2397 struct iwm_host_cmd cmd = {
2398 .id = IWM_NVM_ACCESS_CMD,
2399 .flags = IWM_CMD_SYNC | IWM_CMD_WANT_SKB |
2400 IWM_CMD_SEND_IN_RFKILL,
2401 .data = { &nvm_access_cmd, },
2402 };
2403 int ret, bytes_read, offset_read;
2404 uint8_t *resp_data;
2405
2406 cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
2407
2408 ret = iwm_send_cmd(sc, &cmd);
2409 if (ret)
2410 return ret;
2411
2412 pkt = cmd.resp_pkt;
2413 if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
2414 DPRINTF(("%s: Bad return from IWM_NVM_ACCES_COMMAND (0x%08X)\n",
2415 DEVNAME(sc), pkt->hdr.flags));
2416 ret = EIO;
2417 goto exit;
2418 }
2419
2420 /* Extract NVM response */
2421 nvm_resp = (void *)pkt->data;
2422
2423 ret = le16toh(nvm_resp->status);
2424 bytes_read = le16toh(nvm_resp->length);
2425 offset_read = le16toh(nvm_resp->offset);
2426 resp_data = nvm_resp->data;
2427 if (ret) {
2428 DPRINTF(("%s: NVM access command failed with status %d\n",
2429 DEVNAME(sc), ret));
2430 ret = EINVAL;
2431 goto exit;
2432 }
2433
2434 if (offset_read != offset) {
2435 DPRINTF(("%s: NVM ACCESS response with invalid offset %d\n",
2436 DEVNAME(sc), offset_read));
2437 ret = EINVAL;
2438 goto exit;
2439 }
2440
2441 memcpy(data + offset, resp_data, bytes_read);
2442 *len = bytes_read;
2443
2444 exit:
2445 iwm_free_resp(sc, &cmd);
2446 return ret;
2447 }
2448
2449 /*
2450 * Reads an NVM section completely.
2451 * NICs prior to 7000 family doesn't have a real NVM, but just read
2452 * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
2453 * by uCode, we need to manually check in this case that we don't
2454 * overflow and try to read more than the EEPROM size.
2455 * For 7000 family NICs, we supply the maximal size we can read, and
2456 * the uCode fills the response with as much data as we can,
2457 * without overflowing, so no check is needed.
2458 */
/*
 * Read one NVM section into `data`, chunk by chunk, accumulating the
 * total length in *len.  The loop stops when the firmware returns a
 * short chunk (less than requested), i.e. the section is exhausted.
 *
 * NOTE(review): there is no bound check against the caller's buffer
 * (allocated with IWM_OTP_LOW_IMAGE_SIZE in iwm_nvm_init()); a
 * section larger than that would overrun it — TODO add a guard.
 */
static int
iwm_nvm_read_section(struct iwm_softc *sc,
	uint16_t section, uint8_t *data, uint16_t *len)
{
	uint16_t length, seglen;
	int error;

	/* Set nvm section read length */
	length = seglen = IWM_NVM_DEFAULT_CHUNK_SIZE;
	*len = 0;

	/* Read the NVM until exhausted (reading less than requested) */
	while (seglen == length) {
		error = iwm_nvm_read_chunk(sc,
		    section, *len, length, data, &seglen);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "Cannot read NVM from section %d offset %d, "
			    "length %d\n", section, *len, length);
			return error;
		}
		*len += seglen;
	}

	DPRINTFN(4, ("NVM section %d read completed\n", section));
	return 0;
}
2486
2487 /*
2488 * BEGIN IWM_NVM_PARSE
2489 */
2490
2491 /* iwlwifi/iwl-nvm-parse.c */
2492
2493 /* NVM offsets (in words) definitions */
/* NVM offsets (in words) definitions */
enum wkp_nvm_offsets {
	/* NVM HW-Section offset (in words) definitions */
	IWM_HW_ADDR = 0x15,

	/*
	 * NVM SW-Section offset (in words) definitions.
	 * Offsets after IWM_NVM_SW_SECTION are relative to that section.
	 */
	IWM_NVM_SW_SECTION = 0x1C0,
	IWM_NVM_VERSION = 0,
	IWM_RADIO_CFG = 1,
	IWM_SKU = 2,
	IWM_N_HW_ADDRS = 3,
	IWM_NVM_CHANNELS = 0x1E0 - IWM_NVM_SW_SECTION,

	/* NVM calibration section offset (in words) definitions */
	IWM_NVM_CALIB_SECTION = 0x2B8,
	IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
};
2510
2511 /* SKU Capabilities (actual values from NVM definition) */
/* SKU capability bits, as read from the SW section word IWM_SKU. */
enum nvm_sku_bits {
	IWM_NVM_SKU_CAP_BAND_24GHZ = (1 << 0),
	IWM_NVM_SKU_CAP_BAND_52GHZ = (1 << 1),
	IWM_NVM_SKU_CAP_11N_ENABLE = (1 << 2),
	IWM_NVM_SKU_CAP_11AC_ENABLE = (1 << 3),
};
2518
2519 /* radio config bits (actual values from NVM definition) */
2520 #define IWM_NVM_RF_CFG_DASH_MSK(x) (x & 0x3) /* bits 0-1 */
2521 #define IWM_NVM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */
2522 #define IWM_NVM_RF_CFG_TYPE_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */
2523 #define IWM_NVM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */
2524 #define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */
2525 #define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
2526
2527 #define DEFAULT_MAX_TX_POWER 16
2528
2529 /**
2530 * enum iwm_nvm_channel_flags - channel flags in NVM
2531 * @IWM_NVM_CHANNEL_VALID: channel is usable for this SKU/geo
2532 * @IWM_NVM_CHANNEL_IBSS: usable as an IBSS channel
2533 * @IWM_NVM_CHANNEL_ACTIVE: active scanning allowed
2534 * @IWM_NVM_CHANNEL_RADAR: radar detection required
2535 * @IWM_NVM_CHANNEL_DFS: dynamic freq selection candidate
2536 * @IWM_NVM_CHANNEL_WIDE: 20 MHz channel okay (?)
2537 * @IWM_NVM_CHANNEL_40MHZ: 40 MHz channel okay (?)
2538 * @IWM_NVM_CHANNEL_80MHZ: 80 MHz channel okay (?)
2539 * @IWM_NVM_CHANNEL_160MHZ: 160 MHz channel okay (?)
2540 */
enum iwm_nvm_channel_flags {
	IWM_NVM_CHANNEL_VALID = (1 << 0),
	IWM_NVM_CHANNEL_IBSS = (1 << 1),
	/* bit 2 unused here */
	IWM_NVM_CHANNEL_ACTIVE = (1 << 3),
	IWM_NVM_CHANNEL_RADAR = (1 << 4),
	/* bits 5-6 unused here */
	IWM_NVM_CHANNEL_DFS = (1 << 7),
	IWM_NVM_CHANNEL_WIDE = (1 << 8),
	IWM_NVM_CHANNEL_40MHZ = (1 << 9),
	IWM_NVM_CHANNEL_80MHZ = (1 << 10),
	IWM_NVM_CHANNEL_160MHZ = (1 << 11),
};
2552
/*
 * Populate net80211's channel table from the NVM channel-flag words.
 * nvm_ch_flags is the per-channel flag array out of the NVM SW
 * section; channels without IWM_NVM_CHANNEL_VALID (or any 5GHz
 * channel when the SKU disables the 5GHz band) are skipped.
 */
static void
iwm_init_channel_map(struct iwm_softc *sc, const uint16_t * const nvm_ch_flags)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_nvm_data *data = &sc->sc_nvm;
	int ch_idx;
	struct ieee80211_channel *channel;
	uint16_t ch_flags;
	int is_5ghz;
	int flags, hw_value;

	for (ch_idx = 0; ch_idx < __arraycount(iwm_nvm_channels); ch_idx++) {
		ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);

		/* SKU may veto the whole 5GHz band. */
		if (ch_idx >= IWM_NUM_2GHZ_CHANNELS &&
		    !data->sku_cap_band_52GHz_enable)
			ch_flags &= ~IWM_NVM_CHANNEL_VALID;

		if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
			DPRINTF(("Ch. %d Flags %x [%sGHz] - No traffic\n",
			    iwm_nvm_channels[ch_idx],
			    ch_flags,
			    (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
			    "5.2" : "2.4"));
			continue;
		}

		hw_value = iwm_nvm_channels[ch_idx];
		channel = &ic->ic_channels[hw_value];

		/* Index position in the NVM table decides the band. */
		is_5ghz = ch_idx >= IWM_NUM_2GHZ_CHANNELS;
		if (!is_5ghz) {
			flags = IEEE80211_CHAN_2GHZ;
			channel->ic_flags
			    = IEEE80211_CHAN_CCK
			    | IEEE80211_CHAN_OFDM
			    | IEEE80211_CHAN_DYN
			    | IEEE80211_CHAN_2GHZ;
		} else {
			flags = IEEE80211_CHAN_5GHZ;
			channel->ic_flags =
			    IEEE80211_CHAN_A;
		}
		channel->ic_freq = ieee80211_ieee2mhz(hw_value, flags);

		/* Channels without ACTIVE must be scanned passively. */
		if (!(ch_flags & IWM_NVM_CHANNEL_ACTIVE))
			channel->ic_flags |= IEEE80211_CHAN_PASSIVE;
	}
}
2602
/*
 * Parse the raw NVM sections (as 16-bit little-endian word arrays)
 * into sc->sc_nvm: version, radio config, SKU capabilities, crystal
 * calibration, MAC address, and the channel map.
 * Returns 0, or EINVAL when the antenna masks are empty.
 */
static int
iwm_parse_nvm_data(struct iwm_softc *sc,
	const uint16_t *nvm_hw, const uint16_t *nvm_sw,
	const uint16_t *nvm_calib, uint8_t tx_chains, uint8_t rx_chains)
{
	struct iwm_nvm_data *data = &sc->sc_nvm;
	uint8_t hw_addr[ETHER_ADDR_LEN];
	uint16_t radio_cfg, sku;

	data->nvm_version = le16_to_cpup(nvm_sw + IWM_NVM_VERSION);

	/* Unpack the radio-configuration word into its bit-fields. */
	radio_cfg = le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
	data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
	data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
	data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
	data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
	data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK(radio_cfg);
	data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK(radio_cfg);

	sku = le16_to_cpup(nvm_sw + IWM_SKU);
	data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
#ifndef IWM_NO_5GHZ
	data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
#else
	data->sku_cap_band_52GHz_enable = 0;
#endif
	/* 11n is deliberately disabled regardless of the SKU bit. */
	data->sku_cap_11n_enable = 0;

	if (!data->valid_tx_ant || !data->valid_rx_ant) {
		DPRINTF(("%s: invalid antennas (0x%x, 0x%x)\n",
		    DEVNAME(sc), data->valid_tx_ant,
		    data->valid_rx_ant));
		return EINVAL;
	}

	data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);

	/* Kept in NVM byte order; handed to the firmware as-is. */
	data->xtal_calib[0] = *(nvm_calib + IWM_XTAL_CALIB);
	data->xtal_calib[1] = *(nvm_calib + IWM_XTAL_CALIB + 1);

	/* The byte order is little endian 16 bit, meaning 214365 */
	memcpy(hw_addr, nvm_hw + IWM_HW_ADDR, ETHER_ADDR_LEN);
	data->hw_addr[0] = hw_addr[1];
	data->hw_addr[1] = hw_addr[0];
	data->hw_addr[2] = hw_addr[3];
	data->hw_addr[3] = hw_addr[2];
	data->hw_addr[4] = hw_addr[5];
	data->hw_addr[5] = hw_addr[4];

	iwm_init_channel_map(sc, &nvm_sw[IWM_NVM_CHANNELS]);
	data->calib_version = 255;   /* TODO:
					this value will prevent some checks from
					failing, we need to check if this
					field is still needed, and if it does,
					where is it in the NVM */

	return 0;
}
2661
2662 /*
2663 * END NVM PARSE
2664 */
2665
/* One in-memory copy of an NVM section (owned by iwm_nvm_init()). */
struct iwm_nvm_section {
	uint16_t length;
	const uint8_t *data;
};
2670
2671 #define IWM_FW_VALID_TX_ANT(sc) \
2672 ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_TX_CHAIN) \
2673 >> IWM_FW_PHY_CFG_TX_CHAIN_POS)
2674 #define IWM_FW_VALID_RX_ANT(sc) \
2675 ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RX_CHAIN) \
2676 >> IWM_FW_PHY_CFG_RX_CHAIN_POS)
2677
2678 static int
2679 iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
2680 {
2681 const uint16_t *hw, *sw, *calib;
2682
2683 /* Checking for required sections */
2684 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2685 !sections[IWM_NVM_SECTION_TYPE_HW].data) {
2686 DPRINTF(("%s: Can't parse empty NVM sections\n", DEVNAME(sc)));
2687 return ENOENT;
2688 }
2689
2690 hw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_HW].data;
2691 sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
2692 calib = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
2693 return iwm_parse_nvm_data(sc, hw, sw, calib,
2694 IWM_FW_VALID_TX_ANT(sc), IWM_FW_VALID_RX_ANT(sc));
2695 }
2696
2697 static int
2698 iwm_nvm_init(struct iwm_softc *sc)
2699 {
2700 struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
2701 int i, section, error;
2702 uint16_t len;
2703 uint8_t *nvm_buffer, *temp;
2704
2705 /* Read From FW NVM */
2706 DPRINTF(("Read NVM\n"));
2707
2708 /* TODO: find correct NVM max size for a section */
2709 nvm_buffer = kmem_alloc(IWM_OTP_LOW_IMAGE_SIZE, KM_SLEEP);
2710 for (i = 0; i < __arraycount(nvm_to_read); i++) {
2711 section = nvm_to_read[i];
2712 KASSERT(section <= __arraycount(nvm_sections));
2713
2714 error = iwm_nvm_read_section(sc, section, nvm_buffer, &len);
2715 if (error)
2716 break;
2717
2718 temp = kmem_alloc(len, KM_SLEEP);
2719 memcpy(temp, nvm_buffer, len);
2720 nvm_sections[section].data = temp;
2721 nvm_sections[section].length = len;
2722 }
2723 kmem_free(nvm_buffer, IWM_OTP_LOW_IMAGE_SIZE);
2724 if (error)
2725 return error;
2726
2727 return iwm_parse_nvm_sections(sc, nvm_sections);
2728 }
2729
2730 /*
2731 * Firmware loading gunk. This is kind of a weird hybrid between the
2732 * iwn driver and the Linux iwlwifi driver.
2733 */
2734
/*
 * DMA one firmware section into device memory at dst_addr using the
 * service DMA channel, then sleep until the "chunk done" interrupt
 * (sc_fw_chunk_done, set by the interrupt handler) or a 1s timeout.
 * Returns 0, EBUSY if the NIC could not be locked, or the tsleep
 * error (e.g. EWOULDBLOCK on timeout).
 */
static int
iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
	const uint8_t *section, uint32_t byte_cnt)
{
	struct iwm_dma_info *dma = &sc->fw_dma;
	int error;

	/* Copy firmware section into pre-allocated DMA-safe memory. */
	memcpy(dma->vaddr, section, byte_cnt);
	bus_dmamap_sync(sc->sc_dmat,
	    dma->map, 0, byte_cnt, BUS_DMASYNC_PREWRITE);

	if (!iwm_nic_lock(sc))
		return EBUSY;

	sc->sc_fw_chunk_done = 0;

	/*
	 * Program the service channel: pause it, set the SRAM
	 * destination and the DMA source address/length, then enable
	 * the transfer with an end-of-TFD interrupt.
	 */
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
	IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
	    dst_addr);
	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
	    dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
	IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
	    (iwm_get_dma_hi_addr(dma->paddr)
	      << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
	    1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
	    IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
	IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
	    IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

	iwm_nic_unlock(sc);

	/* wait 1s for this segment to load */
	while (!sc->sc_fw_chunk_done)
		if ((error = tsleep(&sc->sc_fw, 0, "iwmfw", hz)) != 0)
			break;

	return error;
}
2779
2780 static int
2781 iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
2782 {
2783 struct iwm_fw_sects *fws;
2784 int error, i, w;
2785 void *data;
2786 uint32_t dlen;
2787 uint32_t offset;
2788
2789 sc->sc_uc.uc_intr = 0;
2790
2791 fws = &sc->sc_fw.fw_sects[ucode_type];
2792 for (i = 0; i < fws->fw_count; i++) {
2793 data = fws->fw_sect[i].fws_data;
2794 dlen = fws->fw_sect[i].fws_len;
2795 offset = fws->fw_sect[i].fws_devoff;
2796 DPRINTF(("LOAD FIRMWARE type %d offset %u len %d\n",
2797 ucode_type, offset, dlen));
2798 error = iwm_firmware_load_chunk(sc, offset, data, dlen);
2799 if (error) {
2800 DPRINTF(("iwm_firmware_load_chunk() chunk %u of %u returned error %02d\n", i, fws->fw_count, error));
2801 return error;
2802 }
2803 }
2804
2805 /* wait for the firmware to load */
2806 IWM_WRITE(sc, IWM_CSR_RESET, 0);
2807
2808 for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++) {
2809 error = tsleep(&sc->sc_uc, 0, "iwmuc", hz/10);
2810 }
2811
2812 return error;
2813 }
2814
2815 /* iwlwifi: pcie/trans.c */
/* iwlwifi: pcie/trans.c */
/*
 * Bring the NIC up and load the requested ucode image: init the NIC,
 * clear the rfkill handshake bits, enable interrupts, then load the
 * firmware sections.  Returns 0 or the first error.
 */
static int
iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
{
	int error;

	/* Ack any stale interrupts before starting. */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);

	if ((error = iwm_nic_init(sc)) != 0) {
		aprint_error_dev(sc->sc_dev, "Unable to init nic\n");
		return error;
	}

	/* make sure rfkill handshake bits are cleared */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
	    IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	IWM_WRITE(sc, IWM_CSR_INT, ~0);
	iwm_enable_interrupts(sc);

	/* really make sure rfkill handshake bits are cleared */
	/* maybe we should write a few times more?  just to make sure */
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
	IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);

	/* Load the given image to the HW */
	return iwm_load_firmware(sc, ucode_type);
}
2845
/*
 * Post-"alive" fixups; currently just a thin wrapper around
 * iwm_post_alive().  sched_base is accepted but unused here.
 */
static int
iwm_fw_alive(struct iwm_softc *sc, uint32_t sched_base)
{
	return iwm_post_alive(sc);
}
2851
2852 static int
2853 iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2854 {
2855 struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2856 .valid = htole32(valid_tx_ant),
2857 };
2858
2859 return iwm_mvm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2860 IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2861 }
2862
/* iwlwifi: mvm/fw.c */
/*
 * Send the PHY configuration and the default calibration triggers for
 * the currently selected ucode image (sc_uc_current) to the firmware.
 */
static int
iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
{
	struct iwm_phy_cfg_cmd phy_cfg_cmd;
	enum iwm_ucode_type ucode_type = sc->sc_uc_current;

	/* Set parameters */
	phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
	/* Calibration triggers are kept per ucode image. */
	phy_cfg_cmd.calib_control.event_trigger =
	    sc->sc_default_calib[ucode_type].event_trigger;
	phy_cfg_cmd.calib_control.flow_trigger =
	    sc->sc_default_calib[ucode_type].flow_trigger;

	DPRINTFN(10, ("Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg));
	return iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
	    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
}
2881
2882 static int
2883 iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
2884 enum iwm_ucode_type ucode_type)
2885 {
2886 enum iwm_ucode_type old_type = sc->sc_uc_current;
2887 int error;
2888
2889 if ((error = iwm_read_firmware(sc)) != 0)
2890 return error;
2891
2892 sc->sc_uc_current = ucode_type;
2893 error = iwm_start_fw(sc, ucode_type);
2894 if (error) {
2895 sc->sc_uc_current = old_type;
2896 return error;
2897 }
2898
2899 return iwm_fw_alive(sc, sc->sched_base);
2900 }
2901
2902 /*
2903 * mvm misc bits
2904 */
2905
2906 /*
2907 * follows iwlwifi/fw.c
2908 */
static int
iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
{
	/*
	 * Boot the INIT ucode image.  With justnvm set, only read the
	 * NVM (MAC address, channel map) and set up the scan command
	 * buffer; otherwise also kick off the firmware's internal
	 * calibrations and wait for the init-complete notification
	 * (sc_init_complete, set by the interrupt path).
	 */
	int error;

	/* do not operate with rfkill switch turned on */
	if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
		aprint_error_dev(sc->sc_dev,
		    "radio is disabled by hardware switch\n");
		return EPERM;
	}

	sc->sc_init_complete = 0;
	if ((error = iwm_mvm_load_ucode_wait_alive(sc,
	    IWM_UCODE_TYPE_INIT)) != 0)
		return error;

	if (justnvm) {
		if ((error = iwm_nvm_init(sc)) != 0) {
			aprint_error_dev(sc->sc_dev, "failed to read nvm\n");
			return error;
		}
		memcpy(&sc->sc_ic.ic_myaddr,
		    &sc->sc_nvm.hw_addr, ETHER_ADDR_LEN);

		/* Size the scan command for the maximum probe/channel set. */
		sc->sc_scan_cmd_len = sizeof(struct iwm_scan_cmd)
		    + sc->sc_capa_max_probe_len
		    + IWM_MAX_NUM_SCAN_CHANNELS
		    * sizeof(struct iwm_scan_channel);
		sc->sc_scan_cmd = kmem_alloc(sc->sc_scan_cmd_len, KM_SLEEP);

		return 0;
	}

	/* Send TX valid antennas before triggering calibrations */
	if ((error = iwm_send_tx_ant_cfg(sc, IWM_FW_VALID_TX_ANT(sc))) != 0)
		return error;

	/*
	 * Send phy configurations command to init uCode
	 * to start the 16.0 uCode init image internal calibrations.
	 */
	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0 ) {
		DPRINTF(("%s: failed to run internal calibration: %d\n",
		    DEVNAME(sc), error));
		return error;
	}

	/*
	 * Nothing to do but wait for the init complete notification
	 * from the firmware
	 */
	while (!sc->sc_init_complete)
		if ((error = tsleep(&sc->sc_init_complete,
		    0, "iwminit", 2*hz)) != 0)
			break;

	return error;
}
2968
2969 /*
2970 * receive side
2971 */
2972
2973 /* (re)stock rx ring, called at init-time and at runtime */
2974 static int
2975 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
2976 {
2977 struct iwm_rx_ring *ring = &sc->rxq;
2978 struct iwm_rx_data *data = &ring->data[idx];
2979 struct mbuf *m;
2980 int error;
2981 int fatal = 0;
2982
2983 m = m_gethdr(M_DONTWAIT, MT_DATA);
2984 if (m == NULL)
2985 return ENOBUFS;
2986
2987 if (size <= MCLBYTES) {
2988 MCLGET(m, M_DONTWAIT);
2989 } else {
2990 MEXTMALLOC(m, IWM_RBUF_SIZE, M_DONTWAIT);
2991 }
2992 if ((m->m_flags & M_EXT) == 0) {
2993 m_freem(m);
2994 return ENOBUFS;
2995 }
2996
2997 if (data->m != NULL) {
2998 bus_dmamap_unload(sc->sc_dmat, data->map);
2999 fatal = 1;
3000 }
3001
3002 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3003 if ((error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
3004 BUS_DMA_READ|BUS_DMA_NOWAIT)) != 0) {
3005 /* XXX */
3006 if (fatal)
3007 panic("iwm: could not load RX mbuf");
3008 m_freem(m);
3009 return error;
3010 }
3011 data->m = m;
3012 bus_dmamap_sync(sc->sc_dmat, data->map, 0, size, BUS_DMASYNC_PREREAD);
3013
3014 /* Update RX descriptor. */
3015 ring->desc[idx] = htole32(data->map->dm_segs[0].ds_addr >> 8);
3016 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
3017 idx * sizeof(uint32_t), sizeof(uint32_t), BUS_DMASYNC_PREWRITE);
3018
3019 return 0;
3020 }
3021
3022 /* iwlwifi: mvm/rx.c */
3023 #define IWM_RSSI_OFFSET 50
/*
 * Compute the received signal strength (dBm) from the legacy per-
 * antenna RSSI/AGC words in the PHY info, returning the stronger of
 * the two chains.
 */
static int
iwm_mvm_calc_rssi(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
{
	int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
	uint32_t agc_a, agc_b;
	uint32_t val;

	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_AGC_IDX]);
	agc_a = (val & IWM_OFDM_AGC_A_MSK) >> IWM_OFDM_AGC_A_POS;
	agc_b = (val & IWM_OFDM_AGC_B_MSK) >> IWM_OFDM_AGC_B_POS;

	val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_RSSI_AB_IDX]);
	rssi_a = (val & IWM_OFDM_RSSI_INBAND_A_MSK) >> IWM_OFDM_RSSI_A_POS;
	rssi_b = (val & IWM_OFDM_RSSI_INBAND_B_MSK) >> IWM_OFDM_RSSI_B_POS;

	/*
	 * dBm = rssi dB - agc dB - constant.
	 * Higher AGC (higher radio gain) means lower signal.
	 */
	rssi_a_dbm = rssi_a - IWM_RSSI_OFFSET - agc_a;
	rssi_b_dbm = rssi_b - IWM_RSSI_OFFSET - agc_b;
	max_rssi_dbm = MAX(rssi_a_dbm, rssi_b_dbm);

	DPRINTF(("Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
	    rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b));

	return max_rssi_dbm;
}
3052
3053 /* iwlwifi: mvm/rx.c */
3054 /*
3055 * iwm_mvm_get_signal_strength - use new rx PHY INFO API
3056 * values are reported by the fw as positive values - need to negate
3057 * to obtain their dBM. Account for missing antennas by replacing 0
3058 * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
3059 */
3060 static int
3061 iwm_mvm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3062 {
3063 int energy_a, energy_b, energy_c, max_energy;
3064 uint32_t val;
3065
3066 val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
3067 energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3068 IWM_RX_INFO_ENERGY_ANT_A_POS;
3069 energy_a = energy_a ? -energy_a : -256;
3070 energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3071 IWM_RX_INFO_ENERGY_ANT_B_POS;
3072 energy_b = energy_b ? -energy_b : -256;
3073 energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3074 IWM_RX_INFO_ENERGY_ANT_C_POS;
3075 energy_c = energy_c ? -energy_c : -256;
3076 max_energy = MAX(energy_a, energy_b);
3077 max_energy = MAX(max_energy, energy_c);
3078
3079 DPRINTFN(12, ("energy In A %d B %d C %d , and max %d\n",
3080 energy_a, energy_b, energy_c, max_energy));
3081
3082 return max_energy;
3083 }
3084
/*
 * Handle an IWM_RX_PHY_INFO notification: stash the PHY data in
 * sc_last_phy_info so the MPDU notification that follows (see
 * iwm_mvm_rx_rx_mpdu()) can use it.
 */
static void
iwm_mvm_rx_rx_phy_cmd(struct iwm_softc *sc,
	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
{
	struct iwm_rx_phy_info *phy_info = (void *)pkt->data;

	DPRINTFN(20, ("received PHY stats\n"));
	/* Make the DMA'd payload visible to the CPU before copying. */
	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
	    sizeof(*phy_info), BUS_DMASYNC_POSTREAD);

	memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
}
3097
3098 /*
3099 * Retrieve the average noise (in dBm) among receivers.
3100 */
3101 static int
3102 iwm_get_noise(const struct iwm_mvm_statistics_rx_non_phy *stats)
3103 {
3104 int i, total, nbant, noise;
3105
3106 total = nbant = noise = 0;
3107 for (i = 0; i < 3; i++) {
3108 noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
3109 if (noise) {
3110 total += noise;
3111 nbant++;
3112 }
3113 }
3114
3115 /* There should be at least one antenna but check anyway. */
3116 return (nbant == 0) ? -127 : (total / nbant) - 107;
3117 }
3118
3119 /*
3120 * iwm_mvm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
3121 *
3122 * Handles the actual data of the Rx packet from the fw
3123 */
static void
iwm_mvm_rx_rx_mpdu(struct iwm_softc *sc,
	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_frame *wh;
	struct ieee80211_node *ni;
	struct ieee80211_channel *c = NULL;
	struct mbuf *m;
	struct iwm_rx_phy_info *phy_info;
	struct iwm_rx_mpdu_res_start *rx_res;
	int device_timestamp;
	uint32_t len;
	uint32_t rx_pkt_status;
	int rssi;

	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
	    BUS_DMASYNC_POSTREAD);

	/* PHY data for this frame arrived earlier in a separate
	 * IWM_RX_PHY_INFO notification (see iwm_mvm_rx_rx_phy_cmd()). */
	phy_info = &sc->sc_last_phy_info;
	rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
	wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
	len = le16toh(rx_res->byte_count);
	/* The 32-bit RX status word follows the frame payload. */
	rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));

	m = data->m;
	/* Point the mbuf at the 802.11 frame inside the RX buffer. */
	m->m_data = pkt->data + sizeof(*rx_res);
	m->m_pkthdr.len = m->m_len = len;

	if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
		DPRINTF(("dsp size out of range [0,20]: %d\n",
		    phy_info->cfg_phy_cnt));
		return;
	}

	/* Drop frames with bad CRC or that overran the RX FIFO. */
	if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
	    !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
		DPRINTF(("Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status));
		return; /* drop */
	}

	device_timestamp = le32toh(phy_info->system_timestamp);

	/* Newer firmware reports per-antenna energy, older firmware
	 * reports RSSI+AGC; pick the matching conversion. */
	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_RX_ENERGY_API) {
		rssi = iwm_mvm_get_signal_strength(sc, phy_info);
	} else {
		rssi = iwm_mvm_calc_rssi(sc, phy_info);
	}
	rssi = -rssi;

	if (ic->ic_state == IEEE80211_S_SCAN)
		iwm_fix_channel(ic, m);

	/* replenish ring for the buffer we're going to feed to the sharks */
	if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0)
		return;

	m->m_pkthdr.rcvif = IC2IFP(ic);

	/* While scanning the 5GHz band, take the channel from the PHY
	 * info (bounds-checked against our channel table). */
	if (sc->sc_scanband == IEEE80211_CHAN_5GHZ) {
		if (le32toh(phy_info->channel) < __arraycount(ic->ic_channels))
			c = &ic->ic_channels[le32toh(phy_info->channel)];
	}

	ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
	if (c)
		ni->ni_chan = c;

	/* Feed a radiotap header to any attached BPF listener. */
	if (sc->sc_drvbpf != NULL) {
		struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;

		tap->wr_flags = 0;
		if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
			tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
		tap->wr_chan_freq =
		    htole16(ic->ic_channels[phy_info->channel].ic_freq);
		tap->wr_chan_flags =
		    htole16(ic->ic_channels[phy_info->channel].ic_flags);
		tap->wr_dbm_antsignal = (int8_t)rssi;
		tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
		tap->wr_tsft = phy_info->system_timestamp;
		/* Map the firmware rate code to radiotap 500kb/s units. */
		switch (phy_info->rate) {
		/* CCK rates. */
		case 10: tap->wr_rate = 2; break;
		case 20: tap->wr_rate = 4; break;
		case 55: tap->wr_rate = 11; break;
		case 110: tap->wr_rate = 22; break;
		/* OFDM rates. */
		case 0xd: tap->wr_rate = 12; break;
		case 0xf: tap->wr_rate = 18; break;
		case 0x5: tap->wr_rate = 24; break;
		case 0x7: tap->wr_rate = 36; break;
		case 0x9: tap->wr_rate = 48; break;
		case 0xb: tap->wr_rate = 72; break;
		case 0x1: tap->wr_rate = 96; break;
		case 0x3: tap->wr_rate = 108; break;
		/* Unknown rate: should not happen. */
		default: tap->wr_rate = 0;
		}

		bpf_mtap2(sc->sc_drvbpf, tap, sc->sc_rxtap_len, m);
	}
	ieee80211_input(ic, m, ni, rssi, device_timestamp);
	ieee80211_free_node(ni);
}
3229
3230 static void
3231 iwm_mvm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3232 struct iwm_node *in)
3233 {
3234 struct ieee80211com *ic = &sc->sc_ic;
3235 struct ifnet *ifp = IC2IFP(ic);
3236 struct iwm_mvm_tx_resp *tx_resp = (void *)pkt->data;
3237 int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3238 int failack = tx_resp->failure_frame;
3239
3240 KASSERT(tx_resp->frame_count == 1);
3241
3242 /* Update rate control statistics. */
3243 in->in_amn.amn_txcnt++;
3244 if (failack > 0) {
3245 in->in_amn.amn_retrycnt++;
3246 }
3247
3248 if (status != IWM_TX_STATUS_SUCCESS &&
3249 status != IWM_TX_STATUS_DIRECT_DONE)
3250 ifp->if_oerrors++;
3251 else
3252 ifp->if_opackets++;
3253 }
3254
/*
 * Handle an IWM_TX_CMD response: update statistics, release the TX
 * descriptor's DMA map, mbuf and node reference, and un-throttle the
 * queue if it drained below the low watermark.
 */
static void
iwm_mvm_rx_tx_cmd(struct iwm_softc *sc,
	struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = IC2IFP(ic);
	struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
	/* The firmware echoes the queue id/index of the TX descriptor. */
	int idx = cmd_hdr->idx;
	int qid = cmd_hdr->qid;
	struct iwm_tx_ring *ring = &sc->txq[qid];
	struct iwm_tx_data *txd = &ring->data[idx];
	struct iwm_node *in = txd->in;

	if (txd->done) {
		DPRINTF(("%s: got tx interrupt that's already been handled!\n",
		    DEVNAME(sc)));
		return;
	}

	bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
	    BUS_DMASYNC_POSTREAD);

	sc->sc_tx_timer = 0;

	iwm_mvm_rx_tx_cmd_single(sc, pkt, in);

	/* Unmap and free mbuf. */
	bus_dmamap_sync(sc->sc_dmat, txd->map, 0, txd->map->dm_mapsize,
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, txd->map);
	m_freem(txd->m);

	DPRINTFN(8, ("free txd %p, in %p\n", txd, txd->in));
	KASSERT(txd->done == 0);
	txd->done = 1;
	KASSERT(txd->in);

	txd->m = NULL;
	txd->in = NULL;
	/* Drop the node reference taken when the frame was queued. */
	ieee80211_free_node(&in->in_ni);

	/* If the ring drained below the low mark, clear its queue-full
	 * bit and restart output once no queue is full. */
	if (--ring->queued < IWM_TX_RING_LOMARK) {
		sc->qfullmsk &= ~(1 << ring->qid);
		if (sc->qfullmsk == 0 && (ifp->if_flags & IFF_OACTIVE)) {
			ifp->if_flags &= ~IFF_OACTIVE;
			/*
			 * Well, we're in interrupt context, but then again
			 * I guess net80211 does all sorts of stunts in
			 * interrupt context, so maybe this is no biggie.
			 */
			(*ifp->if_start)(ifp);
		}
	}
}
3309
3310 /*
3311 * BEGIN iwlwifi/mvm/binding.c
3312 */
3313
/*
 * Bind our MAC context to a PHY context via IWM_BINDING_CONTEXT_CMD.
 * "action" is one of the IWM_FW_CTXT_ACTION_* values.  Returns 0 on
 * success, the send error on failure, or EIO if the firmware reported
 * a non-zero status.
 */
static int
iwm_mvm_binding_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action)
{
	struct iwm_binding_cmd cmd;
	struct iwm_mvm_phy_ctxt *phyctxt = in->in_phyctxt;
	int i, ret;
	uint32_t status;

	memset(&cmd, 0, sizeof(cmd));

	/* The binding is identified by the PHY context's id/color. */
	cmd.id_and_color
	    = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
	cmd.action = htole32(action);
	cmd.phy = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));

	/* Only one MAC is bound; mark the remaining slots invalid. */
	cmd.macs[0] = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
	for (i = 1; i < IWM_MAX_MACS_IN_BINDING; i++)
		cmd.macs[i] = htole32(IWM_FW_CTXT_INVALID);

	status = 0;
	ret = iwm_mvm_send_cmd_pdu_status(sc, IWM_BINDING_CONTEXT_CMD,
	    sizeof(cmd), &cmd, &status);
	if (ret) {
		DPRINTF(("%s: Failed to send binding (action:%d): %d\n",
		    DEVNAME(sc), action, ret));
		return ret;
	}

	/* The command was delivered, but the firmware rejected it. */
	if (status) {
		DPRINTF(("%s: Binding command failed: %u\n", DEVNAME(sc),
		    status));
		ret = EIO;
	}

	return ret;
}
3350
3351 static int
3352 iwm_mvm_binding_update(struct iwm_softc *sc, struct iwm_node *in, int add)
3353 {
3354 return iwm_mvm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD);
3355 }
3356
/*
 * Bind the interface's MAC context to its PHY context when the vif is
 * added.  Passes IWM_FW_CTXT_ACTION_ADD through as the "add" argument.
 */
static int
iwm_mvm_binding_add_vif(struct iwm_softc *sc, struct iwm_node *in)
{
	return iwm_mvm_binding_update(sc, in, IWM_FW_CTXT_ACTION_ADD);
}
3362
3363 /*
3364 * END iwlwifi/mvm/binding.c
3365 */
3366
3367 /*
3368 * BEGIN iwlwifi/mvm/phy-ctxt.c
3369 */
3370
3371 /*
3372 * Construct the generic fields of the PHY context command
3373 */
3374 static void
3375 iwm_mvm_phy_ctxt_cmd_hdr(struct iwm_softc *sc, struct iwm_mvm_phy_ctxt *ctxt,
3376 struct iwm_phy_context_cmd *cmd, uint32_t action, uint32_t apply_time)
3377 {
3378 memset(cmd, 0, sizeof(struct iwm_phy_context_cmd));
3379
3380 cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(ctxt->id,
3381 ctxt->color));
3382 cmd->action = htole32(action);
3383 cmd->apply_time = htole32(apply_time);
3384 }
3385
3386 /*
3387 * Add the phy configuration to the PHY context command
3388 */
3389 static void
3390 iwm_mvm_phy_ctxt_cmd_data(struct iwm_softc *sc,
3391 struct iwm_phy_context_cmd *cmd, struct ieee80211_channel *chan,
3392 uint8_t chains_static, uint8_t chains_dynamic)
3393 {
3394 struct ieee80211com *ic = &sc->sc_ic;
3395 uint8_t active_cnt, idle_cnt;
3396
3397 cmd->ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
3398 IWM_PHY_BAND_24 : IWM_PHY_BAND_5;
3399
3400 cmd->ci.channel = ieee80211_chan2ieee(ic, chan);
3401 cmd->ci.width = IWM_PHY_VHT_CHANNEL_MODE20;
3402 cmd->ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
3403
3404 /* Set rx the chains */
3405 idle_cnt = chains_static;
3406 active_cnt = chains_dynamic;
3407
3408 cmd->rxchain_info = htole32(IWM_FW_VALID_RX_ANT(sc) <<
3409 IWM_PHY_RX_CHAIN_VALID_POS);
3410 cmd->rxchain_info |= htole32(idle_cnt << IWM_PHY_RX_CHAIN_CNT_POS);
3411 cmd->rxchain_info |= htole32(active_cnt <<
3412 IWM_PHY_RX_CHAIN_MIMO_CNT_POS);
3413
3414 cmd->txchain_info = htole32(IWM_FW_VALID_TX_ANT(sc));
3415 }
3416
3417 /*
3418 * Send a command
3419 * only if something in the configuration changed: in case that this is the
3420 * first time that the phy configuration is applied or in case that the phy
3421 * configuration changed from the previous apply.
3422 */
3423 static int
3424 iwm_mvm_phy_ctxt_apply(struct iwm_softc *sc,
3425 struct iwm_mvm_phy_ctxt *ctxt,
3426 uint8_t chains_static, uint8_t chains_dynamic,
3427 uint32_t action, uint32_t apply_time)
3428 {
3429 struct iwm_phy_context_cmd cmd;
3430 int ret;
3431
3432 /* Set the command header fields */
3433 iwm_mvm_phy_ctxt_cmd_hdr(sc, ctxt, &cmd, action, apply_time);
3434
3435 /* Set the command data */
3436 iwm_mvm_phy_ctxt_cmd_data(sc, &cmd, ctxt->channel,
3437 chains_static, chains_dynamic);
3438
3439 ret = iwm_mvm_send_cmd_pdu(sc, IWM_PHY_CONTEXT_CMD, IWM_CMD_SYNC,
3440 sizeof(struct iwm_phy_context_cmd), &cmd);
3441 if (ret) {
3442 DPRINTF(("PHY ctxt cmd error. ret=%d\n", ret));
3443 }
3444 return ret;
3445 }
3446
3447 /*
3448 * Send a command to add a PHY context based on the current HW configuration.
3449 */
3450 static int
3451 iwm_mvm_phy_ctxt_add(struct iwm_softc *sc, struct iwm_mvm_phy_ctxt *ctxt,
3452 struct ieee80211_channel *chan,
3453 uint8_t chains_static, uint8_t chains_dynamic)
3454 {
3455 ctxt->channel = chan;
3456 return iwm_mvm_phy_ctxt_apply(sc, ctxt,
3457 chains_static, chains_dynamic, IWM_FW_CTXT_ACTION_ADD, 0);
3458 }
3459
3460 /*
3461 * Send a command to modify the PHY context based on the current HW
3462 * configuration. Note that the function does not check that the configuration
3463 * changed.
3464 */
3465 static int
3466 iwm_mvm_phy_ctxt_changed(struct iwm_softc *sc,
3467 struct iwm_mvm_phy_ctxt *ctxt, struct ieee80211_channel *chan,
3468 uint8_t chains_static, uint8_t chains_dynamic)
3469 {
3470 ctxt->channel = chan;
3471 return iwm_mvm_phy_ctxt_apply(sc, ctxt,
3472 chains_static, chains_dynamic, IWM_FW_CTXT_ACTION_MODIFY, 0);
3473 }
3474
3475 /*
3476 * END iwlwifi/mvm/phy-ctxt.c
3477 */
3478
3479 /*
3480 * transmit side
3481 */
3482
3483 /*
3484 * Send a command to the firmware. We try to implement the Linux
3485 * driver interface for the routine.
3486 * mostly from if_iwn (iwn_cmd()).
3487 *
3488 * For now, we always copy the first part and map the second one (if it exists).
3489 */
static int
iwm_send_cmd(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
{
	struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
	struct iwm_tfd *desc;
	struct iwm_tx_data *data;
	struct iwm_device_cmd *cmd;
	struct mbuf *m;
	bus_addr_t paddr;
	uint32_t addr_lo;
	int error, i, paylen, off, s;
	int code;
	int async, wantresp;

	code = hcmd->id;
	async = hcmd->flags & IWM_CMD_ASYNC;
	wantresp = hcmd->flags & IWM_CMD_WANT_SKB;

	/* Total payload length across all fragments. */
	for (i = 0, paylen = 0; i < __arraycount(hcmd->len); i++) {
		paylen += hcmd->len[i];
	}

	/* if the command wants an answer, busy sc_cmd_resp */
	if (wantresp) {
		KASSERT(!async);
		/* Serialize response-wanting commands: there is only
		 * one response slot; wait until it is free. */
		while (sc->sc_wantresp != -1)
			tsleep(&sc->sc_wantresp, 0, "iwmcmdsl", 0);
		sc->sc_wantresp = ring->qid << 16 | ring->cur;
		DPRINTFN(12, ("wantresp is %x\n", sc->sc_wantresp));
	}

	/*
	 * Is the hardware still available? (after e.g. above wait).
	 */
	s = splnet();
	if (sc->sc_flags & IWM_FLAG_STOPPED) {
		error = ENXIO;
		goto out;
	}

	desc = &ring->desc[ring->cur];
	data = &ring->data[ring->cur];

	if (paylen > sizeof(cmd->data)) {
		/* Command too large for the ring slot: allocate an
		 * mbuf cluster and DMA-map that instead. */
		if (sizeof(cmd->hdr) + paylen > IWM_RBUF_SIZE) {
			error = EINVAL;
			goto out;
		}
		m = m_gethdr(M_DONTWAIT, MT_DATA);
		if (m == NULL) {
			error = ENOMEM;
			goto out;
		}
		MEXTMALLOC(m, IWM_RBUF_SIZE, M_DONTWAIT);
		if (!(m->m_flags & M_EXT)) {
			m_freem(m);
			error = ENOMEM;
			goto out;
		}
		cmd = mtod(m, struct iwm_device_cmd *);
		/* NOTE(review): only hcmd->len[0] bytes are loaded (and
		 * synced below), yet up to paylen bytes are copied in —
		 * verify for oversized multi-fragment commands. */
		error = bus_dmamap_load(sc->sc_dmat, data->map, cmd,
		    hcmd->len[0], NULL, BUS_DMA_NOWAIT | BUS_DMA_WRITE);
		if (error != 0) {
			m_freem(m);
			goto out;
		}
		data->m = m;
		paddr = data->map->dm_segs[0].ds_addr;
	} else {
		/* Small command: use the pre-mapped per-slot buffer. */
		cmd = &ring->cmd[ring->cur];
		paddr = data->cmd_paddr;
	}

	cmd->hdr.code = code;
	cmd->hdr.flags = 0;
	cmd->hdr.qid = ring->qid;
	cmd->hdr.idx = ring->cur;

	/* Gather all payload fragments into the command buffer. */
	for (i = 0, off = 0; i < __arraycount(hcmd->data); i++) {
		if (hcmd->len[i] == 0)
			continue;
		memcpy(cmd->data + off, hcmd->data[i], hcmd->len[i]);
		off += hcmd->len[i];
	}
	KASSERT(off == paylen);

	/* lo field is not aligned */
	addr_lo = htole32((uint32_t)paddr);
	memcpy(&desc->tbs[0].lo, &addr_lo, sizeof(uint32_t));
	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(paddr)
	    | ((sizeof(cmd->hdr) + paylen) << 4));
	desc->num_tbs = 1;

	DPRINTFN(8, ("iwm_send_cmd 0x%x size=%lu %s\n",
	    code, hcmd->len[0] + hcmd->len[1] + sizeof(cmd->hdr),
	    async ? " (async)" : ""));

	/* Flush the command (and the descriptor below) to memory
	 * before handing them to the device. */
	if (hcmd->len[0] > sizeof(cmd->data)) {
		bus_dmamap_sync(sc->sc_dmat, data->map, 0, hcmd->len[0],
		    BUS_DMASYNC_PREWRITE);
	} else {
		bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
		    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
		    hcmd->len[0] + 4, BUS_DMASYNC_PREWRITE);
	}
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
	    sizeof (*desc), BUS_DMASYNC_PREWRITE);

	/* Request MAC access and wait for the clock to be ready. */
	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	if (!iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
	    (IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
	     IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000)) {
		DPRINTF(("%s: acquiring device failed\n", DEVNAME(sc)));
		error = EBUSY;
		goto out;
	}

#if 0
	iwm_update_sched(sc, ring->qid, ring->cur, 0, 0);
#endif
	DPRINTF(("sending command 0x%x qid %d, idx %d\n",
	    code, ring->qid, ring->cur));

	/* Kick command ring. */
	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	if (!async) {
		/* m..m-mmyy-mmyyyy-mym-ym m-my generation */
		int generation = sc->sc_generation;
		/* Sleep until iwm_cmd_done() wakes us on the descriptor. */
		error = tsleep(desc, PCATCH, "iwmcmd", hz);
		if (error == 0) {
			/* if hardware is no longer up, return error */
			if (generation != sc->sc_generation) {
				error = ENXIO;
			} else {
				hcmd->resp_pkt = (void *)sc->sc_cmd_resp;
			}
		}
	}
out:
	/* On failure, release the response slot reserved above. */
	if (wantresp && error != 0) {
		iwm_free_resp(sc, hcmd);
	}
	splx(s);

	return error;
}
3642
3643 /* iwlwifi: mvm/utils.c */
3644 static int
3645 iwm_mvm_send_cmd_pdu(struct iwm_softc *sc, uint8_t id,
3646 uint32_t flags, uint16_t len, const void *data)
3647 {
3648 struct iwm_host_cmd cmd = {
3649 .id = id,
3650 .len = { len, },
3651 .data = { data, },
3652 .flags = flags,
3653 };
3654
3655 return iwm_send_cmd(sc, &cmd);
3656 }
3657
/* iwlwifi: mvm/utils.c */
/*
 * Send a synchronous command and extract the single 32-bit status word
 * from the firmware's response.  Returns 0 and sets *status on success;
 * EIO if the command failed or the response had an unexpected length.
 */
static int
iwm_mvm_send_cmd_status(struct iwm_softc *sc,
	struct iwm_host_cmd *cmd, uint32_t *status)
{
	struct iwm_rx_packet *pkt;
	struct iwm_cmd_response *resp;
	int error, resp_len;

	//lockdep_assert_held(&mvm->mutex);

	/* Force synchronous operation with a captured response. */
	KASSERT((cmd->flags & IWM_CMD_WANT_SKB) == 0);
	cmd->flags |= IWM_CMD_SYNC | IWM_CMD_WANT_SKB;

	if ((error = iwm_send_cmd(sc, cmd)) != 0)
		return error;
	pkt = cmd->resp_pkt;

	/* Can happen if RFKILL is asserted */
	if (!pkt) {
		error = 0;
		goto out_free_resp;
	}

	if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
		error = EIO;
		goto out_free_resp;
	}

	/* The response must be exactly one iwm_cmd_response. */
	resp_len = iwm_rx_packet_payload_len(pkt);
	if (resp_len != sizeof(*resp)) {
		error = EIO;
		goto out_free_resp;
	}

	resp = (void *)pkt->data;
	*status = le32toh(resp->status);
out_free_resp:
	/* Always release the response slot taken by WANT_SKB. */
	iwm_free_resp(sc, cmd);
	return error;
}
3699
3700 /* iwlwifi/mvm/utils.c */
3701 static int
3702 iwm_mvm_send_cmd_pdu_status(struct iwm_softc *sc, uint8_t id,
3703 uint16_t len, const void *data, uint32_t *status)
3704 {
3705 struct iwm_host_cmd cmd = {
3706 .id = id,
3707 .len = { len, },
3708 .data = { data, },
3709 };
3710
3711 return iwm_mvm_send_cmd_status(sc, &cmd, status);
3712 }
3713
/*
 * Release the single command-response slot reserved by a WANT_SKB
 * command and wake any thread waiting in iwm_send_cmd() to claim it.
 */
static void
iwm_free_resp(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
{
	KASSERT(sc->sc_wantresp != -1);
	KASSERT((hcmd->flags & (IWM_CMD_WANT_SKB|IWM_CMD_SYNC))
	    == (IWM_CMD_WANT_SKB|IWM_CMD_SYNC));
	sc->sc_wantresp = -1;
	wakeup(&sc->sc_wantresp);
}
3723
3724 /*
3725 * Process a "command done" firmware notification. This is where we wakeup
3726 * processes waiting for a synchronous command completion.
3727 * from if_iwn
3728 */
3729 static void
3730 iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3731 {
3732 struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
3733 struct iwm_tx_data *data;
3734
3735 if (pkt->hdr.qid != IWM_MVM_CMD_QUEUE) {
3736 return; /* Not a command ack. */
3737 }
3738
3739 data = &ring->data[pkt->hdr.idx];
3740
3741 /* If the command was mapped in an mbuf, free it. */
3742 if (data->m != NULL) {
3743 bus_dmamap_sync(sc->sc_dmat, data->map, 0,
3744 data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
3745 bus_dmamap_unload(sc->sc_dmat, data->map);
3746 m_freem(data->m);
3747 data->m = NULL;
3748 }
3749 wakeup(&ring->desc[pkt->hdr.idx]);
3750 }
3751
#if 0
/*
 * Update the TX scheduler byte-count table entry for qid/idx.
 * Necessary only for block ack mode.
 *
 * NOTE(review): the original referenced an undeclared variable "w" in
 * both bus_dmamap_sync() offset computations, so this (disabled) code
 * could never have compiled.  "w" is now declared as a pointer to the
 * table slot being written, matching the offsets the syncs compute.
 */
void
iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
	uint16_t len)
{
	struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
	uint16_t *w, w_val;

	scd_bc_tbl = sc->sched_dma.vaddr;

	len += 8; /* magic numbers came naturally from paris */
	if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
		len = roundup(len, 4) / 4;

	w = &scd_bc_tbl[qid].tfd_offset[idx];
	w_val = htole16(sta_id << 12 | len);

	/* Update TX scheduler. */
	*w = w_val;
	bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
	    (char *)(void *)w - (char *)(void *)sc->sched_dma.vaddr,
	    sizeof(uint16_t), BUS_DMASYNC_PREWRITE);

	/* The first IWM_TFD_QUEUE_SIZE_BC_DUP entries are duplicated
	 * at the end of the table; mirror the write there too. */
	if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
		w[IWM_TFD_QUEUE_SIZE_MAX] = w_val;
		bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
		    (char *)(void *)(w + IWM_TFD_QUEUE_SIZE_MAX) -
		    (char *)(void *)sc->sched_dma.vaddr,
		    sizeof (uint16_t), BUS_DMASYNC_PREWRITE);
	}
}
#endif
3787
3788 /*
3789 * Fill in various bit for management frames, and leave them
3790 * unfilled for data frames (firmware takes care of that).
3791 * Return the selected TX rate.
3792 */
static const struct iwm_rate *
iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
	struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
{
	const struct iwm_rate *rinfo;
	int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
	int ridx, rate_flags;
	int nrates = in->in_ni.ni_rates.rs_nrates;

	/* (IWM_RTS_DFAULT_RETRY_LIMIT is the header's spelling.) */
	tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
	tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;

	/* for data frames, use RS table */
	if (type == IEEE80211_FC0_TYPE_DATA) {
		/* A user-fixed rate index overrides rate control. */
		if (sc->sc_fixed_ridx != -1) {
			tx->initial_rate_index = sc->sc_fixed_ridx;
		} else {
			/* (nrates-1) - ni_txrate: presumably inverts
			 * between net80211's and the firmware table's
			 * rate ordering — TODO confirm. */
			tx->initial_rate_index = (nrates-1) - in->in_ni.ni_txrate;
		}
		tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
		DPRINTFN(12, ("start with txrate %d\n", tx->initial_rate_index));
		return &iwm_rates[tx->initial_rate_index];
	}

	/* for non-data, use the lowest supported rate */
	ridx = in->in_ridx[0];
	rinfo = &iwm_rates[ridx];

	rate_flags = 1 << IWM_RATE_MCS_ANT_POS;
	if (IWM_RIDX_IS_CCK(ridx))
		rate_flags |= IWM_RATE_MCS_CCK_MSK;
	tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);

	return rinfo;
}
3828
3829 #define TB0_SIZE 16
/*
 * Queue one frame for transmission on AC queue "ac": build the TX
 * command (rate, flags, station id, copied 802.11 header), DMA-map
 * the payload (linearizing the mbuf chain if it has too many
 * segments) and hand the descriptor to the firmware.
 * Returns 0 on success; frees the mbuf and returns an errno on error.
 */
static int
iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_node *in = (void *)ni;
	struct iwm_tx_ring *ring;
	struct iwm_tx_data *data;
	struct iwm_tfd *desc;
	struct iwm_device_cmd *cmd;
	struct iwm_tx_cmd *tx;
	struct ieee80211_frame *wh;
	struct ieee80211_key *k = NULL;
	struct mbuf *m1;
	const struct iwm_rate *rinfo;
	uint32_t flags;
	u_int hdrlen;
	bus_dma_segment_t *seg;
	uint8_t tid, type;
	int i, totlen, error, pad;
	int hdrlen2;

	wh = mtod(m, struct ieee80211_frame *);
	hdrlen = ieee80211_anyhdrsize(wh);
	type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;

	hdrlen2 = (ieee80211_has_qos(wh)) ?
	    sizeof (struct ieee80211_qosframe) :
	    sizeof (struct ieee80211_frame);

	/* Sanity cross-check of the two header-length computations. */
	if (hdrlen != hdrlen2)
		DPRINTF(("%s: hdrlen error (%d != %d)\n",
		    DEVNAME(sc), hdrlen, hdrlen2));

	tid = 0;

	ring = &sc->txq[ac];
	desc = &ring->desc[ring->cur];
	memset(desc, 0, sizeof(*desc));
	data = &ring->data[ring->cur];

	/* Fill out iwm_tx_cmd to send to the firmware */
	cmd = &ring->cmd[ring->cur];
	cmd->hdr.code = IWM_TX_CMD;
	cmd->hdr.flags = 0;
	cmd->hdr.qid = ring->qid;
	cmd->hdr.idx = ring->cur;

	tx = (void *)cmd->data;
	memset(tx, 0, sizeof(*tx));

	/* Select the TX rate and fill the rate fields of the command. */
	rinfo = iwm_tx_fill_cmd(sc, in, wh, tx);

	/* Feed a radiotap header to any attached BPF listener. */
	if (sc->sc_drvbpf != NULL) {
		struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;

		tap->wt_flags = 0;
		tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
		tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
		tap->wt_rate = rinfo->rate;
		tap->wt_hwqueue = ac;
		if (wh->i_fc[1] & IEEE80211_FC1_WEP)
			tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;

		bpf_mtap2(sc->sc_drvbpf, tap, sc->sc_txtap_len, m);
	}

	/* Encrypt the frame if need be. */
	if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
		k = ieee80211_crypto_encap(ic, ni, m);
		if (k == NULL) {
			m_freem(m);
			return ENOBUFS;
		}
		/* Packet header may have moved, reset our local pointer. */
		wh = mtod(m, struct ieee80211_frame *);
	}
	totlen = m->m_pkthdr.len;

	flags = 0;
	/* Unicast frames expect an ACK from the receiver. */
	if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_ACK;
	}

	/* Request protection for large non-data unicast frames. */
	if (type != IEEE80211_FC0_TYPE_DATA
	    && (totlen + IEEE80211_CRC_LEN > ic->ic_rtsthreshold)
	    && !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
		flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
	}

	/* Multicast and non-data frames go via the auxiliary station. */
	if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
	    type != IEEE80211_FC0_TYPE_DATA)
		tx->sta_id = sc->sc_aux_sta.sta_id;
	else
		tx->sta_id = IWM_STATION_ID;

	if (type == IEEE80211_FC0_TYPE_MGT) {
		uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

		/* (Re)association requests get a longer PM timeout. */
		if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
		    subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
			tx->pm_frame_timeout = htole16(3);
		else
			tx->pm_frame_timeout = htole16(2);
	} else {
		tx->pm_frame_timeout = htole16(0);
	}

	if (hdrlen & 3) {
		/* First segment length must be a multiple of 4. */
		flags |= IWM_TX_CMD_FLG_MH_PAD;
		pad = 4 - (hdrlen & 3);
	} else
		pad = 0;

	tx->driver_txop = 0;
	tx->next_frame_len = 0;

	tx->len = htole16(totlen);
	tx->tid_tspec = tid;
	tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);

	/* Set physical address of "scratch area". */
	tx->dram_lsb_ptr = htole32(data->scratch_paddr);
	tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);

	/* Copy 802.11 header in TX command. */
	memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);

	flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;

	tx->sec_ctl = 0;
	tx->tx_flags |= htole32(flags);

	/* Trim 802.11 header. */
	m_adj(m, hdrlen);

	error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
	    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
	if (error != 0) {
		if (error != EFBIG) {
			aprint_error_dev(sc->sc_dev,
			    "can't map mbuf (error %d)\n", error);
			m_freem(m);
			return error;
		}
		/* Too many DMA segments, linearize mbuf. */
		MGETHDR(m1, M_DONTWAIT, MT_DATA);
		if (m1 == NULL) {
			m_freem(m);
			return ENOBUFS;
		}
		if (m->m_pkthdr.len > MHLEN) {
			MCLGET(m1, M_DONTWAIT);
			if (!(m1->m_flags & M_EXT)) {
				m_freem(m);
				m_freem(m1);
				return ENOBUFS;
			}
		}
		m_copydata(m, 0, m->m_pkthdr.len, mtod(m1, void *));
		m1->m_pkthdr.len = m1->m_len = m->m_pkthdr.len;
		m_freem(m);
		m = m1;

		/* Retry the load with the single-segment copy. */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
		    BUS_DMA_NOWAIT | BUS_DMA_WRITE);
		if (error != 0) {
			aprint_error_dev(sc->sc_dev,
			    "can't map mbuf (error %d)\n", error);
			m_freem(m);
			return error;
		}
	}
	data->m = m;
	data->in = in;
	data->done = 0;

	DPRINTFN(8, ("sending txd %p, in %p\n", data, data->in));
	KASSERT(data->in != NULL);

	DPRINTFN(8, ("sending data: qid=%d idx=%d len=%d nsegs=%d\n",
	    ring->qid, ring->cur, totlen, data->map->dm_nsegs));

	/* Fill TX descriptor. */
	desc->num_tbs = 2 + data->map->dm_nsegs;

	/* TB0/TB1 cover the command header, TX command and the
	 * (padded) 802.11 header copied in above. */
	desc->tbs[0].lo = htole32(data->cmd_paddr);
	desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    (TB0_SIZE << 4);
	desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
	desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
	    ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
	      + hdrlen + pad - TB0_SIZE) << 4);

	/* Other DMA segments are for data payload. */
	seg = data->map->dm_segs;
	for (i = 0; i < data->map->dm_nsegs; i++, seg++) {
		desc->tbs[i+2].lo = htole32(seg->ds_addr);
		desc->tbs[i+2].hi_n_len = \
		    htole16(iwm_get_dma_hi_addr(seg->ds_addr))
		    | ((seg->ds_len) << 4);
	}

	/* Flush payload, command and descriptor to memory before the
	 * device fetches them. */
	bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
	    (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
	    sizeof (*cmd), BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
	    (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
	    sizeof (*desc), BUS_DMASYNC_PREWRITE);

#if 0
	iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
#endif

	/* Kick TX ring. */
	ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
	IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);

	/* Mark TX ring as full if we reach a certain threshold. */
	if (++ring->queued > IWM_TX_RING_HIMARK) {
		sc->qfullmsk |= 1 << ring->qid;
	}

	return 0;
}
4057
#if 0
/* not necessary? */
/*
 * Ask the firmware to flush pending frames on the TX queues named in
 * tfd_msk, synchronously or asynchronously.  Disabled; kept for
 * reference.
 */
static int
iwm_mvm_flush_tx_path(struct iwm_softc *sc, int tfd_msk, int sync)
{
	struct iwm_tx_path_flush_cmd flush_cmd = {
		.queues_ctl = htole32(tfd_msk),
		.flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
	};
	int ret;

	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH,
	    sync ? IWM_CMD_SYNC : IWM_CMD_ASYNC,
	    sizeof(flush_cmd), &flush_cmd);
	if (ret)
		aprint_error_dev(sc->sc_dev, "Flushing tx queue failed: %d\n",
		    ret);
	return ret;
}
#endif
4078
4079
4080 /*
4081 * BEGIN mvm/power.c
4082 */
4083
4084 #define IWM_POWER_KEEP_ALIVE_PERIOD_SEC 25
4085
/*
 * Send a beacon filter command to the firmware; on success, dump its
 * fields for debugging.  Returns the iwm_mvm_send_cmd_pdu() result.
 */
static int
iwm_mvm_beacon_filter_send_cmd(struct iwm_softc *sc,
	struct iwm_beacon_filter_cmd *cmd)
{
	int ret;

	ret = iwm_mvm_send_cmd_pdu(sc, IWM_REPLY_BEACON_FILTERING_CMD,
	    IWM_CMD_SYNC, sizeof(struct iwm_beacon_filter_cmd), cmd);

	if (!ret) {
		DPRINTF(("ba_enable_beacon_abort is: %d\n",
		    le32toh(cmd->ba_enable_beacon_abort)));
		DPRINTF(("ba_escape_timer is: %d\n",
		    le32toh(cmd->ba_escape_timer)));
		DPRINTF(("bf_debug_flag is: %d\n",
		    le32toh(cmd->bf_debug_flag)));
		DPRINTF(("bf_enable_beacon_filter is: %d\n",
		    le32toh(cmd->bf_enable_beacon_filter)));
		DPRINTF(("bf_energy_delta is: %d\n",
		    le32toh(cmd->bf_energy_delta)));
		DPRINTF(("bf_escape_timer is: %d\n",
		    le32toh(cmd->bf_escape_timer)));
		DPRINTF(("bf_roaming_energy_delta is: %d\n",
		    le32toh(cmd->bf_roaming_energy_delta)));
		DPRINTF(("bf_roaming_state is: %d\n",
		    le32toh(cmd->bf_roaming_state)));
		DPRINTF(("bf_temp_threshold is: %d\n",
		    le32toh(cmd->bf_temp_threshold)));
		DPRINTF(("bf_temp_fast_filter is: %d\n",
		    le32toh(cmd->bf_temp_fast_filter)));
		DPRINTF(("bf_temp_slow_filter is: %d\n",
		    le32toh(cmd->bf_temp_slow_filter)));
	}
	return ret;
}
4121
/*
 * Propagate the current beacon-abort setting into a beacon filter
 * command.  (Only the abort flag is set here; nothing else.)
 */
static void
iwm_mvm_beacon_filter_set_cqm_params(struct iwm_softc *sc,
	struct iwm_node *in, struct iwm_beacon_filter_cmd *cmd)
{
	cmd->ba_enable_beacon_abort = htole32(sc->sc_bf.ba_enabled);
}
4128
/*
 * Enable or disable beacon abort while keeping beacon filtering on.
 * Does nothing (returns 0) when beacon filtering is disabled.
 */
static int
iwm_mvm_update_beacon_abort(struct iwm_softc *sc, struct iwm_node *in,
	int enable)
{
	struct iwm_beacon_filter_cmd cmd = {
		IWM_BF_CMD_CONFIG_DEFAULTS,
		.bf_enable_beacon_filter = htole32(1),
		.ba_enable_beacon_abort = htole32(enable),
	};

	if (!sc->sc_bf.bf_enabled)
		return 0;

	/* Record the new setting, then re-send the filter command. */
	sc->sc_bf.ba_enabled = enable;
	iwm_mvm_beacon_filter_set_cqm_params(sc, in, &cmd);
	return iwm_mvm_beacon_filter_send_cmd(sc, &cmd);
}
4146
/*
 * Debug-dump a MAC power table command.  This driver only runs with
 * power management disabled (IWM_POWER_SCHEME_CAM), so the detailed
 * dump below is disabled and we assert if the enable bit is ever set.
 */
static void
iwm_mvm_power_log(struct iwm_softc *sc, struct iwm_mac_power_cmd *cmd)
{
	DPRINTF(("Sending power table command on mac id 0x%X for "
	    "power level %d, flags = 0x%X\n",
	    cmd->id_and_color, IWM_POWER_SCHEME_CAM, le16toh(cmd->flags)));
	DPRINTF(("Keep alive = %u sec\n", le16toh(cmd->keep_alive_seconds)));

	if (!(cmd->flags & htole16(IWM_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK))) {
		DPRINTF(("Disable power management\n"));
		return;
	}
	/* Power management must never be enabled here (CAM only). */
	KASSERT(0);

#if 0
	DPRINTF(mvm, "Rx timeout = %u usec\n",
	    le32_to_cpu(cmd->rx_data_timeout));
	DPRINTF(mvm, "Tx timeout = %u usec\n",
	    le32_to_cpu(cmd->tx_data_timeout));
	if (cmd->flags & cpu_to_le16(IWM_POWER_FLAGS_SKIP_OVER_DTIM_MSK))
		DPRINTF(mvm, "DTIM periods to skip = %u\n",
		    cmd->skip_dtim_periods);
	if (cmd->flags & cpu_to_le16(IWM_POWER_FLAGS_LPRX_ENA_MSK))
		DPRINTF(mvm, "LP RX RSSI threshold = %u\n",
		    cmd->lprx_rssi_threshold);
	if (cmd->flags & cpu_to_le16(IWM_POWER_FLAGS_ADVANCE_PM_ENA_MSK)) {
		DPRINTF(mvm, "uAPSD enabled\n");
		DPRINTF(mvm, "Rx timeout (uAPSD) = %u usec\n",
		    le32_to_cpu(cmd->rx_data_timeout_uapsd));
		DPRINTF(mvm, "Tx timeout (uAPSD) = %u usec\n",
		    le32_to_cpu(cmd->tx_data_timeout_uapsd));
		DPRINTF(mvm, "QNDP TID = %d\n", cmd->qndp_tid);
		DPRINTF(mvm, "ACs flags = 0x%x\n", cmd->uapsd_ac_flags);
		DPRINTF(mvm, "Max SP = %d\n", cmd->uapsd_max_sp);
	}
#endif
}
4184
4185 static void
4186 iwm_mvm_power_build_cmd(struct iwm_softc *sc, struct iwm_node *in,
4187 struct iwm_mac_power_cmd *cmd)
4188 {
4189 struct ieee80211com *ic = &sc->sc_ic;
4190 struct ieee80211_node *ni = &in->in_ni;
4191 int dtimper, dtimper_msec;
4192 int keep_alive;
4193
4194 cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
4195 in->in_color));
4196 dtimper = ic->ic_dtim_period ?: 1;
4197
4198 /*
4199 * Regardless of power management state the driver must set
4200 * keep alive period. FW will use it for sending keep alive NDPs
4201 * immediately after association. Check that keep alive period
4202 * is at least 3 * DTIM
4203 */
4204 dtimper_msec = dtimper * ni->ni_intval;
4205 keep_alive
4206 = MAX(3 * dtimper_msec, 1000 * IWM_POWER_KEEP_ALIVE_PERIOD_SEC);
4207 keep_alive = roundup(keep_alive, 1000) / 1000;
4208 cmd->keep_alive_seconds = htole16(keep_alive);
4209 }
4210
4211 static int
4212 iwm_mvm_power_mac_update_mode(struct iwm_softc *sc, struct iwm_node *in)
4213 {
4214 int ret;
4215 int ba_enable;
4216 struct iwm_mac_power_cmd cmd;
4217
4218 memset(&cmd, 0, sizeof(cmd));
4219
4220 iwm_mvm_power_build_cmd(sc, in, &cmd);
4221 iwm_mvm_power_log(sc, &cmd);
4222
4223 if ((ret = iwm_mvm_send_cmd_pdu(sc, IWM_MAC_PM_POWER_TABLE,
4224 IWM_CMD_SYNC, sizeof(cmd), &cmd)) != 0)
4225 return ret;
4226
4227 ba_enable = !!(cmd.flags &
4228 htole16(IWM_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
4229 return iwm_mvm_update_beacon_abort(sc, in, ba_enable);
4230 }
4231
4232 static int
4233 iwm_mvm_power_update_device(struct iwm_softc *sc)
4234 {
4235 struct iwm_device_power_cmd cmd = {
4236 .flags = htole16(IWM_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK),
4237 };
4238
4239 if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
4240 return 0;
4241
4242 cmd.flags |= htole16(IWM_DEVICE_POWER_FLAGS_CAM_MSK);
4243 DPRINTF(("Sending device power command with flags = 0x%X\n", cmd.flags));
4244
4245 return iwm_mvm_send_cmd_pdu(sc,
4246 IWM_POWER_TABLE_CMD, IWM_CMD_SYNC, sizeof(cmd), &cmd);
4247 }
4248
4249 static int
4250 iwm_mvm_enable_beacon_filter(struct iwm_softc *sc, struct iwm_node *in)
4251 {
4252 struct iwm_beacon_filter_cmd cmd = {
4253 IWM_BF_CMD_CONFIG_DEFAULTS,
4254 .bf_enable_beacon_filter = htole32(1),
4255 };
4256 int ret;
4257
4258 iwm_mvm_beacon_filter_set_cqm_params(sc, in, &cmd);
4259 ret = iwm_mvm_beacon_filter_send_cmd(sc, &cmd);
4260
4261 if (ret == 0)
4262 sc->sc_bf.bf_enabled = 1;
4263
4264 return ret;
4265 }
4266
4267 static int
4268 iwm_mvm_disable_beacon_filter(struct iwm_softc *sc, struct iwm_node *in)
4269 {
4270 struct iwm_beacon_filter_cmd cmd;
4271 int ret;
4272
4273 memset(&cmd, 0, sizeof(cmd));
4274 if ((sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_BF_UPDATED) == 0)
4275 return 0;
4276
4277 ret = iwm_mvm_beacon_filter_send_cmd(sc, &cmd);
4278 if (ret == 0)
4279 sc->sc_bf.bf_enabled = 0;
4280
4281 return ret;
4282 }
4283
#if 0
/*
 * Re-send the beacon filter command if filtering is currently
 * enabled; a no-op otherwise.  Currently unused.
 */
static int
iwm_mvm_update_beacon_filter(struct iwm_softc *sc, struct iwm_node *in)
{
	if (!sc->sc_bf.bf_enabled)
		return 0;

	return iwm_mvm_enable_beacon_filter(sc, in);
}
#endif
4294
4295 /*
4296 * END mvm/power.c
4297 */
4298
4299 /*
4300 * BEGIN mvm/sta.c
4301 */
4302
/*
 * Downgrade a v6 ADD_STA command to the v5 layout for older firmware.
 * All fields common to both versions are copied; cmd_v5 is zeroed
 * first so anything v5-only stays cleared.
 */
static void
iwm_mvm_add_sta_cmd_v6_to_v5(struct iwm_mvm_add_sta_cmd_v6 *cmd_v6,
    struct iwm_mvm_add_sta_cmd_v5 *cmd_v5)
{
	memset(cmd_v5, 0, sizeof(*cmd_v5));

	cmd_v5->add_modify = cmd_v6->add_modify;
	cmd_v5->tid_disable_tx = cmd_v6->tid_disable_tx;
	cmd_v5->mac_id_n_color = cmd_v6->mac_id_n_color;
	memcpy(cmd_v5->addr, cmd_v6->addr, ETHER_ADDR_LEN);
	cmd_v5->sta_id = cmd_v6->sta_id;
	cmd_v5->modify_mask = cmd_v6->modify_mask;
	cmd_v5->station_flags = cmd_v6->station_flags;
	cmd_v5->station_flags_msk = cmd_v6->station_flags_msk;
	cmd_v5->add_immediate_ba_tid = cmd_v6->add_immediate_ba_tid;
	cmd_v5->remove_immediate_ba_tid = cmd_v6->remove_immediate_ba_tid;
	cmd_v5->add_immediate_ba_ssn = cmd_v6->add_immediate_ba_ssn;
	cmd_v5->sleep_tx_count = cmd_v6->sleep_tx_count;
	cmd_v5->sleep_state_flags = cmd_v6->sleep_state_flags;
	cmd_v5->assoc_id = cmd_v6->assoc_id;
	cmd_v5->beamform_flags = cmd_v6->beamform_flags;
	cmd_v5->tfd_queue_msk = cmd_v6->tfd_queue_msk;
}
4326
4327 static int
4328 iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *sc,
4329 struct iwm_mvm_add_sta_cmd_v6 *cmd, int *status)
4330 {
4331 struct iwm_mvm_add_sta_cmd_v5 cmd_v5;
4332
4333 if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_STA_KEY_CMD) {
4334 return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA,
4335 sizeof(*cmd), cmd, status);
4336 }
4337
4338 iwm_mvm_add_sta_cmd_v6_to_v5(cmd, &cmd_v5);
4339
4340 return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(cmd_v5),
4341 &cmd_v5, status);
4342 }
4343
4344 /* send station add/update command to firmware */
4345 static int
4346 iwm_mvm_sta_send_to_fw(struct iwm_softc *sc, struct iwm_node *in, int update)
4347 {
4348 struct iwm_mvm_add_sta_cmd_v6 add_sta_cmd;
4349 int ret;
4350 uint32_t status;
4351
4352 memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
4353
4354 add_sta_cmd.sta_id = IWM_STATION_ID;
4355 add_sta_cmd.mac_id_n_color
4356 = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
4357 if (!update) {
4358 add_sta_cmd.tfd_queue_msk = htole32(0xf);
4359 IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
4360 }
4361 add_sta_cmd.add_modify = update ? 1 : 0;
4362 add_sta_cmd.station_flags_msk
4363 |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
4364
4365 status = IWM_ADD_STA_SUCCESS;
4366 ret = iwm_mvm_send_add_sta_cmd_status(sc, &add_sta_cmd, &status);
4367 if (ret)
4368 return ret;
4369
4370 switch (status) {
4371 case IWM_ADD_STA_SUCCESS:
4372 break;
4373 default:
4374 ret = EIO;
4375 DPRINTF(("IWM_ADD_STA failed\n"));
4376 break;
4377 }
4378
4379 return ret;
4380 }
4381
/*
 * Add the BSS station to the firmware.  An initial add is simply a
 * send with update == 0.
 */
static int
iwm_mvm_add_sta(struct iwm_softc *sc, struct iwm_node *in)
{
	return iwm_mvm_sta_send_to_fw(sc, in, 0);
}
4393
/* Update the already-added BSS station in the firmware. */
static int
iwm_mvm_update_sta(struct iwm_softc *sc, struct iwm_node *in)
{
	return iwm_mvm_sta_send_to_fw(sc, in, 1);
}
4399
4400 static int
4401 iwm_mvm_add_int_sta_common(struct iwm_softc *sc, struct iwm_int_sta *sta,
4402 const uint8_t *addr, uint16_t mac_id, uint16_t color)
4403 {
4404 struct iwm_mvm_add_sta_cmd_v6 cmd;
4405 int ret;
4406 uint32_t status;
4407
4408 memset(&cmd, 0, sizeof(cmd));
4409 cmd.sta_id = sta->sta_id;
4410 cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(mac_id, color));
4411
4412 cmd.tfd_queue_msk = htole32(sta->tfd_queue_msk);
4413
4414 if (addr)
4415 memcpy(cmd.addr, addr, ETHER_ADDR_LEN);
4416
4417 ret = iwm_mvm_send_add_sta_cmd_status(sc, &cmd, &status);
4418 if (ret)
4419 return ret;
4420
4421 switch (status) {
4422 case IWM_ADD_STA_SUCCESS:
4423 DPRINTF(("Internal station added.\n"));
4424 return 0;
4425 default:
4426 DPRINTF(("%s: Add internal station failed, status=0x%x\n",
4427 DEVNAME(sc), status));
4428 ret = EIO;
4429 break;
4430 }
4431 return ret;
4432 }
4433
4434 static int
4435 iwm_mvm_add_aux_sta(struct iwm_softc *sc)
4436 {
4437 int ret;
4438
4439 sc->sc_aux_sta.sta_id = 3;
4440 sc->sc_aux_sta.tfd_queue_msk = 0;
4441
4442 ret = iwm_mvm_add_int_sta_common(sc,
4443 &sc->sc_aux_sta, NULL, IWM_MAC_INDEX_AUX, 0);
4444
4445 if (ret)
4446 memset(&sc->sc_aux_sta, 0, sizeof(sc->sc_aux_sta));
4447 return ret;
4448 }
4449
4450 /*
4451 * END mvm/sta.c
4452 */
4453
4454 /*
4455 * BEGIN mvm/scan.c
4456 */
4457
4458 #define IWM_PLCP_QUIET_THRESH 1
4459 #define IWM_ACTIVE_QUIET_TIME 10
4460 #define LONG_OUT_TIME_PERIOD 600
4461 #define SHORT_OUT_TIME_PERIOD 200
4462 #define SUSPEND_TIME_PERIOD 100
4463
4464 static uint16_t
4465 iwm_mvm_scan_rx_chain(struct iwm_softc *sc)
4466 {
4467 uint16_t rx_chain;
4468 uint8_t rx_ant;
4469
4470 rx_ant = IWM_FW_VALID_RX_ANT(sc);
4471 rx_chain = rx_ant << IWM_PHY_RX_CHAIN_VALID_POS;
4472 rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
4473 rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_SEL_POS;
4474 rx_chain |= 0x1 << IWM_PHY_RX_CHAIN_DRIVER_FORCE_POS;
4475 return htole16(rx_chain);
4476 }
4477
4478 #define ieee80211_tu_to_usec(a) (1024*(a))
4479
4480 static uint32_t
4481 iwm_mvm_scan_max_out_time(struct iwm_softc *sc, uint32_t flags, int is_assoc)
4482 {
4483 if (!is_assoc)
4484 return 0;
4485 if (flags & 0x1)
4486 return htole32(ieee80211_tu_to_usec(SHORT_OUT_TIME_PERIOD));
4487 return htole32(ieee80211_tu_to_usec(LONG_OUT_TIME_PERIOD));
4488 }
4489
4490 static uint32_t
4491 iwm_mvm_scan_suspend_time(struct iwm_softc *sc, int is_assoc)
4492 {
4493 if (!is_assoc)
4494 return 0;
4495 return htole32(ieee80211_tu_to_usec(SUSPEND_TIME_PERIOD));
4496 }
4497
4498 static uint32_t
4499 iwm_mvm_scan_rxon_flags(struct iwm_softc *sc, int flags)
4500 {
4501 if (flags & IEEE80211_CHAN_2GHZ)
4502 return htole32(IWM_PHY_BAND_24);
4503 else
4504 return htole32(IWM_PHY_BAND_5);
4505 }
4506
/*
 * Select the tx rate and antenna for scan probe requests.  The tx
 * antenna rotates round-robin over the valid antennas, persisting the
 * choice in sc_scan_last_antenna across calls.  On 2 GHz, 1 Mbit CCK
 * is used unless CCK is suppressed (no_cck); otherwise 6 Mbit OFDM.
 * Returns the rate_n_flags value in little-endian.
 */
static uint32_t
iwm_mvm_scan_rate_n_flags(struct iwm_softc *sc, int flags, int no_cck)
{
	uint32_t tx_ant;
	int i, ind;

	/* Advance to the next valid tx antenna after the last one used. */
	for (i = 0, ind = sc->sc_scan_last_antenna;
	    i < IWM_RATE_MCS_ANT_NUM; i++) {
		ind = (ind + 1) % IWM_RATE_MCS_ANT_NUM;
		if (IWM_FW_VALID_TX_ANT(sc) & (1 << ind)) {
			sc->sc_scan_last_antenna = ind;
			break;
		}
	}
	tx_ant = (1 << sc->sc_scan_last_antenna) << IWM_RATE_MCS_ANT_POS;

	if ((flags & IEEE80211_CHAN_2GHZ) && !no_cck)
		return htole32(IWM_RATE_1M_PLCP | IWM_RATE_MCS_CCK_MSK |
		    tx_ant);
	else
		return htole32(IWM_RATE_6M_PLCP | tx_ant);
}
4529
4530 /*
4531 * If req->n_ssids > 0, it means we should do an active scan.
4532 * In case of active scan w/o directed scan, we receive a zero-length SSID
4533 * just to notify that this scan is active and not passive.
4534 * In order to notify the FW of the number of SSIDs we wish to scan (including
4535 * the zero-length one), we need to set the corresponding bits in chan->type,
 * one for each SSID, and set the active bit (first). Since the first SSID is
 * already included in the probe template, we need to set only
 * req->n_ssids - 1 bits in addition to the first bit.
4539 */
4540 static uint16_t
4541 iwm_mvm_get_active_dwell(struct iwm_softc *sc, int flags, int n_ssids)
4542 {
4543 if (flags & IEEE80211_CHAN_2GHZ)
4544 return 30 + 3 * (n_ssids + 1);
4545 return 20 + 2 * (n_ssids + 1);
4546 }
4547
4548 static uint16_t
4549 iwm_mvm_get_passive_dwell(struct iwm_softc *sc, int flags)
4550 {
4551 return (flags & IEEE80211_CHAN_2GHZ) ? 100 + 20 : 100 + 10;
4552 }
4553
/*
 * Append per-channel entries to a scan command, directly after the
 * probe request frame in the command buffer.  Only channels matching
 * all of "flags" are included; channels marked passive get their
 * active bit cleared.  Returns the number of channels filled in.
 *
 * "type" carries one bit per SSID to probe (see the comment above);
 * the extra bit is added when the firmware does not embed the first
 * SSID in the probe template (basic_ssid == 0).
 */
static int
iwm_mvm_scan_fill_channels(struct iwm_softc *sc, struct iwm_scan_cmd *cmd,
    int flags, int n_ssids, int basic_ssid)
{
	struct ieee80211com *ic = &sc->sc_ic;
	uint16_t passive_dwell = iwm_mvm_get_passive_dwell(sc, flags);
	uint16_t active_dwell = iwm_mvm_get_active_dwell(sc, flags, n_ssids);
	/* Channel entries start right after the tx command payload. */
	struct iwm_scan_channel *chan = (struct iwm_scan_channel *)
	    (cmd->data + le16toh(cmd->tx_cmd.len));
	int type = (1 << n_ssids) - 1;
	struct ieee80211_channel *c;
	int nchan;

	if (!basic_ssid)
		type |= (1 << n_ssids);

	/* Index 0 is the "any" channel placeholder, so start at 1. */
	for (nchan = 0, c = &ic->ic_channels[1];
	    c <= &ic->ic_channels[IEEE80211_CHAN_MAX];
	    c++) {
		if ((c->ic_flags & flags) != flags)
			continue;

		chan->channel = htole16(ieee80211_mhz2ieee(c->ic_freq, flags));
		chan->type = htole32(type);
		if (c->ic_flags & IEEE80211_CHAN_PASSIVE)
			chan->type &= htole32(~IWM_SCAN_CHANNEL_TYPE_ACTIVE);
		chan->active_dwell = htole16(active_dwell);
		chan->passive_dwell = htole16(passive_dwell);
		chan->iteration_count = htole16(1);
		chan++;
		nchan++;
	}
	if (nchan == 0)
		DPRINTF(("%s: NO CHANNEL!\n", DEVNAME(sc)));
	return nchan;
}
4590
4591 /*
4592 * Fill in probe request with the following parameters:
4593 * TA is our vif HW address, which mac80211 ensures we have.
4594 * Packet is broadcasted, so this is both SA and DA.
4595 * The probe request IE is made out of two: first comes the most prioritized
4596 * SSID if a directed scan is requested. Second comes whatever extra
4597 * information was given to us as the scan request IE.
4598 */
/*
 * Build a broadcast probe request frame in "frame": 802.11 header,
 * one SSID IE (possibly zero-length for a wildcard probe), then any
 * extra IEs.  "left" is the space available in the buffer; every
 * section is bounds-checked against it before being written.
 *
 * Returns the total frame length in bytes, or 0 if the buffer is too
 * small for the header or the SSID IE.  Note that running out of room
 * for the optional extra IEs is not an error; the frame is returned
 * without them.
 */
static uint16_t
iwm_mvm_fill_probe_req(struct iwm_softc *sc, struct ieee80211_frame *frame,
    const uint8_t *ta, int n_ssids, const uint8_t *ssid, int ssid_len,
    const uint8_t *ie, int ie_len, int left)
{
	int len = 0;
	uint8_t *pos = NULL;

	/* Make sure there is enough space for the probe request,
	 * two mandatory IEs and the data */
	left -= sizeof(*frame);
	if (left < 0)
		return 0;

	frame->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
	    IEEE80211_FC0_SUBTYPE_PROBE_REQ;
	frame->i_fc[1] = IEEE80211_FC1_DIR_NODS;
	/* Broadcast probe: DA and BSSID are both the broadcast address. */
	IEEE80211_ADDR_COPY(frame->i_addr1, etherbroadcastaddr);
	memcpy(frame->i_addr2, ta, ETHER_ADDR_LEN);
	IEEE80211_ADDR_COPY(frame->i_addr3, etherbroadcastaddr);

	len += sizeof(*frame);
	CTASSERT(sizeof(*frame) == 24);

	/* for passive scans, no need to fill anything */
	if (n_ssids == 0)
		return (uint16_t)len;

	/* points to the payload of the request */
	pos = (uint8_t *)frame + sizeof(*frame);

	/* fill in our SSID IE */
	left -= ssid_len + 2;
	if (left < 0)
		return 0;
	*pos++ = IEEE80211_ELEMID_SSID;
	*pos++ = ssid_len;
	if (ssid && ssid_len) { /* ssid_len may be == 0 even if ssid is valid */
		memcpy(pos, ssid, ssid_len);
		pos += ssid_len;
	}

	len += ssid_len + 2;

	/* extra IEs are optional: drop them if they do not fit */
	if (left < ie_len)
		return len;

	if (ie && ie_len) {
		memcpy(pos, ie, ie_len);
		len += ie_len;
	}

	return (uint16_t)len;
}
4653
/*
 * Build and send a one-shot scan command for the given band.  The
 * scan command buffer (sc->sc_scan_cmd) is assembled in this order:
 * fixed header, probe request frame, then one entry per channel.
 * sc_scanband records the band being scanned and is cleared again on
 * failure.
 *
 * Returns 0 on success, EIO when the firmware rejects the scan
 * (usually a time-event allocation failure), or the transport error.
 */
static int
iwm_mvm_scan_request(struct iwm_softc *sc, int flags,
    int n_ssids, uint8_t *ssid, int ssid_len)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct iwm_host_cmd hcmd = {
		.id = IWM_SCAN_REQUEST_CMD,
		.len = { 0, },
		.data = { sc->sc_scan_cmd, },
		.flags = IWM_CMD_SYNC,
		.dataflags = { IWM_HCMD_DFL_NOCOPY, },
	};
	struct iwm_scan_cmd *cmd = sc->sc_scan_cmd;
	int is_assoc = 0;
	int ret;
	uint32_t status;
	int basic_ssid = !(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_NO_BASIC_SSID);

	//lockdep_assert_held(&mvm->mutex);

	sc->sc_scanband = flags & (IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_5GHZ);

	DPRINTF(("Handling ieee80211 scan request\n"));
	memset(cmd, 0, sc->sc_scan_cmd_len);

	/* Fixed header: timing, rx chain and filtering. */
	cmd->quiet_time = htole16(IWM_ACTIVE_QUIET_TIME);
	cmd->quiet_plcp_th = htole16(IWM_PLCP_QUIET_THRESH);
	cmd->rxchain_sel_flags = iwm_mvm_scan_rx_chain(sc);
	cmd->max_out_time = iwm_mvm_scan_max_out_time(sc, 0, is_assoc);
	cmd->suspend_time = iwm_mvm_scan_suspend_time(sc, is_assoc);
	cmd->rxon_flags = iwm_mvm_scan_rxon_flags(sc, flags);
	cmd->filter_flags = htole32(IWM_MAC_FILTER_ACCEPT_GRP |
	    IWM_MAC_FILTER_IN_BEACON);

	cmd->type = htole32(IWM_SCAN_TYPE_FORCED);
	cmd->repeats = htole32(1);

	/*
	 * If the user asked for passive scan, don't change to active scan if
	 * you see any activity on the channel - remain passive.
	 */
	if (n_ssids > 0) {
		cmd->passive2active = htole16(1);
		cmd->scan_flags |= IWM_SCAN_FLAGS_PASSIVE2ACTIVE;
#if 0
		if (basic_ssid) {
			ssid = req->ssids[0].ssid;
			ssid_len = req->ssids[0].ssid_len;
		}
#endif
	} else {
		cmd->passive2active = 0;
		cmd->scan_flags &= ~IWM_SCAN_FLAGS_PASSIVE2ACTIVE;
	}

	/* Tx command for the embedded probe request. */
	cmd->tx_cmd.tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
	    IWM_TX_CMD_FLG_BT_DIS);
	cmd->tx_cmd.sta_id = sc->sc_aux_sta.sta_id;
	cmd->tx_cmd.life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
	cmd->tx_cmd.rate_n_flags = iwm_mvm_scan_rate_n_flags(sc, flags, 1/*XXX*/);

	/* Probe request frame follows the tx command in the buffer. */
	cmd->tx_cmd.len = htole16(iwm_mvm_fill_probe_req(sc,
	    (struct ieee80211_frame *)cmd->data,
	    ic->ic_myaddr, n_ssids, ssid, ssid_len,
	    NULL, 0, sc->sc_capa_max_probe_len));

	/* Channel list follows the probe request. */
	cmd->channel_count
	    = iwm_mvm_scan_fill_channels(sc, cmd, flags, n_ssids, basic_ssid);

	cmd->len = htole16(sizeof(struct iwm_scan_cmd) +
	    le16toh(cmd->tx_cmd.len) +
	    (cmd->channel_count * sizeof(struct iwm_scan_channel)));
	hcmd.len[0] = le16toh(cmd->len);

	status = IWM_SCAN_RESPONSE_OK;
	ret = iwm_mvm_send_cmd_status(sc, &hcmd, &status);
	if (!ret && status == IWM_SCAN_RESPONSE_OK) {
		DPRINTF(("Scan request was sent successfully\n"));
	} else {
		/*
		 * If the scan failed, it usually means that the FW was unable
		 * to allocate the time events. Warn on it, but maybe we
		 * should try to send the command again with different params.
		 */
		sc->sc_scanband = 0;
		ret = EIO;
	}
	return ret;
}
4743
4744 /*
4745 * END mvm/scan.c
4746 */
4747
4748 /*
4749 * BEGIN mvm/mac-ctxt.c
4750 */
4751
4752 static void
4753 iwm_mvm_ack_rates(struct iwm_softc *sc, struct iwm_node *in,
4754 int *cck_rates, int *ofdm_rates)
4755 {
4756 int lowest_present_ofdm = 100;
4757 int lowest_present_cck = 100;
4758 uint8_t cck = 0;
4759 uint8_t ofdm = 0;
4760 int i;
4761
4762 for (i = 0; i <= IWM_LAST_CCK_RATE; i++) {
4763 cck |= (1 << i);
4764 if (lowest_present_cck > i)
4765 lowest_present_cck = i;
4766 }
4767 for (i = IWM_FIRST_OFDM_RATE; i <= IWM_LAST_NON_HT_RATE; i++) {
4768 int adj = i - IWM_FIRST_OFDM_RATE;
4769 ofdm |= (1 << adj);
4770 if (lowest_present_cck > adj)
4771 lowest_present_cck = adj;
4772 }
4773
4774 /*
4775 * Now we've got the basic rates as bitmaps in the ofdm and cck
4776 * variables. This isn't sufficient though, as there might not
4777 * be all the right rates in the bitmap. E.g. if the only basic
4778 * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
4779 * and 6 Mbps because the 802.11-2007 standard says in 9.6:
4780 *
4781 * [...] a STA responding to a received frame shall transmit
4782 * its Control Response frame [...] at the highest rate in the
4783 * BSSBasicRateSet parameter that is less than or equal to the
4784 * rate of the immediately previous frame in the frame exchange
4785 * sequence ([...]) and that is of the same modulation class
4786 * ([...]) as the received frame. If no rate contained in the
4787 * BSSBasicRateSet parameter meets these conditions, then the
4788 * control frame sent in response to a received frame shall be
4789 * transmitted at the highest mandatory rate of the PHY that is
4790 * less than or equal to the rate of the received frame, and
4791 * that is of the same modulation class as the received frame.
4792 *
4793 * As a consequence, we need to add all mandatory rates that are
4794 * lower than all of the basic rates to these bitmaps.
4795 */
4796
4797 if (IWM_RATE_24M_INDEX < lowest_present_ofdm)
4798 ofdm |= IWM_RATE_BIT_MSK(24) >> IWM_FIRST_OFDM_RATE;
4799 if (IWM_RATE_12M_INDEX < lowest_present_ofdm)
4800 ofdm |= IWM_RATE_BIT_MSK(12) >> IWM_FIRST_OFDM_RATE;
4801 /* 6M already there or needed so always add */
4802 ofdm |= IWM_RATE_BIT_MSK(6) >> IWM_FIRST_OFDM_RATE;
4803
4804 /*
4805 * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
4806 * Note, however:
4807 * - if no CCK rates are basic, it must be ERP since there must
4808 * be some basic rates at all, so they're OFDM => ERP PHY
4809 * (or we're in 5 GHz, and the cck bitmap will never be used)
4810 * - if 11M is a basic rate, it must be ERP as well, so add 5.5M
4811 * - if 5.5M is basic, 1M and 2M are mandatory
4812 * - if 2M is basic, 1M is mandatory
4813 * - if 1M is basic, that's the only valid ACK rate.
4814 * As a consequence, it's not as complicated as it sounds, just add
4815 * any lower rates to the ACK rate bitmap.
4816 */
4817 if (IWM_RATE_11M_INDEX < lowest_present_cck)
4818 cck |= IWM_RATE_BIT_MSK(11) >> IWM_FIRST_CCK_RATE;
4819 if (IWM_RATE_5M_INDEX < lowest_present_cck)
4820 cck |= IWM_RATE_BIT_MSK(5) >> IWM_FIRST_CCK_RATE;
4821 if (IWM_RATE_2M_INDEX < lowest_present_cck)
4822 cck |= IWM_RATE_BIT_MSK(2) >> IWM_FIRST_CCK_RATE;
4823 /* 1M already there or needed so always add */
4824 cck |= IWM_RATE_BIT_MSK(1) >> IWM_FIRST_CCK_RATE;
4825
4826 *cck_rates = cck;
4827 *ofdm_rates = ofdm;
4828 }
4829
/*
 * Fill the parts of a MAC context command common to all context
 * types: id/color, addresses, ACK rates, slot/preamble flags, EDCA
 * parameters and protection flags.  The BSSID is zeroed while not
 * associated.
 */
static void
iwm_mvm_mac_ctxt_cmd_common(struct iwm_softc *sc, struct iwm_node *in,
    struct iwm_mac_ctx_cmd *cmd, uint32_t action)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni = ic->ic_bss;
	int cck_ack_rates, ofdm_ack_rates;
	int i;

	cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
	    in->in_color));
	cmd->action = htole32(action);

	cmd->mac_type = htole32(IWM_FW_MAC_TYPE_BSS_STA);
	cmd->tsf_id = htole32(in->in_tsfid);

	IEEE80211_ADDR_COPY(cmd->node_addr, ic->ic_myaddr);
	if (in->in_assoc) {
		IEEE80211_ADDR_COPY(cmd->bssid_addr, ni->ni_bssid);
	} else {
		memset(cmd->bssid_addr, 0, sizeof(cmd->bssid_addr));
	}
	iwm_mvm_ack_rates(sc, in, &cck_ack_rates, &ofdm_ack_rates);
	cmd->cck_rates = htole32(cck_ack_rates);
	cmd->ofdm_rates = htole32(ofdm_ack_rates);

	cmd->cck_short_preamble
	    = htole32((ic->ic_flags & IEEE80211_F_SHPREAMBLE)
	      ? IWM_MAC_FLG_SHORT_PREAMBLE : 0);
	cmd->short_slot
	    = htole32((ic->ic_flags & IEEE80211_F_SHSLOT)
	      ? IWM_MAC_FLG_SHORT_SLOT : 0);

	/* Identical fixed EDCA parameters for every AC (plus one extra). */
	for (i = 0; i < IWM_AC_NUM+1; i++) {
		int txf = i;

		cmd->ac[txf].cw_min = htole16(0x0f);
		cmd->ac[txf].cw_max = htole16(0x3f);
		cmd->ac[txf].aifsn = 1;
		cmd->ac[txf].fifos_mask = (1 << txf);
		cmd->ac[txf].edca_txop = 0;
	}

	cmd->protection_flags |= htole32(IWM_MAC_PROT_FLG_TGG_PROTECT);
	cmd->protection_flags |= htole32(IWM_MAC_PROT_FLG_SELF_CTS_EN);

	cmd->filter_flags = htole32(IWM_MAC_FILTER_ACCEPT_GRP);
}
4878
4879 static int
4880 iwm_mvm_mac_ctxt_send_cmd(struct iwm_softc *sc, struct iwm_mac_ctx_cmd *cmd)
4881 {
4882 int ret = iwm_mvm_send_cmd_pdu(sc, IWM_MAC_CONTEXT_CMD, IWM_CMD_SYNC,
4883 sizeof(*cmd), cmd);
4884 if (ret)
4885 DPRINTF(("%s: Failed to send MAC context (action:%d): %d\n",
4886 DEVNAME(sc), le32toh(cmd->action), ret));
4887 return ret;
4888 }
4889
4890 /*
4891 * Fill the specific data for mac context of type station or p2p client
4892 */
/*
 * Fill the station-specific part of a MAC context command: beacon
 * and DTIM intervals (with their firmware reciprocals), association
 * state and, once associated, the predicted TSF/system time of the
 * next DTIM TBTT.
 */
static void
iwm_mvm_mac_ctxt_cmd_fill_sta(struct iwm_softc *sc, struct iwm_node *in,
    struct iwm_mac_data_sta *ctxt_sta, int force_assoc_off)
{
	struct ieee80211_node *ni = &in->in_ni;
	unsigned dtim_period, dtim_count;

	dtim_period = ni->ni_dtim_period;
	dtim_count = ni->ni_dtim_count;

	/* We need the dtim_period to set the MAC as associated */
	if (in->in_assoc && dtim_period && !force_assoc_off) {
		uint64_t tsf;
		uint32_t dtim_offs;

		/*
		 * The DTIM count counts down, so when it is N that means N
		 * more beacon intervals happen until the DTIM TBTT. Therefore
		 * add this to the current time. If that ends up being in the
		 * future, the firmware will handle it.
		 *
		 * Also note that the system_timestamp (which we get here as
		 * "sync_device_ts") and TSF timestamp aren't at exactly the
		 * same offset in the frame -- the TSF is at the first symbol
		 * of the TSF, the system timestamp is at signal acquisition
		 * time. This means there's an offset between them of at most
		 * a few hundred microseconds (24 * 8 bits + PLCP time gives
		 * 384us in the longest case), this is currently not relevant
		 * as the firmware wakes up around 2ms before the TBTT.
		 */
		dtim_offs = dtim_count * ni->ni_intval;
		/* convert TU to usecs */
		dtim_offs *= 1024;

		tsf = ni->ni_tstamp.tsf;

		ctxt_sta->dtim_tsf = htole64(tsf + dtim_offs);
		ctxt_sta->dtim_time = htole64(ni->ni_rstamp + dtim_offs);

		DPRINTF(("DTIM TBTT is 0x%llx/0x%x, offset %d\n",
		    (long long)le64toh(ctxt_sta->dtim_tsf),
		    le32toh(ctxt_sta->dtim_time), dtim_offs));

		ctxt_sta->is_assoc = htole32(1);
	} else {
		ctxt_sta->is_assoc = htole32(0);
	}

	ctxt_sta->bi = htole32(ni->ni_intval);
	ctxt_sta->bi_reciprocal = htole32(iwm_mvm_reciprocal(ni->ni_intval));
	ctxt_sta->dtim_interval = htole32(ni->ni_intval * dtim_period);
	ctxt_sta->dtim_reciprocal =
	    htole32(iwm_mvm_reciprocal(ni->ni_intval * dtim_period));

	/* 10 = CONN_MAX_LISTEN_INTERVAL */
	ctxt_sta->listen_interval = htole32(10);
	ctxt_sta->assoc_id = htole32(ni->ni_associd);
}
4951
4952 static int
4953 iwm_mvm_mac_ctxt_cmd_station(struct iwm_softc *sc, struct iwm_node *in,
4954 uint32_t action)
4955 {
4956 struct iwm_mac_ctx_cmd cmd;
4957
4958 memset(&cmd, 0, sizeof(cmd));
4959
4960 /* Fill the common data for all mac context types */
4961 iwm_mvm_mac_ctxt_cmd_common(sc, in, &cmd, action);
4962
4963 if (in->in_assoc)
4964 cmd.filter_flags |= htole32(IWM_MAC_FILTER_IN_BEACON);
4965 else
4966 cmd.filter_flags &= ~htole32(IWM_MAC_FILTER_IN_BEACON);
4967
4968 /* Fill the data specific for station mode */
4969 iwm_mvm_mac_ctxt_cmd_fill_sta(sc, in,
4970 &cmd.sta, action == IWM_FW_CTXT_ACTION_ADD);
4971
4972 return iwm_mvm_mac_ctxt_send_cmd(sc, &cmd);
4973 }
4974
/* Dispatch a MAC context command; only station mode is supported. */
static int
iwm_mvm_mac_ctx_send(struct iwm_softc *sc, struct iwm_node *in, uint32_t action)
{
	return iwm_mvm_mac_ctxt_cmd_station(sc, in, action);
}
4980
4981 static int
4982 iwm_mvm_mac_ctxt_add(struct iwm_softc *sc, struct iwm_node *in)
4983 {
4984 int ret;
4985
4986 ret = iwm_mvm_mac_ctx_send(sc, in, IWM_FW_CTXT_ACTION_ADD);
4987 if (ret)
4988 return ret;
4989
4990 return 0;
4991 }
4992
/* Re-send an existing MAC context to the firmware (MODIFY action). */
static int
iwm_mvm_mac_ctxt_changed(struct iwm_softc *sc, struct iwm_node *in)
{
	return iwm_mvm_mac_ctx_send(sc, in, IWM_FW_CTXT_ACTION_MODIFY);
}
4998
#if 0
/*
 * Remove a MAC context from the firmware.  Currently unused.
 */
static int
iwm_mvm_mac_ctxt_remove(struct iwm_softc *sc, struct iwm_node *in)
{
	struct iwm_mac_ctx_cmd cmd;
	int ret;

	if (!in->in_uploaded) {
		/*
		 * Was a call to a nonexistent print() function; use the
		 * file's DPRINTF idiom so this compiles if re-enabled.
		 */
		DPRINTF(("%s: attempt to remove !uploaded node %p\n",
		    DEVNAME(sc), in));
		return EIO;
	}

	memset(&cmd, 0, sizeof(cmd));

	cmd.id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
	    in->in_color));
	cmd.action = htole32(IWM_FW_CTXT_ACTION_REMOVE);

	ret = iwm_mvm_send_cmd_pdu(sc,
	    IWM_MAC_CONTEXT_CMD, IWM_CMD_SYNC, sizeof(cmd), &cmd);
	if (ret) {
		aprint_error_dev(sc->sc_dev,
		    "Failed to remove MAC context: %d\n", ret);
		return ret;
	}
	in->in_uploaded = 0;

	return 0;
}
#endif
5029
#define IWM_MVM_MISSED_BEACONS_THRESHOLD 8

/*
 * Handle a missed-beacons notification from the firmware: log the
 * counters and report a beacon miss to net80211 once too many
 * consecutive beacons have been lost since the last received frame.
 */
static void
iwm_mvm_rx_missed_beacons_notif(struct iwm_softc *sc,
    struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
{
	struct iwm_missed_beacons_notif *mb = (void *)pkt->data;

	DPRINTF(("missed bcn mac_id=%u, consecutive=%u (%u, %u, %u)\n",
	    le32toh(mb->mac_id),
	    le32toh(mb->consec_missed_beacons),
	    le32toh(mb->consec_missed_beacons_since_last_rx),
	    le32toh(mb->num_recvd_beacons),
	    le32toh(mb->num_expected_beacons)));

	/*
	 * TODO: the threshold should be adjusted based on latency conditions,
	 * and/or in case of a CS flow on one of the other AP vifs.
	 */
	if (le32toh(mb->consec_missed_beacons_since_last_rx) >
	    IWM_MVM_MISSED_BEACONS_THRESHOLD)
		ieee80211_beacon_miss(&sc->sc_ic);
}
5053
5054 /*
5055 * END mvm/mac-ctxt.c
5056 */
5057
5058 /*
5059 * BEGIN mvm/quota.c
5060 */
5061
/*
 * Divide the firmware's scheduling session (IWM_MVM_MAX_QUOTA
 * fragments) among the active bindings and send the result as a
 * TIME_QUOTA command.  With a single interface ("in" non-NULL) its
 * binding gets the whole quota; unused bindings are marked invalid.
 */
static int
iwm_mvm_update_quotas(struct iwm_softc *sc, struct iwm_node *in)
{
	struct iwm_time_quota_cmd cmd;
	int i, idx, ret, num_active_macs, quota, quota_rem;
	int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
	int n_ifs[IWM_MAX_BINDINGS] = {0, };
	uint16_t id;

	memset(&cmd, 0, sizeof(cmd));

	/* currently, PHY ID == binding ID */
	if (in) {
		id = in->in_phyctxt->id;
		KASSERT(id < IWM_MAX_BINDINGS);
		colors[id] = in->in_phyctxt->color;

		if (1)
			n_ifs[id] = 1;
	}

	/*
	 * The FW's scheduling session consists of
	 * IWM_MVM_MAX_QUOTA fragments. Divide these fragments
	 * equally between all the bindings that require quota
	 */
	num_active_macs = 0;
	for (i = 0; i < IWM_MAX_BINDINGS; i++) {
		cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
		num_active_macs += n_ifs[i];
	}

	quota = 0;
	quota_rem = 0;
	if (num_active_macs) {
		quota = IWM_MVM_MAX_QUOTA / num_active_macs;
		quota_rem = IWM_MVM_MAX_QUOTA % num_active_macs;
	}

	/* Fill one command slot per binding that has a color assigned. */
	for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
		if (colors[i] < 0)
			continue;

		cmd.quotas[idx].id_and_color =
		    htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));

		if (n_ifs[i] <= 0) {
			cmd.quotas[idx].quota = htole32(0);
			cmd.quotas[idx].max_duration = htole32(0);
		} else {
			cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
			cmd.quotas[idx].max_duration = htole32(0);
		}
		idx++;
	}

	/* Give the remainder of the session to the first binding */
	cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);

	ret = iwm_mvm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
	    sizeof(cmd), &cmd);
	if (ret)
		DPRINTF(("%s: Failed to send quota: %d\n", DEVNAME(sc), ret));
	return ret;
}
5127
5128 /*
5129 * END mvm/quota.c
5130 */
5131
5132 /*
5133 * aieee80211 routines
5134 */
5135
5136 /*
5137 * Change to AUTH state in 80211 state machine. Roughly matches what
5138 * Linux does in bss_info_changed().
5139 */
5140 static int
5141 iwm_auth(struct iwm_softc *sc)
5142 {
5143 struct ieee80211com *ic = &sc->sc_ic;
5144 struct iwm_node *in = (void *)ic->ic_bss;
5145 uint32_t duration;
5146 uint32_t min_duration;
5147 int error;
5148
5149 in->in_assoc = 0;
5150 if ((error = iwm_mvm_mac_ctxt_add(sc, in)) != 0) {
5151 DPRINTF(("%s: failed to add MAC\n", DEVNAME(sc)));
5152 return error;
5153 }
5154
5155 if ((error = iwm_mvm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
5156 in->in_ni.ni_chan, 1, 1)) != 0) {
5157 DPRINTF(("%s: failed add phy ctxt\n", DEVNAME(sc)));
5158 return error;
5159 }
5160 in->in_phyctxt = &sc->sc_phyctxt[0];
5161
5162 if ((error = iwm_mvm_binding_add_vif(sc, in)) != 0) {
5163 DPRINTF(("%s: binding cmd\n", DEVNAME(sc)));
5164 return error;
5165 }
5166
5167 if ((error = iwm_mvm_add_sta(sc, in)) != 0) {
5168 DPRINTF(("%s: failed to add MAC\n", DEVNAME(sc)));
5169 return error;
5170 }
5171
5172 /* a bit superfluous? */
5173 while (sc->sc_auth_prot)
5174 tsleep(&sc->sc_auth_prot, 0, "iwmauth", 0);
5175 sc->sc_auth_prot = 1;
5176
5177 duration = min(IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS,
5178 200 + in->in_ni.ni_intval);
5179 min_duration = min(IWM_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS,
5180 100 + in->in_ni.ni_intval);
5181 iwm_mvm_protect_session(sc, in, duration, min_duration, 500);
5182
5183 while (sc->sc_auth_prot != 2) {
5184 /*
5185 * well, meh, but if the kernel is sleeping for half a
5186 * second, we have bigger problems
5187 */
5188 if (sc->sc_auth_prot == 0) {
5189 DPRINTF(("%s: missed auth window!\n", DEVNAME(sc)));
5190 return ETIMEDOUT;
5191 } else if (sc->sc_auth_prot == -1) {
5192 DPRINTF(("%s: no time event, denied!\n", DEVNAME(sc)));
5193 sc->sc_auth_prot = 0;
5194 return EAUTH;
5195 }
5196 tsleep(&sc->sc_auth_prot, 0, "iwmau2", 0);
5197 }
5198
5199 return 0;
5200 }
5201
5202 static int
5203 iwm_assoc(struct iwm_softc *sc)
5204 {
5205 struct ieee80211com *ic = &sc->sc_ic;
5206 struct iwm_node *in = (void *)ic->ic_bss;
5207 int error;
5208
5209 if ((error = iwm_mvm_update_sta(sc, in)) != 0) {
5210 DPRINTF(("%s: failed to update STA\n", DEVNAME(sc)));
5211 return error;
5212 }
5213
5214 in->in_assoc = 1;
5215 if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
5216 DPRINTF(("%s: failed to update MAC\n", DEVNAME(sc)));
5217 return error;
5218 }
5219
5220 return 0;
5221 }
5222
/*
 * Tear down the RUN-state association by fully resetting and
 * re-initializing the device (see the comment below for why the
 * orderly teardown sequence is not used).  Always returns 0.
 */
static int
iwm_release(struct iwm_softc *sc, struct iwm_node *in)
{
	/*
	 * Ok, so *technically* the proper set of calls for going
	 * from RUN back to SCAN is:
	 *
	 * iwm_mvm_power_mac_disable(sc, in);
	 * iwm_mvm_mac_ctxt_changed(sc, in);
	 * iwm_mvm_rm_sta(sc, in);
	 * iwm_mvm_update_quotas(sc, NULL);
	 * iwm_mvm_mac_ctxt_changed(sc, in);
	 * iwm_mvm_binding_remove_vif(sc, in);
	 * iwm_mvm_mac_ctxt_remove(sc, in);
	 *
	 * However, that freezes the device no matter which permutations
	 * and modifications are attempted. Obviously, this driver is missing
	 * something since it works in the Linux driver, but figuring out what
	 * is missing is a little more complicated. Now, since we're going
	 * back to nothing anyway, we'll just do a complete device reset.
	 * Up your's, device!
	 */
	//iwm_mvm_flush_tx_path(sc, 0xf, 1);
	iwm_stop_device(sc);
	iwm_init_hw(sc);
	if (in)
		in->in_assoc = 0;
	return 0;

#if 0
	/*
	 * NOTE(review): dead code kept for reference.  It calls
	 * iwm_mvm_rm_sta() twice (once in the if() above, once
	 * unconditionally below) -- that would need fixing before
	 * this path could ever be re-enabled.
	 */
	int error;

	iwm_mvm_power_mac_disable(sc, in);

	if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
		aprint_error_dev(sc->sc_dev, "mac ctxt change fail 1 %d\n",
		    error);
		return error;
	}

	if ((error = iwm_mvm_rm_sta(sc, in)) != 0) {
		aprint_error_dev(sc->sc_dev, "sta remove fail %d\n", error);
		return error;
	}
	error = iwm_mvm_rm_sta(sc, in);
	in->in_assoc = 0;
	iwm_mvm_update_quotas(sc, NULL);
	if ((error = iwm_mvm_mac_ctxt_changed(sc, in)) != 0) {
		aprint_error_dev(sc->sc_dev, "mac ctxt change fail 2 %d\n",
		    error);
		return error;
	}
	iwm_mvm_binding_remove_vif(sc, in);

	iwm_mvm_mac_ctxt_remove(sc, in);

	return error;
#endif
}
5282
5283
5284 static struct ieee80211_node *
5285 iwm_node_alloc(struct ieee80211_node_table *nt)
5286 {
5287
5288 return kmem_zalloc(sizeof (struct iwm_node), KM_NOSLEEP | M_ZERO);
5289 }
5290
5291 static void
5292 iwm_calib_timeout(void *arg)
5293 {
5294 struct iwm_softc *sc = arg;
5295 struct ieee80211com *ic = &sc->sc_ic;
5296 int s;
5297
5298 s = splnet();
5299 if (ic->ic_fixed_rate == -1
5300 && ic->ic_opmode == IEEE80211_M_STA
5301 && ic->ic_bss) {
5302 struct iwm_node *in = (void *)ic->ic_bss;
5303 ieee80211_amrr_choose(&sc->sc_amrr, &in->in_ni, &in->in_amn);
5304 }
5305 splx(s);
5306
5307 callout_schedule(&sc->sc_calib_to, hz/2);
5308 }
5309
/*
 * Build the node's HW rate-index table (in_ridx) and the firmware
 * link-quality command (in_lq) from the rates the node advertises,
 * then initialize AMRR state.  The caller sends in_lq via IWM_LQ_CMD.
 */
static void
iwm_setrates(struct iwm_node *in)
{
	struct ieee80211_node *ni = &in->in_ni;
	struct ieee80211com *ic = ni->ni_ic;
	struct iwm_softc *sc = IC2IFP(ic)->if_softc;
	struct iwm_lq_cmd *lq = &in->in_lq;
	int nrates = ni->ni_rates.rs_nrates;
	int i, ridx, tab = 0;
	int txant = 0;

	/* in_ridx / rs_table are indexed in parallel; bail if too many. */
	if (nrates > __arraycount(lq->rs_table)) {
		DPRINTF(("%s: node supports %d rates, driver handles only "
		    "%zu\n", DEVNAME(sc), nrates, __arraycount(lq->rs_table)));
		return;
	}

	/* first figure out which rates we should support */
	memset(&in->in_ridx, -1, sizeof(in->in_ridx));
	for (i = 0; i < nrates; i++) {
		int rate = ni->ni_rates.rs_rates[i] & IEEE80211_RATE_VAL;

		/* Map 802.11 rate to HW rate index. */
		for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
			if (iwm_rates[ridx].rate == rate)
				break;
		if (ridx > IWM_RIDX_MAX)
			DPRINTF(("%s: WARNING: device rate for %d not found!\n",
			    DEVNAME(sc), rate));
		else
			in->in_ridx[i] = ridx;
	}

	/* then construct a lq_cmd based on those */
	memset(lq, 0, sizeof(*lq));
	lq->sta_id = IWM_STATION_ID;

	/*
	 * are these used? (we don't do SISO or MIMO)
	 * need to set them to non-zero, though, or we get an error.
	 */
	lq->single_stream_ant_msk = 1;
	lq->dual_stream_ant_msk = 1;

	/*
	 * Build the actual rate selection table.
	 * The lowest bits are the rates. Additionally,
	 * CCK needs bit 9 to be set. The rest of the bits
	 * we add to the table select the tx antenna
	 * Note that we add the rates in the highest rate first
	 * (opposite of ni_rates).
	 */
	for (i = 0; i < nrates; i++) {
		int nextant;

		/* round-robin over the antennas the firmware declares valid */
		if (txant == 0)
			txant = IWM_FW_VALID_TX_ANT(sc);
		nextant = 1<<(ffs(txant)-1);
		txant &= ~nextant;

		ridx = in->in_ridx[(nrates-1)-i];
		tab = iwm_rates[ridx].plcp;
		tab |= nextant << IWM_RATE_MCS_ANT_POS;
		if (IWM_RIDX_IS_CCK(ridx))
			tab |= IWM_RATE_MCS_CCK_MSK;
		DPRINTFN(2, ("station rate %d %x\n", i, tab));
		lq->rs_table[i] = htole32(tab);
	}
	/* then fill the rest with the lowest possible rate */
	for (i = nrates; i < __arraycount(lq->rs_table); i++) {
		KASSERT(tab != 0);
		lq->rs_table[i] = htole32(tab);
	}

	/* init amrr */
	ieee80211_amrr_node_init(&sc->sc_amrr, &in->in_amn);
	ni->ni_txrate = nrates-1;
}
5388
5389 static int
5390 iwm_media_change(struct ifnet *ifp)
5391 {
5392 struct iwm_softc *sc = ifp->if_softc;
5393 struct ieee80211com *ic = &sc->sc_ic;
5394 uint8_t rate, ridx;
5395 int error;
5396
5397 error = ieee80211_media_change(ifp);
5398 if (error != ENETRESET)
5399 return error;
5400
5401 if (ic->ic_fixed_rate != -1) {
5402 rate = ic->ic_sup_rates[ic->ic_curmode].
5403 rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL;
5404 /* Map 802.11 rate to HW rate index. */
5405 for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
5406 if (iwm_rates[ridx].rate == rate)
5407 break;
5408 sc->sc_fixed_ridx = ridx;
5409 }
5410
5411 if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
5412 (IFF_UP | IFF_RUNNING)) {
5413 iwm_stop(ifp, 0);
5414 error = iwm_init(ifp);
5415 }
5416 return error;
5417 }
5418
/*
 * Deferred state-change worker, run from the sc_nswq workqueue by
 * iwm_newstate().  Performs the firmware work for the requested
 * net80211 state and then chains to the stacked sc_newstate()
 * handler.  A generation mismatch means the device was reset while
 * the request was queued; such stale requests are dropped, except
 * that a transition to INIT is still forwarded.
 */
static void
iwm_newstate_cb(void *wk)
{
	struct iwm_newstate_state *iwmns = (void *)wk;
	struct ieee80211com *ic = iwmns->ns_ic;
	enum ieee80211_state nstate = iwmns->ns_nstate;
	int generation = iwmns->ns_generation;
	struct iwm_node *in;
	int arg = iwmns->ns_arg;
	struct ifnet *ifp = IC2IFP(ic);
	struct iwm_softc *sc = ifp->if_softc;
	int error;

	/* We own the request memory; free it before doing any work. */
	kmem_free(iwmns, sizeof(*iwmns));

	DPRINTF(("Prepare to switch state %d->%d\n", ic->ic_state, nstate));
	if (sc->sc_generation != generation) {
		DPRINTF(("newstate_cb: someone pulled the plug meanwhile\n"));
		if (nstate == IEEE80211_S_INIT) {
			DPRINTF(("newstate_cb: nstate == IEEE80211_S_INIT: calling sc_newstate()\n"));
			sc->sc_newstate(ic, nstate, arg);
		}
		return;
	}

	DPRINTF(("switching state %d->%d\n", ic->ic_state, nstate));

	/* disable beacon filtering if we're hopping out of RUN */
	if (ic->ic_state == IEEE80211_S_RUN && nstate != ic->ic_state) {
		iwm_mvm_disable_beacon_filter(sc, (void *)ic->ic_bss);

		if (((in = (void *)ic->ic_bss) != NULL))
			in->in_assoc = 0;
		/* full device reset; see the comment in iwm_release() */
		iwm_release(sc, NULL);

		/*
		 * It's impossible to directly go RUN->SCAN. If we iwm_release()
		 * above then the card will be completely reinitialized,
		 * so the driver must do everything necessary to bring the card
		 * from INIT to SCAN.
		 *
		 * Additionally, upon receiving deauth frame from AP,
		 * OpenBSD 802.11 stack puts the driver in IEEE80211_S_AUTH
		 * state. This will also fail with this driver, so bring the FSM
		 * from IEEE80211_S_RUN to IEEE80211_S_SCAN in this case as well.
		 */
		if (nstate == IEEE80211_S_SCAN ||
		    nstate == IEEE80211_S_AUTH ||
		    nstate == IEEE80211_S_ASSOC) {
			DPRINTF(("Force transition to INIT; MGT=%d\n", arg));
			sc->sc_newstate(ic, IEEE80211_S_INIT, arg);
			DPRINTF(("Going INIT->SCAN\n"));
			nstate = IEEE80211_S_SCAN;
		}
	}

	switch (nstate) {
	case IEEE80211_S_INIT:
		sc->sc_scanband = 0;
		break;

	case IEEE80211_S_SCAN:
		/* nonzero sc_scanband: a scan pass is already in flight */
		if (sc->sc_scanband)
			break;

		if ((error = iwm_mvm_scan_request(sc, IEEE80211_CHAN_2GHZ,
		    ic->ic_des_esslen != 0,
		    ic->ic_des_essid, ic->ic_des_esslen)) != 0) {
			DPRINTF(("%s: could not initiate scan\n", DEVNAME(sc)));
			return;
		}
		/*
		 * Bypass sc_newstate() here; the scan-complete path
		 * (iwm_endscan_cb) finishes the transition.
		 */
		ic->ic_state = nstate;
		return;

	case IEEE80211_S_AUTH:
		if ((error = iwm_auth(sc)) != 0) {
			DPRINTF(("%s: could not move to auth state: %d\n",
			    DEVNAME(sc), error));
			return;
		}

		break;

	case IEEE80211_S_ASSOC:
		if ((error = iwm_assoc(sc)) != 0) {
			DPRINTF(("%s: failed to associate: %d\n", DEVNAME(sc),
			    error));
			return;
		}
		break;

	case IEEE80211_S_RUN: {
		struct iwm_host_cmd cmd = {
			.id = IWM_LQ_CMD,
			.len = { sizeof(in->in_lq), },
			.flags = IWM_CMD_SYNC,
		};

		in = (struct iwm_node *)ic->ic_bss;
		/* update power, beacon-filter and quota state for the BSS */
		iwm_mvm_power_mac_update_mode(sc, in);
		iwm_mvm_enable_beacon_filter(sc, in);
		iwm_mvm_update_quotas(sc, in);
		/* build in_lq, then push it to the firmware below */
		iwm_setrates(in);

		cmd.data[0] = &in->in_lq;
		if ((error = iwm_send_cmd(sc, &cmd)) != 0) {
			DPRINTF(("%s: IWM_LQ_CMD failed\n", DEVNAME(sc)));
		}

		/* start the periodic AMRR calibration tick */
		callout_schedule(&sc->sc_calib_to, hz/2);

		break; }

	default:
		break;
	}

	sc->sc_newstate(ic, nstate, arg);
}
5538
5539 static int
5540 iwm_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
5541 {
5542 struct iwm_newstate_state *iwmns;
5543 struct ifnet *ifp = IC2IFP(ic);
5544 struct iwm_softc *sc = ifp->if_softc;
5545
5546 callout_stop(&sc->sc_calib_to);
5547
5548 iwmns = kmem_alloc(sizeof(*iwmns), KM_NOSLEEP);
5549 if (!iwmns) {
5550 DPRINTF(("%s: allocating state cb mem failed\n", DEVNAME(sc)));
5551 return ENOMEM;
5552 }
5553
5554 iwmns->ns_ic = ic;
5555 iwmns->ns_nstate = nstate;
5556 iwmns->ns_arg = arg;
5557 iwmns->ns_generation = sc->sc_generation;
5558
5559 workqueue_enqueue(sc->sc_nswq, &iwmns->ns_wk, NULL);
5560
5561 return 0;
5562 }
5563
/*
 * Scan-complete work handler (runs on sc_eswq).  After the 2GHz pass
 * finishes, start a 5GHz pass unless built with IWM_NO_5GHZ; once the
 * whole scan is done, hand the result to net80211.
 */
static void
iwm_endscan_cb(void *arg)
{
	struct iwm_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;
	int done;

	DPRINTF(("scan ended\n"));

	if (sc->sc_scanband == IEEE80211_CHAN_2GHZ) {
#ifndef IWM_NO_5GHZ
		int error;
		done = 0;
		/* 2GHz pass done; kick off the 5GHz pass */
		if ((error = iwm_mvm_scan_request(sc,
		    IEEE80211_CHAN_5GHZ, ic->ic_des_esslen != 0,
		    ic->ic_des_essid, ic->ic_des_esslen)) != 0) {
			DPRINTF(("%s: could not initiate scan\n", DEVNAME(sc)));
			done = 1;
		}
#else
		done = 1;
#endif
	} else {
		done = 1;
	}

	if (done) {
		/*
		 * sc_scanband == 0 here means the scan was aborted in
		 * the meantime (iwm_stop() clears it); otherwise the
		 * scan completed normally.
		 */
		if (!sc->sc_scanband) {
			ieee80211_cancel_scan(ic);
		} else {
			ieee80211_end_scan(ic);
		}
		sc->sc_scanband = 0;
	}
}
5599
/*
 * Full hardware/firmware bring-up: run the INIT firmware image,
 * restart the hardware, load the regular runtime firmware, then
 * configure antennas, PHY DB, PHY contexts, power and tx queues.
 * On failure after the restart, the device is stopped again.
 */
static int
iwm_init_hw(struct iwm_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	int error, i, qid;

	if ((error = iwm_preinit(sc)) != 0)
		return error;

	if ((error = iwm_start_hw(sc)) != 0)
		return error;

	/* run the INIT firmware image (NVM access / calibration) */
	if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
		return error;
	}

	/*
	 * should stop and start HW since that INIT
	 * image just loaded
	 */
	iwm_stop_device(sc);
	if ((error = iwm_start_hw(sc)) != 0) {
		aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
		return error;
	}

	/* omstart, this time with the regular firmware */
	error = iwm_mvm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_REGULAR);
	if (error) {
		aprint_error_dev(sc->sc_dev, "could not load firmware\n");
		goto error;
	}

	if ((error = iwm_send_tx_ant_cfg(sc, IWM_FW_VALID_TX_ANT(sc))) != 0)
		goto error;

	/* Send phy db control command and then phy db calibration*/
	if ((error = iwm_send_phy_db_data(sc)) != 0)
		goto error;

	if ((error = iwm_send_phy_cfg_cmd(sc)) != 0)
		goto error;

	/* Add auxiliary station for scanning */
	if ((error = iwm_mvm_add_aux_sta(sc)) != 0)
		goto error;

	for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
		/*
		 * The channel used here isn't relevant as it's
		 * going to be overwritten in the other flows.
		 * For now use the first channel we have.
		 * (NOTE(review): index [1], not [0] -- presumably
		 * because channel numbers start at 1; confirm.)
		 */
		if ((error = iwm_mvm_phy_ctxt_add(sc,
		    &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
			goto error;
	}

	error = iwm_mvm_power_update_device(sc);
	if (error)
		goto error;

	/* Mark TX rings as active. */
	for (qid = 0; qid < 4; qid++) {
		iwm_enable_txq(sc, qid, qid);
	}

	return 0;

 error:
	iwm_stop_device(sc);
	return error;
}
5673
5674 /*
5675 * ifnet interfaces
5676 */
5677
5678 static int
5679 iwm_init(struct ifnet *ifp)
5680 {
5681 struct iwm_softc *sc = ifp->if_softc;
5682 int error;
5683
5684 if (sc->sc_flags & IWM_FLAG_HW_INITED) {
5685 return 0;
5686 }
5687 sc->sc_generation++;
5688 sc->sc_flags &= ~IWM_FLAG_STOPPED;
5689
5690 if ((error = iwm_init_hw(sc)) != 0) {
5691 iwm_stop(ifp, 1);
5692 return error;
5693 }
5694
5695 /*
5696 * Ok, firmware loaded and we are jogging
5697 */
5698
5699 ifp->if_flags &= ~IFF_OACTIVE;
5700 ifp->if_flags |= IFF_RUNNING;
5701
5702 ieee80211_begin_scan(&sc->sc_ic, 0);
5703 sc->sc_flags |= IWM_FLAG_HW_INITED;
5704
5705 return 0;
5706 }
5707
5708 /*
5709 * Dequeue packets from sendq and call send.
5710 * mostly from iwn
5711 */
/*
 * Dequeue packets from sendq and call send.
 * mostly from iwn
 *
 * Management frames (ic_mgtq) are sent in any state; data frames
 * (if_snd) only in RUN.  Stops and sets IFF_OACTIVE when a tx ring
 * fills up.
 */
static void
iwm_start(struct ifnet *ifp)
{
	struct iwm_softc *sc = ifp->if_softc;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_node *ni;
	struct ether_header *eh;
	struct mbuf *m;
	int ac;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	for (;;) {
		/* why isn't this done per-queue? */
		if (sc->qfullmsk != 0) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* need to send management frames even if we're not RUNning */
		IF_DEQUEUE(&ic->ic_mgtq, m);
		if (m) {
			/* net80211 stashes the node pointer in rcvif */
			ni = (void *)m->m_pkthdr.rcvif;
			ac = 0;
			goto sendit;
		}
		if (ic->ic_state != IEEE80211_S_RUN) {
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (!m)
			break;
		/* make the Ethernet header contiguous for mtod() below */
		if (m->m_len < sizeof (*eh) &&
		    (m = m_pullup(m, sizeof (*eh))) == NULL) {
			ifp->if_oerrors++;
			continue;
		}
		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp, m);

		eh = mtod(m, struct ether_header *);
		ni = ieee80211_find_txnode(ic, eh->ether_dhost);
		if (ni == NULL) {
			m_freem(m);
			ifp->if_oerrors++;
			continue;
		}
		/* classify mbuf so we can find which tx ring to use */
		if (ieee80211_classify(ic, m, ni) != 0) {
			m_freem(m);
			ieee80211_free_node(ni);
			ifp->if_oerrors++;
			continue;
		}

		/* No QoS encapsulation for EAPOL frames. */
		ac = (eh->ether_type != htons(ETHERTYPE_PAE)) ?
		    M_WME_GETAC(m) : WME_AC_BE;

		if ((m = ieee80211_encap(ic, m, ni)) == NULL) {
			ieee80211_free_node(ni);
			ifp->if_oerrors++;
			continue;
		}

 sendit:
		if (ic->ic_rawbpf != NULL)
			bpf_mtap3(ic->ic_rawbpf, m);
		/* iwm_tx() consumes the mbuf on success */
		if (iwm_tx(sc, m, ni, ac) != 0) {
			ieee80211_free_node(ni);
			ifp->if_oerrors++;
			continue;
		}

		if (ifp->if_flags & IFF_UP) {
			/* arm the tx watchdog (see iwm_watchdog()) */
			sc->sc_tx_timer = 15;
			ifp->if_timer = 1;
		}
	}

	return;
}
5796
5797 static void
5798 iwm_stop(struct ifnet *ifp, int disable)
5799 {
5800 struct iwm_softc *sc = ifp->if_softc;
5801 struct ieee80211com *ic = &sc->sc_ic;
5802
5803 sc->sc_flags &= ~IWM_FLAG_HW_INITED;
5804 sc->sc_flags |= IWM_FLAG_STOPPED;
5805 sc->sc_generation++;
5806 sc->sc_scanband = 0;
5807 sc->sc_auth_prot = 0;
5808 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
5809
5810 if (ic->ic_state != IEEE80211_S_INIT)
5811 ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
5812
5813 ifp->if_timer = sc->sc_tx_timer = 0;
5814 iwm_stop_device(sc);
5815 }
5816
5817 static void
5818 iwm_watchdog(struct ifnet *ifp)
5819 {
5820 struct iwm_softc *sc = ifp->if_softc;
5821
5822 ifp->if_timer = 0;
5823 if (sc->sc_tx_timer > 0) {
5824 if (--sc->sc_tx_timer == 0) {
5825 aprint_error_dev(sc->sc_dev, "device timeout\n");
5826 #ifdef IWM_DEBUG
5827 iwm_nic_error(sc);
5828 #endif
5829 ifp->if_flags &= ~IFF_UP;
5830 iwm_stop(ifp, 1);
5831 ifp->if_oerrors++;
5832 return;
5833 }
5834 ifp->if_timer = 1;
5835 }
5836
5837 ieee80211_watchdog(&sc->sc_ic);
5838 }
5839
5840 static int
5841 iwm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
5842 {
5843 struct iwm_softc *sc = ifp->if_softc;
5844 struct ieee80211com *ic = &sc->sc_ic;
5845 const struct sockaddr *sa;
5846 int s, error = 0;
5847
5848 s = splnet();
5849
5850 switch (cmd) {
5851 case SIOCSIFADDR:
5852 ifp->if_flags |= IFF_UP;
5853 /* FALLTHROUGH */
5854 case SIOCSIFFLAGS:
5855 if ((error = ifioctl_common(ifp, cmd, data)) != 0)
5856 break;
5857 if (ifp->if_flags & IFF_UP) {
5858 if (!(ifp->if_flags & IFF_RUNNING)) {
5859 if ((error = iwm_init(ifp)) != 0)
5860 ifp->if_flags &= ~IFF_UP;
5861 }
5862 } else {
5863 if (ifp->if_flags & IFF_RUNNING)
5864 iwm_stop(ifp, 1);
5865 }
5866 break;
5867
5868 case SIOCADDMULTI:
5869 case SIOCDELMULTI:
5870 sa = ifreq_getaddr(SIOCADDMULTI, (struct ifreq *)data);
5871 error = (cmd == SIOCADDMULTI) ?
5872 ether_addmulti(sa, &sc->sc_ec) :
5873 ether_delmulti(sa, &sc->sc_ec);
5874
5875 if (error == ENETRESET)
5876 error = 0;
5877 break;
5878
5879 default:
5880 error = ieee80211_ioctl(ic, cmd, data);
5881 }
5882
5883 if (error == ENETRESET) {
5884 error = 0;
5885 if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
5886 (IFF_UP | IFF_RUNNING)) {
5887 iwm_stop(ifp, 0);
5888 error = iwm_init(ifp);
5889 }
5890 }
5891
5892 splx(s);
5893 return error;
5894 }
5895
5896 /*
5897 * The interrupt side of things
5898 */
5899
5900 /*
5901 * error dumping routines are from iwlwifi/mvm/utils.c
5902 */
5903
5904 /*
5905 * Note: This structure is read from the device with IO accesses,
5906 * and the reading already does the endian conversion. As it is
5907 * read with uint32_t-sized accesses, any members with a different size
5908 * need to be ordered correctly though!
5909 */
/* Firmware error event table, resident in device SRAM; see iwm_nic_error(). */
struct iwm_error_event_table {
	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
	uint32_t error_id;	/* type of error */
	uint32_t pc;		/* program counter */
	uint32_t blink1;	/* branch link */
	uint32_t blink2;	/* branch link */
	uint32_t ilink1;	/* interrupt link */
	uint32_t ilink2;	/* interrupt link */
	uint32_t data1;		/* error-specific data */
	uint32_t data2;		/* error-specific data */
	uint32_t data3;		/* error-specific data */
	uint32_t bcon_time;	/* beacon timer */
	uint32_t tsf_low;	/* network timestamp function timer */
	uint32_t tsf_hi;	/* network timestamp function timer */
	uint32_t gp1;		/* GP1 timer register */
	uint32_t gp2;		/* GP2 timer register */
	uint32_t gp3;		/* GP3 timer register */
	uint32_t ucode_ver;	/* uCode version */
	uint32_t hw_ver;	/* HW Silicon version */
	uint32_t brd_ver;	/* HW board version */
	uint32_t log_pc;	/* log program counter */
	uint32_t frame_ptr;	/* frame pointer */
	uint32_t stack_ptr;	/* stack pointer */
	uint32_t hcmd;		/* last host command header */
	uint32_t isr0;		/* isr status register LMPM_NIC_ISR0:
				 * rxtx_flag */
	uint32_t isr1;		/* isr status register LMPM_NIC_ISR1:
				 * host_flag */
	uint32_t isr2;		/* isr status register LMPM_NIC_ISR2:
				 * enc_flag */
	uint32_t isr3;		/* isr status register LMPM_NIC_ISR3:
				 * time_flag */
	uint32_t isr4;		/* isr status register LMPM_NIC_ISR4:
				 * wico interrupt */
	uint32_t isr_pref;	/* isr status register LMPM_NIC_PREF_STAT */
	uint32_t wait_event;	/* wait event() caller address */
	uint32_t l2p_control;	/* L2pControlField */
	uint32_t l2p_duration;	/* L2pDurationField */
	uint32_t l2p_mhvalid;	/* L2pMhValidBits */
	uint32_t l2p_addr_match;	/* L2pAddrMatchStat */
	uint32_t lmpm_pmg_sel;	/* indicate which clocks are turned on
				 * (LMPM_PMG_SEL) */
	uint32_t u_timestamp;	/* indicate when the date and time of the
				 * compilation */
	uint32_t flow_handler;	/* FH read/write pointers, RX credit */
} __packed;
5956
/*
 * Layout of the firmware's error log in SRAM: a 32-bit "valid" word
 * followed by 7-word entries; used by iwm_nic_error() below.
 */
#define ERROR_START_OFFSET  (1 * sizeof(uint32_t))
#define ERROR_ELEM_SIZE     (7 * sizeof(uint32_t))
5959
5960 #ifdef IWM_DEBUG
/*
 * Known firmware error_id values and their symbolic names, consumed
 * by iwm_desc_lookup().  The final "ADVANCED_SYSASSERT" entry is the
 * catch-all for unknown codes and must remain last.
 */
static const struct {
	const char *name;
	uint8_t num;
} advanced_lookup[] = {
	{ "NMI_INTERRUPT_WDG", 0x34 },
	{ "SYSASSERT", 0x35 },
	{ "UCODE_VERSION_MISMATCH", 0x37 },
	{ "BAD_COMMAND", 0x38 },
	{ "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
	{ "FATAL_ERROR", 0x3D },
	{ "NMI_TRM_HW_ERR", 0x46 },
	{ "NMI_INTERRUPT_TRM", 0x4C },
	{ "NMI_INTERRUPT_BREAK_POINT", 0x54 },
	{ "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
	{ "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
	{ "NMI_INTERRUPT_HOST", 0x66 },
	{ "NMI_INTERRUPT_ACTION_PT", 0x7C },
	{ "NMI_INTERRUPT_UNKNOWN", 0x84 },
	{ "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
	{ "ADVANCED_SYSASSERT", 0 },
};
5982
5983 static const char *
5984 iwm_desc_lookup(uint32_t num)
5985 {
5986 int i;
5987
5988 for (i = 0; i < __arraycount(advanced_lookup) - 1; i++)
5989 if (advanced_lookup[i].num == num)
5990 return advanced_lookup[i].name;
5991
5992 /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
5993 return advanced_lookup[i].name;
5994 }
5995
5996 /*
5997 * Support for dumping the error log seemed like a good idea ...
5998 * but it's mostly hex junk and the only sensible thing is the
5999 * hw/ucode revision (which we know anyway). Since it's here,
6000 * I'll just leave it in, just in case e.g. the Intel guys want to
6001 * help us decipher some "ADVANCED_SYSASSERT" later.
6002 */
/*
 * Dump the firmware's error event table to the console.  The table
 * lives in device SRAM at the address reported by the ALIVE response
 * (sc_uc.uc_error_event_table).
 */
static void
iwm_nic_error(struct iwm_softc *sc)
{
	struct iwm_error_event_table table;
	uint32_t base;

	aprint_error_dev(sc->sc_dev, "dumping device error log\n");
	base = sc->sc_uc.uc_error_event_table;
	/* sanity-check that the pointer falls inside device SRAM */
	if (base < 0x800000 || base >= 0x80C000) {
		aprint_error_dev(sc->sc_dev,
		    "Not valid error log pointer 0x%08x\n", base);
		return;
	}

	/* iwm_read_mem() takes its length in 32-bit words */
	if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t)) != 0) {
		aprint_error_dev(sc->sc_dev, "reading errlog failed\n");
		return;
	}

	if (!table.valid) {
		aprint_error_dev(sc->sc_dev, "errlog not found, skipping\n");
		return;
	}

	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
		aprint_error_dev(sc->sc_dev, "Start IWL Error Log Dump:\n");
		aprint_error_dev(sc->sc_dev, "Status: 0x%x, count: %d\n",
		    sc->sc_flags, table.valid);
	}

	aprint_error_dev(sc->sc_dev, "%08X | %-28s\n", table.error_id,
	    iwm_desc_lookup(table.error_id));
	aprint_error_dev(sc->sc_dev, "%08X | uPc\n", table.pc);
	aprint_error_dev(sc->sc_dev, "%08X | branchlink1\n", table.blink1);
	aprint_error_dev(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
	aprint_error_dev(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
	aprint_error_dev(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
	aprint_error_dev(sc->sc_dev, "%08X | data1\n", table.data1);
	aprint_error_dev(sc->sc_dev, "%08X | data2\n", table.data2);
	aprint_error_dev(sc->sc_dev, "%08X | data3\n", table.data3);
	aprint_error_dev(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
	aprint_error_dev(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
	aprint_error_dev(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
	aprint_error_dev(sc->sc_dev, "%08X | time gp1\n", table.gp1);
	aprint_error_dev(sc->sc_dev, "%08X | time gp2\n", table.gp2);
	aprint_error_dev(sc->sc_dev, "%08X | time gp3\n", table.gp3);
	aprint_error_dev(sc->sc_dev, "%08X | uCode version\n", table.ucode_ver);
	aprint_error_dev(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
	aprint_error_dev(sc->sc_dev, "%08X | board version\n", table.brd_ver);
	aprint_error_dev(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
	aprint_error_dev(sc->sc_dev, "%08X | isr0\n", table.isr0);
	aprint_error_dev(sc->sc_dev, "%08X | isr1\n", table.isr1);
	aprint_error_dev(sc->sc_dev, "%08X | isr2\n", table.isr2);
	aprint_error_dev(sc->sc_dev, "%08X | isr3\n", table.isr3);
	aprint_error_dev(sc->sc_dev, "%08X | isr4\n", table.isr4);
	aprint_error_dev(sc->sc_dev, "%08X | isr_pref\n", table.isr_pref);
	aprint_error_dev(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
	aprint_error_dev(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
	aprint_error_dev(sc->sc_dev, "%08X | l2p_duration\n",
	    table.l2p_duration);
	aprint_error_dev(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
	aprint_error_dev(sc->sc_dev, "%08X | l2p_addr_match\n",
	    table.l2p_addr_match);
	aprint_error_dev(sc->sc_dev, "%08X | lmpm_pmg_sel\n",
	    table.lmpm_pmg_sel);
	aprint_error_dev(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
	aprint_error_dev(sc->sc_dev, "%08X | flow_handler\n",
	    table.flow_handler);
}
6072 #endif
6073
/*
 * Sync the part of the rx DMA map holding a fixed-size response that
 * immediately follows the rx packet header, and point _var_ at it.
 * Evaluates sc/data from the enclosing scope (iwm_notif_intr()).
 */
#define SYNC_RESP_STRUCT(_var_, _pkt_)					\
do {									\
	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
	    sizeof(*(_var_)), BUS_DMASYNC_POSTREAD);			\
	_var_ = (void *)((_pkt_)+1);					\
} while (/*CONSTCOND*/0)

/*
 * As above, but for a variable-length payload: sync _len_ bytes.
 * Fix: the previous version synced sizeof(len) -- the size of
 * whatever variable named "len" was in scope at the use site --
 * leaving the macro's _len_ argument entirely unused.
 */
#define SYNC_RESP_PTR(_ptr_, _len_, _pkt_)				\
do {									\
	bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)),	\
	    (_len_), BUS_DMASYNC_POSTREAD);				\
	_ptr_ = (void *)((_pkt_)+1);					\
} while (/*CONSTCOND*/0)

/* Advance the rx ring read index, wrapping at IWM_RX_RING_COUNT. */
#define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % IWM_RX_RING_COUNT);
6089
6090 /*
6091 * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
6092 * Basic structure from if_iwn
6093 */
/*
 * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
 * Basic structure from if_iwn.
 *
 * Walks the rx ring from our read index up to the firmware's write
 * index (closed_rb_num) and dispatches each notification/response,
 * then hands the processed buffers back to the firmware.
 */
static void
iwm_notif_intr(struct iwm_softc *sc)
{
	uint16_t hw;

	bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
	    0, sc->rxq.stat_dma.size, BUS_DMASYNC_POSTREAD);

	/* firmware's ring write index, modulo the 12-bit counter */
	hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
	while (sc->rxq.cur != hw) {
		struct iwm_rx_data *data = &sc->rxq.data[sc->rxq.cur];
		struct iwm_rx_packet *pkt;
		struct iwm_cmd_response *cresp;
		int qid, idx;

		/* sync just the header first; payloads are synced per-case */
		bus_dmamap_sync(sc->sc_dmat, data->map, 0, sizeof(*pkt),
		    BUS_DMASYNC_POSTREAD);
		pkt = mtod(data->m, struct iwm_rx_packet *);

		qid = pkt->hdr.qid & ~0x80;
		idx = pkt->hdr.idx;

		DPRINTFN(12, ("rx packet qid=%d idx=%d flags=%x type=%x %d %d\n",
		    pkt->hdr.qid & ~0x80, pkt->hdr.idx, pkt->hdr.flags,
		    pkt->hdr.code, sc->rxq.cur, hw));

		/*
		 * randomly get these from the firmware, no idea why.
		 * they at least seem harmless, so just ignore them for now
		 */
		if (__predict_false((pkt->hdr.code == 0 && qid == 0 && idx == 0)
		    || pkt->len_n_flags == htole32(0x55550000))) {
			ADVANCE_RXQ(sc);
			continue;
		}

		switch (pkt->hdr.code) {
		case IWM_REPLY_RX_PHY_CMD:
			iwm_mvm_rx_rx_phy_cmd(sc, pkt, data);
			break;

		case IWM_REPLY_RX_MPDU_CMD:
			iwm_mvm_rx_rx_mpdu(sc, pkt, data);
			break;

		case IWM_TX_CMD:
			iwm_mvm_rx_tx_cmd(sc, pkt, data);
			break;

		case IWM_MISSED_BEACONS_NOTIFICATION:
			iwm_mvm_rx_missed_beacons_notif(sc, pkt, data);
			break;

		/* firmware booted: record SRAM table pointers, wake waiter */
		case IWM_MVM_ALIVE: {
			struct iwm_mvm_alive_resp *resp;
			SYNC_RESP_STRUCT(resp, pkt);

			sc->sc_uc.uc_error_event_table
			    = le32toh(resp->error_event_table_ptr);
			sc->sc_uc.uc_log_event_table
			    = le32toh(resp->log_event_table_ptr);
			sc->sched_base = le32toh(resp->scd_base_ptr);
			sc->sc_uc.uc_ok = resp->status == IWM_ALIVE_STATUS_OK;

			sc->sc_uc.uc_intr = 1;
			wakeup(&sc->sc_uc);
			break; }

		case IWM_CALIB_RES_NOTIF_PHY_DB: {
			struct iwm_calib_res_notif_phy_db *phy_db_notif;
			SYNC_RESP_STRUCT(phy_db_notif, pkt);

			iwm_phy_db_set_section(sc, phy_db_notif);

			break; }

		case IWM_STATISTICS_NOTIFICATION: {
			struct iwm_notif_statistics *stats;
			SYNC_RESP_STRUCT(stats, pkt);
			memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
			sc->sc_noise = iwm_get_noise(&stats->rx.general);
			break; }

		/* copy the whole response for the synchronous cmd waiter */
		case IWM_NVM_ACCESS_CMD:
			if (sc->sc_wantresp == ((qid << 16) | idx)) {
				bus_dmamap_sync(sc->sc_dmat, data->map, 0,
				    sizeof(sc->sc_cmd_resp),
				    BUS_DMASYNC_POSTREAD);
				memcpy(sc->sc_cmd_resp,
				    pkt, sizeof(sc->sc_cmd_resp));
			}
			break;

		/* simple command responses: status word only */
		case IWM_PHY_CONFIGURATION_CMD:
		case IWM_TX_ANT_CONFIGURATION_CMD:
		case IWM_ADD_STA:
		case IWM_MAC_CONTEXT_CMD:
		case IWM_REPLY_SF_CFG_CMD:
		case IWM_POWER_TABLE_CMD:
		case IWM_PHY_CONTEXT_CMD:
		case IWM_BINDING_CONTEXT_CMD:
		case IWM_TIME_EVENT_CMD:
		case IWM_SCAN_REQUEST_CMD:
		case IWM_REPLY_BEACON_FILTERING_CMD:
		case IWM_MAC_PM_POWER_TABLE:
		case IWM_TIME_QUOTA_CMD:
		case IWM_REMOVE_STA:
		case IWM_TXPATH_FLUSH:
		case IWM_LQ_CMD:
			SYNC_RESP_STRUCT(cresp, pkt);
			if (sc->sc_wantresp == ((qid << 16) | idx)) {
				memcpy(sc->sc_cmd_resp,
				    pkt, sizeof(*pkt)+sizeof(*cresp));
			}
			break;

		/* ignore */
		case 0x6c: /* IWM_PHY_DB_CMD, no idea why it's not in fw-api.h */
			break;

		case IWM_INIT_COMPLETE_NOTIF:
			sc->sc_init_complete = 1;
			wakeup(&sc->sc_init_complete);
			break;

		/* defer scan-end handling to the eswq workqueue */
		case IWM_SCAN_COMPLETE_NOTIFICATION: {
			struct iwm_scan_complete_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);

			workqueue_enqueue(sc->sc_eswq, &sc->sc_eswk, NULL);
			break; }

		case IWM_REPLY_ERROR: {
			struct iwm_error_resp *resp;
			SYNC_RESP_STRUCT(resp, pkt);

			aprint_error_dev(sc->sc_dev,
			    "firmware error 0x%x, cmd 0x%x\n",
			    le32toh(resp->error_type), resp->cmd_id);
			break; }

		/* session-protection result; see iwm_auth() */
		case IWM_TIME_EVENT_NOTIFICATION: {
			struct iwm_time_event_notif *notif;
			SYNC_RESP_STRUCT(notif, pkt);

			if (notif->status) {
				if (le32toh(notif->action) &
				    IWM_TE_V2_NOTIF_HOST_EVENT_START)
					sc->sc_auth_prot = 2;
				else
					sc->sc_auth_prot = 0;
			} else {
				sc->sc_auth_prot = -1;
			}
			wakeup(&sc->sc_auth_prot);
			break; }

		default:
			aprint_error_dev(sc->sc_dev,
			    "frame %d/%d %x UNHANDLED (this should "
			    "not happen)\n", qid, idx, pkt->len_n_flags);
			break;
		}

		/*
		 * Why test bit 0x80? The Linux driver:
		 *
		 * There is one exception: uCode sets bit 15 when it
		 * originates the response/notification, i.e. when the
		 * response/notification is not a direct response to a
		 * command sent by the driver. For example, uCode issues
		 * IWM_REPLY_RX when it sends a received frame to the driver;
		 * it is not a direct response to any driver command.
		 *
		 * Ok, so since when is 7 == 15? Well, the Linux driver
		 * uses a slightly different format for pkt->hdr, and "qid"
		 * is actually the upper byte of a two-byte field.
		 */
		if (!(pkt->hdr.qid & (1 << 7))) {
			iwm_cmd_done(sc, pkt);
		}

		ADVANCE_RXQ(sc);
	}

	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/*
	 * Tell the firmware what we have processed.
	 * Seems like the hardware gets upset unless we align
	 * the write by 8??
	 */
	hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
	IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
}
6290
/*
 * Interrupt handler (established at IPL_NET in iwm_attach).
 *
 * Reads the interrupt cause either from the in-memory ICT table (when
 * IWM_FLAG_USE_ICT is set) or directly from the INT/FH_INT_STATUS CSRs,
 * acks it, and dispatches:
 *  - SW/HW error causes: log, mark interface down, stop the device;
 *  - FH_TX: a firmware chunk finished DMAing, wake the loader;
 *  - RF_KILL: kill switch toggled, down the interface if running;
 *  - FH_RX/SW_RX/periodic: process notifications via iwm_notif_intr().
 *
 * Returns 1 if the interrupt was ours, 0 if not (shared-line protocol).
 */
static int
iwm_intr(void *arg)
{
	struct iwm_softc *sc = arg;
	struct ifnet *ifp = IC2IFP(&sc->sc_ic);
	int handled = 0;
	int r1, r2, rv = 0;
	int isperiodic = 0;

	/* Mask all interrupts while we process this one. */
	IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);

	if (sc->sc_flags & IWM_FLAG_USE_ICT) {
		uint32_t *ict = sc->ict_dma.vaddr;
		int tmp;

		/*
		 * NOTE(review): ICT entries are device (little) endian, so
		 * this should arguably be le32toh(); the two are the same
		 * operation on every port, so behavior is unaffected.
		 */
		tmp = htole32(ict[sc->ict_cur]);
		if (!tmp)
			goto out_ena;	/* not our interrupt */

		/*
		 * ok, there was something.  keep plowing until we have all.
		 */
		r1 = r2 = 0;
		while (tmp) {
			r1 |= tmp;
			/* Consume the entry and advance the ICT read index. */
			ict[sc->ict_cur] = 0;
			sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
			tmp = htole32(ict[sc->ict_cur]);
		}

		/* this is where the fun begins.  don't ask */
		if (r1 == 0xffffffff)
			r1 = 0;

		/*
		 * i am not expected to understand this
		 * (expands the compressed ICT value back into the CSR_INT
		 * bit layout: low byte stays, high byte moves to bits 16-23,
		 * and 0xc0000 forces the SW_ERR-related bit 15 on.)
		 */
		if (r1 & 0xc0000)
			r1 |= 0x8000;
		r1 = (0xff & r1) | ((0xff00 & r1) << 16);
	} else {
		r1 = IWM_READ(sc, IWM_CSR_INT);
		/* "hardware gone" (where, fishing?) */
		if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
			goto out;
		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
	}
	if (r1 == 0 && r2 == 0) {
		goto out_ena;	/* nothing pending: not ours */
	}

	/* Ack the causes we are about to handle. */
	IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);

	/* ignored */
	handled |= (r1 & (IWM_CSR_INT_BIT_ALIVE /*| IWM_CSR_INT_BIT_SCD*/));

	if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
#ifdef IWM_DEBUG
		int i;

		iwm_nic_error(sc);

		/* Dump driver status (TX and RX rings) while we're here. */
		DPRINTF(("driver status:\n"));
		for (i = 0; i < IWM_MVM_MAX_QUEUES; i++) {
			struct iwm_tx_ring *ring = &sc->txq[i];
			DPRINTF(("  tx ring %2d: qid=%-2d cur=%-3d "
			    "queued=%-3d\n",
			    i, ring->qid, ring->cur, ring->queued));
		}
		DPRINTF(("  rx ring: cur=%d\n", sc->rxq.cur));
		DPRINTF(("  802.11 state %d\n", sc->sc_ic.ic_state));
#endif

		/* Firmware died: take the interface down. */
		aprint_error_dev(sc->sc_dev, "fatal firmware error\n");
		ifp->if_flags &= ~IFF_UP;
		iwm_stop(ifp, 1);
		rv = 1;
		goto out;

	}

	if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
		handled |= IWM_CSR_INT_BIT_HW_ERR;
		aprint_error_dev(sc->sc_dev,
		    "hardware error, stopping device\n");
		ifp->if_flags &= ~IFF_UP;
		iwm_stop(ifp, 1);
		rv = 1;
		goto out;
	}

	/* firmware chunk loaded */
	if (r1 & IWM_CSR_INT_BIT_FH_TX) {
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
		handled |= IWM_CSR_INT_BIT_FH_TX;

		/* Wake iwm_firmware_load_chunk()'s tsleep. */
		sc->sc_fw_chunk_done = 1;
		wakeup(&sc->sc_fw);
	}

	if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
		handled |= IWM_CSR_INT_BIT_RF_KILL;
		if (iwm_check_rfkill(sc) && (ifp->if_flags & IFF_UP)) {
			DPRINTF(("%s: rfkill switch, disabling interface\n",
			    DEVNAME(sc)));
			ifp->if_flags &= ~IFF_UP;
			iwm_stop(ifp, 1);
		}
	}

	/*
	 * The Linux driver uses periodic interrupts to avoid races.
	 * We cargo-cult like it's going out of fashion.
	 */
	if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
		handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
		IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
		/* Only disable periodic if no real RX cause is pending. */
		if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
			IWM_WRITE_1(sc,
			    IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
		isperiodic = 1;
	}

	if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
		handled |= (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX);
		IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);

		/* Drain the RX/notification ring. */
		iwm_notif_intr(sc);

		/* enable periodic interrupt, see above */
		if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
			IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
			    IWM_CSR_INT_PERIODIC_ENA);
	}

	if (__predict_false(r1 & ~handled))
		DPRINTF(("%s: unhandled interrupts: %x\n", DEVNAME(sc), r1));
	rv = 1;

 out_ena:
	/* Re-enable the interrupt mask we cleared on entry. */
	iwm_restore_interrupts(sc);
 out:
	return rv;
}
6434
6435 /*
6436 * Autoconf glue-sniffing
6437 */
6438
/*
 * PCI product IDs this driver attaches to.  0x08b1 is the Intel
 * Wireless 7260 -- presumably the symbolic PCI_PRODUCT_INTEL_WL_7260_*
 * defines disabled below were not yet in pcidevs when this was written;
 * TODO(review): switch to them once available.
 */
static const pci_product_id_t iwm_devices[] = {
	0x08b1,
#if 0
	PCI_PRODUCT_INTEL_WL_7260_1,
	PCI_PRODUCT_INTEL_WL_7260_2,
#endif
};
6446
6447 static int
6448 iwm_match(device_t parent, cfdata_t match __unused, void *aux)
6449 {
6450 struct pci_attach_args *pa = aux;
6451 size_t i;
6452
6453 if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_INTEL)
6454 return 0;
6455
6456 for (i = 0; i < __arraycount(iwm_devices); i++)
6457 if (PCI_PRODUCT(pa->pa_id) == iwm_devices[i])
6458 return 1;
6459
6460 return 0;
6461 }
6462
6463 static int
6464 iwm_preinit(struct iwm_softc *sc)
6465 {
6466 int error;
6467
6468 if ((error = iwm_prepare_card_hw(sc)) != 0) {
6469 aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
6470 return error;
6471 }
6472
6473 if (sc->sc_flags & IWM_FLAG_ATTACHED)
6474 return 0;
6475
6476 if ((error = iwm_start_hw(sc)) != 0) {
6477 aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
6478 return error;
6479 }
6480
6481 error = iwm_run_init_mvm_ucode(sc, 1);
6482 iwm_stop_device(sc);
6483 return error;
6484 }
6485
6486 static void
6487 iwm_attach_hook(device_t dev)
6488 {
6489 struct iwm_softc *sc = device_private(dev);
6490 struct ieee80211com *ic = &sc->sc_ic;
6491 struct ifnet *ifp = &sc->sc_ec.ec_if;
6492
6493 KASSERT(!cold);
6494
6495 sc->sc_wantresp = -1;
6496
6497 if (iwm_preinit(sc) != 0)
6498 return;
6499
6500 sc->sc_flags |= IWM_FLAG_ATTACHED;
6501
6502 ic->ic_ifp = ifp;
6503 aprint_normal_dev(sc->sc_dev,
6504 "hw rev: 0x%x, fw ver %d.%d (API ver %d), address %s\n",
6505 sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
6506 IWM_UCODE_MAJOR(sc->sc_fwver),
6507 IWM_UCODE_MINOR(sc->sc_fwver),
6508 IWM_UCODE_API(sc->sc_fwver),
6509 ether_sprintf(sc->sc_nvm.hw_addr));
6510
6511 ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */
6512 ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */
6513 ic->ic_state = IEEE80211_S_INIT;
6514
6515 /* Set device capabilities. */
6516 ic->ic_caps =
6517 IEEE80211_C_WEP | /* WEP */
6518 IEEE80211_C_WPA | /* 802.11i */
6519 IEEE80211_C_SHSLOT | /* short slot time supported */
6520 IEEE80211_C_SHPREAMBLE; /* short preamble supported */
6521
6522 #ifndef IWM_NO_5GHZ
6523 ic->ic_sup_rates[IEEE80211_MODE_11A] = ieee80211_std_rateset_11a;
6524 #endif
6525 ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b;
6526 ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g;
6527
6528 for (int i = 0; i < __arraycount(sc->sc_phyctxt); i++) {
6529 sc->sc_phyctxt[i].id = i;
6530 }
6531
6532 sc->sc_amrr.amrr_min_success_threshold = 1;
6533 sc->sc_amrr.amrr_max_success_threshold = 15;
6534
6535 /* IBSS channel undefined for now. */
6536 ic->ic_ibss_chan = &ic->ic_channels[1];
6537
6538 #if 0
6539 /* Max RSSI */
6540 ic->ic_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
6541 #endif
6542
6543 ifp->if_softc = sc;
6544 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
6545 ifp->if_init = iwm_init;
6546 ifp->if_stop = iwm_stop;
6547 ifp->if_ioctl = iwm_ioctl;
6548 ifp->if_start = iwm_start;
6549 ifp->if_watchdog = iwm_watchdog;
6550 IFQ_SET_READY(&ifp->if_snd);
6551 memcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
6552
6553 if_attach(ifp);
6554 ic->ic_debug = 0;
6555 ieee80211_ifattach(ic);
6556
6557 ic->ic_node_alloc = iwm_node_alloc;
6558
6559 /* Override 802.11 state transition machine. */
6560 sc->sc_newstate = ic->ic_newstate;
6561 ic->ic_newstate = iwm_newstate;
6562 ieee80211_media_init(ic, iwm_media_change, ieee80211_media_status);
6563 ieee80211_announce(ic);
6564
6565 iwm_radiotap_attach(sc);
6566 callout_init(&sc->sc_calib_to, 0);
6567 callout_setfunc(&sc->sc_calib_to, iwm_calib_timeout, sc);
6568 iwm_radiotap_attach(sc);
6569
6570 //task_set(&sc->init_task, iwm_init_task, sc);
6571 }
6572
6573 static void
6574 iwm_attach(device_t parent, device_t self, void *aux)
6575 {
6576 struct iwm_softc *sc = device_private(self);
6577 struct pci_attach_args *pa = aux;
6578 pci_intr_handle_t ih;
6579 pcireg_t reg, memtype;
6580 const char *intrstr;
6581 int error;
6582 int txq_i;
6583
6584 sc->sc_dev = self;
6585 sc->sc_pct = pa->pa_pc;
6586 sc->sc_pcitag = pa->pa_tag;
6587 sc->sc_dmat = pa->pa_dmat;
6588
6589 pci_aprint_devinfo(pa, NULL);
6590
6591 /*
6592 * Get the offset of the PCI Express Capability Structure in PCI
6593 * Configuration Space.
6594 */
6595 error = pci_get_capability(sc->sc_pct, sc->sc_pcitag,
6596 PCI_CAP_PCIEXPRESS, &sc->sc_cap_off, NULL);
6597 if (error == 0) {
6598 aprint_error_dev(self,
6599 "PCIe capability structure not found!\n");
6600 return;
6601 }
6602
6603 /* Clear device-specific "PCI retry timeout" register (41h). */
6604 reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
6605 pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
6606
6607 /* Enable bus-mastering and hardware bug workaround. */
6608 reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
6609 reg |= PCI_COMMAND_MASTER_ENABLE;
6610 /* if !MSI */
6611 if (reg & PCI_COMMAND_INTERRUPT_DISABLE) {
6612 reg &= ~PCI_COMMAND_INTERRUPT_DISABLE;
6613 }
6614 pci_conf_write(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, reg);
6615
6616 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START);
6617 error = pci_mapreg_map(pa, PCI_MAPREG_START, memtype, 0,
6618 &sc->sc_st, &sc->sc_sh, NULL, &sc->sc_sz);
6619 if (error != 0) {
6620 aprint_error_dev(self, "can't map mem space\n");
6621 return;
6622 }
6623
6624 /* Install interrupt handler. */
6625 if (pci_intr_map(pa, &ih)) {
6626 aprint_error_dev(self, "can't map interrupt\n");
6627 return;
6628 }
6629
6630 char intrbuf[PCI_INTRSTR_LEN];
6631 intrstr = pci_intr_string(sc->sc_pct, ih, intrbuf, sizeof(intrbuf));
6632 sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET, iwm_intr, sc);
6633 if (sc->sc_ih == NULL) {
6634 aprint_error_dev(self, "can't establish interrupt");
6635 if (intrstr != NULL)
6636 aprint_error(" at %s", intrstr);
6637 aprint_error("\n");
6638 return;
6639 }
6640 aprint_normal_dev(self, "interrupting at %s\n", intrstr);
6641
6642 /* only one firmware possibility for now */
6643 sc->sc_fwname = IWM_FWNAME;
6644 sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
6645
6646 /*
6647 * We now start fiddling with the hardware
6648 */
6649
6650 sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
6651 if (iwm_prepare_card_hw(sc) != 0) {
6652 aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
6653 return;
6654 }
6655
6656 /* Allocate DMA memory for firmware transfers. */
6657 if ((error = iwm_alloc_fwmem(sc)) != 0) {
6658 aprint_error_dev(sc->sc_dev,
6659 "could not allocate memory for firmware\n");
6660 return;
6661 }
6662
6663 /* Allocate "Keep Warm" page. */
6664 if ((error = iwm_alloc_kw(sc)) != 0) {
6665 aprint_error_dev(sc->sc_dev,
6666 "could not allocate keep warm page\n");
6667 goto fail1;
6668 }
6669
6670 /* We use ICT interrupts */
6671 if ((error = iwm_alloc_ict(sc)) != 0) {
6672 aprint_error_dev(sc->sc_dev, "could not allocate ICT table\n");
6673 goto fail2;
6674 }
6675
6676 /* Allocate TX scheduler "rings". */
6677 if ((error = iwm_alloc_sched(sc)) != 0) {
6678 aprint_error_dev(sc->sc_dev,
6679 "could not allocate TX scheduler rings\n");
6680 goto fail3;
6681 }
6682
6683 /* Allocate TX rings */
6684 for (txq_i = 0; txq_i < __arraycount(sc->txq); txq_i++) {
6685 if ((error = iwm_alloc_tx_ring(sc,
6686 &sc->txq[txq_i], txq_i)) != 0) {
6687 aprint_error_dev(sc->sc_dev,
6688 "could not allocate TX ring %d\n", txq_i);
6689 goto fail4;
6690 }
6691 }
6692
6693 /* Allocate RX ring. */
6694 if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
6695 aprint_error_dev(sc->sc_dev, "could not allocate RX ring\n");
6696 goto fail4;
6697 }
6698
6699 workqueue_create(&sc->sc_eswq, "iwmes",
6700 (void *)iwm_endscan_cb, sc, PRI_NONE, IPL_NET, 0);
6701 workqueue_create(&sc->sc_nswq, "iwmns",
6702 (void *)iwm_newstate_cb, sc, PRI_NONE, IPL_NET, 0);
6703
6704 /* Clear pending interrupts. */
6705 IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
6706
6707 /*
6708 * We can't do normal attach before the file system is mounted
6709 * because we cannot read the MAC address without loading the
6710 * firmware from disk. So we postpone until mountroot is done.
6711 * Notably, this will require a full driver unload/load cycle
6712 * (or reboot) in case the firmware is not present when the
6713 * hook runs.
6714 */
6715 config_mountroot(self, iwm_attach_hook);
6716
6717 return;
6718
6719 /* Free allocated memory if something failed during attachment. */
6720 fail4: while (--txq_i >= 0)
6721 iwm_free_tx_ring(sc, &sc->txq[txq_i]);
6722 iwm_free_sched(sc);
6723 fail3: if (sc->ict_dma.vaddr != NULL)
6724 iwm_free_ict(sc);
6725 fail2: iwm_free_kw(sc);
6726 fail1: iwm_free_fwmem(sc);
6727 }
6728
6729 /*
6730 * Attach the interface to 802.11 radiotap.
6731 */
6732 void
6733 iwm_radiotap_attach(struct iwm_softc *sc)
6734 {
6735 struct ifnet *ifp = sc->sc_ic.ic_ifp;
6736
6737 bpf_attach2(ifp, DLT_IEEE802_11_RADIO,
6738 sizeof (struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN,
6739 &sc->sc_drvbpf);
6740
6741 sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
6742 sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
6743 sc->sc_rxtap.wr_ihdr.it_present = htole32(IWM_RX_RADIOTAP_PRESENT);
6744
6745 sc->sc_txtap_len = sizeof sc->sc_txtapu;
6746 sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
6747 sc->sc_txtap.wt_ihdr.it_present = htole32(IWM_TX_RADIOTAP_PRESENT);
6748 }
6749
#if 0
/*
 * NOTE(review): this whole region is compiled out.  It holds a
 * suspend/resume + devact skeleton (init task, wakeup, activate) that
 * was presumably carried over from the OpenBSD driver but never wired
 * up here -- the CFATTACH below passes NULL for detach/activate and
 * the init_task task_set() call in iwm_attach_hook() is commented out.
 */

/*
 * Restart the interface: stop it, and re-init it if it was marked UP
 * but not RUNNING.  Serializes against other users of IWM_FLAG_BUSY.
 */
static void
iwm_init_task(void *arg1)
{
	struct iwm_softc *sc = arg1;
	struct ifnet *ifp = &sc->sc_ic.ic_if;
	int s;

	s = splnet();
	/* Wait for any other init/stop in progress to finish. */
	while (sc->sc_flags & IWM_FLAG_BUSY)
		tsleep(&sc->sc_flags, 0, "iwmpwr", 0);
	sc->sc_flags |= IWM_FLAG_BUSY;

	iwm_stop(ifp, 0);
	if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP)
		iwm_init(ifp);

	sc->sc_flags &= ~IWM_FLAG_BUSY;
	wakeup(&sc->sc_flags);
	splx(s);
}

/*
 * Resume handler: redo the PCI retry-timeout workaround (config space
 * is lost across suspend) and restart the interface.
 */
static void
iwm_wakeup(struct iwm_softc *sc)
{
	pcireg_t reg;

	/* Clear device-specific "PCI retry timeout" register (41h). */
	reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
	pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);

	iwm_init_task(sc);
}

/*
 * Autoconf activation hook: on deactivation, stop the interface if it
 * is running.  All other actions are unsupported.
 */
static int
iwm_activate(device_t self, enum devact act)
{
	struct iwm_softc *sc = device_private(self);
	struct ifnet *ifp = IC2IFP(&sc->sc_ic);

	switch (act) {
	case DVACT_DEACTIVATE:
		if (ifp->if_flags & IFF_RUNNING)
			iwm_stop(ifp, 0);
		return 0;
	default:
		return EOPNOTSUPP;
	}
}
#endif
6800
/*
 * Autoconf attachment glue: match/attach only; no detach or activate
 * entry points are provided yet (see the disabled iwm_activate above).
 */
CFATTACH_DECL_NEW(iwm, sizeof(struct iwm_softc), iwm_match, iwm_attach,
	NULL, NULL);