/*	$NetBSD: if_iwm.c,v 1.75.2.2 2017/07/25 19:43:03 snj Exp $	*/
/*	OpenBSD: if_iwm.c,v 1.148 2016/11/19 21:07:08 stsp Exp	*/
#define IEEE80211_NO_HT
/*
 * Copyright (c) 2014, 2016 genua gmbh <info@genua.de>
 *   Author: Stefan Sperling <stsp@openbsd.org>
 * Copyright (c) 2014 Fixup Software Ltd.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*-
 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
 * which were used as the reference documentation for this implementation.
 *
 ***********************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_iwm.c,v 1.75.2.2 2017/07/25 19:43:03 snj Exp $");

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <sys/cpu.h>
#include <sys/bus.h>
#include <sys/workqueue.h>
#include <machine/endian.h>
#include <sys/intr.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>
#include <dev/firmload.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <netinet/in.h>
#include <netinet/ip.h>

#include <net80211/ieee80211_var.h>
#include <net80211/ieee80211_amrr.h>
#include <net80211/ieee80211_radiotap.h>

#define DEVNAME(_s)	device_xname((_s)->sc_dev)
#define IC2IFP(_ic_)	((_ic_)->ic_ifp)

#define le16_to_cpup(_a_)	(le16toh(*(const uint16_t *)(_a_)))
#define le32_to_cpup(_a_)	(le32toh(*(const uint32_t *)(_a_)))

#ifdef IWM_DEBUG
#define DPRINTF(x)	do { if (iwm_debug > 0) printf x; } while (0)
#define DPRINTFN(n, x)	do { if (iwm_debug >= (n)) printf x; } while (0)
int iwm_debug = 0;
#else
#define DPRINTF(x)	do { ; } while (0)
#define DPRINTFN(n, x)	do { ; } while (0)
#endif

#include <dev/pci/if_iwmreg.h>
#include <dev/pci/if_iwmvar.h>

static const uint8_t iwm_nvm_channels[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64,
	100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165
};

static const uint8_t iwm_nvm_channels_8000[] = {
	/* 2.4 GHz */
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
	/* 5 GHz */
	36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
	96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
	149, 153, 157, 161, 165, 169, 173, 177, 181
};

#define IWM_NUM_2GHZ_CHANNELS	14

static const struct iwm_rate {
	uint8_t rate;
	uint8_t plcp;
	uint8_t ht_plcp;
} iwm_rates[] = {
		/* Legacy */			/* HT */
	{   2,	IWM_RATE_1M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{   4,	IWM_RATE_2M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{  11,	IWM_RATE_5M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{  22,	IWM_RATE_11M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{  12,	IWM_RATE_6M_PLCP,	IWM_RATE_HT_SISO_MCS_0_PLCP },
	{  18,	IWM_RATE_9M_PLCP,	IWM_RATE_HT_SISO_MCS_INV_PLCP },
	{  24,	IWM_RATE_12M_PLCP,	IWM_RATE_HT_SISO_MCS_1_PLCP },
	{  36,	IWM_RATE_18M_PLCP,	IWM_RATE_HT_SISO_MCS_2_PLCP },
	{  48,	IWM_RATE_24M_PLCP,	IWM_RATE_HT_SISO_MCS_3_PLCP },
	{  72,	IWM_RATE_36M_PLCP,	IWM_RATE_HT_SISO_MCS_4_PLCP },
	{  96,	IWM_RATE_48M_PLCP,	IWM_RATE_HT_SISO_MCS_5_PLCP },
	{ 108,	IWM_RATE_54M_PLCP,	IWM_RATE_HT_SISO_MCS_6_PLCP },
	{ 128,	IWM_RATE_INVM_PLCP,	IWM_RATE_HT_SISO_MCS_7_PLCP },
};
#define IWM_RIDX_CCK	0
#define IWM_RIDX_OFDM	4
#define IWM_RIDX_MAX	(__arraycount(iwm_rates)-1)
#define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)

#ifndef IEEE80211_NO_HT
/* Convert an MCS index into an iwm_rates[] index. */
static const int iwm_mcs2ridx[] = {
	IWM_RATE_MCS_0_INDEX,
	IWM_RATE_MCS_1_INDEX,
	IWM_RATE_MCS_2_INDEX,
	IWM_RATE_MCS_3_INDEX,
	IWM_RATE_MCS_4_INDEX,
	IWM_RATE_MCS_5_INDEX,
	IWM_RATE_MCS_6_INDEX,
	IWM_RATE_MCS_7_INDEX,
};
#endif

struct iwm_nvm_section {
	uint16_t length;
	uint8_t *data;
};

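/*
 * Argument block used to defer an ieee80211 state change to the
 * workqueue; filled in by iwm_newstate() and consumed by
 * iwm_newstate_cb().
 */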
struct iwm_newstate_state {
	struct work ns_wk;
	enum ieee80211_state ns_nstate;
	int ns_arg;
	int ns_generation;
};

static int	iwm_store_cscheme(struct iwm_softc *, uint8_t *, size_t);
static int	iwm_firmware_store_section(struct iwm_softc *,
		    enum iwm_ucode_type, uint8_t *, size_t);
static int	iwm_set_default_calib(struct iwm_softc *, const void *);
static int	iwm_read_firmware(struct iwm_softc *, enum iwm_ucode_type);
static uint32_t iwm_read_prph(struct iwm_softc *, uint32_t);
static void	iwm_write_prph(struct iwm_softc *, uint32_t, uint32_t);
#ifdef IWM_DEBUG
static int	iwm_read_mem(struct iwm_softc *, uint32_t, void *, int);
#endif
static int	iwm_write_mem(struct iwm_softc *, uint32_t, const void *, int);
static int	iwm_write_mem32(struct iwm_softc *, uint32_t, uint32_t);
static int	iwm_poll_bit(struct iwm_softc *, int, uint32_t, uint32_t, int);
static int	iwm_nic_lock(struct iwm_softc *);
static void	iwm_nic_unlock(struct iwm_softc *);
static void	iwm_set_bits_mask_prph(struct iwm_softc *, uint32_t, uint32_t,
		    uint32_t);
static void	iwm_set_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
static void	iwm_clear_bits_prph(struct iwm_softc *, uint32_t, uint32_t);
static int	iwm_dma_contig_alloc(bus_dma_tag_t, struct iwm_dma_info *,
		    bus_size_t, bus_size_t);
static void	iwm_dma_contig_free(struct iwm_dma_info *);
static int	iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static void	iwm_disable_rx_dma(struct iwm_softc *);
static void	iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static void	iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
static int	iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
		    int);
static void	iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
static void	iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
static void	iwm_enable_rfkill_int(struct iwm_softc *);
static int	iwm_check_rfkill(struct iwm_softc *);
static void	iwm_enable_interrupts(struct iwm_softc *);
static void	iwm_restore_interrupts(struct iwm_softc *);
static void	iwm_disable_interrupts(struct iwm_softc *);
static void	iwm_ict_reset(struct iwm_softc *);
static int	iwm_set_hw_ready(struct iwm_softc *);
static int	iwm_prepare_card_hw(struct iwm_softc *);
static void	iwm_apm_config(struct iwm_softc *);
static int	iwm_apm_init(struct iwm_softc *);
static void	iwm_apm_stop(struct iwm_softc *);
static int	iwm_allow_mcast(struct iwm_softc *);
static int	iwm_start_hw(struct iwm_softc *);
static void	iwm_stop_device(struct iwm_softc *);
static void	iwm_nic_config(struct iwm_softc *);
static int	iwm_nic_rx_init(struct iwm_softc *);
static int	iwm_nic_tx_init(struct iwm_softc *);
static int	iwm_nic_init(struct iwm_softc *);
static int	iwm_enable_txq(struct iwm_softc *, int, int, int);
static int	iwm_post_alive(struct iwm_softc *);
static struct iwm_phy_db_entry *
		iwm_phy_db_get_section(struct iwm_softc *,
		    enum iwm_phy_db_section_type, uint16_t);
static int	iwm_phy_db_set_section(struct iwm_softc *,
		    struct iwm_calib_res_notif_phy_db *, uint16_t);
static int	iwm_is_valid_channel(uint16_t);
static uint8_t	iwm_ch_id_to_ch_index(uint16_t);
static uint16_t iwm_channel_id_to_papd(uint16_t);
static uint16_t iwm_channel_id_to_txp(struct iwm_softc *, uint16_t);
static int	iwm_phy_db_get_section_data(struct iwm_softc *, uint32_t,
		    uint8_t **, uint16_t *, uint16_t);
static int	iwm_send_phy_db_cmd(struct iwm_softc *, uint16_t, uint16_t,
		    void *);
static int	iwm_phy_db_send_all_channel_groups(struct iwm_softc *,
		    enum iwm_phy_db_section_type, uint8_t);
static int	iwm_send_phy_db_data(struct iwm_softc *);
static void	iwm_te_v2_to_v1(const struct iwm_time_event_cmd_v2 *,
		    struct iwm_time_event_cmd_v1 *);
static int	iwm_send_time_event_cmd(struct iwm_softc *,
		    const struct iwm_time_event_cmd_v2 *);
static void	iwm_protect_session(struct iwm_softc *, struct iwm_node *,
		    uint32_t, uint32_t);
static int	iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
		    uint16_t, uint8_t *, uint16_t *);
static int	iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
		    uint16_t *, size_t);
static void	iwm_init_channel_map(struct iwm_softc *, const uint16_t * const,
		    const uint8_t *, size_t);
#ifndef IEEE80211_NO_HT
static void	iwm_setup_ht_rates(struct iwm_softc *);
static void	iwm_htprot_task(void *);
static void	iwm_update_htprot(struct ieee80211com *,
		    struct ieee80211_node *);
static int	iwm_ampdu_rx_start(struct ieee80211com *,
		    struct ieee80211_node *, uint8_t);
static void	iwm_ampdu_rx_stop(struct ieee80211com *,
		    struct ieee80211_node *, uint8_t);
static void	iwm_sta_rx_agg(struct iwm_softc *, struct ieee80211_node *,
		    uint8_t, uint16_t, int);
#ifdef notyet
static int	iwm_ampdu_tx_start(struct ieee80211com *,
		    struct ieee80211_node *, uint8_t);
static void	iwm_ampdu_tx_stop(struct ieee80211com *,
		    struct ieee80211_node *, uint8_t);
#endif
static void	iwm_ba_task(void *);
#endif

static int	iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
		    const uint16_t *, const uint16_t *, const uint16_t *,
		    const uint16_t *, const uint16_t *);
static void	iwm_set_hw_address_8000(struct iwm_softc *,
		    struct iwm_nvm_data *, const uint16_t *, const uint16_t *);
static int	iwm_parse_nvm_sections(struct iwm_softc *,
		    struct iwm_nvm_section *);
static int	iwm_nvm_init(struct iwm_softc *);
static int	iwm_firmware_load_sect(struct iwm_softc *, uint32_t,
		    const uint8_t *, uint32_t);
static int	iwm_firmware_load_chunk(struct iwm_softc *, uint32_t,
		    const uint8_t *, uint32_t);
static int	iwm_load_cpu_sections_7000(struct iwm_softc *,
		    struct iwm_fw_sects *, int, int *);
static int	iwm_load_firmware_7000(struct iwm_softc *, enum iwm_ucode_type);
static int	iwm_load_cpu_sections_8000(struct iwm_softc *,
		    struct iwm_fw_sects *, int, int *);
static int	iwm_load_firmware_8000(struct iwm_softc *, enum iwm_ucode_type);
static int	iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
static int	iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
static int	iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
static int	iwm_send_phy_cfg_cmd(struct iwm_softc *);
static int	iwm_load_ucode_wait_alive(struct iwm_softc *,
		    enum iwm_ucode_type);
static int	iwm_run_init_mvm_ucode(struct iwm_softc *, int);
static int	iwm_rx_addbuf(struct iwm_softc *, int, int);
static int	iwm_calc_rssi(struct iwm_softc *, struct iwm_rx_phy_info *);
static int	iwm_get_signal_strength(struct iwm_softc *,
		    struct iwm_rx_phy_info *);
static void	iwm_rx_rx_phy_cmd(struct iwm_softc *,
		    struct iwm_rx_packet *, struct iwm_rx_data *);
static int	iwm_get_noise(const struct iwm_statistics_rx_non_phy *);
static void	iwm_rx_rx_mpdu(struct iwm_softc *, struct iwm_rx_packet *,
		    struct iwm_rx_data *);
static void	iwm_rx_tx_cmd_single(struct iwm_softc *, struct iwm_rx_packet *, struct iwm_node *);
static void	iwm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *,
		    struct iwm_rx_data *);
static int	iwm_binding_cmd(struct iwm_softc *, struct iwm_node *,
		    uint32_t);
#if 0
static int	iwm_binding_update(struct iwm_softc *, struct iwm_node *, int);
static int	iwm_binding_add_vif(struct iwm_softc *, struct iwm_node *);
#endif
static void	iwm_phy_ctxt_cmd_hdr(struct iwm_softc *, struct iwm_phy_ctxt *,
		    struct iwm_phy_context_cmd *, uint32_t, uint32_t);
static void	iwm_phy_ctxt_cmd_data(struct iwm_softc *,
		    struct iwm_phy_context_cmd *, struct ieee80211_channel *,
		    uint8_t, uint8_t);
static int	iwm_phy_ctxt_cmd(struct iwm_softc *, struct iwm_phy_ctxt *,
		    uint8_t, uint8_t, uint32_t, uint32_t);
static int	iwm_send_cmd(struct iwm_softc *, struct iwm_host_cmd *);
static int	iwm_send_cmd_pdu(struct iwm_softc *, uint32_t, uint32_t,
		    uint16_t, const void *);
static int	iwm_send_cmd_status(struct iwm_softc *, struct iwm_host_cmd *,
		    uint32_t *);
static int	iwm_send_cmd_pdu_status(struct iwm_softc *, uint32_t, uint16_t,
		    const void *, uint32_t *);
static void	iwm_free_resp(struct iwm_softc *, struct iwm_host_cmd *);
static void	iwm_cmd_done(struct iwm_softc *, int qid, int idx);
#if 0
static void	iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
		    uint16_t);
#endif
static const struct iwm_rate *
		iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
		    struct ieee80211_frame *, struct iwm_tx_cmd *);
static int	iwm_tx(struct iwm_softc *, struct mbuf *,
		    struct ieee80211_node *, int);
static void	iwm_led_enable(struct iwm_softc *);
static void	iwm_led_disable(struct iwm_softc *);
static int	iwm_led_is_enabled(struct iwm_softc *);
static void	iwm_led_blink_timeout(void *);
static void	iwm_led_blink_start(struct iwm_softc *);
static void	iwm_led_blink_stop(struct iwm_softc *);
static int	iwm_beacon_filter_send_cmd(struct iwm_softc *,
		    struct iwm_beacon_filter_cmd *);
static void	iwm_beacon_filter_set_cqm_params(struct iwm_softc *,
		    struct iwm_node *, struct iwm_beacon_filter_cmd *);
static int	iwm_update_beacon_abort(struct iwm_softc *, struct iwm_node *,
		    int);
static void	iwm_power_build_cmd(struct iwm_softc *, struct iwm_node *,
		    struct iwm_mac_power_cmd *);
static int	iwm_power_mac_update_mode(struct iwm_softc *,
		    struct iwm_node *);
static int	iwm_power_update_device(struct iwm_softc *);
#ifdef notyet
static int	iwm_enable_beacon_filter(struct iwm_softc *, struct iwm_node *);
#endif
static int	iwm_disable_beacon_filter(struct iwm_softc *);
static int	iwm_add_sta_cmd(struct iwm_softc *, struct iwm_node *, int);
static int	iwm_add_aux_sta(struct iwm_softc *);
static uint16_t iwm_scan_rx_chain(struct iwm_softc *);
static uint32_t iwm_scan_rate_n_flags(struct iwm_softc *, int, int);
#ifdef notyet
static uint16_t iwm_get_active_dwell(struct iwm_softc *, int, int);
static uint16_t iwm_get_passive_dwell(struct iwm_softc *, int);
#endif
static uint8_t	iwm_lmac_scan_fill_channels(struct iwm_softc *,
		    struct iwm_scan_channel_cfg_lmac *, int);
static int	iwm_fill_probe_req(struct iwm_softc *,
		    struct iwm_scan_probe_req *);
static int	iwm_lmac_scan(struct iwm_softc *);
static int	iwm_config_umac_scan(struct iwm_softc *);
static int	iwm_umac_scan(struct iwm_softc *);
static uint8_t	iwm_ridx2rate(struct ieee80211_rateset *, int);
static void	iwm_ack_rates(struct iwm_softc *, struct iwm_node *, int *,
		    int *);
static void	iwm_mac_ctxt_cmd_common(struct iwm_softc *, struct iwm_node *,
		    struct iwm_mac_ctx_cmd *, uint32_t, int);
static void	iwm_mac_ctxt_cmd_fill_sta(struct iwm_softc *, struct iwm_node *,
		    struct iwm_mac_data_sta *, int);
static int	iwm_mac_ctxt_cmd(struct iwm_softc *, struct iwm_node *,
		    uint32_t, int);
static int	iwm_update_quotas(struct iwm_softc *, struct iwm_node *);
static int	iwm_auth(struct iwm_softc *);
static int	iwm_assoc(struct iwm_softc *);
static void	iwm_calib_timeout(void *);
#ifndef IEEE80211_NO_HT
static void	iwm_setrates_task(void *);
static int	iwm_setrates(struct iwm_node *);
#endif
static int	iwm_media_change(struct ifnet *);
static int	iwm_do_newstate(struct ieee80211com *, enum ieee80211_state,
		    int);
static void	iwm_newstate_cb(struct work *, void *);
static int	iwm_newstate(struct ieee80211com *, enum ieee80211_state, int);
static void	iwm_endscan(struct iwm_softc *);
static void	iwm_fill_sf_command(struct iwm_softc *, struct iwm_sf_cfg_cmd *,
		    struct ieee80211_node *);
static int	iwm_sf_config(struct iwm_softc *, int);
static int	iwm_send_bt_init_conf(struct iwm_softc *);
static int	iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
static void	iwm_tt_tx_backoff(struct iwm_softc *, uint32_t);
static int	iwm_init_hw(struct iwm_softc *);
static int	iwm_init(struct ifnet *);
static void	iwm_start(struct ifnet *);
static void	iwm_stop(struct ifnet *, int);
static void	iwm_watchdog(struct ifnet *);
static int	iwm_ioctl(struct ifnet *, u_long, void *);
#ifdef IWM_DEBUG
static const char *iwm_desc_lookup(uint32_t);
static void	iwm_nic_error(struct iwm_softc *);
static void	iwm_nic_umac_error(struct iwm_softc *);
#endif
static void	iwm_notif_intr(struct iwm_softc *);
static int	iwm_intr(void *);
static void	iwm_softintr(void *);
static int	iwm_preinit(struct iwm_softc *);
static void	iwm_attach_hook(device_t);
static void	iwm_attach(device_t, device_t, void *);
#if 0
static void	iwm_init_task(void *);
static int	iwm_activate(device_t, enum devact);
static void	iwm_wakeup(struct iwm_softc *);
#endif
static void	iwm_radiotap_attach(struct iwm_softc *);
static int	iwm_sysctl_fw_loaded_handler(SYSCTLFN_PROTO);

/* XXX needed by iwm scan code */
static u_int8_t *ieee80211_add_ssid(u_int8_t *, const u_int8_t *, u_int);
static u_int8_t *ieee80211_add_rates(u_int8_t *,
		    const struct ieee80211_rateset *);
static u_int8_t *ieee80211_add_xrates(u_int8_t *,
		    const struct ieee80211_rateset *);

static int iwm_sysctl_root_num;
static int iwm_lar_disable;

#ifndef IWM_DEFAULT_MCC
#define IWM_DEFAULT_MCC "ZZ"
#endif
static char iwm_default_mcc[3] = IWM_DEFAULT_MCC;

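/*
 * Read the firmware image from the filesystem via firmload(9) into
 * sc_fw.fw_rawdata; IWM_FLAG_FW_LOADED marks a cached copy so repeated
 * calls are cheap.
 */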
static int
iwm_firmload(struct iwm_softc *sc)
{
	struct iwm_fw_info *fw = &sc->sc_fw;
	firmware_handle_t fwh;
	int err;

	if (ISSET(sc->sc_flags, IWM_FLAG_FW_LOADED))
		return 0;

	/* Open firmware image. */
	err = firmware_open("if_iwm", sc->sc_fwname, &fwh);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not get firmware handle %s\n", sc->sc_fwname);
		return err;
	}

	if (fw->fw_rawdata != NULL && fw->fw_rawsize > 0) {
		kmem_free(fw->fw_rawdata, fw->fw_rawsize);
		fw->fw_rawdata = NULL;
	}

	fw->fw_rawsize = firmware_get_size(fwh);
	/*
	 * Well, this is how the Linux driver checks it ....
	 */
	if (fw->fw_rawsize < sizeof(uint32_t)) {
		aprint_error_dev(sc->sc_dev,
		    "firmware too short: %zd bytes\n", fw->fw_rawsize);
		err = EINVAL;
		goto out;
	}

	/* Read the firmware. */
	fw->fw_rawdata = kmem_alloc(fw->fw_rawsize, KM_SLEEP);
	if (fw->fw_rawdata == NULL) {
		aprint_error_dev(sc->sc_dev,
		    "not enough memory to stock firmware %s\n", sc->sc_fwname);
		err = ENOMEM;
		goto out;
	}
	err = firmware_read(fwh, 0, fw->fw_rawdata, fw->fw_rawsize);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not read firmware %s\n", sc->sc_fwname);
		goto out;
	}

	SET(sc->sc_flags, IWM_FLAG_FW_LOADED);
 out:
	/* caller will release memory, if necessary */

	firmware_close(fwh);
	return err;
}

/*
 * XXX code from OpenBSD src/sys/net80211/ieee80211_output.c
 * Copyright (c) 2001 Atsushi Onoe
 * Copyright (c) 2002, 2003 Sam Leffler, Errno Consulting
 * Copyright (c) 2007-2009 Damien Bergamini
 * All rights reserved.
 */

/*
 * Add an SSID element to a frame (see 7.3.2.1).
 */
static u_int8_t *
ieee80211_add_ssid(u_int8_t *frm, const u_int8_t *ssid, u_int len)
{
	*frm++ = IEEE80211_ELEMID_SSID;
	*frm++ = len;
	memcpy(frm, ssid, len);
	return frm + len;
}

/*
 * Add a supported rates element to a frame (see 7.3.2.2).
 */
static u_int8_t *
ieee80211_add_rates(u_int8_t *frm, const struct ieee80211_rateset *rs)
{
	int nrates;

	*frm++ = IEEE80211_ELEMID_RATES;
	nrates = min(rs->rs_nrates, IEEE80211_RATE_SIZE);
	*frm++ = nrates;
	memcpy(frm, rs->rs_rates, nrates);
	return frm + nrates;
}

/*
 * Add an extended supported rates element to a frame (see 7.3.2.14).
 */
static u_int8_t *
ieee80211_add_xrates(u_int8_t *frm, const struct ieee80211_rateset *rs)
{
	int nrates;

	KASSERT(rs->rs_nrates > IEEE80211_RATE_SIZE);

	*frm++ = IEEE80211_ELEMID_XRATES;
	nrates = rs->rs_nrates - IEEE80211_RATE_SIZE;
	*frm++ = nrates;
	memcpy(frm, rs->rs_rates + IEEE80211_RATE_SIZE, nrates);
	return frm + nrates;
}

/*
 * Keep ic_curchan in sync with the channel reported in the last RX PHY
 * info for received beacons and probe responses.
 */
static void
iwm_fix_channel(struct iwm_softc *sc, struct mbuf *m)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ieee80211_frame *wh;
	uint8_t subtype;

	wh = mtod(m, struct ieee80211_frame *);

	if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_MGT)
		return;

	subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

	if (subtype != IEEE80211_FC0_SUBTYPE_BEACON &&
	    subtype != IEEE80211_FC0_SUBTYPE_PROBE_RESP)
		return;

	int chan = le32toh(sc->sc_last_phy_info.channel);
	if (chan < __arraycount(ic->ic_channels))
		ic->ic_curchan = &ic->ic_channels[chan];
}

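/*
 * Handle the cipher scheme (CSCHEME) TLV from the firmware file.  Only
 * the length is validated here.
 */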
static int
iwm_store_cscheme(struct iwm_softc *sc, uint8_t *data, size_t dlen)
{
	struct iwm_fw_cscheme_list *l = (struct iwm_fw_cscheme_list *)data;

	if (dlen < sizeof(*l) ||
	    dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
		return EINVAL;

	/* we don't actually store anything for now, always use s/w crypto */

	return 0;
}

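/*
 * Record one firmware section from a SEC_* TLV: the first 32 bits are
 * the device load offset, the remainder is the section image.
 */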
static int
iwm_firmware_store_section(struct iwm_softc *sc, enum iwm_ucode_type type,
    uint8_t *data, size_t dlen)
{
	struct iwm_fw_sects *fws;
	struct iwm_fw_onesect *fwone;

	if (type >= IWM_UCODE_TYPE_MAX)
		return EINVAL;
	if (dlen < sizeof(uint32_t))
		return EINVAL;

	fws = &sc->sc_fw.fw_sects[type];
	if (fws->fw_count >= IWM_UCODE_SECT_MAX)
		return EINVAL;

	fwone = &fws->fw_sect[fws->fw_count];

	/* first 32bit are device load offset */
	memcpy(&fwone->fws_devoff, data, sizeof(uint32_t));

	/* rest is data */
	fwone->fws_data = data + sizeof(uint32_t);
	fwone->fws_len = dlen - sizeof(uint32_t);

	/* for freeing the buffer during driver unload */
	fwone->fws_alloc = data;
	fwone->fws_allocsize = dlen;

	fws->fw_count++;
	fws->fw_totlen += fwone->fws_len;

	return 0;
}

struct iwm_tlv_calib_data {
	uint32_t ucode_type;
	struct iwm_tlv_calib_ctrl calib;
} __packed;

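/*
 * Remember the default calibration triggers announced in a DEF_CALIB
 * TLV, keyed by ucode type.
 */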
static int
iwm_set_default_calib(struct iwm_softc *sc, const void *data)
{
	const struct iwm_tlv_calib_data *def_calib = data;
	uint32_t ucode_type = le32toh(def_calib->ucode_type);

	if (ucode_type >= IWM_UCODE_TYPE_MAX) {
		DPRINTF(("%s: Wrong ucode_type %u for default calibration.\n",
		    DEVNAME(sc), ucode_type));
		return EINVAL;
	}

	sc->sc_default_calib[ucode_type].flow_trigger =
	    def_calib->calib.flow_trigger;
	sc->sc_default_calib[ucode_type].event_trigger =
	    def_calib->calib.event_trigger;

	return 0;
}

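/*
 * Read the firmware file and walk its TLVs, recording sections,
 * capabilities, API flags and the version string in the softc.
 * Concurrent callers sleep on sc_fw until the first parse completes.
 */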
static int
iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
{
	struct iwm_fw_info *fw = &sc->sc_fw;
	struct iwm_tlv_ucode_header *uhdr;
	struct iwm_ucode_tlv tlv;
	enum iwm_ucode_tlv_type tlv_type;
	uint8_t *data;
	int err, status;
	size_t len;

	if (ucode_type != IWM_UCODE_TYPE_INIT &&
	    fw->fw_status == IWM_FW_STATUS_DONE)
		return 0;

	if (fw->fw_status == IWM_FW_STATUS_NONE) {
		fw->fw_status = IWM_FW_STATUS_INPROGRESS;
	} else {
		while (fw->fw_status == IWM_FW_STATUS_INPROGRESS)
			tsleep(&sc->sc_fw, 0, "iwmfwp", 0);
	}
	status = fw->fw_status;

	if (status == IWM_FW_STATUS_DONE)
		return 0;

	err = iwm_firmload(sc);
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "could not read firmware %s (error %d)\n",
		    sc->sc_fwname, err);
		goto out;
	}

	sc->sc_capaflags = 0;
	sc->sc_capa_n_scan_channels = IWM_MAX_NUM_SCAN_CHANNELS;
	memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
	memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));

	uhdr = (void *)fw->fw_rawdata;
	if (*(uint32_t *)fw->fw_rawdata != 0
	    || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
		aprint_error_dev(sc->sc_dev, "invalid firmware %s\n",
		    sc->sc_fwname);
		err = EINVAL;
		goto out;
	}

	snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%d.%d (API ver %d)",
	    IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
	    IWM_UCODE_MINOR(le32toh(uhdr->ver)),
	    IWM_UCODE_API(le32toh(uhdr->ver)));
	data = uhdr->data;
	len = fw->fw_rawsize - sizeof(*uhdr);

	while (len >= sizeof(tlv)) {
		size_t tlv_len;
		void *tlv_data;

		memcpy(&tlv, data, sizeof(tlv));
		tlv_len = le32toh(tlv.length);
		tlv_type = le32toh(tlv.type);

		len -= sizeof(tlv);
		data += sizeof(tlv);
		tlv_data = data;

		if (len < tlv_len) {
			aprint_error_dev(sc->sc_dev,
			    "firmware too short: %zu bytes\n", len);
			err = EINVAL;
			goto parse_out;
		}

		switch (tlv_type) {
		case IWM_UCODE_TLV_PROBE_MAX_LEN:
			if (tlv_len < sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capa_max_probe_len
			    = le32toh(*(uint32_t *)tlv_data);
			/* limit it to something sensible */
			if (sc->sc_capa_max_probe_len >
			    IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
				err = EINVAL;
				goto parse_out;
			}
			break;
		case IWM_UCODE_TLV_PAN:
			if (tlv_len) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capaflags |= IWM_UCODE_TLV_FLAGS_PAN;
			break;
		case IWM_UCODE_TLV_FLAGS:
			if (tlv_len < sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			if (tlv_len % sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			/*
			 * Apparently there can be many flags, but Linux driver
			 * parses only the first one, and so do we.
			 *
			 * XXX: why does this override IWM_UCODE_TLV_PAN?
			 * Intentional or a bug?  Observations from
			 * current firmware file:
			 *  1) TLV_PAN is parsed first
			 *  2) TLV_FLAGS contains TLV_FLAGS_PAN
			 * ==> this resets TLV_PAN to itself... hnnnk
			 */
			sc->sc_capaflags = le32toh(*(uint32_t *)tlv_data);
			break;
		case IWM_UCODE_TLV_CSCHEME:
			err = iwm_store_cscheme(sc, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_NUM_OF_CPU: {
			uint32_t num_cpu;
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			num_cpu = le32toh(*(uint32_t *)tlv_data);
			if (num_cpu == 2) {
				fw->fw_sects[IWM_UCODE_TYPE_REGULAR].is_dual_cpus =
				    true;
				fw->fw_sects[IWM_UCODE_TYPE_INIT].is_dual_cpus =
				    true;
				fw->fw_sects[IWM_UCODE_TYPE_WOW].is_dual_cpus =
				    true;
			} else if (num_cpu < 1 || num_cpu > 2) {
				err = EINVAL;
				goto parse_out;
			}
			break;
		}
		case IWM_UCODE_TLV_SEC_RT:
			err = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_SEC_INIT:
			err = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_INIT, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_SEC_WOWLAN:
			err = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_WOW, tlv_data, tlv_len);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_DEF_CALIB:
			if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
				err = EINVAL;
				goto parse_out;
			}
			err = iwm_set_default_calib(sc, tlv_data);
			if (err)
				goto parse_out;
			break;
		case IWM_UCODE_TLV_PHY_SKU:
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_fw_phy_config = le32toh(*(uint32_t *)tlv_data);
			break;

		case IWM_UCODE_TLV_API_CHANGES_SET: {
			struct iwm_ucode_api *api;
			uint32_t idx, bits;
			int i;
			if (tlv_len != sizeof(*api)) {
				err = EINVAL;
				goto parse_out;
			}
			api = (struct iwm_ucode_api *)tlv_data;
			idx = le32toh(api->api_index);
			bits = le32toh(api->api_flags);
			if (idx >= howmany(IWM_NUM_UCODE_TLV_API, 32)) {
				err = EINVAL;
				goto parse_out;
			}
			for (i = 0; i < 32; i++) {
				if (!ISSET(bits, __BIT(i)))
					continue;
				setbit(sc->sc_ucode_api, i + (32 * idx));
			}
			break;
		}

		case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
			struct iwm_ucode_capa *capa;
			uint32_t idx, bits;
			int i;
			if (tlv_len != sizeof(*capa)) {
				err = EINVAL;
				goto parse_out;
			}
			capa = (struct iwm_ucode_capa *)tlv_data;
			idx = le32toh(capa->api_index);
			bits = le32toh(capa->api_capa);
			if (idx >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
				err = EINVAL;
				goto parse_out;
			}
			for (i = 0; i < 32; i++) {
				if (!ISSET(bits, __BIT(i)))
					continue;
				setbit(sc->sc_enabled_capa, i + (32 * idx));
			}
			break;
		}

		case IWM_UCODE_TLV_FW_UNDOCUMENTED1:
		case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
		case IWM_UCODE_TLV_FW_GSCAN_CAPA:
		case IWM_UCODE_TLV_FW_MEM_SEG:
			/* ignore, not used by current driver */
			break;

		case IWM_UCODE_TLV_SEC_RT_USNIFFER:
			err = iwm_firmware_store_section(sc,
			    IWM_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
			    tlv_len);
			if (err)
				goto parse_out;
			break;

		case IWM_UCODE_TLV_PAGING: {
			uint32_t paging_mem_size;
			if (tlv_len != sizeof(paging_mem_size)) {
				err = EINVAL;
				goto parse_out;
			}
			paging_mem_size = le32toh(*(uint32_t *)tlv_data);
			if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
				err = EINVAL;
				goto parse_out;
			}
			if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
				err = EINVAL;
				goto parse_out;
			}
			fw->fw_sects[IWM_UCODE_TYPE_REGULAR].paging_mem_size =
			    paging_mem_size;
			fw->fw_sects[IWM_UCODE_TYPE_REGULAR_USNIFFER].paging_mem_size =
			    paging_mem_size;
			break;
		}

		case IWM_UCODE_TLV_N_SCAN_CHANNELS:
			if (tlv_len != sizeof(uint32_t)) {
				err = EINVAL;
				goto parse_out;
			}
			sc->sc_capa_n_scan_channels =
			    le32toh(*(uint32_t *)tlv_data);
			break;

		case IWM_UCODE_TLV_FW_VERSION:
			if (tlv_len != sizeof(uint32_t) * 3) {
				err = EINVAL;
				goto parse_out;
			}
			snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
			    "%d.%d.%d",
			    le32toh(((uint32_t *)tlv_data)[0]),
			    le32toh(((uint32_t *)tlv_data)[1]),
			    le32toh(((uint32_t *)tlv_data)[2]));
			break;

		default:
			DPRINTF(("%s: unknown firmware section %d, abort\n",
			    DEVNAME(sc), tlv_type));
			err = EINVAL;
			goto parse_out;
		}

		len -= roundup(tlv_len, 4);
		data += roundup(tlv_len, 4);
	}

	KASSERT(err == 0);

 parse_out:
	if (err) {
		aprint_error_dev(sc->sc_dev,
		    "firmware parse error, section type %d\n", tlv_type);
	}

	if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
		aprint_error_dev(sc->sc_dev,
		    "device uses unsupported power ops\n");
		err = ENOTSUP;
	}

 out:
	if (err)
		fw->fw_status = IWM_FW_STATUS_NONE;
	else
		fw->fw_status = IWM_FW_STATUS_DONE;
	wakeup(&sc->sc_fw);

	if (err && fw->fw_rawdata != NULL) {
		kmem_free(fw->fw_rawdata, fw->fw_rawsize);
		fw->fw_rawdata = NULL;
		CLR(sc->sc_flags, IWM_FLAG_FW_LOADED);
		/* don't touch fw->fw_status */
		memset(fw->fw_sects, 0, sizeof(fw->fw_sects));
	}
	return err;
}

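/*
 * Indirect access to periphery (PRPH) registers through the
 * HBUS_TARG_PRPH window; callers are expected to hold the NIC lock
 * (see iwm_set_bits_mask_prph()).
 */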
static uint32_t
iwm_read_prph(struct iwm_softc *sc, uint32_t addr)
{
	IWM_WRITE(sc,
	    IWM_HBUS_TARG_PRPH_RADDR, ((addr & 0x000fffff) | (3 << 24)));
	IWM_BARRIER_READ_WRITE(sc);
	return IWM_READ(sc, IWM_HBUS_TARG_PRPH_RDAT);
}

static void
iwm_write_prph(struct iwm_softc *sc, uint32_t addr, uint32_t val)
{
	IWM_WRITE(sc,
	    IWM_HBUS_TARG_PRPH_WADDR, ((addr & 0x000fffff) | (3 << 24)));
	IWM_BARRIER_WRITE(sc);
	IWM_WRITE(sc, IWM_HBUS_TARG_PRPH_WDAT, val);
}

#ifdef IWM_DEBUG
static int
iwm_read_mem(struct iwm_softc *sc, uint32_t addr, void *buf, int dwords)
{
	int offs;
	uint32_t *vals = buf;

	if (iwm_nic_lock(sc)) {
		IWM_WRITE(sc, IWM_HBUS_TARG_MEM_RADDR, addr);
		for (offs = 0; offs < dwords; offs++)
			vals[offs] = IWM_READ(sc, IWM_HBUS_TARG_MEM_RDAT);
		iwm_nic_unlock(sc);
		return 0;
	}
	return EBUSY;
}
#endif

static int
iwm_write_mem(struct iwm_softc *sc, uint32_t addr, const void *buf, int dwords)
{
	int offs;
	const uint32_t *vals = buf;

	if (iwm_nic_lock(sc)) {
		IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WADDR, addr);
		/* WADDR auto-increments */
		for (offs = 0; offs < dwords; offs++) {
			uint32_t val = vals ? vals[offs] : 0;
			IWM_WRITE(sc, IWM_HBUS_TARG_MEM_WDAT, val);
		}
		iwm_nic_unlock(sc);
		return 0;
	}
	return EBUSY;
}

static int
iwm_write_mem32(struct iwm_softc *sc, uint32_t addr, uint32_t val)
{
	return iwm_write_mem(sc, addr, &val, 1);
}

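/*
 * Busy-wait up to 'timo' microseconds, in 10us steps, for the masked
 * bits of a CSR register to match 'bits'; returns 1 on success, 0 on
 * timeout.
 */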
static int
iwm_poll_bit(struct iwm_softc *sc, int reg, uint32_t bits, uint32_t mask,
    int timo)
{
	for (;;) {
		if ((IWM_READ(sc, reg) & mask) == (bits & mask)) {
			return 1;
		}
		if (timo < 10) {
			return 0;
		}
		timo -= 10;
		DELAY(10);
	}
}

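/*
 * Request MAC access so the device stays awake for direct register
 * access; returns 1 if access was granted.  Paired with
 * iwm_nic_unlock().
 */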
static int
iwm_nic_lock(struct iwm_softc *sc)
{
	int rv = 0;

	if (sc->sc_cmd_hold_nic_awake)
		return 1;

	IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
		DELAY(2);

	if (iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
	     | IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP, 15000)) {
		rv = 1;
	} else {
		DPRINTF(("%s: resetting device via NMI\n", DEVNAME(sc)));
		IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_FORCE_NMI);
	}

	return rv;
}

static void
iwm_nic_unlock(struct iwm_softc *sc)
{

	if (sc->sc_cmd_hold_nic_awake)
		return;

	IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
	    IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}

static void
iwm_set_bits_mask_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits,
    uint32_t mask)
{
	uint32_t val;

	/* XXX: no error path? */
	if (iwm_nic_lock(sc)) {
		val = iwm_read_prph(sc, reg) & mask;
		val |= bits;
		iwm_write_prph(sc, reg, val);
		iwm_nic_unlock(sc);
	}
}

static void
iwm_set_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
{
	iwm_set_bits_mask_prph(sc, reg, bits, ~0);
}

static void
iwm_clear_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
{
	iwm_set_bits_mask_prph(sc, reg, 0, ~bits);
}

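/*
 * Allocate, map and load a single physically contiguous DMA buffer,
 * zeroed and described by 'dma'.
 */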
1183 1.75.2.2 snj static int
1184 1.75.2.2 snj iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
1185 1.75.2.2 snj bus_size_t size, bus_size_t alignment)
1186 1.75.2.2 snj {
1187 1.75.2.2 snj int nsegs, err;
1188 1.75.2.2 snj void *va;
1189 1.75.2.2 snj
1190 1.75.2.2 snj dma->tag = tag;
1191 1.75.2.2 snj dma->size = size;
1192 1.75.2.2 snj
1193 1.75.2.2 snj err = bus_dmamap_create(tag, size, 1, size, 0, BUS_DMA_NOWAIT,
1194 1.75.2.2 snj &dma->map);
1195 1.75.2.2 snj if (err)
1196 1.75.2.2 snj goto fail;
1197 1.75.2.2 snj
1198 1.75.2.2 snj err = bus_dmamem_alloc(tag, size, alignment, 0, &dma->seg, 1, &nsegs,
1199 1.75.2.2 snj BUS_DMA_NOWAIT);
1200 1.75.2.2 snj if (err)
1201 1.75.2.2 snj goto fail;
1202 1.75.2.2 snj
1203 1.75.2.2 snj err = bus_dmamem_map(tag, &dma->seg, 1, size, &va, BUS_DMA_NOWAIT);
1204 1.75.2.2 snj if (err)
1205 1.75.2.2 snj goto fail;
1206 1.75.2.2 snj dma->vaddr = va;
1207 1.75.2.2 snj
1208 1.75.2.2 snj err = bus_dmamap_load(tag, dma->map, dma->vaddr, size, NULL,
1209 1.75.2.2 snj BUS_DMA_NOWAIT);
1210 1.75.2.2 snj if (err)
1211 1.75.2.2 snj goto fail;
1212 1.75.2.2 snj
1213 1.75.2.2 snj memset(dma->vaddr, 0, size);
1214 1.75.2.2 snj bus_dmamap_sync(tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE);
1215 1.75.2.2 snj dma->paddr = dma->map->dm_segs[0].ds_addr;
1216 1.75.2.2 snj
1217 1.75.2.2 snj return 0;
1218 1.75.2.2 snj
1219 1.75.2.2 snj fail: iwm_dma_contig_free(dma);
1220 1.75.2.2 snj return err;
1221 1.75.2.2 snj }
1222 1.75.2.2 snj
1223 1.75.2.2 snj static void
1224 1.75.2.2 snj iwm_dma_contig_free(struct iwm_dma_info *dma)
1225 1.75.2.2 snj {
1226 1.75.2.2 snj if (dma->map != NULL) {
1227 1.75.2.2 snj if (dma->vaddr != NULL) {
1228 1.75.2.2 snj bus_dmamap_sync(dma->tag, dma->map, 0, dma->size,
1229 1.75.2.2 snj BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1230 1.75.2.2 snj bus_dmamap_unload(dma->tag, dma->map);
1231 1.75.2.2 snj bus_dmamem_unmap(dma->tag, dma->vaddr, dma->size);
1232 1.75.2.2 snj bus_dmamem_free(dma->tag, &dma->seg, 1);
1233 1.75.2.2 snj dma->vaddr = NULL;
1234 1.75.2.2 snj }
1235 1.75.2.2 snj bus_dmamap_destroy(dma->tag, dma->map);
1236 1.75.2.2 snj dma->map = NULL;
1237 1.75.2.2 snj }
1238 1.75.2.2 snj }
1239 1.75.2.2 snj
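/*
 * Allocate the RX ring: the 256-byte aligned array of receive buffer
 * descriptors, the 16-byte aligned RX status area, and a DMA map plus
 * an initial receive buffer (via iwm_rx_addbuf()) for each ring slot.
 */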
1240 1.75.2.2 snj static int
1241 1.75.2.2 snj iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1242 1.75.2.2 snj {
1243 1.75.2.2 snj bus_size_t size;
1244 1.75.2.2 snj int i, err;
1245 1.75.2.2 snj
1246 1.75.2.2 snj ring->cur = 0;
1247 1.75.2.2 snj
1248 1.75.2.2 snj /* Allocate RX descriptors (256-byte aligned). */
1249 1.75.2.2 snj size = IWM_RX_RING_COUNT * sizeof(uint32_t);
1250 1.75.2.2 snj err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
1251 1.75.2.2 snj if (err) {
1252 1.75.2.2 snj aprint_error_dev(sc->sc_dev,
1253 1.75.2.2 snj "could not allocate RX ring DMA memory\n");
1254 1.75.2.2 snj goto fail;
1255 1.75.2.2 snj }
1256 1.75.2.2 snj ring->desc = ring->desc_dma.vaddr;
1257 1.75.2.2 snj
1258 1.75.2.2 snj /* Allocate RX status area (16-byte aligned). */
1259 1.75.2.2 snj err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
1260 1.75.2.2 snj sizeof(*ring->stat), 16);
1261 1.75.2.2 snj if (err) {
1262 1.75.2.2 snj aprint_error_dev(sc->sc_dev,
1263 1.75.2.2 snj "could not allocate RX status DMA memory\n");
1264 1.75.2.2 snj goto fail;
1265 1.75.2.2 snj }
1266 1.75.2.2 snj ring->stat = ring->stat_dma.vaddr;
1267 1.75.2.2 snj
1268 1.75.2.2 snj for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1269 1.75.2.2 snj struct iwm_rx_data *data = &ring->data[i];
1270 1.75.2.2 snj
1271 1.75.2.2 snj memset(data, 0, sizeof(*data));
1272 1.75.2.2 snj err = bus_dmamap_create(sc->sc_dmat, IWM_RBUF_SIZE, 1,
1273 1.75.2.2 snj IWM_RBUF_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1274 1.75.2.2 snj &data->map);
1275 1.75.2.2 snj if (err) {
1276 1.75.2.2 snj aprint_error_dev(sc->sc_dev,
1277 1.75.2.2 snj "could not create RX buf DMA map\n");
1278 1.75.2.2 snj goto fail;
1279 1.75.2.2 snj }
1280 1.75.2.2 snj
1281 1.75.2.2 snj err = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i);
1282 1.75.2.2 snj if (err)
1283 1.75.2.2 snj goto fail;
1284 1.75.2.2 snj }
1285 1.75.2.2 snj return 0;
1286 1.75.2.2 snj
1287 1.75.2.2 snj fail: iwm_free_rx_ring(sc, ring);
1288 1.75.2.2 snj return err;
1289 1.75.2.2 snj }
1290 1.75.2.2 snj
1291 1.75.2.2 snj static void
1292 1.75.2.2 snj iwm_disable_rx_dma(struct iwm_softc *sc)
1293 1.75.2.2 snj {
1294 1.75.2.2 snj int ntries;
1295 1.75.2.2 snj
1296 1.75.2.2 snj if (iwm_nic_lock(sc)) {
1297 1.75.2.2 snj IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
1298 1.75.2.2 snj for (ntries = 0; ntries < 1000; ntries++) {
1299 1.75.2.2 snj if (IWM_READ(sc, IWM_FH_MEM_RSSR_RX_STATUS_REG) &
1300 1.75.2.2 snj IWM_FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE)
1301 1.75.2.2 snj break;
1302 1.75.2.2 snj DELAY(10);
1303 1.75.2.2 snj }
1304 1.75.2.2 snj iwm_nic_unlock(sc);
1305 1.75.2.2 snj }
1306 1.75.2.2 snj }
1307 1.75.2.2 snj
1308 1.75.2.2 snj void
1309 1.75.2.2 snj iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1310 1.75.2.2 snj {
1311 1.75.2.2 snj ring->cur = 0;
1312 1.75.2.2 snj memset(ring->stat, 0, sizeof(*ring->stat));
1313 1.75.2.2 snj bus_dmamap_sync(sc->sc_dmat, ring->stat_dma.map, 0,
1314 1.75.2.2 snj ring->stat_dma.size, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1315 1.75.2.2 snj }
1316 1.75.2.2 snj
1317 1.75.2.2 snj static void
1318 1.75.2.2 snj iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1319 1.75.2.2 snj {
1320 1.75.2.2 snj int i;
1321 1.75.2.2 snj
1322 1.75.2.2 snj iwm_dma_contig_free(&ring->desc_dma);
1323 1.75.2.2 snj iwm_dma_contig_free(&ring->stat_dma);
1324 1.75.2.2 snj
1325 1.75.2.2 snj for (i = 0; i < IWM_RX_RING_COUNT; i++) {
1326 1.75.2.2 snj struct iwm_rx_data *data = &ring->data[i];
1327 1.75.2.2 snj
1328 1.75.2.2 snj if (data->m != NULL) {
1329 1.75.2.2 snj bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1330 1.75.2.2 snj data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1331 1.75.2.2 snj bus_dmamap_unload(sc->sc_dmat, data->map);
1332 1.75.2.2 snj m_freem(data->m);
1333 1.75.2.2 snj data->m = NULL;
1334 1.75.2.2 snj }
1335 1.75.2.2 snj if (data->map != NULL) {
1336 1.75.2.2 snj bus_dmamap_destroy(sc->sc_dmat, data->map);
1337 1.75.2.2 snj data->map = NULL;
1338 1.75.2.2 snj }
1339 1.75.2.2 snj }
1340 1.75.2.2 snj }
1341 1.75.2.2 snj
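/*
 * Allocate a TX ring: the 256-byte aligned array of transmit frame
 * descriptors (TFDs) and, for rings up to and including the command
 * queue, the per-slot device command buffers and the DMA maps used for
 * outgoing frames and host commands.
 */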
1342 1.75.2.2 snj static int
1343 1.75.2.2 snj iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
1344 1.75.2.2 snj {
1345 1.75.2.2 snj bus_addr_t paddr;
1346 1.75.2.2 snj bus_size_t size;
1347 1.75.2.2 snj int i, err, nsegs;
1348 1.75.2.2 snj
1349 1.75.2.2 snj ring->qid = qid;
1350 1.75.2.2 snj ring->queued = 0;
1351 1.75.2.2 snj ring->cur = 0;
1352 1.75.2.2 snj
1353 1.75.2.2 snj /* Allocate TX descriptors (256-byte aligned). */
1354 1.75.2.2 snj size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
1355 1.75.2.2 snj err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
1356 1.75.2.2 snj if (err) {
1357 1.75.2.2 snj aprint_error_dev(sc->sc_dev,
1358 1.75.2.2 snj "could not allocate TX ring DMA memory\n");
1359 1.75.2.2 snj goto fail;
1360 1.75.2.2 snj }
1361 1.75.2.2 snj ring->desc = ring->desc_dma.vaddr;
1362 1.75.2.2 snj
1363 1.75.2.2 snj /*
1364 1.75.2.2 snj * We only use rings 0 through 9 (4 EDCA + cmd), so there is no need
1365 1.75.2.2 snj * to allocate command buffer space for the other rings.
1366 1.75.2.2 snj */
1367 1.75.2.2 snj if (qid > IWM_CMD_QUEUE)
1368 1.75.2.2 snj return 0;
1369 1.75.2.2 snj
1370 1.75.2.2 snj size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
1371 1.75.2.2 snj err = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
1372 1.75.2.2 snj if (err) {
1373 1.75.2.2 snj aprint_error_dev(sc->sc_dev,
1374 1.75.2.2 snj "could not allocate TX cmd DMA memory\n");
1375 1.75.2.2 snj goto fail;
1376 1.75.2.2 snj }
1377 1.75.2.2 snj ring->cmd = ring->cmd_dma.vaddr;
1378 1.75.2.2 snj
1379 1.75.2.2 snj paddr = ring->cmd_dma.paddr;
1380 1.75.2.2 snj for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1381 1.75.2.2 snj struct iwm_tx_data *data = &ring->data[i];
1382 1.75.2.2 snj size_t mapsize;
1383 1.75.2.2 snj
1384 1.75.2.2 snj data->cmd_paddr = paddr;
1385 1.75.2.2 snj data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
1386 1.75.2.2 snj + offsetof(struct iwm_tx_cmd, scratch);
1387 1.75.2.2 snj paddr += sizeof(struct iwm_device_cmd);
1388 1.75.2.2 snj
1389 1.75.2.2 snj /* FW commands may require more mapped space than packets. */
1390 1.75.2.2 snj if (qid == IWM_CMD_QUEUE) {
1391 1.75.2.2 snj mapsize = IWM_RBUF_SIZE;
1392 1.75.2.2 snj nsegs = 1;
1393 1.75.2.2 snj } else {
1394 1.75.2.2 snj mapsize = MCLBYTES;
1395 1.75.2.2 snj nsegs = IWM_NUM_OF_TBS - 2;
1396 1.75.2.2 snj }
1397 1.75.2.2 snj err = bus_dmamap_create(sc->sc_dmat, mapsize, nsegs, mapsize,
1398 1.75.2.2 snj 0, BUS_DMA_NOWAIT, &data->map);
1399 1.75.2.2 snj if (err) {
1400 1.75.2.2 snj aprint_error_dev(sc->sc_dev,
1401 1.75.2.2 snj "could not create TX buf DMA map\n");
1402 1.75.2.2 snj goto fail;
1403 1.75.2.2 snj }
1404 1.75.2.2 snj }
1405 1.75.2.2 snj KASSERT(paddr == ring->cmd_dma.paddr + size);
1406 1.75.2.2 snj return 0;
1407 1.75.2.2 snj
1408 1.75.2.2 snj fail: iwm_free_tx_ring(sc, ring);
1409 1.75.2.2 snj return err;
1410 1.75.2.2 snj }
1411 1.75.2.2 snj
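/*
 * Drop the "keep the NIC awake" request taken by iwm_set_cmd_in_flight()
 * once all host commands have completed. Only relevant on NICs that
 * need the apmg_wake_up_wa workaround.
 */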
1412 1.75.2.2 snj static void
1413 1.75.2.2 snj iwm_clear_cmd_in_flight(struct iwm_softc *sc)
1414 1.75.2.2 snj {
1415 1.75.2.2 snj
1416 1.75.2.2 snj if (!sc->apmg_wake_up_wa)
1417 1.75.2.2 snj return;
1418 1.75.2.2 snj
1419 1.75.2.2 snj if (!sc->sc_cmd_hold_nic_awake) {
1420 1.75.2.2 snj aprint_error_dev(sc->sc_dev,
1421 1.75.2.2 snj "cmd_hold_nic_awake not set\n");
1422 1.75.2.2 snj return;
1423 1.75.2.2 snj }
1424 1.75.2.2 snj
1425 1.75.2.2 snj sc->sc_cmd_hold_nic_awake = 0;
1426 1.75.2.2 snj IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1427 1.75.2.2 snj IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1428 1.75.2.2 snj }
1429 1.75.2.2 snj
1430 1.75.2.2 snj static int
1431 1.75.2.2 snj iwm_set_cmd_in_flight(struct iwm_softc *sc)
1432 1.75.2.2 snj {
1433 1.75.2.2 snj int ret;
1434 1.75.2.2 snj
1435 1.75.2.2 snj /*
1436 1.75.2.2 snj * Wake up the NIC to make sure that the firmware will see the host
1437 1.75.2.2 snj * command; we will let the NIC sleep once all the host commands have
1438 1.75.2.2 snj * returned. This needs to be done only on NICs that have
1439 1.75.2.2 snj * apmg_wake_up_wa set.
1440 1.75.2.2 snj */
1441 1.75.2.2 snj if (sc->apmg_wake_up_wa && !sc->sc_cmd_hold_nic_awake) {
1442 1.75.2.2 snj
1443 1.75.2.2 snj IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
1444 1.75.2.2 snj IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1445 1.75.2.2 snj
1446 1.75.2.2 snj ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
1447 1.75.2.2 snj IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
1448 1.75.2.2 snj (IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
1449 1.75.2.2 snj IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP),
1450 1.75.2.2 snj 15000);
1451 1.75.2.2 snj if (ret == 0) {
1452 1.75.2.2 snj IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1453 1.75.2.2 snj IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1454 1.75.2.2 snj aprint_error_dev(sc->sc_dev,
1455 1.75.2.2 snj "failed to wake NIC for hcmd\n");
1456 1.75.2.2 snj return EIO;
1457 1.75.2.2 snj }
1458 1.75.2.2 snj sc->sc_cmd_hold_nic_awake = 1;
1459 1.75.2.2 snj }
1460 1.75.2.2 snj
1461 1.75.2.2 snj return 0;
1462 1.75.2.2 snj }

1463 1.75.2.2 snj static void
1464 1.75.2.2 snj iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1465 1.75.2.2 snj {
1466 1.75.2.2 snj int i;
1467 1.75.2.2 snj
1468 1.75.2.2 snj for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1469 1.75.2.2 snj struct iwm_tx_data *data = &ring->data[i];
1470 1.75.2.2 snj
1471 1.75.2.2 snj if (data->m != NULL) {
1472 1.75.2.2 snj bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1473 1.75.2.2 snj data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1474 1.75.2.2 snj bus_dmamap_unload(sc->sc_dmat, data->map);
1475 1.75.2.2 snj m_freem(data->m);
1476 1.75.2.2 snj data->m = NULL;
1477 1.75.2.2 snj }
1478 1.75.2.2 snj }
1479 1.75.2.2 snj /* Clear TX descriptors. */
1480 1.75.2.2 snj memset(ring->desc, 0, ring->desc_dma.size);
1481 1.75.2.2 snj bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0,
1482 1.75.2.2 snj ring->desc_dma.size, BUS_DMASYNC_PREWRITE);
1483 1.75.2.2 snj sc->qfullmsk &= ~(1 << ring->qid);
1484 1.75.2.2 snj ring->queued = 0;
1485 1.75.2.2 snj ring->cur = 0;
1486 1.75.2.2 snj
1487 1.75.2.2 snj if (ring->qid == IWM_CMD_QUEUE && sc->sc_cmd_hold_nic_awake)
1488 1.75.2.2 snj iwm_clear_cmd_in_flight(sc);
1489 1.75.2.2 snj }
1490 1.75.2.2 snj
1491 1.75.2.2 snj static void
1492 1.75.2.2 snj iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1493 1.75.2.2 snj {
1494 1.75.2.2 snj int i;
1495 1.75.2.2 snj
1496 1.75.2.2 snj iwm_dma_contig_free(&ring->desc_dma);
1497 1.75.2.2 snj iwm_dma_contig_free(&ring->cmd_dma);
1498 1.75.2.2 snj
1499 1.75.2.2 snj for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1500 1.75.2.2 snj struct iwm_tx_data *data = &ring->data[i];
1501 1.75.2.2 snj
1502 1.75.2.2 snj if (data->m != NULL) {
1503 1.75.2.2 snj bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1504 1.75.2.2 snj data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1505 1.75.2.2 snj bus_dmamap_unload(sc->sc_dmat, data->map);
1506 1.75.2.2 snj m_freem(data->m);
1507 1.75.2.2 snj data->m = NULL;
1508 1.75.2.2 snj }
1509 1.75.2.2 snj if (data->map != NULL) {
1510 1.75.2.2 snj bus_dmamap_destroy(sc->sc_dmat, data->map);
1511 1.75.2.2 snj data->map = NULL;
1512 1.75.2.2 snj }
1513 1.75.2.2 snj }
1514 1.75.2.2 snj }
1515 1.75.2.2 snj
1516 1.75.2.2 snj static void
1517 1.75.2.2 snj iwm_enable_rfkill_int(struct iwm_softc *sc)
1518 1.75.2.2 snj {
1519 1.75.2.2 snj sc->sc_intmask = IWM_CSR_INT_BIT_RF_KILL;
1520 1.75.2.2 snj IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1521 1.75.2.2 snj }
1522 1.75.2.2 snj
1523 1.75.2.2 snj static int
1524 1.75.2.2 snj iwm_check_rfkill(struct iwm_softc *sc)
1525 1.75.2.2 snj {
1526 1.75.2.2 snj uint32_t v;
1527 1.75.2.2 snj int s;
1528 1.75.2.2 snj int rv;
1529 1.75.2.2 snj
1530 1.75.2.2 snj s = splnet();
1531 1.75.2.2 snj
1532 1.75.2.2 snj /*
1533 1.75.2.2 snj * "documentation" is not really helpful here:
1534 1.75.2.2 snj * 27: HW_RF_KILL_SW
1535 1.75.2.2 snj * Indicates state of (platform's) hardware RF-Kill switch
1536 1.75.2.2 snj *
1537 1.75.2.2 snj * But apparently when it's off, it's on ...
1538 1.75.2.2 snj */
1539 1.75.2.2 snj v = IWM_READ(sc, IWM_CSR_GP_CNTRL);
1540 1.75.2.2 snj rv = (v & IWM_CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) == 0;
1541 1.75.2.2 snj if (rv) {
1542 1.75.2.2 snj sc->sc_flags |= IWM_FLAG_RFKILL;
1543 1.75.2.2 snj } else {
1544 1.75.2.2 snj sc->sc_flags &= ~IWM_FLAG_RFKILL;
1545 1.75.2.2 snj }
1546 1.75.2.2 snj
1547 1.75.2.2 snj splx(s);
1548 1.75.2.2 snj return rv;
1549 1.75.2.2 snj }
1550 1.75.2.2 snj
1551 1.75.2.2 snj static void
1552 1.75.2.2 snj iwm_enable_interrupts(struct iwm_softc *sc)
1553 1.75.2.2 snj {
1554 1.75.2.2 snj sc->sc_intmask = IWM_CSR_INI_SET_MASK;
1555 1.75.2.2 snj IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1556 1.75.2.2 snj }
1557 1.75.2.2 snj
1558 1.75.2.2 snj static void
1559 1.75.2.2 snj iwm_restore_interrupts(struct iwm_softc *sc)
1560 1.75.2.2 snj {
1561 1.75.2.2 snj IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
1562 1.75.2.2 snj }
1563 1.75.2.2 snj
1564 1.75.2.2 snj static void
1565 1.75.2.2 snj iwm_disable_interrupts(struct iwm_softc *sc)
1566 1.75.2.2 snj {
1567 1.75.2.2 snj int s = splnet();
1568 1.75.2.2 snj
1569 1.75.2.2 snj IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
1570 1.75.2.2 snj
1571 1.75.2.2 snj /* acknowledge all interrupts */
1572 1.75.2.2 snj IWM_WRITE(sc, IWM_CSR_INT, ~0);
1573 1.75.2.2 snj IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, ~0);
1574 1.75.2.2 snj
1575 1.75.2.2 snj splx(s);
1576 1.75.2.2 snj }
1577 1.75.2.2 snj
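/*
 * Reset the interrupt cause table (ICT): zero the table, point the
 * hardware at its DMA address, switch the driver to ICT interrupt
 * handling and re-enable interrupts.
 */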
1578 1.75.2.2 snj static void
1579 1.75.2.2 snj iwm_ict_reset(struct iwm_softc *sc)
1580 1.75.2.2 snj {
1581 1.75.2.2 snj iwm_disable_interrupts(sc);
1582 1.75.2.2 snj
1583 1.75.2.2 snj memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
1584 1.75.2.2 snj bus_dmamap_sync(sc->sc_dmat, sc->ict_dma.map, 0, IWM_ICT_SIZE,
1585 1.75.2.2 snj BUS_DMASYNC_PREWRITE);
1586 1.75.2.2 snj sc->ict_cur = 0;
1587 1.75.2.2 snj
1588 1.75.2.2 snj /* Set physical address of ICT (4KB aligned). */
1589 1.75.2.2 snj IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
1590 1.75.2.2 snj IWM_CSR_DRAM_INT_TBL_ENABLE
1591 1.75.2.2 snj | IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
1592 1.75.2.2 snj | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
1593 1.75.2.2 snj | sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);
1594 1.75.2.2 snj
1595 1.75.2.2 snj /* Switch to ICT interrupt mode in driver. */
1596 1.75.2.2 snj sc->sc_flags |= IWM_FLAG_USE_ICT;
1597 1.75.2.2 snj
1598 1.75.2.2 snj IWM_WRITE(sc, IWM_CSR_INT, ~0);
1599 1.75.2.2 snj iwm_enable_interrupts(sc);
1600 1.75.2.2 snj }
1601 1.75.2.2 snj
1602 1.75.2.2 snj #define IWM_HW_READY_TIMEOUT 50
1603 1.75.2.2 snj static int
1604 1.75.2.2 snj iwm_set_hw_ready(struct iwm_softc *sc)
1605 1.75.2.2 snj {
1606 1.75.2.2 snj int ready;
1607 1.75.2.2 snj
1608 1.75.2.2 snj IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1609 1.75.2.2 snj IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
1610 1.75.2.2 snj
1611 1.75.2.2 snj ready = iwm_poll_bit(sc, IWM_CSR_HW_IF_CONFIG_REG,
1612 1.75.2.2 snj IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
1613 1.75.2.2 snj IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
1614 1.75.2.2 snj IWM_HW_READY_TIMEOUT);
1615 1.75.2.2 snj if (ready)
1616 1.75.2.2 snj IWM_SETBITS(sc, IWM_CSR_MBOX_SET_REG,
1617 1.75.2.2 snj IWM_CSR_MBOX_SET_REG_OS_ALIVE);
1618 1.75.2.2 snj
1619 1.75.2.2 snj return ready;
1620 1.75.2.2 snj }
1621 1.75.2.2 snj #undef IWM_HW_READY_TIMEOUT
1622 1.75.2.2 snj
1623 1.75.2.2 snj static int
1624 1.75.2.2 snj iwm_prepare_card_hw(struct iwm_softc *sc)
1625 1.75.2.2 snj {
1626 1.75.2.2 snj int t = 0;
1627 1.75.2.2 snj
1628 1.75.2.2 snj if (iwm_set_hw_ready(sc))
1629 1.75.2.2 snj return 0;
1630 1.75.2.2 snj
1631 1.75.2.2 snj DELAY(100);
1632 1.75.2.2 snj
1633 1.75.2.2 snj /* If HW is not ready, prepare the conditions to check again */
1634 1.75.2.2 snj IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1635 1.75.2.2 snj IWM_CSR_HW_IF_CONFIG_REG_PREPARE);
1636 1.75.2.2 snj
1637 1.75.2.2 snj do {
1638 1.75.2.2 snj if (iwm_set_hw_ready(sc))
1639 1.75.2.2 snj return 0;
1640 1.75.2.2 snj DELAY(200);
1641 1.75.2.2 snj t += 200;
1642 1.75.2.2 snj } while (t < 150000);
1643 1.75.2.2 snj
1644 1.75.2.2 snj return ETIMEDOUT;
1645 1.75.2.2 snj }
1646 1.75.2.2 snj
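/*
 * Adjust the device's L0S setting according to whether PCIe ASPM L1 is
 * enabled in the link control/status register of our PCIe capability.
 */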
1647 1.75.2.2 snj static void
1648 1.75.2.2 snj iwm_apm_config(struct iwm_softc *sc)
1649 1.75.2.2 snj {
1650 1.75.2.2 snj pcireg_t reg;
1651 1.75.2.2 snj
1652 1.75.2.2 snj reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
1653 1.75.2.2 snj sc->sc_cap_off + PCIE_LCSR);
1654 1.75.2.2 snj if (reg & PCIE_LCSR_ASPM_L1) {
1655 1.75.2.2 snj /* The Linux driver prints "Disabling L0S" in this case ... */
1656 1.75.2.2 snj IWM_SETBITS(sc, IWM_CSR_GIO_REG,
1657 1.75.2.2 snj IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
1658 1.75.2.2 snj } else {
1659 1.75.2.2 snj /* ... and "Enabling L0S" here. */
1660 1.75.2.2 snj IWM_CLRBITS(sc, IWM_CSR_GIO_REG,
1661 1.75.2.2 snj IWM_CSR_GIO_REG_VAL_L0S_ENABLED);
1662 1.75.2.2 snj }
1663 1.75.2.2 snj }
1664 1.75.2.2 snj
1665 1.75.2.2 snj /*
1666 1.75.2.2 snj * Start up the NIC's basic functionality after it has been reset,
1667 1.75.2.2 snj * e.g. after platform boot or shutdown.
1668 1.75.2.2 snj * NOTE: This does not load uCode nor start the embedded processor.
1669 1.75.2.2 snj */
1670 1.75.2.2 snj static int
1671 1.75.2.2 snj iwm_apm_init(struct iwm_softc *sc)
1672 1.75.2.2 snj {
1673 1.75.2.2 snj int err = 0;
1674 1.75.2.2 snj
1675 1.75.2.2 snj /* Disable L0S exit timer (platform NMI workaround) */
1676 1.75.2.2 snj if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000) {
1677 1.75.2.2 snj IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
1678 1.75.2.2 snj IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
1679 1.75.2.2 snj }
1680 1.75.2.2 snj
1681 1.75.2.2 snj /*
1682 1.75.2.2 snj * Disable L0s without affecting L1;
1683 1.75.2.2 snj * don't wait for ICH L0s (ICH bug W/A)
1684 1.75.2.2 snj */
1685 1.75.2.2 snj IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
1686 1.75.2.2 snj IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
1687 1.75.2.2 snj
1688 1.75.2.2 snj /* Set FH wait threshold to maximum (HW error during stress W/A) */
1689 1.75.2.2 snj IWM_SETBITS(sc, IWM_CSR_DBG_HPET_MEM_REG, IWM_CSR_DBG_HPET_MEM_REG_VAL);
1690 1.75.2.2 snj
1691 1.75.2.2 snj /*
1692 1.75.2.2 snj * Enable HAP INTA (interrupt from management bus) to
1693 1.75.2.2 snj * wake device's PCI Express link L1a -> L0s
1694 1.75.2.2 snj */
1695 1.75.2.2 snj IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
1696 1.75.2.2 snj IWM_CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
1697 1.75.2.2 snj
1698 1.75.2.2 snj iwm_apm_config(sc);
1699 1.75.2.2 snj
1700 1.75.2.2 snj #if 0 /* not for 7k/8k */
1701 1.75.2.2 snj /* Configure analog phase-lock-loop before activating to D0A */
1702 1.75.2.2 snj if (trans->cfg->base_params->pll_cfg_val)
1703 1.75.2.2 snj IWM_SETBITS(trans, IWM_CSR_ANA_PLL_CFG,
1704 1.75.2.2 snj trans->cfg->base_params->pll_cfg_val);
1705 1.75.2.2 snj #endif
1706 1.75.2.2 snj
1707 1.75.2.2 snj /*
1708 1.75.2.2 snj * Set "initialization complete" bit to move adapter from
1709 1.75.2.2 snj * D0U* --> D0A* (powered-up active) state.
1710 1.75.2.2 snj */
1711 1.75.2.2 snj IWM_SETBITS(sc, IWM_CSR_GP_CNTRL, IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1712 1.75.2.2 snj
1713 1.75.2.2 snj /*
1714 1.75.2.2 snj * Wait for clock stabilization; once stabilized, access to
1715 1.75.2.2 snj * device-internal resources is supported, e.g. iwm_write_prph()
1716 1.75.2.2 snj * and accesses to uCode SRAM.
1717 1.75.2.2 snj */
1718 1.75.2.2 snj if (!iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
1719 1.75.2.2 snj IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
1720 1.75.2.2 snj IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000)) {
1721 1.75.2.2 snj aprint_error_dev(sc->sc_dev,
1722 1.75.2.2 snj "timeout waiting for clock stabilization\n");
1723 1.75.2.2 snj err = ETIMEDOUT;
1724 1.75.2.2 snj goto out;
1725 1.75.2.2 snj }
1726 1.75.2.2 snj
1727 1.75.2.2 snj if (sc->host_interrupt_operation_mode) {
1728 1.75.2.2 snj /*
1729 1.75.2.2 snj * This is a bit of an abuse: this workaround is needed only for
1730 1.75.2.2 snj * the 7260 / 3160, so we key it off host_interrupt_operation_mode
1731 1.75.2.2 snj * even though it is not actually related to that mode.
1732 1.75.2.2 snj *
1733 1.75.2.2 snj * Enable the oscillator to count wake-up time for L1 exit. This
1734 1.75.2.2 snj * consumes slightly more power (100uA), but allows us to be sure
1735 1.75.2.2 snj * that we wake up from L1 on time.
1736 1.75.2.2 snj *
1737 1.75.2.2 snj * This looks weird: read the same register twice, discard the
1738 1.75.2.2 snj * value, set a bit, and then read that same register twice again,
1739 1.75.2.2 snj * just to discard the value. But that's the way the hardware
1740 1.75.2.2 snj * seems to like it.
1741 1.75.2.2 snj */
1742 1.75.2.2 snj iwm_read_prph(sc, IWM_OSC_CLK);
1743 1.75.2.2 snj iwm_read_prph(sc, IWM_OSC_CLK);
1744 1.75.2.2 snj iwm_set_bits_prph(sc, IWM_OSC_CLK, IWM_OSC_CLK_FORCE_CONTROL);
1745 1.75.2.2 snj iwm_read_prph(sc, IWM_OSC_CLK);
1746 1.75.2.2 snj iwm_read_prph(sc, IWM_OSC_CLK);
1747 1.75.2.2 snj }
1748 1.75.2.2 snj
1749 1.75.2.2 snj /*
1750 1.75.2.2 snj * Enable DMA clock and wait for it to stabilize.
1751 1.75.2.2 snj *
1752 1.75.2.2 snj * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
1753 1.75.2.2 snj * do not disable clocks. This preserves any hardware bits already
1754 1.75.2.2 snj * set by default in "CLK_CTRL_REG" after reset.
1755 1.75.2.2 snj */
1756 1.75.2.2 snj if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
1757 1.75.2.2 snj iwm_write_prph(sc, IWM_APMG_CLK_EN_REG,
1758 1.75.2.2 snj IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1759 1.75.2.2 snj DELAY(20);
1760 1.75.2.2 snj
1761 1.75.2.2 snj /* Disable L1-Active */
1762 1.75.2.2 snj iwm_set_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
1763 1.75.2.2 snj IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1764 1.75.2.2 snj
1765 1.75.2.2 snj /* Clear the interrupt in APMG if the NIC is in RFKILL */
1766 1.75.2.2 snj iwm_write_prph(sc, IWM_APMG_RTC_INT_STT_REG,
1767 1.75.2.2 snj IWM_APMG_RTC_INT_STT_RFKILL);
1768 1.75.2.2 snj }
1769 1.75.2.2 snj out:
1770 1.75.2.2 snj if (err)
1771 1.75.2.2 snj aprint_error_dev(sc->sc_dev, "apm init error %d\n", err);
1772 1.75.2.2 snj return err;
1773 1.75.2.2 snj }
1774 1.75.2.2 snj
1775 1.75.2.2 snj static void
1776 1.75.2.2 snj iwm_apm_stop(struct iwm_softc *sc)
1777 1.75.2.2 snj {
1778 1.75.2.2 snj /* stop device's busmaster DMA activity */
1779 1.75.2.2 snj IWM_SETBITS(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_STOP_MASTER);
1780 1.75.2.2 snj
1781 1.75.2.2 snj if (!iwm_poll_bit(sc, IWM_CSR_RESET,
1782 1.75.2.2 snj IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED,
1783 1.75.2.2 snj IWM_CSR_RESET_REG_FLAG_MASTER_DISABLED, 100))
1784 1.75.2.2 snj aprint_error_dev(sc->sc_dev, "timeout waiting for master\n");
1785 1.75.2.2 snj DPRINTF(("iwm apm stop\n"));
1786 1.75.2.2 snj }
1787 1.75.2.2 snj
1788 1.75.2.2 snj static int
1789 1.75.2.2 snj iwm_start_hw(struct iwm_softc *sc)
1790 1.75.2.2 snj {
1791 1.75.2.2 snj int err;
1792 1.75.2.2 snj
1793 1.75.2.2 snj err = iwm_prepare_card_hw(sc);
1794 1.75.2.2 snj if (err)
1795 1.75.2.2 snj return err;
1796 1.75.2.2 snj
1797 1.75.2.2 snj /* Reset the entire device */
1798 1.75.2.2 snj IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
1799 1.75.2.2 snj DELAY(10);
1800 1.75.2.2 snj
1801 1.75.2.2 snj err = iwm_apm_init(sc);
1802 1.75.2.2 snj if (err)
1803 1.75.2.2 snj return err;
1804 1.75.2.2 snj
1805 1.75.2.2 snj iwm_enable_rfkill_int(sc);
1806 1.75.2.2 snj iwm_check_rfkill(sc);
1807 1.75.2.2 snj
1808 1.75.2.2 snj return 0;
1809 1.75.2.2 snj }
1810 1.75.2.2 snj
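/*
 * Stop the device: mask interrupts, halt the TX scheduler and the FH
 * DMA channels, reset the RX and TX rings, power down the busmaster
 * DMA clocks on 7000-family devices, put the APM into low-power state
 * and reset the on-board processor, keeping only the RF kill interrupt
 * enabled.
 */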
1811 1.75.2.2 snj static void
1812 1.75.2.2 snj iwm_stop_device(struct iwm_softc *sc)
1813 1.75.2.2 snj {
1814 1.75.2.2 snj int chnl, ntries;
1815 1.75.2.2 snj int qid;
1816 1.75.2.2 snj
1817 1.75.2.2 snj iwm_disable_interrupts(sc);
1818 1.75.2.2 snj sc->sc_flags &= ~IWM_FLAG_USE_ICT;
1819 1.75.2.2 snj
1820 1.75.2.2 snj /* Deactivate TX scheduler. */
1821 1.75.2.2 snj iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1822 1.75.2.2 snj
1823 1.75.2.2 snj /* Stop all DMA channels. */
1824 1.75.2.2 snj if (iwm_nic_lock(sc)) {
1825 1.75.2.2 snj for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1826 1.75.2.2 snj IWM_WRITE(sc,
1827 1.75.2.2 snj IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
1828 1.75.2.2 snj for (ntries = 0; ntries < 200; ntries++) {
1829 1.75.2.2 snj uint32_t r;
1830 1.75.2.2 snj
1831 1.75.2.2 snj r = IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG);
1832 1.75.2.2 snj if (r & IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(
1833 1.75.2.2 snj chnl))
1834 1.75.2.2 snj break;
1835 1.75.2.2 snj DELAY(20);
1836 1.75.2.2 snj }
1837 1.75.2.2 snj }
1838 1.75.2.2 snj iwm_nic_unlock(sc);
1839 1.75.2.2 snj }
1840 1.75.2.2 snj iwm_disable_rx_dma(sc);
1841 1.75.2.2 snj
1842 1.75.2.2 snj iwm_reset_rx_ring(sc, &sc->rxq);
1843 1.75.2.2 snj
1844 1.75.2.2 snj for (qid = 0; qid < __arraycount(sc->txq); qid++)
1845 1.75.2.2 snj iwm_reset_tx_ring(sc, &sc->txq[qid]);
1846 1.75.2.2 snj
1847 1.75.2.2 snj if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
1848 1.75.2.2 snj /* Power-down device's busmaster DMA clocks */
1849 1.75.2.2 snj if (iwm_nic_lock(sc)) {
1850 1.75.2.2 snj iwm_write_prph(sc, IWM_APMG_CLK_DIS_REG,
1851 1.75.2.2 snj IWM_APMG_CLK_VAL_DMA_CLK_RQT);
1852 1.75.2.2 snj DELAY(5);
1853 1.75.2.2 snj iwm_nic_unlock(sc);
1854 1.75.2.2 snj }
1855 1.75.2.2 snj }
1856 1.75.2.2 snj
1857 1.75.2.2 snj /* Make sure (redundantly) that we've released our request to stay awake */
1858 1.75.2.2 snj IWM_CLRBITS(sc, IWM_CSR_GP_CNTRL,
1859 1.75.2.2 snj IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1860 1.75.2.2 snj
1861 1.75.2.2 snj /* Stop the device, and put it in low power state */
1862 1.75.2.2 snj iwm_apm_stop(sc);
1863 1.75.2.2 snj
1864 1.75.2.2 snj /*
1865 1.75.2.2 snj * Upon stop, the APM issues an interrupt if HW RF kill is set.
1866 1.75.2.2 snj * Clear the interrupt again here.
1867 1.75.2.2 snj */
1868 1.75.2.2 snj iwm_disable_interrupts(sc);
1869 1.75.2.2 snj
1870 1.75.2.2 snj /* Reset the on-board processor. */
1871 1.75.2.2 snj IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
1872 1.75.2.2 snj
1873 1.75.2.2 snj /* Even though we stop the HW we still want the RF kill interrupt. */
1874 1.75.2.2 snj iwm_enable_rfkill_int(sc);
1875 1.75.2.2 snj iwm_check_rfkill(sc);
1876 1.75.2.2 snj }
1877 1.75.2.2 snj
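/*
 * Program the HW interface configuration register with the MAC
 * step/dash taken from the hardware revision and the radio
 * type/step/dash taken from the firmware PHY configuration.
 */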
1878 1.75.2.2 snj static void
1879 1.75.2.2 snj iwm_nic_config(struct iwm_softc *sc)
1880 1.75.2.2 snj {
1881 1.75.2.2 snj uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
1882 1.75.2.2 snj uint32_t reg_val = 0;
1883 1.75.2.2 snj
1884 1.75.2.2 snj radio_cfg_type = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
1885 1.75.2.2 snj IWM_FW_PHY_CFG_RADIO_TYPE_POS;
1886 1.75.2.2 snj radio_cfg_step = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
1887 1.75.2.2 snj IWM_FW_PHY_CFG_RADIO_STEP_POS;
1888 1.75.2.2 snj radio_cfg_dash = (sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
1889 1.75.2.2 snj IWM_FW_PHY_CFG_RADIO_DASH_POS;
1890 1.75.2.2 snj
1891 1.75.2.2 snj reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
1892 1.75.2.2 snj IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP;
1893 1.75.2.2 snj reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
1894 1.75.2.2 snj IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH;
1895 1.75.2.2 snj
1896 1.75.2.2 snj /* radio configuration */
1897 1.75.2.2 snj reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
1898 1.75.2.2 snj reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
1899 1.75.2.2 snj reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
1900 1.75.2.2 snj
1901 1.75.2.2 snj IWM_WRITE(sc, IWM_CSR_HW_IF_CONFIG_REG, reg_val);
1902 1.75.2.2 snj
1903 1.75.2.2 snj DPRINTF(("Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
1904 1.75.2.2 snj radio_cfg_step, radio_cfg_dash));
1905 1.75.2.2 snj
1906 1.75.2.2 snj /*
1907 1.75.2.2 snj * W/A : NIC is stuck in a reset state after Early PCIe power off
1908 1.75.2.2 snj * (PCIe power is lost before PERST# is asserted), causing the ME FW
1909 1.75.2.2 snj * to lose ownership and not be able to obtain it back.
1910 1.75.2.2 snj */
1911 1.75.2.2 snj if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
1912 1.75.2.2 snj iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
1913 1.75.2.2 snj IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
1914 1.75.2.2 snj ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
1915 1.75.2.2 snj }
1916 1.75.2.2 snj }
1917 1.75.2.2 snj
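/*
 * Initialize RX DMA: clear the RX status area, reset the channel 0
 * pointers, program the descriptor and status base addresses, and
 * enable the RX channel with 4KB receive buffers and interrupt
 * coalescing.
 */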
1918 1.75.2.2 snj static int
1919 1.75.2.2 snj iwm_nic_rx_init(struct iwm_softc *sc)
1920 1.75.2.2 snj {
1921 1.75.2.2 snj if (!iwm_nic_lock(sc))
1922 1.75.2.2 snj return EBUSY;
1923 1.75.2.2 snj
1924 1.75.2.2 snj memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1925 1.75.2.2 snj bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
1926 1.75.2.2 snj 0, sc->rxq.stat_dma.size,
1927 1.75.2.2 snj BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1928 1.75.2.2 snj
1929 1.75.2.2 snj iwm_disable_rx_dma(sc);
1930 1.75.2.2 snj IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
1931 1.75.2.2 snj IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
1932 1.75.2.2 snj IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RDPTR, 0);
1933 1.75.2.2 snj IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
1934 1.75.2.2 snj
1935 1.75.2.2 snj /* Set physical address of RX ring (256-byte aligned). */
1936 1.75.2.2 snj IWM_WRITE(sc,
1937 1.75.2.2 snj IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG, sc->rxq.desc_dma.paddr >> 8);
1938 1.75.2.2 snj
1939 1.75.2.2 snj /* Set physical address of RX status (16-byte aligned). */
1940 1.75.2.2 snj IWM_WRITE(sc,
1941 1.75.2.2 snj IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);
1942 1.75.2.2 snj
1943 1.75.2.2 snj /* Enable RX. */
1944 1.75.2.2 snj IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
1945 1.75.2.2 snj IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
1946 1.75.2.2 snj IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY | /* HW bug */
1947 1.75.2.2 snj IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
1948 1.75.2.2 snj IWM_FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
1949 1.75.2.2 snj IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
1950 1.75.2.2 snj (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
1951 1.75.2.2 snj IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);
1952 1.75.2.2 snj
1953 1.75.2.2 snj IWM_WRITE_1(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_TIMEOUT_DEF);
1954 1.75.2.2 snj
1955 1.75.2.2 snj /* W/A for interrupt coalescing bug in 7260 and 3160 */
1956 1.75.2.2 snj if (sc->host_interrupt_operation_mode)
1957 1.75.2.2 snj IWM_SETBITS(sc, IWM_CSR_INT_COALESCING, IWM_HOST_INT_OPER_MODE);
1958 1.75.2.2 snj
1959 1.75.2.2 snj /*
1960 1.75.2.2 snj * This value should initially be 0 (before preparing any RBs),
1961 1.75.2.2 snj * and should be 8 after preparing the first 8 RBs (for example).
1962 1.75.2.2 snj */
1963 1.75.2.2 snj IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);
1964 1.75.2.2 snj
1965 1.75.2.2 snj iwm_nic_unlock(sc);
1966 1.75.2.2 snj
1967 1.75.2.2 snj return 0;
1968 1.75.2.2 snj }
1969 1.75.2.2 snj
1970 1.75.2.2 snj static int
1971 1.75.2.2 snj iwm_nic_tx_init(struct iwm_softc *sc)
1972 1.75.2.2 snj {
1973 1.75.2.2 snj int qid;
1974 1.75.2.2 snj
1975 1.75.2.2 snj if (!iwm_nic_lock(sc))
1976 1.75.2.2 snj return EBUSY;
1977 1.75.2.2 snj
1978 1.75.2.2 snj /* Deactivate TX scheduler. */
1979 1.75.2.2 snj iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
1980 1.75.2.2 snj
1981 1.75.2.2 snj /* Set physical address of "keep warm" page (16-byte aligned). */
1982 1.75.2.2 snj IWM_WRITE(sc, IWM_FH_KW_MEM_ADDR_REG, sc->kw_dma.paddr >> 4);
1983 1.75.2.2 snj
1984 1.75.2.2 snj for (qid = 0; qid < __arraycount(sc->txq); qid++) {
1985 1.75.2.2 snj struct iwm_tx_ring *txq = &sc->txq[qid];
1986 1.75.2.2 snj
1987 1.75.2.2 snj /* Set physical address of TX ring (256-byte aligned). */
1988 1.75.2.2 snj IWM_WRITE(sc, IWM_FH_MEM_CBBC_QUEUE(qid),
1989 1.75.2.2 snj txq->desc_dma.paddr >> 8);
1990 1.75.2.2 snj DPRINTF(("loading ring %d descriptors (%p) at %"PRIxMAX"\n",
1991 1.75.2.2 snj qid, txq->desc, (uintmax_t)(txq->desc_dma.paddr >> 8)));
1992 1.75.2.2 snj }
1993 1.75.2.2 snj
1994 1.75.2.2 snj iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);
1995 1.75.2.2 snj
1996 1.75.2.2 snj iwm_nic_unlock(sc);
1997 1.75.2.2 snj
1998 1.75.2.2 snj return 0;
1999 1.75.2.2 snj }
2000 1.75.2.2 snj
2001 1.75.2.2 snj static int
2002 1.75.2.2 snj iwm_nic_init(struct iwm_softc *sc)
2003 1.75.2.2 snj {
2004 1.75.2.2 snj int err;
2005 1.75.2.2 snj
2006 1.75.2.2 snj iwm_apm_init(sc);
2007 1.75.2.2 snj if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
2008 1.75.2.2 snj iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
2009 1.75.2.2 snj IWM_APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
2010 1.75.2.2 snj ~IWM_APMG_PS_CTRL_MSK_PWR_SRC);
2011 1.75.2.2 snj }
2012 1.75.2.2 snj
2013 1.75.2.2 snj iwm_nic_config(sc);
2014 1.75.2.2 snj
2015 1.75.2.2 snj err = iwm_nic_rx_init(sc);
2016 1.75.2.2 snj if (err)
2017 1.75.2.2 snj return err;
2018 1.75.2.2 snj
2019 1.75.2.2 snj err = iwm_nic_tx_init(sc);
2020 1.75.2.2 snj if (err)
2021 1.75.2.2 snj return err;
2022 1.75.2.2 snj
2023 1.75.2.2 snj DPRINTF(("shadow registers enabled\n"));
2024 1.75.2.2 snj IWM_SETBITS(sc, IWM_CSR_MAC_SHADOW_REG_CTRL, 0x800fffff);
2025 1.75.2.2 snj
2026 1.75.2.2 snj return 0;
2027 1.75.2.2 snj }
2028 1.75.2.2 snj
2029 1.75.2.2 snj static const uint8_t iwm_ac_to_tx_fifo[] = {
2030 1.75.2.2 snj IWM_TX_FIFO_VO,
2031 1.75.2.2 snj IWM_TX_FIFO_VI,
2032 1.75.2.2 snj IWM_TX_FIFO_BE,
2033 1.75.2.2 snj IWM_TX_FIFO_BK,
2034 1.75.2.2 snj };
2035 1.75.2.2 snj
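/*
 * Enable a TX queue. The command queue is configured directly through
 * the scheduler (SCD) peripheral registers and SRAM; all other queues
 * are configured by sending an IWM_SCD_QUEUE_CFG host command to the
 * firmware.
 */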
2036 1.75.2.2 snj static int
2037 1.75.2.2 snj iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
2038 1.75.2.2 snj {
2039 1.75.2.2 snj if (!iwm_nic_lock(sc)) {
2040 1.75.2.2 snj DPRINTF(("%s: cannot enable txq %d\n", DEVNAME(sc), qid));
2041 1.75.2.2 snj return EBUSY;
2042 1.75.2.2 snj }
2043 1.75.2.2 snj
2044 1.75.2.2 snj IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
2045 1.75.2.2 snj
2046 1.75.2.2 snj if (qid == IWM_CMD_QUEUE) {
2047 1.75.2.2 snj iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
2048 1.75.2.2 snj (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
2049 1.75.2.2 snj | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
2050 1.75.2.2 snj
2051 1.75.2.2 snj iwm_nic_unlock(sc);
2052 1.75.2.2 snj
2053 1.75.2.2 snj iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));
2054 1.75.2.2 snj
2055 1.75.2.2 snj if (!iwm_nic_lock(sc))
2056 1.75.2.2 snj return EBUSY;
2057 1.75.2.2 snj iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
2058 1.75.2.2 snj iwm_nic_unlock(sc);
2059 1.75.2.2 snj
2060 1.75.2.2 snj iwm_write_mem32(sc,
2061 1.75.2.2 snj sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
2062 1.75.2.2 snj
2063 1.75.2.2 snj /* Set scheduler window size and frame limit. */
2064 1.75.2.2 snj iwm_write_mem32(sc,
2065 1.75.2.2 snj sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
2066 1.75.2.2 snj sizeof(uint32_t),
2067 1.75.2.2 snj ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
2068 1.75.2.2 snj IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
2069 1.75.2.2 snj ((IWM_FRAME_LIMIT
2070 1.75.2.2 snj << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
2071 1.75.2.2 snj IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
2072 1.75.2.2 snj
2073 1.75.2.2 snj if (!iwm_nic_lock(sc))
2074 1.75.2.2 snj return EBUSY;
2075 1.75.2.2 snj iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
2076 1.75.2.2 snj (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
2077 1.75.2.2 snj (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
2078 1.75.2.2 snj (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
2079 1.75.2.2 snj IWM_SCD_QUEUE_STTS_REG_MSK);
2080 1.75.2.2 snj } else {
2081 1.75.2.2 snj struct iwm_scd_txq_cfg_cmd cmd;
2082 1.75.2.2 snj int err;
2083 1.75.2.2 snj
2084 1.75.2.2 snj iwm_nic_unlock(sc);
2085 1.75.2.2 snj
2086 1.75.2.2 snj memset(&cmd, 0, sizeof(cmd));
2087 1.75.2.2 snj cmd.scd_queue = qid;
2088 1.75.2.2 snj cmd.enable = 1;
2089 1.75.2.2 snj cmd.sta_id = sta_id;
2090 1.75.2.2 snj cmd.tx_fifo = fifo;
2091 1.75.2.2 snj cmd.aggregate = 0;
2092 1.75.2.2 snj cmd.window = IWM_FRAME_LIMIT;
2093 1.75.2.2 snj
2094 1.75.2.2 snj err = iwm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, 0, sizeof(cmd),
2095 1.75.2.2 snj &cmd);
2096 1.75.2.2 snj if (err)
2097 1.75.2.2 snj return err;
2098 1.75.2.2 snj
2099 1.75.2.2 snj if (!iwm_nic_lock(sc))
2100 1.75.2.2 snj return EBUSY;
2101 1.75.2.2 snj }
2102 1.75.2.2 snj
2103 1.75.2.2 snj iwm_write_prph(sc, IWM_SCD_EN_CTRL,
2104 1.75.2.2 snj iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);
2105 1.75.2.2 snj
2106 1.75.2.2 snj iwm_nic_unlock(sc);
2107 1.75.2.2 snj
2108 1.75.2.2 snj DPRINTF(("enabled txq %d FIFO %d\n", qid, fifo));
2109 1.75.2.2 snj
2110 1.75.2.2 snj return 0;
2111 1.75.2.2 snj }
2112 1.75.2.2 snj
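/*
 * Finish setup after the firmware's "alive" notification: reset the
 * ICT table, clear the TX scheduler context in SRAM, point the
 * scheduler at its DRAM area, enable the command queue and the FH TX
 * DMA channels, and re-enable L1-Active where applicable.
 */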
2113 1.75.2.2 snj static int
2114 1.75.2.2 snj iwm_post_alive(struct iwm_softc *sc)
2115 1.75.2.2 snj {
2116 1.75.2.2 snj int nwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
2117 1.75.2.2 snj IWM_SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(uint32_t);
2118 1.75.2.2 snj int err, chnl;
2119 1.75.2.2 snj uint32_t base;
2120 1.75.2.2 snj
2121 1.75.2.2 snj if (!iwm_nic_lock(sc))
2122 1.75.2.2 snj return EBUSY;
2123 1.75.2.2 snj
2124 1.75.2.2 snj base = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
2125 1.75.2.2 snj if (sc->sched_base != base) {
2126 1.75.2.2 snj DPRINTF(("%s: sched addr mismatch: 0x%08x != 0x%08x\n",
2127 1.75.2.2 snj DEVNAME(sc), sc->sched_base, base));
2128 1.75.2.2 snj sc->sched_base = base;
2129 1.75.2.2 snj }
2130 1.75.2.2 snj
2131 1.75.2.2 snj iwm_nic_unlock(sc);
2132 1.75.2.2 snj
2133 1.75.2.2 snj iwm_ict_reset(sc);
2134 1.75.2.2 snj
2135 1.75.2.2 snj /* Clear TX scheduler state in SRAM. */
2136 1.75.2.2 snj err = iwm_write_mem(sc,
2137 1.75.2.2 snj sc->sched_base + IWM_SCD_CONTEXT_MEM_LOWER_BOUND, NULL, nwords);
2138 1.75.2.2 snj if (err)
2139 1.75.2.2 snj return err;
2140 1.75.2.2 snj
2141 1.75.2.2 snj if (!iwm_nic_lock(sc))
2142 1.75.2.2 snj return EBUSY;
2143 1.75.2.2 snj
2144 1.75.2.2 snj /* Set physical address of TX scheduler rings (1KB aligned). */
2145 1.75.2.2 snj iwm_write_prph(sc, IWM_SCD_DRAM_BASE_ADDR, sc->sched_dma.paddr >> 10);
2146 1.75.2.2 snj
2147 1.75.2.2 snj iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);
2148 1.75.2.2 snj
2149 1.75.2.2 snj iwm_nic_unlock(sc);
2150 1.75.2.2 snj
2151 1.75.2.2 snj /* enable command channel */
2152 1.75.2.2 snj err = iwm_enable_txq(sc, 0 /* unused */, IWM_CMD_QUEUE, 7);
2153 1.75.2.2 snj if (err)
2154 1.75.2.2 snj return err;
2155 1.75.2.2 snj
2156 1.75.2.2 snj if (!iwm_nic_lock(sc))
2157 1.75.2.2 snj return EBUSY;
2158 1.75.2.2 snj
2159 1.75.2.2 snj /* Activate TX scheduler. */
2160 1.75.2.2 snj iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
2161 1.75.2.2 snj
2162 1.75.2.2 snj /* Enable DMA channels. */
2163 1.75.2.2 snj for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
2164 1.75.2.2 snj IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
2165 1.75.2.2 snj IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
2166 1.75.2.2 snj IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
2167 1.75.2.2 snj }
2168 1.75.2.2 snj
2169 1.75.2.2 snj IWM_SETBITS(sc, IWM_FH_TX_CHICKEN_BITS_REG,
2170 1.75.2.2 snj IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
2171 1.75.2.2 snj
2172 1.75.2.2 snj /* Enable L1-Active */
2173 1.75.2.2 snj if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000) {
2174 1.75.2.2 snj iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
2175 1.75.2.2 snj IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
2176 1.75.2.2 snj }
2177 1.75.2.2 snj
2178 1.75.2.2 snj iwm_nic_unlock(sc);
2179 1.75.2.2 snj
2180 1.75.2.2 snj return 0;
2181 1.75.2.2 snj }
2182 1.75.2.2 snj
2183 1.75.2.2 snj static struct iwm_phy_db_entry *
2184 1.75.2.2 snj iwm_phy_db_get_section(struct iwm_softc *sc, enum iwm_phy_db_section_type type,
2185 1.75.2.2 snj uint16_t chg_id)
2186 1.75.2.2 snj {
2187 1.75.2.2 snj struct iwm_phy_db *phy_db = &sc->sc_phy_db;
2188 1.75.2.2 snj
2189 1.75.2.2 snj if (type >= IWM_PHY_DB_MAX)
2190 1.75.2.2 snj return NULL;
2191 1.75.2.2 snj
2192 1.75.2.2 snj switch (type) {
2193 1.75.2.2 snj case IWM_PHY_DB_CFG:
2194 1.75.2.2 snj return &phy_db->cfg;
2195 1.75.2.2 snj case IWM_PHY_DB_CALIB_NCH:
2196 1.75.2.2 snj return &phy_db->calib_nch;
2197 1.75.2.2 snj case IWM_PHY_DB_CALIB_CHG_PAPD:
2198 1.75.2.2 snj if (chg_id >= IWM_NUM_PAPD_CH_GROUPS)
2199 1.75.2.2 snj return NULL;
2200 1.75.2.2 snj return &phy_db->calib_ch_group_papd[chg_id];
2201 1.75.2.2 snj case IWM_PHY_DB_CALIB_CHG_TXP:
2202 1.75.2.2 snj if (chg_id >= IWM_NUM_TXP_CH_GROUPS)
2203 1.75.2.2 snj return NULL;
2204 1.75.2.2 snj return &phy_db->calib_ch_group_txp[chg_id];
2205 1.75.2.2 snj default:
2206 1.75.2.2 snj return NULL;
2207 1.75.2.2 snj }
2208 1.75.2.2 snj return NULL;
2209 1.75.2.2 snj }
2210 1.75.2.2 snj
2211 1.75.2.2 snj static int
2212 1.75.2.2 snj iwm_phy_db_set_section(struct iwm_softc *sc,
2213 1.75.2.2 snj struct iwm_calib_res_notif_phy_db *phy_db_notif, uint16_t size)
2214 1.75.2.2 snj {
2215 1.75.2.2 snj struct iwm_phy_db_entry *entry;
2216 1.75.2.2 snj enum iwm_phy_db_section_type type = le16toh(phy_db_notif->type);
2217 1.75.2.2 snj uint16_t chg_id = 0;
2218 1.75.2.2 snj
2219 1.75.2.2 snj if (type == IWM_PHY_DB_CALIB_CHG_PAPD ||
2220 1.75.2.2 snj type == IWM_PHY_DB_CALIB_CHG_TXP)
2221 1.75.2.2 snj chg_id = le16toh(*(uint16_t *)phy_db_notif->data);
2222 1.75.2.2 snj
2223 1.75.2.2 snj entry = iwm_phy_db_get_section(sc, type, chg_id);
2224 1.75.2.2 snj if (!entry)
2225 1.75.2.2 snj return EINVAL;
2226 1.75.2.2 snj
2227 1.75.2.2 snj if (entry->data)
2228 1.75.2.2 snj kmem_intr_free(entry->data, entry->size);
2229 1.75.2.2 snj entry->data = kmem_intr_alloc(size, KM_NOSLEEP);
2230 1.75.2.2 snj if (!entry->data) {
2231 1.75.2.2 snj entry->size = 0;
2232 1.75.2.2 snj return ENOMEM;
2233 1.75.2.2 snj }
2234 1.75.2.2 snj memcpy(entry->data, phy_db_notif->data, size);
2235 1.75.2.2 snj entry->size = size;
2236 1.75.2.2 snj
2237 1.75.2.2 snj DPRINTFN(10, ("%s(%d): [PHYDB]SET: Type %d, Size: %d, data: %p\n",
2238 1.75.2.2 snj __func__, __LINE__, type, size, entry->data));
2239 1.75.2.2 snj
2240 1.75.2.2 snj return 0;
2241 1.75.2.2 snj }
2242 1.75.2.2 snj
2243 1.75.2.2 snj static int
2244 1.75.2.2 snj iwm_is_valid_channel(uint16_t ch_id)
2245 1.75.2.2 snj {
2246 1.75.2.2 snj if (ch_id <= 14 ||
2247 1.75.2.2 snj (36 <= ch_id && ch_id <= 64 && ch_id % 4 == 0) ||
2248 1.75.2.2 snj (100 <= ch_id && ch_id <= 140 && ch_id % 4 == 0) ||
2249 1.75.2.2 snj (145 <= ch_id && ch_id <= 165 && ch_id % 4 == 1))
2250 1.75.2.2 snj return 1;
2251 1.75.2.2 snj return 0;
2252 1.75.2.2 snj }
2253 1.75.2.2 snj
2254 1.75.2.2 snj static uint8_t
2255 1.75.2.2 snj iwm_ch_id_to_ch_index(uint16_t ch_id)
2256 1.75.2.2 snj {
2257 1.75.2.2 snj if (!iwm_is_valid_channel(ch_id))
2258 1.75.2.2 snj return 0xff;
2259 1.75.2.2 snj
2260 1.75.2.2 snj if (ch_id <= 14)
2261 1.75.2.2 snj return ch_id - 1;
2262 1.75.2.2 snj if (ch_id <= 64)
2263 1.75.2.2 snj return (ch_id + 20) / 4;
2264 1.75.2.2 snj if (ch_id <= 140)
2265 1.75.2.2 snj return (ch_id - 12) / 4;
2266 1.75.2.2 snj return (ch_id - 13) / 4;
2267 1.75.2.2 snj }
2268 1.75.2.2 snj
2270 1.75.2.2 snj static uint16_t
2271 1.75.2.2 snj iwm_channel_id_to_papd(uint16_t ch_id)
2272 1.75.2.2 snj {
2273 1.75.2.2 snj if (!iwm_is_valid_channel(ch_id))
2274 1.75.2.2 snj return 0xff;
2275 1.75.2.2 snj
2276 1.75.2.2 snj if (1 <= ch_id && ch_id <= 14)
2277 1.75.2.2 snj return 0;
2278 1.75.2.2 snj if (36 <= ch_id && ch_id <= 64)
2279 1.75.2.2 snj return 1;
2280 1.75.2.2 snj if (100 <= ch_id && ch_id <= 140)
2281 1.75.2.2 snj return 2;
2282 1.75.2.2 snj return 3;
2283 1.75.2.2 snj }
2284 1.75.2.2 snj
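/*
 * Map a channel id to its TX power channel group by scanning the
 * calibration TXP groups for the first one whose maximum channel index
 * is not below the requested channel's index.
 */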
2285 1.75.2.2 snj static uint16_t
2286 1.75.2.2 snj iwm_channel_id_to_txp(struct iwm_softc *sc, uint16_t ch_id)
2287 1.75.2.2 snj {
2288 1.75.2.2 snj struct iwm_phy_db *phy_db = &sc->sc_phy_db;
2289 1.75.2.2 snj struct iwm_phy_db_chg_txp *txp_chg;
2290 1.75.2.2 snj int i;
2291 1.75.2.2 snj uint8_t ch_index = iwm_ch_id_to_ch_index(ch_id);
2292 1.75.2.2 snj
2293 1.75.2.2 snj if (ch_index == 0xff)
2294 1.75.2.2 snj return 0xff;
2295 1.75.2.2 snj
2296 1.75.2.2 snj for (i = 0; i < IWM_NUM_TXP_CH_GROUPS; i++) {
2297 1.75.2.2 snj txp_chg = (void *)phy_db->calib_ch_group_txp[i].data;
2298 1.75.2.2 snj if (!txp_chg)
2299 1.75.2.2 snj return 0xff;
2300 1.75.2.2 snj /*
2301 1.75.2.2 snj * Look for the first channel group whose max channel index is at
2302 1.75.2.2 snj * least as high as the requested channel's index.
2303 1.75.2.2 snj */
2304 1.75.2.2 snj if (le16toh(txp_chg->max_channel_idx) >= ch_index)
2305 1.75.2.2 snj return i;
2306 1.75.2.2 snj }
2307 1.75.2.2 snj return 0xff;
2308 1.75.2.2 snj }
2309 1.75.2.2 snj
2310 1.75.2.2 snj static int
2311 1.75.2.2 snj iwm_phy_db_get_section_data(struct iwm_softc *sc, uint32_t type, uint8_t **data,
2312 1.75.2.2 snj uint16_t *size, uint16_t ch_id)
2313 1.75.2.2 snj {
2314 1.75.2.2 snj struct iwm_phy_db_entry *entry;
2315 1.75.2.2 snj uint16_t ch_group_id = 0;
2316 1.75.2.2 snj
2317 1.75.2.2 snj if (type == IWM_PHY_DB_CALIB_CHG_PAPD)
2318 1.75.2.2 snj ch_group_id = iwm_channel_id_to_papd(ch_id);
2319 1.75.2.2 snj else if (type == IWM_PHY_DB_CALIB_CHG_TXP)
2320 1.75.2.2 snj ch_group_id = iwm_channel_id_to_txp(sc, ch_id);
2321 1.75.2.2 snj
2322 1.75.2.2 snj entry = iwm_phy_db_get_section(sc, type, ch_group_id);
2323 1.75.2.2 snj if (!entry)
2324 1.75.2.2 snj return EINVAL;
2325 1.75.2.2 snj
2326 1.75.2.2 snj *data = entry->data;
2327 1.75.2.2 snj *size = entry->size;
2328 1.75.2.2 snj
2329 1.75.2.2 snj DPRINTFN(10, ("%s(%d): [PHYDB] GET: Type %d , Size: %d\n",
2330 1.75.2.2 snj __func__, __LINE__, type, *size));
2331 1.75.2.2 snj
2332 1.75.2.2 snj return 0;
2333 1.75.2.2 snj }
2334 1.75.2.2 snj
2335 1.75.2.2 snj static int
2336 1.75.2.2 snj iwm_send_phy_db_cmd(struct iwm_softc *sc, uint16_t type, uint16_t length,
2337 1.75.2.2 snj void *data)
2338 1.75.2.2 snj {
2339 1.75.2.2 snj struct iwm_phy_db_cmd phy_db_cmd;
2340 1.75.2.2 snj struct iwm_host_cmd cmd = {
2341 1.75.2.2 snj .id = IWM_PHY_DB_CMD,
2342 1.75.2.2 snj .flags = IWM_CMD_ASYNC,
2343 1.75.2.2 snj };
2344 1.75.2.2 snj
2345 1.75.2.2 snj DPRINTFN(10, ("Sending PHY-DB hcmd of type %d, of length %d\n",
2346 1.75.2.2 snj type, length));
2347 1.75.2.2 snj
2348 1.75.2.2 snj phy_db_cmd.type = le16toh(type);
2349 1.75.2.2 snj phy_db_cmd.length = le16toh(length);
2350 1.75.2.2 snj
2351 1.75.2.2 snj cmd.data[0] = &phy_db_cmd;
2352 1.75.2.2 snj cmd.len[0] = sizeof(struct iwm_phy_db_cmd);
2353 1.75.2.2 snj cmd.data[1] = data;
2354 1.75.2.2 snj cmd.len[1] = length;
2355 1.75.2.2 snj
2356 1.75.2.2 snj return iwm_send_cmd(sc, &cmd);
2357 1.75.2.2 snj }
2358 1.75.2.2 snj
2359 1.75.2.2 snj static int
2360 1.75.2.2 snj iwm_phy_db_send_all_channel_groups(struct iwm_softc *sc,
2361 1.75.2.2 snj enum iwm_phy_db_section_type type, uint8_t max_ch_groups)
2362 1.75.2.2 snj {
2363 1.75.2.2 snj uint16_t i;
2364 1.75.2.2 snj int err;
2365 1.75.2.2 snj struct iwm_phy_db_entry *entry;
2366 1.75.2.2 snj
2367 1.75.2.2 snj /* Send all the channel-specific groups to operational fw */
2368 1.75.2.2 snj for (i = 0; i < max_ch_groups; i++) {
2369 1.75.2.2 snj entry = iwm_phy_db_get_section(sc, type, i);
2370 1.75.2.2 snj if (!entry)
2371 1.75.2.2 snj return EINVAL;
2372 1.75.2.2 snj
2373 1.75.2.2 snj if (!entry->size)
2374 1.75.2.2 snj continue;
2375 1.75.2.2 snj
2376 1.75.2.2 snj err = iwm_send_phy_db_cmd(sc, type, entry->size, entry->data);
2377 1.75.2.2 snj if (err) {
2378 1.75.2.2 snj DPRINTF(("%s: Can't SEND phy_db section %d (%d), "
2379 1.75.2.2 snj "err %d\n", DEVNAME(sc), type, i, err));
2380 1.75.2.2 snj return err;
2381 1.75.2.2 snj }
2382 1.75.2.2 snj
2383 1.75.2.2 snj DPRINTFN(10, ("%s: Sent PHY_DB HCMD, type = %d num = %d\n",
2384 1.75.2.2 snj DEVNAME(sc), type, i));
2385 1.75.2.2 snj
2386 1.75.2.2 snj DELAY(1000);
2387 1.75.2.2 snj }
2388 1.75.2.2 snj
2389 1.75.2.2 snj return 0;
2390 1.75.2.2 snj }
2391 1.75.2.2 snj
2392 1.75.2.2 snj static int
2393 1.75.2.2 snj iwm_send_phy_db_data(struct iwm_softc *sc)
2394 1.75.2.2 snj {
2395 1.75.2.2 snj uint8_t *data = NULL;
2396 1.75.2.2 snj uint16_t size = 0;
2397 1.75.2.2 snj int err;
2398 1.75.2.2 snj
2399 1.75.2.2 snj err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CFG, &data, &size, 0);
2400 1.75.2.2 snj if (err)
2401 1.75.2.2 snj return err;
2402 1.75.2.2 snj
2403 1.75.2.2 snj err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CFG, size, data);
2404 1.75.2.2 snj if (err)
2405 1.75.2.2 snj return err;
2406 1.75.2.2 snj
2407 1.75.2.2 snj err = iwm_phy_db_get_section_data(sc, IWM_PHY_DB_CALIB_NCH,
2408 1.75.2.2 snj &data, &size, 0);
2409 1.75.2.2 snj if (err)
2410 1.75.2.2 snj return err;
2411 1.75.2.2 snj
2412 1.75.2.2 snj err = iwm_send_phy_db_cmd(sc, IWM_PHY_DB_CALIB_NCH, size, data);
2413 1.75.2.2 snj if (err)
2414 1.75.2.2 snj return err;
2415 1.75.2.2 snj
2416 1.75.2.2 snj err = iwm_phy_db_send_all_channel_groups(sc,
2417 1.75.2.2 snj IWM_PHY_DB_CALIB_CHG_PAPD, IWM_NUM_PAPD_CH_GROUPS);
2418 1.75.2.2 snj if (err)
2419 1.75.2.2 snj return err;
2420 1.75.2.2 snj
2421 1.75.2.2 snj err = iwm_phy_db_send_all_channel_groups(sc,
2422 1.75.2.2 snj IWM_PHY_DB_CALIB_CHG_TXP, IWM_NUM_TXP_CH_GROUPS);
2423 1.75.2.2 snj if (err)
2424 1.75.2.2 snj return err;
2425 1.75.2.2 snj
2426 1.75.2.2 snj return 0;
2427 1.75.2.2 snj }
2428 1.75.2.2 snj
2429 1.75.2.2 snj /*
2430 1.75.2.2 snj * For the high priority TE use a time event type that has similar priority to
2431 1.75.2.2 snj * the FW's action scan priority.
2432 1.75.2.2 snj */
2433 1.75.2.2 snj #define IWM_ROC_TE_TYPE_NORMAL IWM_TE_P2P_DEVICE_DISCOVERABLE
2434 1.75.2.2 snj #define IWM_ROC_TE_TYPE_MGMT_TX IWM_TE_P2P_CLIENT_ASSOC
2435 1.75.2.2 snj
2436 1.75.2.2 snj /* used to convert from time event API v2 to v1 */
2437 1.75.2.2 snj #define IWM_TE_V2_DEP_POLICY_MSK (IWM_TE_V2_DEP_OTHER | IWM_TE_V2_DEP_TSF |\
2438 1.75.2.2 snj IWM_TE_V2_EVENT_SOCIOPATHIC)
2439 1.75.2.2 snj static inline uint16_t
2440 1.75.2.2 snj iwm_te_v2_get_notify(uint16_t policy)
2441 1.75.2.2 snj {
2442 1.75.2.2 snj return le16toh(policy) & IWM_TE_V2_NOTIF_MSK;
2443 1.75.2.2 snj }
2444 1.75.2.2 snj
2445 1.75.2.2 snj static inline uint16_t
2446 1.75.2.2 snj iwm_te_v2_get_dep_policy(uint16_t policy)
2447 1.75.2.2 snj {
2448 1.75.2.2 snj return (le16toh(policy) & IWM_TE_V2_DEP_POLICY_MSK) >>
2449 1.75.2.2 snj IWM_TE_V2_PLACEMENT_POS;
2450 1.75.2.2 snj }
2451 1.75.2.2 snj
2452 1.75.2.2 snj static inline uint16_t
2453 1.75.2.2 snj iwm_te_v2_get_absence(uint16_t policy)
2454 1.75.2.2 snj {
2455 1.75.2.2 snj return (le16toh(policy) & IWM_TE_V2_ABSENCE) >> IWM_TE_V2_ABSENCE_POS;
2456 1.75.2.2 snj }
2457 1.75.2.2 snj
2458 1.75.2.2 snj static void
2459 1.75.2.2 snj iwm_te_v2_to_v1(const struct iwm_time_event_cmd_v2 *cmd_v2,
2460 1.75.2.2 snj struct iwm_time_event_cmd_v1 *cmd_v1)
2461 1.75.2.2 snj {
2462 1.75.2.2 snj cmd_v1->id_and_color = cmd_v2->id_and_color;
2463 1.75.2.2 snj cmd_v1->action = cmd_v2->action;
2464 1.75.2.2 snj cmd_v1->id = cmd_v2->id;
2465 1.75.2.2 snj cmd_v1->apply_time = cmd_v2->apply_time;
2466 1.75.2.2 snj cmd_v1->max_delay = cmd_v2->max_delay;
2467 1.75.2.2 snj cmd_v1->depends_on = cmd_v2->depends_on;
2468 1.75.2.2 snj cmd_v1->interval = cmd_v2->interval;
2469 1.75.2.2 snj cmd_v1->duration = cmd_v2->duration;
2470 1.75.2.2 snj if (cmd_v2->repeat == IWM_TE_V2_REPEAT_ENDLESS)
2471 1.75.2.2 snj cmd_v1->repeat = htole32(IWM_TE_V1_REPEAT_ENDLESS);
2472 1.75.2.2 snj else
2473 1.75.2.2 snj cmd_v1->repeat = htole32(cmd_v2->repeat);
2474 1.75.2.2 snj cmd_v1->max_frags = htole32(cmd_v2->max_frags);
2475 1.75.2.2 snj cmd_v1->interval_reciprocal = 0; /* unused */
2476 1.75.2.2 snj
2477 1.75.2.2 snj cmd_v1->dep_policy = htole32(iwm_te_v2_get_dep_policy(cmd_v2->policy));
2478 1.75.2.2 snj cmd_v1->is_present = htole32(!iwm_te_v2_get_absence(cmd_v2->policy));
2479 1.75.2.2 snj cmd_v1->notify = htole32(iwm_te_v2_get_notify(cmd_v2->policy));
2480 1.75.2.2 snj }
2481 1.75.2.2 snj
2482 1.75.2.2 snj static int
2483 1.75.2.2 snj iwm_send_time_event_cmd(struct iwm_softc *sc,
2484 1.75.2.2 snj const struct iwm_time_event_cmd_v2 *cmd)
2485 1.75.2.2 snj {
2486 1.75.2.2 snj struct iwm_time_event_cmd_v1 cmd_v1;
2487 1.75.2.2 snj
2488 1.75.2.2 snj if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_TIME_EVENT_API_V2)
2489 1.75.2.2 snj return iwm_send_cmd_pdu(sc, IWM_TIME_EVENT_CMD, 0, sizeof(*cmd),
2490 1.75.2.2 snj cmd);
2491 1.75.2.2 snj
2492 1.75.2.2 snj iwm_te_v2_to_v1(cmd, &cmd_v1);
2493 1.75.2.2 snj return iwm_send_cmd_pdu(sc, IWM_TIME_EVENT_CMD, 0, sizeof(cmd_v1),
2494 1.75.2.2 snj &cmd_v1);
2495 1.75.2.2 snj }
2496 1.75.2.2 snj
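/*
 * Schedule a firmware time event of type IWM_TE_BSS_STA_AGGRESSIVE_ASSOC
 * to protect the association exchange with the given node; the event
 * starts immediately, lasts for the given duration and notifies the
 * host when it starts and ends.
 */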
2497 1.75.2.2 snj static void
2498 1.75.2.2 snj iwm_protect_session(struct iwm_softc *sc, struct iwm_node *in,
2499 1.75.2.2 snj uint32_t duration, uint32_t max_delay)
2500 1.75.2.2 snj {
2501 1.75.2.2 snj struct iwm_time_event_cmd_v2 time_cmd;
2502 1.75.2.2 snj
2503 1.75.2.2 snj memset(&time_cmd, 0, sizeof(time_cmd));
2504 1.75.2.2 snj
2505 1.75.2.2 snj time_cmd.action = htole32(IWM_FW_CTXT_ACTION_ADD);
2506 1.75.2.2 snj time_cmd.id_and_color =
2507 1.75.2.2 snj htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
2508 1.75.2.2 snj time_cmd.id = htole32(IWM_TE_BSS_STA_AGGRESSIVE_ASSOC);
2509 1.75.2.2 snj
2510 1.75.2.2 snj time_cmd.apply_time = htole32(0);
2511 1.75.2.2 snj
2512 1.75.2.2 snj time_cmd.max_frags = IWM_TE_V2_FRAG_NONE;
2513 1.75.2.2 snj time_cmd.max_delay = htole32(max_delay);
2514 1.75.2.2 snj /* TODO: why do we need interval = bi (beacon interval) if it is not periodic? */
2515 1.75.2.2 snj time_cmd.interval = htole32(1);
2516 1.75.2.2 snj time_cmd.duration = htole32(duration);
2517 1.75.2.2 snj time_cmd.repeat = 1;
2518 1.75.2.2 snj time_cmd.policy
2519 1.75.2.2 snj = htole16(IWM_TE_V2_NOTIF_HOST_EVENT_START |
2520 1.75.2.2 snj IWM_TE_V2_NOTIF_HOST_EVENT_END |
2521 1.75.2.2 snj IWM_T2_V2_START_IMMEDIATELY);
2522 1.75.2.2 snj
2523 1.75.2.2 snj iwm_send_time_event_cmd(sc, &time_cmd);
2524 1.75.2.2 snj }
2525 1.75.2.2 snj
2526 1.75.2.2 snj /*
2527 1.75.2.2 snj * NVM read access and content parsing. We do not support
2528 1.75.2.2 snj * external NVM or writing NVM.
2529 1.75.2.2 snj */
2530 1.75.2.2 snj
2531 1.75.2.2 snj /* list of NVM sections we are allowed/need to read */
2532 1.75.2.2 snj static const int iwm_nvm_to_read[] = {
2533 1.75.2.2 snj IWM_NVM_SECTION_TYPE_HW,
2534 1.75.2.2 snj IWM_NVM_SECTION_TYPE_SW,
2535 1.75.2.2 snj IWM_NVM_SECTION_TYPE_REGULATORY,
2536 1.75.2.2 snj IWM_NVM_SECTION_TYPE_CALIBRATION,
2537 1.75.2.2 snj IWM_NVM_SECTION_TYPE_PRODUCTION,
2538 1.75.2.2 snj IWM_NVM_SECTION_TYPE_HW_8000,
2539 1.75.2.2 snj IWM_NVM_SECTION_TYPE_MAC_OVERRIDE,
2540 1.75.2.2 snj IWM_NVM_SECTION_TYPE_PHY_SKU,
2541 1.75.2.2 snj };
2542 1.75.2.2 snj
2543 1.75.2.2 snj /* Default NVM size to read */
2544 1.75.2.2 snj #define IWM_NVM_DEFAULT_CHUNK_SIZE (2*1024)
2545 1.75.2.2 snj #define IWM_MAX_NVM_SECTION_SIZE_7000 (16 * 512 * sizeof(uint16_t)) /*16 KB*/
2546 1.75.2.2 snj #define IWM_MAX_NVM_SECTION_SIZE_8000 (32 * 512 * sizeof(uint16_t)) /*32 KB*/
2547 1.75.2.2 snj
2548 1.75.2.2 snj #define IWM_NVM_WRITE_OPCODE 1
2549 1.75.2.2 snj #define IWM_NVM_READ_OPCODE 0
2550 1.75.2.2 snj
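/*
 * Read one chunk of an NVM section with an IWM_NVM_ACCESS_CMD host
 * command and copy the response payload into the caller's buffer at
 * the given offset; the number of bytes read is returned in *len.
 */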
2551 1.75.2.2 snj static int
2552 1.75.2.2 snj iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section, uint16_t offset,
2553 1.75.2.2 snj uint16_t length, uint8_t *data, uint16_t *len)
2554 1.75.2.2 snj {
2556 1.75.2.2 snj struct iwm_nvm_access_cmd nvm_access_cmd = {
2557 1.75.2.2 snj .offset = htole16(offset),
2558 1.75.2.2 snj .length = htole16(length),
2559 1.75.2.2 snj .type = htole16(section),
2560 1.75.2.2 snj .op_code = IWM_NVM_READ_OPCODE,
2561 1.75.2.2 snj };
2562 1.75.2.2 snj struct iwm_nvm_access_resp *nvm_resp;
2563 1.75.2.2 snj struct iwm_rx_packet *pkt;
2564 1.75.2.2 snj struct iwm_host_cmd cmd = {
2565 1.75.2.2 snj .id = IWM_NVM_ACCESS_CMD,
2566 1.75.2.2 snj .flags = (IWM_CMD_WANT_SKB | IWM_CMD_SEND_IN_RFKILL),
2567 1.75.2.2 snj .data = { &nvm_access_cmd, },
2568 1.75.2.2 snj };
2569 1.75.2.2 snj int err, offset_read;
2570 1.75.2.2 snj size_t bytes_read;
2571 1.75.2.2 snj uint8_t *resp_data;
2572 1.75.2.2 snj
2573 1.75.2.2 snj cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
2574 1.75.2.2 snj
2575 1.75.2.2 snj err = iwm_send_cmd(sc, &cmd);
2576 1.75.2.2 snj if (err) {
2577 1.75.2.2 snj DPRINTF(("%s: Could not send NVM_ACCESS command (error=%d)\n",
2578 1.75.2.2 snj DEVNAME(sc), err));
2579 1.75.2.2 snj return err;
2580 1.75.2.2 snj }
2581 1.75.2.2 snj
2582 1.75.2.2 snj pkt = cmd.resp_pkt;
2583 1.75.2.2 snj if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
2584 1.75.2.2 snj err = EIO;
2585 1.75.2.2 snj goto exit;
2586 1.75.2.2 snj }
2587 1.75.2.2 snj
2588 1.75.2.2 snj /* Extract NVM response */
2589 1.75.2.2 snj nvm_resp = (void *)pkt->data;
2590 1.75.2.2 snj
2591 1.75.2.2 snj err = le16toh(nvm_resp->status);
2592 1.75.2.2 snj bytes_read = le16toh(nvm_resp->length);
2593 1.75.2.2 snj offset_read = le16toh(nvm_resp->offset);
2594 1.75.2.2 snj resp_data = nvm_resp->data;
2595 1.75.2.2 snj if (err) {
2596 1.75.2.2 snj err = EINVAL;
2597 1.75.2.2 snj goto exit;
2598 1.75.2.2 snj }
2599 1.75.2.2 snj
2600 1.75.2.2 snj if (offset_read != offset) {
2601 1.75.2.2 snj err = EINVAL;
2602 1.75.2.2 snj goto exit;
2603 1.75.2.2 snj }
2604 1.75.2.2 snj if (bytes_read > length) {
2605 1.75.2.2 snj err = EINVAL;
2606 1.75.2.2 snj goto exit;
2607 1.75.2.2 snj }
2608 1.75.2.2 snj
2609 1.75.2.2 snj memcpy(data + offset, resp_data, bytes_read);
2610 1.75.2.2 snj *len = bytes_read;
2611 1.75.2.2 snj
2612 1.75.2.2 snj exit:
2613 1.75.2.2 snj iwm_free_resp(sc, &cmd);
2614 1.75.2.2 snj return err;
2615 1.75.2.2 snj }
2616 1.75.2.2 snj
2617 1.75.2.2 snj /*
2618 1.75.2.2 snj * Reads an NVM section completely.
2619 1.75.2.2 snj * NICs prior to the 7000 family don't have a real NVM, but just read
2620 1.75.2.2 snj * section 0, which is the EEPROM. Because EEPROM reads are not bounded
2621 1.75.2.2 snj * by the uCode, we need to check manually in this case that we don't
2622 1.75.2.2 snj * overflow and try to read more than the EEPROM size.
2623 1.75.2.2 snj */
2624 1.75.2.2 snj static int
2625 1.75.2.2 snj iwm_nvm_read_section(struct iwm_softc *sc, uint16_t section, uint8_t *data,
2626 1.75.2.2 snj uint16_t *len, size_t max_len)
2627 1.75.2.2 snj {
2628 1.75.2.2 snj uint16_t chunklen, seglen;
2629 1.75.2.2 snj int err;
2630 1.75.2.2 snj
2631 1.75.2.2 snj chunklen = seglen = IWM_NVM_DEFAULT_CHUNK_SIZE;
2632 1.75.2.2 snj *len = 0;
2633 1.75.2.2 snj
2634 1.75.2.2 snj /* Read NVM chunks until exhausted (a short read marks the end of the section). */
2635 1.75.2.2 snj while (seglen == chunklen && *len < max_len) {
2636 1.75.2.2 snj err = iwm_nvm_read_chunk(sc, section, *len, chunklen, data,
2637 1.75.2.2 snj &seglen);
2638 1.75.2.2 snj if (err) {
2639 1.75.2.2 snj DPRINTF(("%s: Cannot read NVM from section %d "
2640 1.75.2.2 snj "offset %d, length %d\n",
2641 1.75.2.2 snj DEVNAME(sc), section, *len, chunklen));
2642 1.75.2.2 snj return err;
2643 1.75.2.2 snj }
2644 1.75.2.2 snj *len += seglen;
2645 1.75.2.2 snj }
2646 1.75.2.2 snj
2647 1.75.2.2 snj DPRINTFN(4, ("NVM section %d read completed\n", section));
2648 1.75.2.2 snj return 0;
2649 1.75.2.2 snj }
2650 1.75.2.2 snj
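/*
 * Return the TX antenna mask advertised in the firmware's PHY
 * configuration, restricted to the antennas the NVM marks as valid
 * (when the NVM provides such a mask). iwm_fw_valid_rx_ant() below
 * does the same for the RX chains.
 */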
2651 1.75.2.2 snj static uint8_t
2652 1.75.2.2 snj iwm_fw_valid_tx_ant(struct iwm_softc *sc)
2653 1.75.2.2 snj {
2654 1.75.2.2 snj uint8_t tx_ant;
2655 1.75.2.2 snj
2656 1.75.2.2 snj tx_ant = ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_TX_CHAIN)
2657 1.75.2.2 snj >> IWM_FW_PHY_CFG_TX_CHAIN_POS);
2658 1.75.2.2 snj
2659 1.75.2.2 snj if (sc->sc_nvm.valid_tx_ant)
2660 1.75.2.2 snj tx_ant &= sc->sc_nvm.valid_tx_ant;
2661 1.75.2.2 snj
2662 1.75.2.2 snj return tx_ant;
2663 1.75.2.2 snj }
2664 1.75.2.2 snj
2665 1.75.2.2 snj static uint8_t
2666 1.75.2.2 snj iwm_fw_valid_rx_ant(struct iwm_softc *sc)
2667 1.75.2.2 snj {
2668 1.75.2.2 snj uint8_t rx_ant;
2669 1.75.2.2 snj
2670 1.75.2.2 snj rx_ant = ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RX_CHAIN)
2671 1.75.2.2 snj >> IWM_FW_PHY_CFG_RX_CHAIN_POS);
2672 1.75.2.2 snj
2673 1.75.2.2 snj if (sc->sc_nvm.valid_rx_ant)
2674 1.75.2.2 snj rx_ant &= sc->sc_nvm.valid_rx_ant;
2675 1.75.2.2 snj
2676 1.75.2.2 snj return rx_ant;
2677 1.75.2.2 snj }
2678 1.75.2.2 snj
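/*
 * Populate ic_channels from the NVM channel list. Entries below
 * IWM_NUM_2GHZ_CHANNELS are 2 GHz channels, the rest are 5 GHz.
 * Channels not marked valid (or any 5 GHz channel when the SKU
 * disables that band) are skipped, and channels without the ACTIVE
 * flag are limited to passive scanning.
 */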
2679 1.75.2.2 snj static void
2680 1.75.2.2 snj iwm_init_channel_map(struct iwm_softc *sc, const uint16_t * const nvm_ch_flags,
2681 1.75.2.2 snj const uint8_t *nvm_channels, size_t nchan)
2682 1.75.2.2 snj {
2683 1.75.2.2 snj struct ieee80211com *ic = &sc->sc_ic;
2684 1.75.2.2 snj struct iwm_nvm_data *data = &sc->sc_nvm;
2685 1.75.2.2 snj int ch_idx;
2686 1.75.2.2 snj struct ieee80211_channel *channel;
2687 1.75.2.2 snj uint16_t ch_flags;
2688 1.75.2.2 snj int is_5ghz;
2689 1.75.2.2 snj int flags, hw_value;
2690 1.75.2.2 snj
2691 1.75.2.2 snj for (ch_idx = 0; ch_idx < nchan; ch_idx++) {
2692 1.75.2.2 snj ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
2693 1.75.2.2 snj aprint_debug_dev(sc->sc_dev,
2694 1.75.2.2 snj "Ch. %d: %svalid %cibss %s %cradar %cdfs"
2695 1.75.2.2 snj " %cwide %c40MHz %c80MHz %c160MHz\n",
2696 1.75.2.2 snj nvm_channels[ch_idx],
2697 1.75.2.2 snj ch_flags & IWM_NVM_CHANNEL_VALID ? "" : "in",
2698 1.75.2.2 snj ch_flags & IWM_NVM_CHANNEL_IBSS ? '+' : '-',
2699 1.75.2.2 snj ch_flags & IWM_NVM_CHANNEL_ACTIVE ? "active" : "passive",
2700 1.75.2.2 snj ch_flags & IWM_NVM_CHANNEL_RADAR ? '+' : '-',
2701 1.75.2.2 snj ch_flags & IWM_NVM_CHANNEL_DFS ? '+' : '-',
2702 1.75.2.2 snj ch_flags & IWM_NVM_CHANNEL_WIDE ? '+' : '-',
2703 1.75.2.2 snj ch_flags & IWM_NVM_CHANNEL_40MHZ ? '+' : '-',
2704 1.75.2.2 snj ch_flags & IWM_NVM_CHANNEL_80MHZ ? '+' : '-',
2705 1.75.2.2 snj ch_flags & IWM_NVM_CHANNEL_160MHZ ? '+' : '-');
2706 1.75.2.2 snj
2707 1.75.2.2 snj if (ch_idx >= IWM_NUM_2GHZ_CHANNELS &&
2708 1.75.2.2 snj !data->sku_cap_band_52GHz_enable)
2709 1.75.2.2 snj ch_flags &= ~IWM_NVM_CHANNEL_VALID;
2710 1.75.2.2 snj
2711 1.75.2.2 snj if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
2712 1.75.2.2 snj DPRINTF(("Ch. %d Flags %x [%sGHz] - No traffic\n",
2713 1.75.2.2 snj nvm_channels[ch_idx], ch_flags,
2714 1.75.2.2 snj (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ? "5" : "2.4"));
2715 1.75.2.2 snj continue;
2716 1.75.2.2 snj }
2717 1.75.2.2 snj
2718 1.75.2.2 snj hw_value = nvm_channels[ch_idx];
2719 1.75.2.2 snj channel = &ic->ic_channels[hw_value];
2720 1.75.2.2 snj
2721 1.75.2.2 snj is_5ghz = ch_idx >= IWM_NUM_2GHZ_CHANNELS;
2722 1.75.2.2 snj if (!is_5ghz) {
2723 1.75.2.2 snj flags = IEEE80211_CHAN_2GHZ;
2724 1.75.2.2 snj channel->ic_flags
2725 1.75.2.2 snj = IEEE80211_CHAN_CCK
2726 1.75.2.2 snj | IEEE80211_CHAN_OFDM
2727 1.75.2.2 snj | IEEE80211_CHAN_DYN
2728 1.75.2.2 snj | IEEE80211_CHAN_2GHZ;
2729 1.75.2.2 snj } else {
2730 1.75.2.2 snj flags = IEEE80211_CHAN_5GHZ;
2731 1.75.2.2 snj channel->ic_flags =
2732 1.75.2.2 snj IEEE80211_CHAN_A;
2733 1.75.2.2 snj }
2734 1.75.2.2 snj channel->ic_freq = ieee80211_ieee2mhz(hw_value, flags);
2735 1.75.2.2 snj
2736 1.75.2.2 snj if (!(ch_flags & IWM_NVM_CHANNEL_ACTIVE))
2737 1.75.2.2 snj channel->ic_flags |= IEEE80211_CHAN_PASSIVE;
2738 1.75.2.2 snj
2739 1.75.2.2 snj #ifndef IEEE80211_NO_HT
2740 1.75.2.2 snj if (data->sku_cap_11n_enable)
2741 1.75.2.2 snj channel->ic_flags |= IEEE80211_CHAN_HT;
2742 1.75.2.2 snj #endif
2743 1.75.2.2 snj }
2744 1.75.2.2 snj }
2745 1.75.2.2 snj
2746 1.75.2.2 snj #ifndef IEEE80211_NO_HT
2747 1.75.2.2 snj static void
2748 1.75.2.2 snj iwm_setup_ht_rates(struct iwm_softc *sc)
2749 1.75.2.2 snj {
2750 1.75.2.2 snj struct ieee80211com *ic = &sc->sc_ic;
2751 1.75.2.2 snj
2752 1.75.2.2 snj /* TX is supported with the same MCS as RX. */
2753 1.75.2.2 snj ic->ic_tx_mcs_set = IEEE80211_TX_MCS_SET_DEFINED;
2754 1.75.2.2 snj
2755 1.75.2.2 snj ic->ic_sup_mcs[0] = 0xff; /* MCS 0-7 */
2756 1.75.2.2 snj
2757 1.75.2.2 snj #ifdef notyet
2758 1.75.2.2 snj if (sc->sc_nvm.sku_cap_mimo_disable)
2759 1.75.2.2 snj return;
2760 1.75.2.2 snj
2761 1.75.2.2 snj if (iwm_fw_valid_rx_ant(sc) > 1)
2762 1.75.2.2 snj ic->ic_sup_mcs[1] = 0xff; /* MCS 8-15 */
2763 1.75.2.2 snj if (iwm_fw_valid_rx_ant(sc) > 2)
2764 1.75.2.2 snj ic->ic_sup_mcs[2] = 0xff; /* MCS 16-23 */
2765 1.75.2.2 snj #endif
2766 1.75.2.2 snj }
2767 1.75.2.2 snj
2768 1.75.2.2 snj #define IWM_MAX_RX_BA_SESSIONS 16
2769 1.75.2.2 snj
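/*
 * Add or remove a firmware RX block-ack session for the given TID via
 * an ADD_STA "modify" command, then accept or refuse the ADDBA request
 * and adjust the driver's session count accordingly.
 */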
2770 1.75.2.2 snj static void
2771 1.75.2.2 snj iwm_sta_rx_agg(struct iwm_softc *sc, struct ieee80211_node *ni, uint8_t tid,
2772 1.75.2.2 snj uint16_t ssn, int start)
2773 1.75.2.2 snj {
2774 1.75.2.2 snj struct ieee80211com *ic = &sc->sc_ic;
2775 1.75.2.2 snj struct iwm_add_sta_cmd_v7 cmd;
2776 1.75.2.2 snj struct iwm_node *in = (struct iwm_node *)ni;
2777 1.75.2.2 snj int err, s;
2778 1.75.2.2 snj uint32_t status;
2779 1.75.2.2 snj
2780 1.75.2.2 snj if (start && sc->sc_rx_ba_sessions >= IWM_MAX_RX_BA_SESSIONS) {
2781 1.75.2.2 snj ieee80211_addba_req_refuse(ic, ni, tid);
2782 1.75.2.2 snj return;
2783 1.75.2.2 snj }
2784 1.75.2.2 snj
2785 1.75.2.2 snj memset(&cmd, 0, sizeof(cmd));
2786 1.75.2.2 snj
2787 1.75.2.2 snj cmd.sta_id = IWM_STATION_ID;
2788 1.75.2.2 snj cmd.mac_id_n_color
2789 1.75.2.2 snj = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
2790 1.75.2.2 snj cmd.add_modify = IWM_STA_MODE_MODIFY;
2791 1.75.2.2 snj
2792 1.75.2.2 snj if (start) {
2793 1.75.2.2 snj cmd.add_immediate_ba_tid = (uint8_t)tid;
2794 1.75.2.2 snj cmd.add_immediate_ba_ssn = ssn;
2795 1.75.2.2 snj } else {
2796 1.75.2.2 snj cmd.remove_immediate_ba_tid = (uint8_t)tid;
2797 1.75.2.2 snj }
2798 1.75.2.2 snj cmd.modify_mask = start ? IWM_STA_MODIFY_ADD_BA_TID :
2799 1.75.2.2 snj IWM_STA_MODIFY_REMOVE_BA_TID;
2800 1.75.2.2 snj
2801 1.75.2.2 snj status = IWM_ADD_STA_SUCCESS;
2802 1.75.2.2 snj err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(cmd), &cmd,
2803 1.75.2.2 snj &status);
2804 1.75.2.2 snj
2805 1.75.2.2 snj s = splnet();
2806 1.75.2.2 snj if (err == 0 && status == IWM_ADD_STA_SUCCESS) {
2807 1.75.2.2 snj if (start) {
2808 1.75.2.2 snj sc->sc_rx_ba_sessions++;
2809 1.75.2.2 snj ieee80211_addba_req_accept(ic, ni, tid);
2810 1.75.2.2 snj } else if (sc->sc_rx_ba_sessions > 0)
2811 1.75.2.2 snj sc->sc_rx_ba_sessions--;
2812 1.75.2.2 snj } else if (start)
2813 1.75.2.2 snj ieee80211_addba_req_refuse(ic, ni, tid);
2814 1.75.2.2 snj splx(s);
2815 1.75.2.2 snj }
2816 1.75.2.2 snj
2817 1.75.2.2 snj static void
2818 1.75.2.2 snj iwm_htprot_task(void *arg)
2819 1.75.2.2 snj {
2820 1.75.2.2 snj struct iwm_softc *sc = arg;
2821 1.75.2.2 snj struct ieee80211com *ic = &sc->sc_ic;
2822 1.75.2.2 snj struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
2823 1.75.2.2 snj int err;
2824 1.75.2.2 snj
2825 1.75.2.2 snj /* This call updates HT protection based on in->in_ni.ni_htop1. */
2826 1.75.2.2 snj err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 1);
2827 1.75.2.2 snj if (err)
2828 1.75.2.2 snj aprint_error_dev(sc->sc_dev,
2829 1.75.2.2 snj "could not change HT protection: error %d\n", err);
2830 1.75.2.2 snj }
2831 1.75.2.2 snj
2832 1.75.2.2 snj /*
2833 1.75.2.2 snj * This function is called by upper layer when HT protection settings in
2834 1.75.2.2 snj * beacons have changed.
2835 1.75.2.2 snj */
2836 1.75.2.2 snj static void
2837 1.75.2.2 snj iwm_update_htprot(struct ieee80211com *ic, struct ieee80211_node *ni)
2838 1.75.2.2 snj {
2839 1.75.2.2 snj struct iwm_softc *sc = ic->ic_softc;
2840 1.75.2.2 snj
2841 1.75.2.2 snj /* assumes that ni == ic->ic_bss */
2842 1.75.2.2 snj task_add(systq, &sc->htprot_task);
2843 1.75.2.2 snj }
2844 1.75.2.2 snj
2845 1.75.2.2 snj static void
2846 1.75.2.2 snj iwm_ba_task(void *arg)
2847 1.75.2.2 snj {
2848 1.75.2.2 snj struct iwm_softc *sc = arg;
2849 1.75.2.2 snj struct ieee80211com *ic = &sc->sc_ic;
2850 1.75.2.2 snj struct ieee80211_node *ni = ic->ic_bss;
2851 1.75.2.2 snj
2852 1.75.2.2 snj if (sc->ba_start)
2853 1.75.2.2 snj iwm_sta_rx_agg(sc, ni, sc->ba_tid, sc->ba_ssn, 1);
2854 1.75.2.2 snj else
2855 1.75.2.2 snj iwm_sta_rx_agg(sc, ni, sc->ba_tid, 0, 0);
2856 1.75.2.2 snj }
2857 1.75.2.2 snj
2858 1.75.2.2 snj /*
2859 1.75.2.2 snj * This function is called by upper layer when an ADDBA request is received
2860 1.75.2.2 snj * from another STA and before the ADDBA response is sent.
2861 1.75.2.2 snj */
2862 1.75.2.2 snj static int
2863 1.75.2.2 snj iwm_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
2864 1.75.2.2 snj uint8_t tid)
2865 1.75.2.2 snj {
2866 1.75.2.2 snj struct ieee80211_rx_ba *ba = &ni->ni_rx_ba[tid];
2867 1.75.2.2 snj struct iwm_softc *sc = IC2IFP(ic)->if_softc;
2868 1.75.2.2 snj
2869 1.75.2.2 snj if (sc->sc_rx_ba_sessions >= IWM_MAX_RX_BA_SESSIONS)
2870 1.75.2.2 snj return ENOSPC;
2871 1.75.2.2 snj
2872 1.75.2.2 snj sc->ba_start = 1;
2873 1.75.2.2 snj sc->ba_tid = tid;
2874 1.75.2.2 snj sc->ba_ssn = htole16(ba->ba_winstart);
2875 1.75.2.2 snj task_add(systq, &sc->ba_task);
2876 1.75.2.2 snj
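/*
 * Returning EBUSY defers the ADDBA response: in this (OpenBSD-derived)
 * net80211 code the request is answered later, when iwm_ba_task()
 * calls ieee80211_addba_req_accept() or _refuse().
 */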
2877 1.75.2.2 snj return EBUSY;
2878 1.75.2.2 snj }
2879 1.75.2.2 snj
2880 1.75.2.2 snj /*
2881 1.75.2.2 snj * This function is called by upper layer on teardown of an HT-immediate
2882 1.75.2.2 snj * Block Ack agreement (eg. upon receipt of a DELBA frame).
2883 1.75.2.2 snj */
2884 1.75.2.2 snj static void
2885 1.75.2.2 snj iwm_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
2886 1.75.2.2 snj uint8_t tid)
2887 1.75.2.2 snj {
2888 1.75.2.2 snj struct iwm_softc *sc = IC2IFP(ic)->if_softc;
2889 1.75.2.2 snj
2890 1.75.2.2 snj sc->ba_start = 0;
2891 1.75.2.2 snj sc->ba_tid = tid;
2892 1.75.2.2 snj task_add(systq, &sc->ba_task);
2893 1.75.2.2 snj }
2894 1.75.2.2 snj #endif
2895 1.75.2.2 snj
2896 1.75.2.2 snj static void
2897 1.75.2.2 snj iwm_free_fw_paging(struct iwm_softc *sc)
2898 1.75.2.2 snj {
2899 1.75.2.2 snj int i;
2900 1.75.2.2 snj
2901 1.75.2.2 snj if (sc->fw_paging_db[0].fw_paging_block.vaddr == NULL)
2902 1.75.2.2 snj return;
2903 1.75.2.2 snj
2904 1.75.2.2 snj for (i = 0; i < IWM_NUM_OF_FW_PAGING_BLOCKS; i++) {
2905 1.75.2.2 snj iwm_dma_contig_free(&sc->fw_paging_db[i].fw_paging_block);
2906 1.75.2.2 snj }
2907 1.75.2.2 snj
2908 1.75.2.2 snj memset(sc->fw_paging_db, 0, sizeof(sc->fw_paging_db));
2909 1.75.2.2 snj }
2910 1.75.2.2 snj
2911 1.75.2.2 snj static int
2912 1.75.2.2 snj iwm_fill_paging_mem(struct iwm_softc *sc, const struct iwm_fw_sects *fws)
2913 1.75.2.2 snj {
2914 1.75.2.2 snj int sec_idx, idx;
2915 1.75.2.2 snj uint32_t offset = 0;
2916 1.75.2.2 snj
2917 1.75.2.2 snj /*
2918 1.75.2.2 snj * Find where the paging image starts:
2919 1.75.2.2 snj * if CPU2 exists and is in paging format, the image looks like this:
2920 1.75.2.2 snj * CPU1 sections (2 or more)
2921 1.75.2.2 snj * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates CPU1 from CPU2
2922 1.75.2.2 snj * CPU2 sections (not paged)
2923 1.75.2.2 snj * PAGING_SEPARATOR_SECTION delimiter - separates the non-paged CPU2
2924 1.75.2.2 snj * sections from the CPU2 paging sections
2925 1.75.2.2 snj * CPU2 paging CSS
2926 1.75.2.2 snj * CPU2 paging image (including instructions and data)
2927 1.75.2.2 snj */
2928 1.75.2.2 snj for (sec_idx = 0; sec_idx < IWM_UCODE_SECT_MAX; sec_idx++) {
2929 1.75.2.2 snj if (fws->fw_sect[sec_idx].fws_devoff ==
2930 1.75.2.2 snj IWM_PAGING_SEPARATOR_SECTION) {
2931 1.75.2.2 snj sec_idx++;
2932 1.75.2.2 snj break;
2933 1.75.2.2 snj }
2934 1.75.2.2 snj }
2935 1.75.2.2 snj
2936 1.75.2.2 snj /*
2937 1.75.2.2 snj * If paging is enabled there should be at least 2 more sections left
2938 1.75.2.2 snj * (one for CSS and one for Paging data)
2939 1.75.2.2 snj */
2940 1.75.2.2 snj if (sec_idx >= __arraycount(fws->fw_sect) - 1) {
2941 1.75.2.2 snj aprint_verbose_dev(sc->sc_dev,
2942 1.75.2.2 snj "Paging: Missing CSS and/or paging sections\n");
2943 1.75.2.2 snj iwm_free_fw_paging(sc);
2944 1.75.2.2 snj return EINVAL;
2945 1.75.2.2 snj }
2946 1.75.2.2 snj
2947 1.75.2.2 snj /* copy the CSS block to the dram */
2948 1.75.2.2 snj DPRINTF(("%s: Paging: load paging CSS to FW, sec = %d\n", DEVNAME(sc),
2949 1.75.2.2 snj sec_idx));
2950 1.75.2.2 snj
2951 1.75.2.2 snj memcpy(sc->fw_paging_db[0].fw_paging_block.vaddr,
2952 1.75.2.2 snj fws->fw_sect[sec_idx].fws_data, sc->fw_paging_db[0].fw_paging_size);
2953 1.75.2.2 snj
2954 1.75.2.2 snj DPRINTF(("%s: Paging: copied %d CSS bytes to first block\n",
2955 1.75.2.2 snj DEVNAME(sc), sc->fw_paging_db[0].fw_paging_size));
2956 1.75.2.2 snj
2957 1.75.2.2 snj sec_idx++;
2958 1.75.2.2 snj
2959 1.75.2.2 snj /*
2960 1.75.2.2 snj * Copy the paging blocks to DRAM.
2961 1.75.2.2 snj * The loop starts at index 1 because the CSS block (index 0) was
2962 1.75.2.2 snj * already copied above; it stops before the last block, which may
2963 1.75.2.2 snj * not be full and is copied separately below.
2964 1.75.2.2 snj */
2965 1.75.2.2 snj for (idx = 1; idx < sc->num_of_paging_blk; idx++) {
2966 1.75.2.2 snj memcpy(sc->fw_paging_db[idx].fw_paging_block.vaddr,
2967 1.75.2.2 snj (const char *)fws->fw_sect[sec_idx].fws_data + offset,
2968 1.75.2.2 snj sc->fw_paging_db[idx].fw_paging_size);
2969 1.75.2.2 snj
2970 1.75.2.2 snj DPRINTF(("%s: Paging: copied %d paging bytes to block %d\n",
2971 1.75.2.2 snj DEVNAME(sc), sc->fw_paging_db[idx].fw_paging_size, idx));
2972 1.75.2.2 snj
2973 1.75.2.2 snj offset += sc->fw_paging_db[idx].fw_paging_size;
2974 1.75.2.2 snj }
2975 1.75.2.2 snj
2976 1.75.2.2 snj /* copy the last paging block */
2977 1.75.2.2 snj if (sc->num_of_pages_in_last_blk > 0) {
2978 1.75.2.2 snj memcpy(sc->fw_paging_db[idx].fw_paging_block.vaddr,
2979 1.75.2.2 snj (const char *)fws->fw_sect[sec_idx].fws_data + offset,
2980 1.75.2.2 snj IWM_FW_PAGING_SIZE * sc->num_of_pages_in_last_blk);
2981 1.75.2.2 snj
2982 1.75.2.2 snj DPRINTF(("%s: Paging: copied %d pages in the last block %d\n",
2983 1.75.2.2 snj DEVNAME(sc), sc->num_of_pages_in_last_blk, idx));
2984 1.75.2.2 snj }
2985 1.75.2.2 snj
2986 1.75.2.2 snj return 0;
2987 1.75.2.2 snj }
2988 1.75.2.2 snj
2989 1.75.2.2 snj static int
2990 1.75.2.2 snj iwm_alloc_fw_paging_mem(struct iwm_softc *sc, const struct iwm_fw_sects *fws)
2991 1.75.2.2 snj {
2992 1.75.2.2 snj int blk_idx = 0;
2993 1.75.2.2 snj int error, num_of_pages;
2994 1.75.2.2 snj bus_dmamap_t dmap;
2995 1.75.2.2 snj
2996 1.75.2.2 snj if (sc->fw_paging_db[0].fw_paging_block.vaddr != NULL) {
2997 1.75.2.2 snj int i;
2998 1.75.2.2 snj /* The device was reset; set up firmware paging again. */
2999 1.75.2.2 snj for (i = 0; i < sc->num_of_paging_blk + 1; i++) {
3000 1.75.2.2 snj dmap = sc->fw_paging_db[i].fw_paging_block.map;
3001 1.75.2.2 snj bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
3002 1.75.2.2 snj BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
3003 1.75.2.2 snj }
3004 1.75.2.2 snj return 0;
3005 1.75.2.2 snj }
3006 1.75.2.2 snj
3007 1.75.2.2 snj /* Ensure that 1 << IWM_BLOCK_2_EXP_SIZE equals IWM_PAGING_BLOCK_SIZE. */
3008 1.75.2.2 snj CTASSERT(__BIT(IWM_BLOCK_2_EXP_SIZE) == IWM_PAGING_BLOCK_SIZE);
3009 1.75.2.2 snj
3010 1.75.2.2 snj num_of_pages = fws->paging_mem_size / IWM_FW_PAGING_SIZE;
3011 1.75.2.2 snj sc->num_of_paging_blk =
3012 1.75.2.2 snj howmany(num_of_pages, IWM_NUM_OF_PAGE_PER_GROUP);
3013 1.75.2.2 snj sc->num_of_pages_in_last_blk = num_of_pages -
3014 1.75.2.2 snj IWM_NUM_OF_PAGE_PER_GROUP * (sc->num_of_paging_blk - 1);
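/*
 * Worked example with a hypothetical image (4 KB pages, 8 pages per
 * block, as in the debug output below): paging_mem_size of 140 KB
 * gives 140 KB / 4 KB = 35 pages, howmany(35, 8) = 5 blocks, and the
 * last block then holds 35 - 8 * (5 - 1) = 3 pages.
 */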
3015 1.75.2.2 snj
3016 1.75.2.2 snj DPRINTF(("%s: Paging: allocating mem for %d paging blocks, "
3017 1.75.2.2 snj "each block holds 8 pages, last block holds %d pages\n",
3018 1.75.2.2 snj DEVNAME(sc), sc->num_of_paging_blk, sc->num_of_pages_in_last_blk));
3019 1.75.2.2 snj
3020 1.75.2.2 snj /* allocate block of 4Kbytes for paging CSS */
3021 1.75.2.2 snj error = iwm_dma_contig_alloc(sc->sc_dmat,
3022 1.75.2.2 snj &sc->fw_paging_db[blk_idx].fw_paging_block, IWM_FW_PAGING_SIZE,
3023 1.75.2.2 snj 4096);
3024 1.75.2.2 snj if (error) {
3025 1.75.2.2 snj /* free all the previous pages since we failed */
3026 1.75.2.2 snj iwm_free_fw_paging(sc);
3027 1.75.2.2 snj return ENOMEM;
3028 1.75.2.2 snj }
3029 1.75.2.2 snj
3030 1.75.2.2 snj sc->fw_paging_db[blk_idx].fw_paging_size = IWM_FW_PAGING_SIZE;
3031 1.75.2.2 snj
3032 1.75.2.2 snj DPRINTF(("%s: Paging: allocated 4K(CSS) bytes for firmware paging.\n",
3033 1.75.2.2 snj DEVNAME(sc)));
3034 1.75.2.2 snj
3035 1.75.2.2 snj /*
3036 1.75.2.2 snj * Allocate the paging blocks in DRAM.
3037 1.75.2.2 snj * The CSS block occupies fw_paging_db[0], so start from index 1.
3038 1.75.2.2 snj */
3039 1.75.2.2 snj for (blk_idx = 1; blk_idx < sc->num_of_paging_blk + 1; blk_idx++) {
3040 1.75.2.2 snj /* allocate block of IWM_PAGING_BLOCK_SIZE (32K) */
3041 1.75.2.2 snj /* XXX Use iwm_dma_contig_alloc for allocating */
3042 1.75.2.2 snj error = iwm_dma_contig_alloc(sc->sc_dmat,
3043 1.75.2.2 snj &sc->fw_paging_db[blk_idx].fw_paging_block,
3044 1.75.2.2 snj IWM_PAGING_BLOCK_SIZE, 4096);
3045 1.75.2.2 snj if (error) {
3046 1.75.2.2 snj /* free all the previous pages since we failed */
3047 1.75.2.2 snj iwm_free_fw_paging(sc);
3048 1.75.2.2 snj return ENOMEM;
3049 1.75.2.2 snj }
3050 1.75.2.2 snj
3051 1.75.2.2 snj sc->fw_paging_db[blk_idx].fw_paging_size =
3052 1.75.2.2 snj IWM_PAGING_BLOCK_SIZE;
3053 1.75.2.2 snj
3054 1.75.2.2 snj DPRINTF(("%s: Paging: allocated 32K bytes for firmware "
3055 1.75.2.2 snj "paging.\n", DEVNAME(sc)));
3056 1.75.2.2 snj }
3057 1.75.2.2 snj
3058 1.75.2.2 snj return 0;
3059 1.75.2.2 snj }
3060 1.75.2.2 snj
3061 1.75.2.2 snj static int
3062 1.75.2.2 snj iwm_save_fw_paging(struct iwm_softc *sc, const struct iwm_fw_sects *fws)
3063 1.75.2.2 snj {
3064 1.75.2.2 snj int err;
3065 1.75.2.2 snj
3066 1.75.2.2 snj err = iwm_alloc_fw_paging_mem(sc, fws);
3067 1.75.2.2 snj if (err)
3068 1.75.2.2 snj return err;
3069 1.75.2.2 snj
3070 1.75.2.2 snj return iwm_fill_paging_mem(sc, fws);
3071 1.75.2.2 snj }
3072 1.75.2.2 snj
3073 1.75.2.2 snj static bool
3074 1.75.2.2 snj iwm_has_new_tx_api(struct iwm_softc *sc)
3075 1.75.2.2 snj {
3076 1.75.2.2 snj /* XXX */
3077 1.75.2.2 snj return false;
3078 1.75.2.2 snj }
3079 1.75.2.2 snj
3080 1.75.2.2 snj /* send paging cmd to FW in case CPU2 has paging image */
3081 1.75.2.2 snj static int
3082 1.75.2.2 snj iwm_send_paging_cmd(struct iwm_softc *sc, const struct iwm_fw_sects *fws)
3083 1.75.2.2 snj {
3084 1.75.2.2 snj struct iwm_fw_paging_cmd fw_paging_cmd = {
3085 1.75.2.2 snj .flags = htole32(IWM_PAGING_CMD_IS_SECURED |
3086 1.75.2.2 snj IWM_PAGING_CMD_IS_ENABLED |
3087 1.75.2.2 snj (sc->num_of_pages_in_last_blk <<
3088 1.75.2.2 snj IWM_PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS)),
3089 1.75.2.2 snj .block_size = htole32(IWM_BLOCK_2_EXP_SIZE),
3090 1.75.2.2 snj .block_num = htole32(sc->num_of_paging_blk),
3091 1.75.2.2 snj };
3092 1.75.2.2 snj size_t size = sizeof(fw_paging_cmd);
3093 1.75.2.2 snj int blk_idx;
3094 1.75.2.2 snj bus_dmamap_t dmap;
3095 1.75.2.2 snj
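/*
 * Without the new TX API the command carries 32-bit (page-shifted)
 * block addresses instead of 64-bit ones, so the unused half of each
 * address slot is trimmed from the command length.
 */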
3096 1.75.2.2 snj if (!iwm_has_new_tx_api(sc))
3097 1.75.2.2 snj size -= (sizeof(uint64_t) - sizeof(uint32_t)) *
3098 1.75.2.2 snj IWM_NUM_OF_FW_PAGING_BLOCKS;
3099 1.75.2.2 snj
3100 1.75.2.2 snj /* Loop over all paging blocks plus the CSS block. */
3101 1.75.2.2 snj for (blk_idx = 0; blk_idx < sc->num_of_paging_blk + 1; blk_idx++) {
3102 1.75.2.2 snj bus_addr_t dev_phy_addr =
3103 1.75.2.2 snj sc->fw_paging_db[blk_idx].fw_paging_block.paddr;
3104 1.75.2.2 snj if (iwm_has_new_tx_api(sc)) {
3105 1.75.2.2 snj fw_paging_cmd.device_phy_addr.addr64[blk_idx] =
3106 1.75.2.2 snj htole64(dev_phy_addr);
3107 1.75.2.2 snj } else {
3108 1.75.2.2 snj dev_phy_addr = dev_phy_addr >> IWM_PAGE_2_EXP_SIZE;
3109 1.75.2.2 snj fw_paging_cmd.device_phy_addr.addr32[blk_idx] =
3110 1.75.2.2 snj htole32(dev_phy_addr);
3111 1.75.2.2 snj }
3112 1.75.2.2 snj dmap = sc->fw_paging_db[blk_idx].fw_paging_block.map;
3113 1.75.2.2 snj bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
3114 1.75.2.2 snj BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
3115 1.75.2.2 snj }
3116 1.75.2.2 snj
3117 1.75.2.2 snj return iwm_send_cmd_pdu(sc,
3118 1.75.2.2 snj iwm_cmd_id(IWM_FW_PAGING_BLOCK_CMD, IWM_ALWAYS_LONG_GROUP, 0),
3119 1.75.2.2 snj 0, size, &fw_paging_cmd);
3120 1.75.2.2 snj }
3121 1.75.2.2 snj
3122 1.75.2.2 snj static void
3123 1.75.2.2 snj iwm_set_hw_address_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
3124 1.75.2.2 snj const uint16_t *mac_override, const uint16_t *nvm_hw)
3125 1.75.2.2 snj {
3126 1.75.2.2 snj static const uint8_t reserved_mac[ETHER_ADDR_LEN] = {
3127 1.75.2.2 snj 0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
3128 1.75.2.2 snj };
3129 1.75.2.2 snj static const u_int8_t etheranyaddr[ETHER_ADDR_LEN] = {
3130 1.75.2.2 snj 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
3131 1.75.2.2 snj };
3132 1.75.2.2 snj const uint8_t *hw_addr;
3133 1.75.2.2 snj
3134 1.75.2.2 snj if (mac_override) {
3135 1.75.2.2 snj hw_addr = (const uint8_t *)(mac_override +
3136 1.75.2.2 snj IWM_MAC_ADDRESS_OVERRIDE_8000);
3137 1.75.2.2 snj
3138 1.75.2.2 snj /*
3139 1.75.2.2 snj * Store the MAC address from MAO section.
3140 1.75.2.2 snj * No byte swapping is required in MAO section
3141 1.75.2.2 snj */
3142 1.75.2.2 snj memcpy(data->hw_addr, hw_addr, ETHER_ADDR_LEN);
3143 1.75.2.2 snj
3144 1.75.2.2 snj /*
3145 1.75.2.2 snj * Force the use of the OTP MAC address in case of reserved MAC
3146 1.75.2.2 snj * address in the NVM, or if address is given but invalid.
3147 1.75.2.2 snj */
3148 1.75.2.2 snj if (memcmp(reserved_mac, hw_addr, ETHER_ADDR_LEN) != 0 &&
3149 1.75.2.2 snj (memcmp(etherbroadcastaddr, data->hw_addr,
3150 1.75.2.2 snj sizeof(etherbroadcastaddr)) != 0) &&
3151 1.75.2.2 snj (memcmp(etheranyaddr, data->hw_addr,
3152 1.75.2.2 snj sizeof(etheranyaddr)) != 0) &&
3153 1.75.2.2 snj !ETHER_IS_MULTICAST(data->hw_addr))
3154 1.75.2.2 snj return;
3155 1.75.2.2 snj }
3156 1.75.2.2 snj
3157 1.75.2.2 snj if (nvm_hw) {
3158 1.75.2.2 snj /* Read the mac address from WFMP registers. */
3159 1.75.2.2 snj uint32_t mac_addr0 =
3160 1.75.2.2 snj htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
3161 1.75.2.2 snj uint32_t mac_addr1 =
3162 1.75.2.2 snj htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));
3163 1.75.2.2 snj
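/*
 * MAC_ADDR_0 supplies the first four bytes of the address and
 * MAC_ADDR_1 the last two; the byte order is reversed within each
 * register value, hence the swapping below.
 */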
3164 1.75.2.2 snj hw_addr = (const uint8_t *)&mac_addr0;
3165 1.75.2.2 snj data->hw_addr[0] = hw_addr[3];
3166 1.75.2.2 snj data->hw_addr[1] = hw_addr[2];
3167 1.75.2.2 snj data->hw_addr[2] = hw_addr[1];
3168 1.75.2.2 snj data->hw_addr[3] = hw_addr[0];
3169 1.75.2.2 snj
3170 1.75.2.2 snj hw_addr = (const uint8_t *)&mac_addr1;
3171 1.75.2.2 snj data->hw_addr[4] = hw_addr[1];
3172 1.75.2.2 snj data->hw_addr[5] = hw_addr[0];
3173 1.75.2.2 snj
3174 1.75.2.2 snj return;
3175 1.75.2.2 snj }
3176 1.75.2.2 snj
3177 1.75.2.2 snj aprint_error_dev(sc->sc_dev, "mac address not found\n");
3178 1.75.2.2 snj memset(data->hw_addr, 0, sizeof(data->hw_addr));
3179 1.75.2.2 snj }
3180 1.75.2.2 snj
3181 1.75.2.2 snj static int
3182 1.75.2.2 snj iwm_parse_nvm_data(struct iwm_softc *sc, const uint16_t *nvm_hw,
3183 1.75.2.2 snj const uint16_t *nvm_sw, const uint16_t *nvm_calib,
3184 1.75.2.2 snj const uint16_t *mac_override, const uint16_t *phy_sku,
3185 1.75.2.2 snj const uint16_t *regulatory)
3186 1.75.2.2 snj {
3187 1.75.2.2 snj struct iwm_nvm_data *data = &sc->sc_nvm;
3188 1.75.2.2 snj uint8_t hw_addr[ETHER_ADDR_LEN];
3189 1.75.2.2 snj uint32_t sku;
3190 1.75.2.2 snj
3191 1.75.2.2 snj if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
3192 1.75.2.2 snj uint16_t radio_cfg = le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
3193 1.75.2.2 snj data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
3194 1.75.2.2 snj data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
3195 1.75.2.2 snj data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
3196 1.75.2.2 snj data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
3197 1.75.2.2 snj
3198 1.75.2.2 snj data->nvm_version = le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
3199 1.75.2.2 snj sku = le16_to_cpup(nvm_sw + IWM_SKU);
3200 1.75.2.2 snj } else {
3201 1.75.2.2 snj uint32_t radio_cfg = le32_to_cpup(phy_sku + IWM_RADIO_CFG_8000);
3202 1.75.2.2 snj data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
3203 1.75.2.2 snj data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
3204 1.75.2.2 snj data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
3205 1.75.2.2 snj data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK_8000(radio_cfg);
3206 1.75.2.2 snj data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
3207 1.75.2.2 snj data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
3208 1.75.2.2 snj
3209 1.75.2.2 snj data->nvm_version = le32_to_cpup(nvm_sw + IWM_NVM_VERSION_8000);
3210 1.75.2.2 snj sku = le32_to_cpup(phy_sku + IWM_SKU_8000);
3211 1.75.2.2 snj }
3212 1.75.2.2 snj
3213 1.75.2.2 snj data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
3214 1.75.2.2 snj data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
3215 1.75.2.2 snj data->sku_cap_11n_enable = sku & IWM_NVM_SKU_CAP_11N_ENABLE;
3216 1.75.2.2 snj data->sku_cap_mimo_disable = sku & IWM_NVM_SKU_CAP_MIMO_DISABLE;
3217 1.75.2.2 snj
3218 1.75.2.2 snj data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
3219 1.75.2.2 snj
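/*
 * 7000-family devices store the MAC address in the HW section with
 * the two bytes of each 16-bit word swapped, so swap them back here.
 * On 8000-family devices the address comes from the MAC_OVERRIDE
 * section or the WFMP registers (see iwm_set_hw_address_8000()).
 */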
3220 1.75.2.2 snj if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
3221 1.75.2.2 snj memcpy(hw_addr, nvm_hw + IWM_HW_ADDR, ETHER_ADDR_LEN);
3222 1.75.2.2 snj data->hw_addr[0] = hw_addr[1];
3223 1.75.2.2 snj data->hw_addr[1] = hw_addr[0];
3224 1.75.2.2 snj data->hw_addr[2] = hw_addr[3];
3225 1.75.2.2 snj data->hw_addr[3] = hw_addr[2];
3226 1.75.2.2 snj data->hw_addr[4] = hw_addr[5];
3227 1.75.2.2 snj data->hw_addr[5] = hw_addr[4];
3228 1.75.2.2 snj } else
3229 1.75.2.2 snj iwm_set_hw_address_8000(sc, data, mac_override, nvm_hw);
3230 1.75.2.2 snj
3231 1.75.2.2 snj if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
3232 1.75.2.2 snj uint16_t lar_offset, lar_config;
3233 1.75.2.2 snj lar_offset = data->nvm_version < 0xE39 ?
3234 1.75.2.2 snj IWM_NVM_LAR_OFFSET_8000_OLD : IWM_NVM_LAR_OFFSET_8000;
3235 1.75.2.2 snj lar_config = le16_to_cpup(regulatory + lar_offset);
3236 1.75.2.2 snj data->lar_enabled = !!(lar_config & IWM_NVM_LAR_ENABLED_8000);
3237 1.75.2.2 snj }
3238 1.75.2.2 snj
3239 1.75.2.2 snj if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
3240 1.75.2.2 snj iwm_init_channel_map(sc, &nvm_sw[IWM_NVM_CHANNELS],
3241 1.75.2.2 snj iwm_nvm_channels, __arraycount(iwm_nvm_channels));
3242 1.75.2.2 snj else
3243 1.75.2.2 snj iwm_init_channel_map(sc, &regulatory[IWM_NVM_CHANNELS_8000],
3244 1.75.2.2 snj iwm_nvm_channels_8000, __arraycount(iwm_nvm_channels_8000));
3245 1.75.2.2 snj
3246 1.75.2.2 snj data->calib_version = 255; /* TODO:
3247 1.75.2.2 snj this value prevents some checks from
3248 1.75.2.2 snj failing; we need to check whether this
3249 1.75.2.2 snj field is still needed and, if so,
3250 1.75.2.2 snj where it lives in the NVM */
3251 1.75.2.2 snj
3252 1.75.2.2 snj return 0;
3253 1.75.2.2 snj }
3254 1.75.2.2 snj
3255 1.75.2.2 snj static int
3256 1.75.2.2 snj iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
3257 1.75.2.2 snj {
3258 1.75.2.2 snj const uint16_t *hw, *sw, *calib, *mac_override = NULL, *phy_sku = NULL;
3259 1.75.2.2 snj const uint16_t *regulatory = NULL;
3260 1.75.2.2 snj
3261 1.75.2.2 snj /* Checking for required sections */
3262 1.75.2.2 snj if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
3263 1.75.2.2 snj if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
3264 1.75.2.2 snj !sections[IWM_NVM_SECTION_TYPE_HW].data) {
3265 1.75.2.2 snj return ENOENT;
3266 1.75.2.2 snj }
3267 1.75.2.2 snj
3268 1.75.2.2 snj hw = (const uint16_t *) sections[IWM_NVM_SECTION_TYPE_HW].data;
3269 1.75.2.2 snj } else if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
3270 1.75.2.2 snj /* SW and REGULATORY sections are mandatory */
3271 1.75.2.2 snj if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
3272 1.75.2.2 snj !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
3273 1.75.2.2 snj return ENOENT;
3274 1.75.2.2 snj }
3275 1.75.2.2 snj /* MAC_OVERRIDE or at least HW section must exist */
3276 1.75.2.2 snj if (!sections[IWM_NVM_SECTION_TYPE_HW_8000].data &&
3277 1.75.2.2 snj !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
3278 1.75.2.2 snj return ENOENT;
3279 1.75.2.2 snj }
3280 1.75.2.2 snj
3281 1.75.2.2 snj /* PHY_SKU section is mandatory in B0 */
3282 1.75.2.2 snj if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
3283 1.75.2.2 snj return ENOENT;
3284 1.75.2.2 snj }
3285 1.75.2.2 snj
3286 1.75.2.2 snj regulatory = (const uint16_t *)
3287 1.75.2.2 snj sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
3288 1.75.2.2 snj hw = (const uint16_t *)
3289 1.75.2.2 snj sections[IWM_NVM_SECTION_TYPE_HW_8000].data;
3290 1.75.2.2 snj mac_override =
3291 1.75.2.2 snj (const uint16_t *)
3292 1.75.2.2 snj sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
3293 1.75.2.2 snj phy_sku = (const uint16_t *)
3294 1.75.2.2 snj sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
3295 1.75.2.2 snj } else {
3296 1.75.2.2 snj panic("unknown device family %d\n", sc->sc_device_family);
3297 1.75.2.2 snj }
3298 1.75.2.2 snj
3299 1.75.2.2 snj sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
3300 1.75.2.2 snj calib = (const uint16_t *)
3301 1.75.2.2 snj sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
3302 1.75.2.2 snj
3303 1.75.2.2 snj return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
3304 1.75.2.2 snj phy_sku, regulatory);
3305 1.75.2.2 snj }
3306 1.75.2.2 snj
3307 1.75.2.2 snj static int
3308 1.75.2.2 snj iwm_nvm_init(struct iwm_softc *sc)
3309 1.75.2.2 snj {
3310 1.75.2.2 snj struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
3311 1.75.2.2 snj int i, section, err;
3312 1.75.2.2 snj uint16_t len;
3313 1.75.2.2 snj uint8_t *buf;
3314 1.75.2.2 snj const size_t bufsz = (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) ?
3315 1.75.2.2 snj IWM_MAX_NVM_SECTION_SIZE_8000 : IWM_MAX_NVM_SECTION_SIZE_7000;
3316 1.75.2.2 snj
3317 1.75.2.2 snj /* Read From FW NVM */
3318 1.75.2.2 snj DPRINTF(("Read NVM\n"));
3319 1.75.2.2 snj
3320 1.75.2.2 snj memset(nvm_sections, 0, sizeof(nvm_sections));
3321 1.75.2.2 snj
3322 1.75.2.2 snj buf = kmem_alloc(bufsz, KM_SLEEP);
3323 1.75.2.2 snj if (buf == NULL)
3324 1.75.2.2 snj return ENOMEM;
3325 1.75.2.2 snj
3326 1.75.2.2 snj for (i = 0; i < __arraycount(iwm_nvm_to_read); i++) {
3327 1.75.2.2 snj section = iwm_nvm_to_read[i];
3328 1.75.2.2 snj KASSERT(section <= IWM_NVM_NUM_OF_SECTIONS);
3329 1.75.2.2 snj
3330 1.75.2.2 snj err = iwm_nvm_read_section(sc, section, buf, &len, bufsz);
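/*
 * Not every section in iwm_nvm_to_read exists on every device;
 * if a read fails, skip that section and carry on.
 */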
3331 1.75.2.2 snj if (err) {
3332 1.75.2.2 snj err = 0;
3333 1.75.2.2 snj continue;
3334 1.75.2.2 snj }
3335 1.75.2.2 snj nvm_sections[section].data = kmem_alloc(len, KM_SLEEP);
3336 1.75.2.2 snj if (nvm_sections[section].data == NULL) {
3337 1.75.2.2 snj err = ENOMEM;
3338 1.75.2.2 snj break;
3339 1.75.2.2 snj }
3340 1.75.2.2 snj memcpy(nvm_sections[section].data, buf, len);
3341 1.75.2.2 snj nvm_sections[section].length = len;
3342 1.75.2.2 snj }
3343 1.75.2.2 snj kmem_free(buf, bufsz);
3344 1.75.2.2 snj if (err == 0)
3345 1.75.2.2 snj err = iwm_parse_nvm_sections(sc, nvm_sections);
3346 1.75.2.2 snj
3347 1.75.2.2 snj for (i = 0; i < IWM_NVM_NUM_OF_SECTIONS; i++) {
3348 1.75.2.2 snj if (nvm_sections[i].data != NULL)
3349 1.75.2.2 snj kmem_free(nvm_sections[i].data, nvm_sections[i].length);
3350 1.75.2.2 snj }
3351 1.75.2.2 snj
3352 1.75.2.2 snj return err;
3353 1.75.2.2 snj }
3354 1.75.2.2 snj
3355 1.75.2.2 snj static int
3356 1.75.2.2 snj iwm_firmware_load_sect(struct iwm_softc *sc, uint32_t dst_addr,
3357 1.75.2.2 snj const uint8_t *section, uint32_t byte_cnt)
3358 1.75.2.2 snj {
3359 1.75.2.2 snj int err = EINVAL;
3360 1.75.2.2 snj uint32_t chunk_sz, offset;
3361 1.75.2.2 snj
3362 1.75.2.2 snj chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, byte_cnt);
3363 1.75.2.2 snj
3364 1.75.2.2 snj for (offset = 0; offset < byte_cnt; offset += chunk_sz) {
3365 1.75.2.2 snj uint32_t addr, len;
3366 1.75.2.2 snj const uint8_t *data;
3367 1.75.2.2 snj bool is_extended = false;
3368 1.75.2.2 snj
3369 1.75.2.2 snj addr = dst_addr + offset;
3370 1.75.2.2 snj len = MIN(chunk_sz, byte_cnt - offset);
3371 1.75.2.2 snj data = section + offset;
3372 1.75.2.2 snj
3373 1.75.2.2 snj if (addr >= IWM_FW_MEM_EXTENDED_START &&
3374 1.75.2.2 snj addr <= IWM_FW_MEM_EXTENDED_END)
3375 1.75.2.2 snj is_extended = true;
3376 1.75.2.2 snj
3377 1.75.2.2 snj if (is_extended)
3378 1.75.2.2 snj iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
3379 1.75.2.2 snj IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
3380 1.75.2.2 snj
3381 1.75.2.2 snj err = iwm_firmware_load_chunk(sc, addr, data, len);
3382 1.75.2.2 snj
3383 1.75.2.2 snj if (is_extended)
3384 1.75.2.2 snj iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
3385 1.75.2.2 snj IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
3386 1.75.2.2 snj
3387 1.75.2.2 snj if (err)
3388 1.75.2.2 snj break;
3389 1.75.2.2 snj }
3390 1.75.2.2 snj
3391 1.75.2.2 snj return err;
3392 1.75.2.2 snj }
3393 1.75.2.2 snj
3394 1.75.2.2 snj static int
3395 1.75.2.2 snj iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
3396 1.75.2.2 snj const uint8_t *section, uint32_t byte_cnt)
3397 1.75.2.2 snj {
3398 1.75.2.2 snj struct iwm_dma_info *dma = &sc->fw_dma;
3399 1.75.2.2 snj int err;
3400 1.75.2.2 snj
3401 1.75.2.2 snj /* Copy firmware chunk into pre-allocated DMA-safe memory. */
3402 1.75.2.2 snj memcpy(dma->vaddr, section, byte_cnt);
3403 1.75.2.2 snj bus_dmamap_sync(sc->sc_dmat, dma->map, 0, byte_cnt,
3404 1.75.2.2 snj BUS_DMASYNC_PREWRITE);
3405 1.75.2.2 snj
3406 1.75.2.2 snj sc->sc_fw_chunk_done = 0;
3407 1.75.2.2 snj
3408 1.75.2.2 snj if (!iwm_nic_lock(sc))
3409 1.75.2.2 snj return EBUSY;
3410 1.75.2.2 snj
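/*
 * Program the FH service channel to DMA the chunk from host memory
 * into device SRAM at dst_addr: pause the channel, set the SRAM
 * destination and the DRAM source address and length, mark the
 * transfer buffer valid, then re-enable the channel to start the
 * transfer. The interrupt path sets sc_fw_chunk_done when the
 * transfer completes (see the wait loop below).
 */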
3411 1.75.2.2 snj IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
3412 1.75.2.2 snj IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
3413 1.75.2.2 snj IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
3414 1.75.2.2 snj dst_addr);
3415 1.75.2.2 snj IWM_WRITE(sc, IWM_FH_TFDIB_CTRL0_REG(IWM_FH_SRVC_CHNL),
3416 1.75.2.2 snj dma->paddr & IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
3417 1.75.2.2 snj IWM_WRITE(sc, IWM_FH_TFDIB_CTRL1_REG(IWM_FH_SRVC_CHNL),
3418 1.75.2.2 snj (iwm_get_dma_hi_addr(dma->paddr)
3419 1.75.2.2 snj << IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
3420 1.75.2.2 snj IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(IWM_FH_SRVC_CHNL),
3421 1.75.2.2 snj 1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
3422 1.75.2.2 snj 1 << IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
3423 1.75.2.2 snj IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
3424 1.75.2.2 snj IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
3425 1.75.2.2 snj IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
3426 1.75.2.2 snj IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
3427 1.75.2.2 snj IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
3428 1.75.2.2 snj
3429 1.75.2.2 snj iwm_nic_unlock(sc);
3430 1.75.2.2 snj
3431 1.75.2.2 snj /* Wait for this segment to load. */
3432 1.75.2.2 snj err = 0;
3433 1.75.2.2 snj while (!sc->sc_fw_chunk_done) {
3434 1.75.2.2 snj err = tsleep(&sc->sc_fw, 0, "iwmfw", mstohz(5000));
3435 1.75.2.2 snj if (err)
3436 1.75.2.2 snj break;
3437 1.75.2.2 snj }
3438 1.75.2.2 snj if (!sc->sc_fw_chunk_done) {
3439 1.75.2.2 snj DPRINTF(("%s: fw chunk addr 0x%x len %d failed to load\n",
3440 1.75.2.2 snj DEVNAME(sc), dst_addr, byte_cnt));
3441 1.75.2.2 snj }
3442 1.75.2.2 snj
3443 1.75.2.2 snj return err;
3444 1.75.2.2 snj }
3445 1.75.2.2 snj
3446 1.75.2.2 snj static int
3447 1.75.2.2 snj iwm_load_cpu_sections_7000(struct iwm_softc *sc, struct iwm_fw_sects *fws,
3448 1.75.2.2 snj int cpu, int *first_ucode_section)
3449 1.75.2.2 snj {
3450 1.75.2.2 snj int i, err = 0;
3451 1.75.2.2 snj uint32_t last_read_idx = 0;
3452 1.75.2.2 snj void *data;
3453 1.75.2.2 snj uint32_t dlen;
3454 1.75.2.2 snj uint32_t offset;
3455 1.75.2.2 snj
3456 1.75.2.2 snj if (cpu == 1) {
3457 1.75.2.2 snj *first_ucode_section = 0;
3458 1.75.2.2 snj } else {
3459 1.75.2.2 snj (*first_ucode_section)++;
3460 1.75.2.2 snj }
3461 1.75.2.2 snj
3462 1.75.2.2 snj for (i = *first_ucode_section; i < IWM_UCODE_SECT_MAX; i++) {
3463 1.75.2.2 snj last_read_idx = i;
3464 1.75.2.2 snj data = fws->fw_sect[i].fws_data;
3465 1.75.2.2 snj dlen = fws->fw_sect[i].fws_len;
3466 1.75.2.2 snj offset = fws->fw_sect[i].fws_devoff;
3467 1.75.2.2 snj
3468 1.75.2.2 snj /*
3469 1.75.2.2 snj * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
3470 1.75.2.2 snj * CPU1 to CPU2.
3471 1.75.2.2 snj * PAGING_SEPARATOR_SECTION delimiter - separate between
3472 1.75.2.2 snj * CPU2 non paged to CPU2 paging sec.
3473 1.75.2.2 snj */
3474 1.75.2.2 snj if (!data || offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
3475 1.75.2.2 snj offset == IWM_PAGING_SEPARATOR_SECTION)
3476 1.75.2.2 snj break;
3477 1.75.2.2 snj
3478 1.75.2.2 snj if (dlen > sc->sc_fwdmasegsz) {
3479 1.75.2.2 snj err = EFBIG;
3480 1.75.2.2 snj } else
3481 1.75.2.2 snj err = iwm_firmware_load_sect(sc, offset, data, dlen);
3482 1.75.2.2 snj if (err) {
3483 1.75.2.2 snj DPRINTF(("%s: could not load firmware chunk %d "
3484 1.75.2.2 snj "(error %d)\n", DEVNAME(sc), i, err));
3485 1.75.2.2 snj return err;
3486 1.75.2.2 snj }
3487 1.75.2.2 snj }
3488 1.75.2.2 snj
3489 1.75.2.2 snj *first_ucode_section = last_read_idx;
3490 1.75.2.2 snj
3491 1.75.2.2 snj return 0;
3492 1.75.2.2 snj }
3493 1.75.2.2 snj
3494 1.75.2.2 snj static int
3495 1.75.2.2 snj iwm_load_firmware_7000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3496 1.75.2.2 snj {
3497 1.75.2.2 snj struct iwm_fw_sects *fws;
3498 1.75.2.2 snj int err = 0;
3499 1.75.2.2 snj int first_ucode_section;
3500 1.75.2.2 snj
3501 1.75.2.2 snj fws = &sc->sc_fw.fw_sects[ucode_type];
3502 1.75.2.2 snj
3503 1.75.2.2 snj DPRINTF(("%s: working with %s CPU\n", DEVNAME(sc),
3504 1.75.2.2 snj fws->is_dual_cpus ? "dual" : "single"));
3505 1.75.2.2 snj
3506 1.75.2.2 snj /* load to FW the binary Secured sections of CPU1 */
3507 1.75.2.2 snj err = iwm_load_cpu_sections_7000(sc, fws, 1, &first_ucode_section);
3508 1.75.2.2 snj if (err)
3509 1.75.2.2 snj return err;
3510 1.75.2.2 snj
3511 1.75.2.2 snj if (fws->is_dual_cpus) {
3512 1.75.2.2 snj /* set CPU2 header address */
3513 1.75.2.2 snj if (iwm_nic_lock(sc)) {
3514 1.75.2.2 snj iwm_write_prph(sc,
3515 1.75.2.2 snj IWM_LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
3516 1.75.2.2 snj IWM_LMPM_SECURE_CPU2_HDR_MEM_SPACE);
3517 1.75.2.2 snj iwm_nic_unlock(sc);
3518 1.75.2.2 snj }
3519 1.75.2.2 snj
3520 1.75.2.2 snj /* load to FW the binary sections of CPU2 */
3521 1.75.2.2 snj err = iwm_load_cpu_sections_7000(sc, fws, 2,
3522 1.75.2.2 snj &first_ucode_section);
3523 1.75.2.2 snj if (err)
3524 1.75.2.2 snj return err;
3525 1.75.2.2 snj }
3526 1.75.2.2 snj
3527 1.75.2.2 snj /* release CPU reset */
3528 1.75.2.2 snj IWM_WRITE(sc, IWM_CSR_RESET, 0);
3529 1.75.2.2 snj
3530 1.75.2.2 snj return 0;
3531 1.75.2.2 snj }
3532 1.75.2.2 snj
3533 1.75.2.2 snj static int
3534 1.75.2.2 snj iwm_load_cpu_sections_8000(struct iwm_softc *sc, struct iwm_fw_sects *fws,
3535 1.75.2.2 snj int cpu, int *first_ucode_section)
3536 1.75.2.2 snj {
3537 1.75.2.2 snj int shift_param;
3538 1.75.2.2 snj int i, err = 0, sec_num = 0x1;
3539 1.75.2.2 snj uint32_t val, last_read_idx = 0;
3540 1.75.2.2 snj void *data;
3541 1.75.2.2 snj uint32_t dlen;
3542 1.75.2.2 snj uint32_t offset;
3543 1.75.2.2 snj
3544 1.75.2.2 snj if (cpu == 1) {
3545 1.75.2.2 snj shift_param = 0;
3546 1.75.2.2 snj *first_ucode_section = 0;
3547 1.75.2.2 snj } else {
3548 1.75.2.2 snj shift_param = 16;
3549 1.75.2.2 snj (*first_ucode_section)++;
3550 1.75.2.2 snj }
3551 1.75.2.2 snj
3552 1.75.2.2 snj for (i = *first_ucode_section; i < IWM_UCODE_SECT_MAX; i++) {
3553 1.75.2.2 snj last_read_idx = i;
3554 1.75.2.2 snj data = fws->fw_sect[i].fws_data;
3555 1.75.2.2 snj dlen = fws->fw_sect[i].fws_len;
3556 1.75.2.2 snj offset = fws->fw_sect[i].fws_devoff;
3557 1.75.2.2 snj
3558 1.75.2.2 snj /*
3559 1.75.2.2 snj * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
3560 1.75.2.2 snj * CPU1 to CPU2.
3561 1.75.2.2 snj * PAGING_SEPARATOR_SECTION delimiter - separate between
3562 1.75.2.2 snj * CPU2 non paged to CPU2 paging sec.
3563 1.75.2.2 snj */
3564 1.75.2.2 snj if (!data || offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
3565 1.75.2.2 snj offset == IWM_PAGING_SEPARATOR_SECTION)
3566 1.75.2.2 snj break;
3567 1.75.2.2 snj
3568 1.75.2.2 snj if (dlen > sc->sc_fwdmasegsz) {
3569 1.75.2.2 snj err = EFBIG;
3570 1.75.2.2 snj } else
3571 1.75.2.2 snj err = iwm_firmware_load_sect(sc, offset, data, dlen);
3572 1.75.2.2 snj if (err) {
3573 1.75.2.2 snj DPRINTF(("%s: could not load firmware chunk %d "
3574 1.75.2.2 snj "(error %d)\n", DEVNAME(sc), i, err));
3575 1.75.2.2 snj return err;
3576 1.75.2.2 snj }
3577 1.75.2.2 snj
3578 1.75.2.2 snj /* Notify the ucode of the loaded section number and status */
3579 1.75.2.2 snj if (iwm_nic_lock(sc)) {
3580 1.75.2.2 snj val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
3581 1.75.2.2 snj val = val | (sec_num << shift_param);
3582 1.75.2.2 snj IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
3583 1.75.2.2 snj sec_num = (sec_num << 1) | 0x1;
3584 1.75.2.2 snj iwm_nic_unlock(sc);
3585 1.75.2.2 snj
3586 1.75.2.2 snj /*
3587 1.75.2.2 snj * The firmware won't load correctly without this delay.
3588 1.75.2.2 snj */
3589 1.75.2.2 snj DELAY(8000);
3590 1.75.2.2 snj }
3591 1.75.2.2 snj }
3592 1.75.2.2 snj
3593 1.75.2.2 snj *first_ucode_section = last_read_idx;
3594 1.75.2.2 snj
3595 1.75.2.2 snj if (iwm_nic_lock(sc)) {
3596 1.75.2.2 snj if (cpu == 1)
3597 1.75.2.2 snj IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
3598 1.75.2.2 snj else
3599 1.75.2.2 snj IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
3600 1.75.2.2 snj iwm_nic_unlock(sc);
3601 1.75.2.2 snj }
3602 1.75.2.2 snj
3603 1.75.2.2 snj return 0;
3604 1.75.2.2 snj }
3605 1.75.2.2 snj
3606 1.75.2.2 snj static int
3607 1.75.2.2 snj iwm_load_firmware_8000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3608 1.75.2.2 snj {
3609 1.75.2.2 snj struct iwm_fw_sects *fws;
3610 1.75.2.2 snj int err = 0;
3611 1.75.2.2 snj int first_ucode_section;
3612 1.75.2.2 snj
3613 1.75.2.2 snj fws = &sc->sc_fw.fw_sects[ucode_type];
3614 1.75.2.2 snj
3615 1.75.2.2 snj /* configure the ucode to be ready to get the secured image */
3616 1.75.2.2 snj /* release CPU reset */
3617 1.75.2.2 snj if (iwm_nic_lock(sc)) {
3618 1.75.2.2 snj iwm_write_prph(sc, IWM_RELEASE_CPU_RESET,
3619 1.75.2.2 snj IWM_RELEASE_CPU_RESET_BIT);
3620 1.75.2.2 snj iwm_nic_unlock(sc);
3621 1.75.2.2 snj }
3622 1.75.2.2 snj
3623 1.75.2.2 snj /* load to FW the binary Secured sections of CPU1 */
3624 1.75.2.2 snj err = iwm_load_cpu_sections_8000(sc, fws, 1, &first_ucode_section);
3625 1.75.2.2 snj if (err)
3626 1.75.2.2 snj return err;
3627 1.75.2.2 snj
3628 1.75.2.2 snj /* load to FW the binary sections of CPU2 */
3629 1.75.2.2 snj return iwm_load_cpu_sections_8000(sc, fws, 2, &first_ucode_section);
3630 1.75.2.2 snj }
3631 1.75.2.2 snj
3632 1.75.2.2 snj static int
3633 1.75.2.2 snj iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3634 1.75.2.2 snj {
3635 1.75.2.2 snj int err, w;
3636 1.75.2.2 snj
3637 1.75.2.2 snj sc->sc_uc.uc_intr = 0;
3638 1.75.2.2 snj
3639 1.75.2.2 snj if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
3640 1.75.2.2 snj err = iwm_load_firmware_8000(sc, ucode_type);
3641 1.75.2.2 snj else
3642 1.75.2.2 snj err = iwm_load_firmware_7000(sc, ucode_type);
3643 1.75.2.2 snj if (err)
3644 1.75.2.2 snj return err;
3645 1.75.2.2 snj
3646 1.75.2.2 snj /* wait for the firmware to load */
3647 1.75.2.2 snj for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++)
3648 1.75.2.2 snj err = tsleep(&sc->sc_uc, 0, "iwmuc", mstohz(100));
3649 1.75.2.2 snj if (err || !sc->sc_uc.uc_ok) {
3650 1.75.2.2 snj aprint_error_dev(sc->sc_dev,
3651 1.75.2.2 snj "could not load firmware (error %d, ok %d)\n",
3652 1.75.2.2 snj err, sc->sc_uc.uc_ok);
3653 1.75.2.2 snj if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
3654 1.75.2.2 snj aprint_error_dev(sc->sc_dev, "cpu1 status: 0x%x\n",
3655 1.75.2.2 snj iwm_read_prph(sc, IWM_SB_CPU_1_STATUS));
3656 1.75.2.2 snj aprint_error_dev(sc->sc_dev, "cpu2 status: 0x%x\n",
3657 1.75.2.2 snj iwm_read_prph(sc, IWM_SB_CPU_2_STATUS));
3658 1.75.2.2 snj }
3659 1.75.2.2 snj }
3660 1.75.2.2 snj
3661 1.75.2.2 snj return err;
3662 1.75.2.2 snj }
3663 1.75.2.2 snj
3664 1.75.2.2 snj static int
3665 1.75.2.2 snj iwm_start_fw(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3666 1.75.2.2 snj {
3667 1.75.2.2 snj int err;
3668 1.75.2.2 snj
3669 1.75.2.2 snj IWM_WRITE(sc, IWM_CSR_INT, ~0);
3670 1.75.2.2 snj
3671 1.75.2.2 snj err = iwm_nic_init(sc);
3672 1.75.2.2 snj if (err) {
3673 1.75.2.2 snj aprint_error_dev(sc->sc_dev, "Unable to init nic\n");
3674 1.75.2.2 snj return err;
3675 1.75.2.2 snj }
3676 1.75.2.2 snj
3677 1.75.2.2 snj /* make sure rfkill handshake bits are cleared */
3678 1.75.2.2 snj IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
3679 1.75.2.2 snj IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
3680 1.75.2.2 snj IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
3681 1.75.2.2 snj
3682 1.75.2.2 snj /* clear (again), then enable host interrupts */
3683 1.75.2.2 snj IWM_WRITE(sc, IWM_CSR_INT, ~0);
3684 1.75.2.2 snj iwm_enable_interrupts(sc);
3685 1.75.2.2 snj
3686 1.75.2.2 snj /* really make sure rfkill handshake bits are cleared */
3687 1.75.2.2 snj /* maybe we should write a few times more? just to make sure */
3688 1.75.2.2 snj IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
3689 1.75.2.2 snj IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
3690 1.75.2.2 snj
3691 1.75.2.2 snj return iwm_load_firmware(sc, ucode_type);
3692 1.75.2.2 snj }
3693 1.75.2.2 snj
3694 1.75.2.2 snj static int
3695 1.75.2.2 snj iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
3696 1.75.2.2 snj {
3697 1.75.2.2 snj struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
3698 1.75.2.2 snj .valid = htole32(valid_tx_ant),
3699 1.75.2.2 snj };
3700 1.75.2.2 snj
3701 1.75.2.2 snj return iwm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD, 0,
3702 1.75.2.2 snj sizeof(tx_ant_cmd), &tx_ant_cmd);
3703 1.75.2.2 snj }
3704 1.75.2.2 snj
3705 1.75.2.2 snj static int
3706 1.75.2.2 snj iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
3707 1.75.2.2 snj {
3708 1.75.2.2 snj struct iwm_phy_cfg_cmd phy_cfg_cmd;
3709 1.75.2.2 snj enum iwm_ucode_type ucode_type = sc->sc_uc_current;
3710 1.75.2.2 snj
3711 1.75.2.2 snj phy_cfg_cmd.phy_cfg = htole32(sc->sc_fw_phy_config);
3712 1.75.2.2 snj phy_cfg_cmd.calib_control.event_trigger =
3713 1.75.2.2 snj sc->sc_default_calib[ucode_type].event_trigger;
3714 1.75.2.2 snj phy_cfg_cmd.calib_control.flow_trigger =
3715 1.75.2.2 snj sc->sc_default_calib[ucode_type].flow_trigger;
3716 1.75.2.2 snj
3717 1.75.2.2 snj DPRINTFN(10, ("Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg));
3718 1.75.2.2 snj return iwm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, 0,
3719 1.75.2.2 snj sizeof(phy_cfg_cmd), &phy_cfg_cmd);
3720 1.75.2.2 snj }
3721 1.75.2.2 snj
3722 1.75.2.2 snj static int
3723 1.75.2.2 snj iwm_load_ucode_wait_alive(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
3724 1.75.2.2 snj {
3725 1.75.2.2 snj struct iwm_fw_sects *fws;
3726 1.75.2.2 snj enum iwm_ucode_type old_type = sc->sc_uc_current;
3727 1.75.2.2 snj int err;
3728 1.75.2.2 snj
3729 1.75.2.2 snj err = iwm_read_firmware(sc, ucode_type);
3730 1.75.2.2 snj if (err)
3731 1.75.2.2 snj return err;
3732 1.75.2.2 snj
3733 1.75.2.2 snj sc->sc_uc_current = ucode_type;
3734 1.75.2.2 snj err = iwm_start_fw(sc, ucode_type);
3735 1.75.2.2 snj if (err) {
3736 1.75.2.2 snj sc->sc_uc_current = old_type;
3737 1.75.2.2 snj return err;
3738 1.75.2.2 snj }
3739 1.75.2.2 snj
3740 1.75.2.2 snj err = iwm_post_alive(sc);
3741 1.75.2.2 snj if (err)
3742 1.75.2.2 snj return err;
3743 1.75.2.2 snj
3744 1.75.2.2 snj fws = &sc->sc_fw.fw_sects[ucode_type];
3745 1.75.2.2 snj if (fws->paging_mem_size) {
3746 1.75.2.2 snj err = iwm_save_fw_paging(sc, fws);
3747 1.75.2.2 snj if (err)
3748 1.75.2.2 snj return err;
3749 1.75.2.2 snj
3750 1.75.2.2 snj err = iwm_send_paging_cmd(sc, fws);
3751 1.75.2.2 snj if (err) {
3752 1.75.2.2 snj iwm_free_fw_paging(sc);
3753 1.75.2.2 snj return err;
3754 1.75.2.2 snj }
3755 1.75.2.2 snj }
3756 1.75.2.2 snj
3757 1.75.2.2 snj return 0;
3758 1.75.2.2 snj }
3759 1.75.2.2 snj
3760 1.75.2.2 snj static int
3761 1.75.2.2 snj iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
3762 1.75.2.2 snj {
3763 1.75.2.2 snj int err;
3764 1.75.2.2 snj
3765 1.75.2.2 snj if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
3766 1.75.2.2 snj aprint_error_dev(sc->sc_dev,
3767 1.75.2.2 snj "radio is disabled by hardware switch\n");
3768 1.75.2.2 snj return EPERM;
3769 1.75.2.2 snj }
3770 1.75.2.2 snj
3771 1.75.2.2 snj sc->sc_init_complete = 0;
3772 1.75.2.2 snj err = iwm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_INIT);
3773 1.75.2.2 snj if (err) {
3774 1.75.2.2 snj DPRINTF(("%s: failed to load init firmware\n", DEVNAME(sc)));
3775 1.75.2.2 snj return err;
3776 1.75.2.2 snj }
3777 1.75.2.2 snj
3778 1.75.2.2 snj if (justnvm) {
3779 1.75.2.2 snj err = iwm_nvm_init(sc);
3780 1.75.2.2 snj if (err) {
3781 1.75.2.2 snj aprint_error_dev(sc->sc_dev, "failed to read nvm\n");
3782 1.75.2.2 snj return err;
3783 1.75.2.2 snj }
3784 1.75.2.2 snj
3785 1.75.2.2 snj memcpy(&sc->sc_ic.ic_myaddr, &sc->sc_nvm.hw_addr,
3786 1.75.2.2 snj ETHER_ADDR_LEN);
3787 1.75.2.2 snj return 0;
3788 1.75.2.2 snj }
3789 1.75.2.2 snj
3790 1.75.2.2 snj err = iwm_send_bt_init_conf(sc);
3791 1.75.2.2 snj if (err)
3792 1.75.2.2 snj return err;
3793 1.75.2.2 snj
3794 1.75.2.2 snj err = iwm_sf_config(sc, IWM_SF_INIT_OFF);
3795 1.75.2.2 snj if (err)
3796 1.75.2.2 snj return err;
3797 1.75.2.2 snj
3798 1.75.2.2 snj err = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc));
3799 1.75.2.2 snj if (err)
3800 1.75.2.2 snj return err;
3801 1.75.2.2 snj
3802 1.75.2.2 snj /*
3803 1.75.2.2 snj * Send the PHY configuration command to the init uCode to start
3804 1.75.2.2 snj * the 16.0 uCode init image's internal calibrations.
3805 1.75.2.2 snj */
3806 1.75.2.2 snj err = iwm_send_phy_cfg_cmd(sc);
3807 1.75.2.2 snj if (err)
3808 1.75.2.2 snj return err;
3809 1.75.2.2 snj
3810 1.75.2.2 snj /*
3811 1.75.2.2 snj * Nothing to do but wait for the init complete notification
3812 1.75.2.2 snj * from the firmware
3813 1.75.2.2 snj */
3814 1.75.2.2 snj while (!sc->sc_init_complete) {
3815 1.75.2.2 snj err = tsleep(&sc->sc_init_complete, 0, "iwminit", mstohz(2000));
3816 1.75.2.2 snj if (err)
3817 1.75.2.2 snj break;
3818 1.75.2.2 snj }
3819 1.75.2.2 snj
3820 1.75.2.2 snj return err;
3821 1.75.2.2 snj }
3822 1.75.2.2 snj
3823 1.75.2.2 snj static int
3824 1.75.2.2 snj iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
3825 1.75.2.2 snj {
3826 1.75.2.2 snj struct iwm_rx_ring *ring = &sc->rxq;
3827 1.75.2.2 snj struct iwm_rx_data *data = &ring->data[idx];
3828 1.75.2.2 snj struct mbuf *m;
3829 1.75.2.2 snj int err;
3830 1.75.2.2 snj int fatal = 0;
3831 1.75.2.2 snj
3832 1.75.2.2 snj m = m_gethdr(M_DONTWAIT, MT_DATA);
3833 1.75.2.2 snj if (m == NULL)
3834 1.75.2.2 snj return ENOBUFS;
3835 1.75.2.2 snj
3836 1.75.2.2 snj if (size <= MCLBYTES) {
3837 1.75.2.2 snj MCLGET(m, M_DONTWAIT);
3838 1.75.2.2 snj } else {
3839 1.75.2.2 snj MEXTMALLOC(m, IWM_RBUF_SIZE, M_DONTWAIT);
3840 1.75.2.2 snj }
3841 1.75.2.2 snj if ((m->m_flags & M_EXT) == 0) {
3842 1.75.2.2 snj m_freem(m);
3843 1.75.2.2 snj return ENOBUFS;
3844 1.75.2.2 snj }
3845 1.75.2.2 snj
3846 1.75.2.2 snj if (data->m != NULL) {
3847 1.75.2.2 snj bus_dmamap_unload(sc->sc_dmat, data->map);
3848 1.75.2.2 snj fatal = 1;
3849 1.75.2.2 snj }
3850 1.75.2.2 snj
3851 1.75.2.2 snj m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3852 1.75.2.2 snj err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
3853 1.75.2.2 snj BUS_DMA_READ|BUS_DMA_NOWAIT);
3854 1.75.2.2 snj if (err) {
3855 1.75.2.2 snj /* XXX */
3856 1.75.2.2 snj if (fatal)
3857 1.75.2.2 snj panic("iwm: could not load RX mbuf");
3858 1.75.2.2 snj m_freem(m);
3859 1.75.2.2 snj return err;
3860 1.75.2.2 snj }
3861 1.75.2.2 snj data->m = m;
3862 1.75.2.2 snj bus_dmamap_sync(sc->sc_dmat, data->map, 0, size, BUS_DMASYNC_PREREAD);
3863 1.75.2.2 snj
3864 1.75.2.2 snj /* Update RX descriptor. */
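/* The descriptor stores the buffer's DMA address shifted right by 8, i.e. in 256-byte units. */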
3865 1.75.2.2 snj ring->desc[idx] = htole32(data->map->dm_segs[0].ds_addr >> 8);
3866 1.75.2.2 snj bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
3867 1.75.2.2 snj idx * sizeof(uint32_t), sizeof(uint32_t), BUS_DMASYNC_PREWRITE);
3868 1.75.2.2 snj
3869 1.75.2.2 snj return 0;
3870 1.75.2.2 snj }
3871 1.75.2.2 snj
3872 1.75.2.2 snj #define IWM_RSSI_OFFSET 50
3873 1.75.2.2 snj static int
3874 1.75.2.2 snj iwm_calc_rssi(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3875 1.75.2.2 snj {
3876 1.75.2.2 snj int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
3877 1.75.2.2 snj uint32_t agc_a, agc_b;
3878 1.75.2.2 snj uint32_t val;
3879 1.75.2.2 snj
3880 1.75.2.2 snj val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_AGC_IDX]);
3881 1.75.2.2 snj agc_a = (val & IWM_OFDM_AGC_A_MSK) >> IWM_OFDM_AGC_A_POS;
3882 1.75.2.2 snj agc_b = (val & IWM_OFDM_AGC_B_MSK) >> IWM_OFDM_AGC_B_POS;
3883 1.75.2.2 snj
3884 1.75.2.2 snj val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_RSSI_AB_IDX]);
3885 1.75.2.2 snj rssi_a = (val & IWM_OFDM_RSSI_INBAND_A_MSK) >> IWM_OFDM_RSSI_A_POS;
3886 1.75.2.2 snj rssi_b = (val & IWM_OFDM_RSSI_INBAND_B_MSK) >> IWM_OFDM_RSSI_B_POS;
3887 1.75.2.2 snj
3888 1.75.2.2 snj /*
3889 1.75.2.2 snj * dBm = rssi dB - agc dB - constant.
3890 1.75.2.2 snj * Higher AGC (higher radio gain) means lower signal.
3891 1.75.2.2 snj */
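/*
 * Worked example with hypothetical raw values: rssi_a = 40 and
 * agc_a = 20 give 40 - 50 - 20 = -30 dBm.
 */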
3892 1.75.2.2 snj rssi_a_dbm = rssi_a - IWM_RSSI_OFFSET - agc_a;
3893 1.75.2.2 snj rssi_b_dbm = rssi_b - IWM_RSSI_OFFSET - agc_b;
3894 1.75.2.2 snj max_rssi_dbm = MAX(rssi_a_dbm, rssi_b_dbm);
3895 1.75.2.2 snj
3896 1.75.2.2 snj DPRINTF(("Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
3897 1.75.2.2 snj rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b));
3898 1.75.2.2 snj
3899 1.75.2.2 snj return max_rssi_dbm;
3900 1.75.2.2 snj }
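/*
 * Worked example for the formula above (illustrative values only): with
 * rssi_a = 40, agc_a = 60 and IWM_RSSI_OFFSET = 50, chain A yields
 * 40 - 50 - 60 = -70 dBm; the larger of the per-chain values is returned.
 */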
3901 1.75.2.2 snj
3902 1.75.2.2 snj /*
3903 1.75.2.2 snj * RSSI values are reported by the firmware as positive values; negate
3904 1.75.2.2 snj * them to obtain dBm. Account for missing antennas by replacing 0
3905 1.75.2.2 snj * values with -256 dBm: practically zero power and an infeasible 8-bit value.
3906 1.75.2.2 snj */
3907 1.75.2.2 snj static int
3908 1.75.2.2 snj iwm_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
3909 1.75.2.2 snj {
3910 1.75.2.2 snj int energy_a, energy_b, energy_c, max_energy;
3911 1.75.2.2 snj uint32_t val;
3912 1.75.2.2 snj
3913 1.75.2.2 snj val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
3914 1.75.2.2 snj energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3915 1.75.2.2 snj IWM_RX_INFO_ENERGY_ANT_A_POS;
3916 1.75.2.2 snj energy_a = energy_a ? -energy_a : -256;
3917 1.75.2.2 snj energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3918 1.75.2.2 snj IWM_RX_INFO_ENERGY_ANT_B_POS;
3919 1.75.2.2 snj energy_b = energy_b ? -energy_b : -256;
3920 1.75.2.2 snj energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3921 1.75.2.2 snj IWM_RX_INFO_ENERGY_ANT_C_POS;
3922 1.75.2.2 snj energy_c = energy_c ? -energy_c : -256;
3923 1.75.2.2 snj max_energy = MAX(energy_a, energy_b);
3924 1.75.2.2 snj max_energy = MAX(max_energy, energy_c);
3925 1.75.2.2 snj
3926 1.75.2.2 snj DPRINTFN(12, ("energy In A %d B %d C %d, and max %d\n",
3927 1.75.2.2 snj energy_a, energy_b, energy_c, max_energy));
3928 1.75.2.2 snj
3929 1.75.2.2 snj return max_energy;
3930 1.75.2.2 snj }
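/*
 * Illustrative example: a reported energy of 61 on antenna A maps to
 * -61 dBm, while a missing antenna reports 0 and is replaced with
 * -256 dBm so it never wins the MAX() comparisons above.
 */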
3931 1.75.2.2 snj
3932 1.75.2.2 snj static void
3933 1.75.2.2 snj iwm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3934 1.75.2.2 snj struct iwm_rx_data *data)
3935 1.75.2.2 snj {
3936 1.75.2.2 snj struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
3937 1.75.2.2 snj
3938 1.75.2.2 snj DPRINTFN(20, ("received PHY stats\n"));
3939 1.75.2.2 snj bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*pkt),
3940 1.75.2.2 snj sizeof(*phy_info), BUS_DMASYNC_POSTREAD);
3941 1.75.2.2 snj
3942 1.75.2.2 snj memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3943 1.75.2.2 snj }
3944 1.75.2.2 snj
3945 1.75.2.2 snj /*
3946 1.75.2.2 snj * Retrieve the average noise (in dBm) among receivers.
3947 1.75.2.2 snj */
3948 1.75.2.2 snj static int
3949 1.75.2.2 snj iwm_get_noise(const struct iwm_statistics_rx_non_phy *stats)
3950 1.75.2.2 snj {
3951 1.75.2.2 snj int i, total, nbant, noise;
3952 1.75.2.2 snj
3953 1.75.2.2 snj total = nbant = noise = 0;
3954 1.75.2.2 snj for (i = 0; i < 3; i++) {
3955 1.75.2.2 snj noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
3956 1.75.2.2 snj if (noise) {
3957 1.75.2.2 snj total += noise;
3958 1.75.2.2 snj nbant++;
3959 1.75.2.2 snj }
3960 1.75.2.2 snj }
3961 1.75.2.2 snj
3962 1.75.2.2 snj /* There should be at least one antenna but check anyway. */
3963 1.75.2.2 snj return (nbant == 0) ? -127 : (total / nbant) - 107;
3964 1.75.2.2 snj }
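/*
 * Worked example (illustrative values): beacon silence readings of 45,
 * 48 and 0 give total = 93 over nbant = 2, so the function returns
 * 93 / 2 - 107 = -61 dBm; with no valid readings it falls back to
 * -127 dBm.
 */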
3965 1.75.2.2 snj
3966 1.75.2.2 snj static void
3967 1.75.2.2 snj iwm_rx_rx_mpdu(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3968 1.75.2.2 snj struct iwm_rx_data *data)
3969 1.75.2.2 snj {
3970 1.75.2.2 snj struct ieee80211com *ic = &sc->sc_ic;
3971 1.75.2.2 snj struct ieee80211_frame *wh;
3972 1.75.2.2 snj struct ieee80211_node *ni;
3973 1.75.2.2 snj struct ieee80211_channel *c = NULL;
3974 1.75.2.2 snj struct mbuf *m;
3975 1.75.2.2 snj struct iwm_rx_phy_info *phy_info;
3976 1.75.2.2 snj struct iwm_rx_mpdu_res_start *rx_res;
3977 1.75.2.2 snj int device_timestamp;
3978 1.75.2.2 snj uint32_t len;
3979 1.75.2.2 snj uint32_t rx_pkt_status;
3980 1.75.2.2 snj int rssi;
3981 1.75.2.2 snj int s;
3982 1.75.2.2 snj
3983 1.75.2.2 snj bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
3984 1.75.2.2 snj BUS_DMASYNC_POSTREAD);
3985 1.75.2.2 snj
3986 1.75.2.2 snj phy_info = &sc->sc_last_phy_info;
3987 1.75.2.2 snj rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
3988 1.75.2.2 snj wh = (struct ieee80211_frame *)(pkt->data + sizeof(*rx_res));
3989 1.75.2.2 snj len = le16toh(rx_res->byte_count);
3990 1.75.2.2 snj rx_pkt_status = le32toh(*(uint32_t *)(pkt->data +
3991 1.75.2.2 snj sizeof(*rx_res) + len));
3992 1.75.2.2 snj
3993 1.75.2.2 snj m = data->m;
3994 1.75.2.2 snj m->m_data = pkt->data + sizeof(*rx_res);
3995 1.75.2.2 snj m->m_pkthdr.len = m->m_len = len;
3996 1.75.2.2 snj
3997 1.75.2.2 snj if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
3998 1.75.2.2 snj DPRINTF(("dsp size out of range [0,20]: %d\n",
3999 1.75.2.2 snj phy_info->cfg_phy_cnt));
4000 1.75.2.2 snj return;
4001 1.75.2.2 snj }
4002 1.75.2.2 snj
4003 1.75.2.2 snj if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
4004 1.75.2.2 snj !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
4005 1.75.2.2 snj DPRINTF(("Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status));
4006 1.75.2.2 snj return; /* drop */
4007 1.75.2.2 snj }
4008 1.75.2.2 snj
4009 1.75.2.2 snj device_timestamp = le32toh(phy_info->system_timestamp);
4010 1.75.2.2 snj
4011 1.75.2.2 snj if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_RX_ENERGY_API) {
4012 1.75.2.2 snj rssi = iwm_get_signal_strength(sc, phy_info);
4013 1.75.2.2 snj } else {
4014 1.75.2.2 snj rssi = iwm_calc_rssi(sc, phy_info);
4015 1.75.2.2 snj }
4016 1.75.2.2 snj rssi = -rssi;
4017 1.75.2.2 snj
4018 1.75.2.2 snj if (ic->ic_state == IEEE80211_S_SCAN)
4019 1.75.2.2 snj iwm_fix_channel(sc, m);
4020 1.75.2.2 snj
4021 1.75.2.2 snj if (iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0)
4022 1.75.2.2 snj return;
4023 1.75.2.2 snj
4024 1.75.2.2 snj m->m_pkthdr.rcvif = IC2IFP(ic);
4025 1.75.2.2 snj
4026 1.75.2.2 snj if (le32toh(phy_info->channel) < __arraycount(ic->ic_channels))
4027 1.75.2.2 snj c = &ic->ic_channels[le32toh(phy_info->channel)];
4028 1.75.2.2 snj
4029 1.75.2.2 snj s = splnet();
4030 1.75.2.2 snj
4031 1.75.2.2 snj ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
4032 1.75.2.2 snj if (c)
4033 1.75.2.2 snj ni->ni_chan = c;
4034 1.75.2.2 snj
4035 1.75.2.2 snj if (__predict_false(sc->sc_drvbpf != NULL)) {
4036 1.75.2.2 snj struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
4037 1.75.2.2 snj
4038 1.75.2.2 snj tap->wr_flags = 0;
4039 1.75.2.2 snj if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
4040 1.75.2.2 snj tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
4041 1.75.2.2 snj tap->wr_chan_freq =
4042 1.75.2.2 snj htole16(ic->ic_channels[phy_info->channel].ic_freq);
4043 1.75.2.2 snj tap->wr_chan_flags =
4044 1.75.2.2 snj htole16(ic->ic_channels[phy_info->channel].ic_flags);
4045 1.75.2.2 snj tap->wr_dbm_antsignal = (int8_t)rssi;
4046 1.75.2.2 snj tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
4047 1.75.2.2 snj tap->wr_tsft = phy_info->system_timestamp;
4048 1.75.2.2 snj if (phy_info->phy_flags &
4049 1.75.2.2 snj htole16(IWM_RX_RES_PHY_FLAGS_OFDM_HT)) {
4050 1.75.2.2 snj uint8_t mcs = (phy_info->rate_n_flags &
4051 1.75.2.2 snj htole32(IWM_RATE_HT_MCS_RATE_CODE_MSK |
4052 1.75.2.2 snj IWM_RATE_HT_MCS_NSS_MSK));
4053 1.75.2.2 snj tap->wr_rate = (0x80 | mcs);
4054 1.75.2.2 snj } else {
4055 1.75.2.2 snj uint8_t rate = (phy_info->rate_n_flags &
4056 1.75.2.2 snj htole32(IWM_RATE_LEGACY_RATE_MSK));
4057 1.75.2.2 snj switch (rate) {
4058 1.75.2.2 snj /* CCK rates. */
4059 1.75.2.2 snj case 10: tap->wr_rate = 2; break;
4060 1.75.2.2 snj case 20: tap->wr_rate = 4; break;
4061 1.75.2.2 snj case 55: tap->wr_rate = 11; break;
4062 1.75.2.2 snj case 110: tap->wr_rate = 22; break;
4063 1.75.2.2 snj /* OFDM rates. */
4064 1.75.2.2 snj case 0xd: tap->wr_rate = 12; break;
4065 1.75.2.2 snj case 0xf: tap->wr_rate = 18; break;
4066 1.75.2.2 snj case 0x5: tap->wr_rate = 24; break;
4067 1.75.2.2 snj case 0x7: tap->wr_rate = 36; break;
4068 1.75.2.2 snj case 0x9: tap->wr_rate = 48; break;
4069 1.75.2.2 snj case 0xb: tap->wr_rate = 72; break;
4070 1.75.2.2 snj case 0x1: tap->wr_rate = 96; break;
4071 1.75.2.2 snj case 0x3: tap->wr_rate = 108; break;
4072 1.75.2.2 snj /* Unknown rate: should not happen. */
4073 1.75.2.2 snj default: tap->wr_rate = 0;
4074 1.75.2.2 snj }
4075 1.75.2.2 snj }
4076 1.75.2.2 snj
4077 1.75.2.2 snj bpf_mtap2(sc->sc_drvbpf, tap, sc->sc_rxtap_len, m);
4078 1.75.2.2 snj }
4079 1.75.2.2 snj ieee80211_input(ic, m, ni, rssi, device_timestamp);
4080 1.75.2.2 snj ieee80211_free_node(ni);
4081 1.75.2.2 snj
4082 1.75.2.2 snj splx(s);
4083 1.75.2.2 snj }
4084 1.75.2.2 snj
4085 1.75.2.2 snj static void
4086 1.75.2.2 snj iwm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
4087 1.75.2.2 snj struct iwm_node *in)
4088 1.75.2.2 snj {
4089 1.75.2.2 snj struct ieee80211com *ic = &sc->sc_ic;
4090 1.75.2.2 snj struct ifnet *ifp = IC2IFP(ic);
4091 1.75.2.2 snj struct iwm_tx_resp *tx_resp = (void *)pkt->data;
4092 1.75.2.2 snj int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
4093 1.75.2.2 snj int failack = tx_resp->failure_frame;
4094 1.75.2.2 snj
4095 1.75.2.2 snj KASSERT(tx_resp->frame_count == 1);
4096 1.75.2.2 snj
4097 1.75.2.2 snj /* Update rate control statistics. */
4098 1.75.2.2 snj in->in_amn.amn_txcnt++;
4099 1.75.2.2 snj if (failack > 0) {
4100 1.75.2.2 snj in->in_amn.amn_retrycnt++;
4101 1.75.2.2 snj }
4102 1.75.2.2 snj
4103 1.75.2.2 snj if (status != IWM_TX_STATUS_SUCCESS &&
4104 1.75.2.2 snj status != IWM_TX_STATUS_DIRECT_DONE)
4105 1.75.2.2 snj ifp->if_oerrors++;
4106 1.75.2.2 snj else
4107 1.75.2.2 snj ifp->if_opackets++;
4108 1.75.2.2 snj }
4109 1.75.2.2 snj
4110 1.75.2.2 snj static void
4111 1.75.2.2 snj iwm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
4112 1.75.2.2 snj struct iwm_rx_data *data)
4113 1.75.2.2 snj {
4114 1.75.2.2 snj struct ieee80211com *ic = &sc->sc_ic;
4115 1.75.2.2 snj struct ifnet *ifp = IC2IFP(ic);
4116 1.75.2.2 snj struct iwm_cmd_header *cmd_hdr = &pkt->hdr;
4117 1.75.2.2 snj int idx = cmd_hdr->idx;
4118 1.75.2.2 snj int qid = cmd_hdr->qid;
4119 1.75.2.2 snj struct iwm_tx_ring *ring = &sc->txq[qid];
4120 1.75.2.2 snj struct iwm_tx_data *txd = &ring->data[idx];
4121 1.75.2.2 snj struct iwm_node *in = txd->in;
4122 1.75.2.2 snj int s;
4123 1.75.2.2 snj
4124 1.75.2.2 snj s = splnet();
4125 1.75.2.2 snj
4126 1.75.2.2 snj if (txd->done) {
4127 1.75.2.2 snj DPRINTF(("%s: got tx interrupt that's already been handled!\n",
4128 1.75.2.2 snj DEVNAME(sc)));
4129 1.75.2.2 snj splx(s);
4130 1.75.2.2 snj return;
4131 1.75.2.2 snj }
4132 1.75.2.2 snj
4133 1.75.2.2 snj bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWM_RBUF_SIZE,
4134 1.75.2.2 snj BUS_DMASYNC_POSTREAD);
4135 1.75.2.2 snj
4136 1.75.2.2 snj sc->sc_tx_timer = 0;
4137 1.75.2.2 snj
4138 1.75.2.2 snj iwm_rx_tx_cmd_single(sc, pkt, in);
4139 1.75.2.2 snj
4140 1.75.2.2 snj bus_dmamap_sync(sc->sc_dmat, txd->map, 0, txd->map->dm_mapsize,
4141 1.75.2.2 snj BUS_DMASYNC_POSTWRITE);
4142 1.75.2.2 snj bus_dmamap_unload(sc->sc_dmat, txd->map);
4143 1.75.2.2 snj m_freem(txd->m);
4144 1.75.2.2 snj
4145 1.75.2.2 snj DPRINTFN(8, ("free txd %p, in %p\n", txd, txd->in));
4146 1.75.2.2 snj KASSERT(txd->done == 0);
4147 1.75.2.2 snj txd->done = 1;
4148 1.75.2.2 snj KASSERT(txd->in);
4149 1.75.2.2 snj
4150 1.75.2.2 snj txd->m = NULL;
4151 1.75.2.2 snj txd->in = NULL;
4152 1.75.2.2 snj ieee80211_free_node(&in->in_ni);
4153 1.75.2.2 snj
4154 1.75.2.2 snj if (--ring->queued < IWM_TX_RING_LOMARK) {
4155 1.75.2.2 snj sc->qfullmsk &= ~(1 << qid);
4156 1.75.2.2 snj if (sc->qfullmsk == 0 && (ifp->if_flags & IFF_OACTIVE)) {
4157 1.75.2.2 snj ifp->if_flags &= ~IFF_OACTIVE;
4158 1.75.2.2 snj KASSERT(KERNEL_LOCKED_P());
4159 1.75.2.2 snj iwm_start(ifp);
4160 1.75.2.2 snj }
4161 1.75.2.2 snj }
4162 1.75.2.2 snj
4163 1.75.2.2 snj splx(s);
4164 1.75.2.2 snj }
4165 1.75.2.2 snj
4166 1.75.2.2 snj static int
4167 1.75.2.2 snj iwm_binding_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action)
4168 1.75.2.2 snj {
4169 1.75.2.2 snj struct iwm_binding_cmd cmd;
4170 1.75.2.2 snj struct iwm_phy_ctxt *phyctxt = in->in_phyctxt;
4171 1.75.2.2 snj int i, err;
4172 1.75.2.2 snj uint32_t status;
4173 1.75.2.2 snj
4174 1.75.2.2 snj memset(&cmd, 0, sizeof(cmd));
4175 1.75.2.2 snj
4176 1.75.2.2 snj cmd.id_and_color
4177 1.75.2.2 snj = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
4178 1.75.2.2 snj cmd.action = htole32(action);
4179 1.75.2.2 snj cmd.phy = htole32(IWM_FW_CMD_ID_AND_COLOR(phyctxt->id, phyctxt->color));
4180 1.75.2.2 snj
4181 1.75.2.2 snj cmd.macs[0] = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
4182 1.75.2.2 snj for (i = 1; i < IWM_MAX_MACS_IN_BINDING; i++)
4183 1.75.2.2 snj cmd.macs[i] = htole32(IWM_FW_CTXT_INVALID);
4184 1.75.2.2 snj
4185 1.75.2.2 snj status = 0;
4186 1.75.2.2 snj err = iwm_send_cmd_pdu_status(sc, IWM_BINDING_CONTEXT_CMD,
4187 1.75.2.2 snj sizeof(cmd), &cmd, &status);
4188 1.75.2.2 snj if (err == 0 && status != 0)
4189 1.75.2.2 snj err = EIO;
4190 1.75.2.2 snj
4191 1.75.2.2 snj return err;
4192 1.75.2.2 snj }
4193 1.75.2.2 snj
4194 1.75.2.2 snj static void
4195 1.75.2.2 snj iwm_phy_ctxt_cmd_hdr(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
4196 1.75.2.2 snj struct iwm_phy_context_cmd *cmd, uint32_t action, uint32_t apply_time)
4197 1.75.2.2 snj {
4198 1.75.2.2 snj memset(cmd, 0, sizeof(struct iwm_phy_context_cmd));
4199 1.75.2.2 snj
4200 1.75.2.2 snj cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(ctxt->id,
4201 1.75.2.2 snj ctxt->color));
4202 1.75.2.2 snj cmd->action = htole32(action);
4203 1.75.2.2 snj cmd->apply_time = htole32(apply_time);
4204 1.75.2.2 snj }
4205 1.75.2.2 snj
4206 1.75.2.2 snj static void
4207 1.75.2.2 snj iwm_phy_ctxt_cmd_data(struct iwm_softc *sc, struct iwm_phy_context_cmd *cmd,
4208 1.75.2.2 snj struct ieee80211_channel *chan, uint8_t chains_static,
4209 1.75.2.2 snj uint8_t chains_dynamic)
4210 1.75.2.2 snj {
4211 1.75.2.2 snj struct ieee80211com *ic = &sc->sc_ic;
4212 1.75.2.2 snj uint8_t active_cnt, idle_cnt;
4213 1.75.2.2 snj
4214 1.75.2.2 snj cmd->ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
4215 1.75.2.2 snj IWM_PHY_BAND_24 : IWM_PHY_BAND_5;
4216 1.75.2.2 snj
4217 1.75.2.2 snj cmd->ci.channel = ieee80211_chan2ieee(ic, chan);
4218 1.75.2.2 snj cmd->ci.width = IWM_PHY_VHT_CHANNEL_MODE20;
4219 1.75.2.2 snj cmd->ci.ctrl_pos = IWM_PHY_VHT_CTRL_POS_1_BELOW;
4220 1.75.2.2 snj
4221 1.75.2.2 snj /* Set the RX chains. */
4222 1.75.2.2 snj idle_cnt = chains_static;
4223 1.75.2.2 snj active_cnt = chains_dynamic;
4224 1.75.2.2 snj
4225 1.75.2.2 snj cmd->rxchain_info = htole32(iwm_fw_valid_rx_ant(sc) <<
4226 1.75.2.2 snj IWM_PHY_RX_CHAIN_VALID_POS);
4227 1.75.2.2 snj cmd->rxchain_info |= htole32(idle_cnt << IWM_PHY_RX_CHAIN_CNT_POS);
4228 1.75.2.2 snj cmd->rxchain_info |= htole32(active_cnt <<
4229 1.75.2.2 snj IWM_PHY_RX_CHAIN_MIMO_CNT_POS);
4230 1.75.2.2 snj
4231 1.75.2.2 snj cmd->txchain_info = htole32(iwm_fw_valid_tx_ant(sc));
4232 1.75.2.2 snj }
4233 1.75.2.2 snj
4234 1.75.2.2 snj static int
4235 1.75.2.2 snj iwm_phy_ctxt_cmd(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt,
4236 1.75.2.2 snj uint8_t chains_static, uint8_t chains_dynamic, uint32_t action,
4237 1.75.2.2 snj uint32_t apply_time)
4238 1.75.2.2 snj {
4239 1.75.2.2 snj struct iwm_phy_context_cmd cmd;
4240 1.75.2.2 snj
4241 1.75.2.2 snj iwm_phy_ctxt_cmd_hdr(sc, ctxt, &cmd, action, apply_time);
4242 1.75.2.2 snj
4243 1.75.2.2 snj iwm_phy_ctxt_cmd_data(sc, &cmd, ctxt->channel,
4244 1.75.2.2 snj chains_static, chains_dynamic);
4245 1.75.2.2 snj
4246 1.75.2.2 snj return iwm_send_cmd_pdu(sc, IWM_PHY_CONTEXT_CMD, 0,
4247 1.75.2.2 snj sizeof(struct iwm_phy_context_cmd), &cmd);
4248 1.75.2.2 snj }
4249 1.75.2.2 snj
4250 1.75.2.2 snj static int
4251 1.75.2.2 snj iwm_send_cmd(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
4252 1.75.2.2 snj {
4253 1.75.2.2 snj struct iwm_tx_ring *ring = &sc->txq[IWM_CMD_QUEUE];
4254 1.75.2.2 snj struct iwm_tfd *desc;
4255 1.75.2.2 snj struct iwm_tx_data *txdata;
4256 1.75.2.2 snj struct iwm_device_cmd *cmd;
4257 1.75.2.2 snj struct mbuf *m;
4258 1.75.2.2 snj bus_addr_t paddr;
4259 1.75.2.2 snj uint32_t addr_lo;
4260 1.75.2.2 snj int err = 0, i, paylen, off, s;
4261 1.75.2.2 snj int code;
4262 1.75.2.2 snj int async, wantresp;
4263 1.75.2.2 snj int group_id;
4264 1.75.2.2 snj size_t hdrlen, datasz;
4265 1.75.2.2 snj uint8_t *data;
4266 1.75.2.2 snj
4267 1.75.2.2 snj code = hcmd->id;
4268 1.75.2.2 snj async = hcmd->flags & IWM_CMD_ASYNC;
4269 1.75.2.2 snj wantresp = hcmd->flags & IWM_CMD_WANT_SKB;
4270 1.75.2.2 snj
4271 1.75.2.2 snj for (i = 0, paylen = 0; i < __arraycount(hcmd->len); i++) {
4272 1.75.2.2 snj paylen += hcmd->len[i];
4273 1.75.2.2 snj }
4274 1.75.2.2 snj
4275 1.75.2.2 snj /* if the command wants an answer, busy sc_cmd_resp */
4276 1.75.2.2 snj if (wantresp) {
4277 1.75.2.2 snj KASSERT(!async);
4278 1.75.2.2 snj while (sc->sc_wantresp != IWM_CMD_RESP_IDLE)
4279 1.75.2.2 snj tsleep(&sc->sc_wantresp, 0, "iwmcmdsl", 0);
4280 1.75.2.2 snj sc->sc_wantresp = ring->qid << 16 | ring->cur;
4281 1.75.2.2 snj }
4282 1.75.2.2 snj
4283 1.75.2.2 snj /*
4284 1.75.2.2 snj * Is the hardware still available? (e.g. after the wait above).
4285 1.75.2.2 snj */
4286 1.75.2.2 snj s = splnet();
4287 1.75.2.2 snj if (sc->sc_flags & IWM_FLAG_STOPPED) {
4288 1.75.2.2 snj err = ENXIO;
4289 1.75.2.2 snj goto out;
4290 1.75.2.2 snj }
4291 1.75.2.2 snj
4292 1.75.2.2 snj desc = &ring->desc[ring->cur];
4293 1.75.2.2 snj txdata = &ring->data[ring->cur];
4294 1.75.2.2 snj
4295 1.75.2.2 snj group_id = iwm_cmd_groupid(code);
4296 1.75.2.2 snj if (group_id != 0) {
4297 1.75.2.2 snj hdrlen = sizeof(cmd->hdr_wide);
4298 1.75.2.2 snj datasz = sizeof(cmd->data_wide);
4299 1.75.2.2 snj } else {
4300 1.75.2.2 snj hdrlen = sizeof(cmd->hdr);
4301 1.75.2.2 snj datasz = sizeof(cmd->data);
4302 1.75.2.2 snj }
4303 1.75.2.2 snj
4304 1.75.2.2 snj if (paylen > datasz) {
4305 1.75.2.2 snj /* Command is too large to fit in pre-allocated space. */
4306 1.75.2.2 snj size_t totlen = hdrlen + paylen;
4307 1.75.2.2 snj if (paylen > IWM_MAX_CMD_PAYLOAD_SIZE) {
4308 1.75.2.2 snj aprint_error_dev(sc->sc_dev,
4309 1.75.2.2 snj "firmware command too long (%zd bytes)\n", totlen);
4310 1.75.2.2 snj err = EINVAL;
4311 1.75.2.2 snj goto out;
4312 1.75.2.2 snj }
4313 1.75.2.2 snj m = m_gethdr(M_DONTWAIT, MT_DATA);
4314 1.75.2.2 snj if (m == NULL) {
4315 1.75.2.2 snj err = ENOMEM;
4316 1.75.2.2 snj goto out;
4317 1.75.2.2 snj }
4318 1.75.2.2 snj MEXTMALLOC(m, IWM_RBUF_SIZE, M_DONTWAIT);
4319 1.75.2.2 snj if (!(m->m_flags & M_EXT)) {
4320 1.75.2.2 snj aprint_error_dev(sc->sc_dev,
4321 1.75.2.2 snj "could not get fw cmd mbuf (%zd bytes)\n", totlen);
4322 1.75.2.2 snj m_freem(m);
4323 1.75.2.2 snj err = ENOMEM;
4324 1.75.2.2 snj goto out;
4325 1.75.2.2 snj }
4326 1.75.2.2 snj cmd = mtod(m, struct iwm_device_cmd *);
4327 1.75.2.2 snj err = bus_dmamap_load(sc->sc_dmat, txdata->map, cmd,
4328 1.75.2.2 snj totlen, NULL, BUS_DMA_NOWAIT | BUS_DMA_WRITE);
4329 1.75.2.2 snj if (err) {
4330 1.75.2.2 snj aprint_error_dev(sc->sc_dev,
4331 1.75.2.2 snj "could not load fw cmd mbuf (%zd bytes)\n", totlen);
4332 1.75.2.2 snj m_freem(m);
4333 1.75.2.2 snj goto out;
4334 1.75.2.2 snj }
4335 1.75.2.2 snj txdata->m = m;
4336 1.75.2.2 snj paddr = txdata->map->dm_segs[0].ds_addr;
4337 1.75.2.2 snj } else {
4338 1.75.2.2 snj cmd = &ring->cmd[ring->cur];
4339 1.75.2.2 snj paddr = txdata->cmd_paddr;
4340 1.75.2.2 snj }
4341 1.75.2.2 snj
4342 1.75.2.2 snj if (group_id != 0) {
4343 1.75.2.2 snj cmd->hdr_wide.opcode = iwm_cmd_opcode(code);
4344 1.75.2.2 snj cmd->hdr_wide.group_id = group_id;
4345 1.75.2.2 snj cmd->hdr_wide.qid = ring->qid;
4346 1.75.2.2 snj cmd->hdr_wide.idx = ring->cur;
4347 1.75.2.2 snj cmd->hdr_wide.length = htole16(paylen);
4348 1.75.2.2 snj cmd->hdr_wide.version = iwm_cmd_version(code);
4349 1.75.2.2 snj data = cmd->data_wide;
4350 1.75.2.2 snj } else {
4351 1.75.2.2 snj cmd->hdr.code = code;
4352 1.75.2.2 snj cmd->hdr.flags = 0;
4353 1.75.2.2 snj cmd->hdr.qid = ring->qid;
4354 1.75.2.2 snj cmd->hdr.idx = ring->cur;
4355 1.75.2.2 snj data = cmd->data;
4356 1.75.2.2 snj }
4357 1.75.2.2 snj
4358 1.75.2.2 snj for (i = 0, off = 0; i < __arraycount(hcmd->data); i++) {
4359 1.75.2.2 snj if (hcmd->len[i] == 0)
4360 1.75.2.2 snj continue;
4361 1.75.2.2 snj memcpy(data + off, hcmd->data[i], hcmd->len[i]);
4362 1.75.2.2 snj off += hcmd->len[i];
4363 1.75.2.2 snj }
4364 1.75.2.2 snj KASSERT(off == paylen);
4365 1.75.2.2 snj
4366 1.75.2.2 snj /* lo field is not aligned */
4367 1.75.2.2 snj addr_lo = htole32((uint32_t)paddr);
4368 1.75.2.2 snj memcpy(&desc->tbs[0].lo, &addr_lo, sizeof(uint32_t));
4369 1.75.2.2 snj desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(paddr)
4370 1.75.2.2 snj | ((hdrlen + paylen) << 4));
4371 1.75.2.2 snj desc->num_tbs = 1;
4372 1.75.2.2 snj
4373 1.75.2.2 snj DPRINTFN(8, ("iwm_send_cmd 0x%x size=%zu %s\n",
4374 1.75.2.2 snj code, hdrlen + paylen, async ? " (async)" : ""));
4375 1.75.2.2 snj
4376 1.75.2.2 snj if (paylen > datasz) {
4377 1.75.2.2 snj bus_dmamap_sync(sc->sc_dmat, txdata->map, 0, hdrlen + paylen,
4378 1.75.2.2 snj BUS_DMASYNC_PREWRITE);
4379 1.75.2.2 snj } else {
4380 1.75.2.2 snj bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
4381 1.75.2.2 snj (uint8_t *)cmd - (uint8_t *)ring->cmd, hdrlen + paylen,
4382 1.75.2.2 snj BUS_DMASYNC_PREWRITE);
4383 1.75.2.2 snj }
4384 1.75.2.2 snj bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
4385 1.75.2.2 snj (uint8_t *)desc - (uint8_t *)ring->desc, sizeof(*desc),
4386 1.75.2.2 snj BUS_DMASYNC_PREWRITE);
4387 1.75.2.2 snj
4388 1.75.2.2 snj err = iwm_set_cmd_in_flight(sc);
4389 1.75.2.2 snj if (err)
4390 1.75.2.2 snj goto out;
4391 1.75.2.2 snj ring->queued++;
4392 1.75.2.2 snj
4393 1.75.2.2 snj #if 0
4394 1.75.2.2 snj iwm_update_sched(sc, ring->qid, ring->cur, 0, 0);
4395 1.75.2.2 snj #endif
4396 1.75.2.2 snj DPRINTF(("sending command 0x%x qid %d, idx %d\n",
4397 1.75.2.2 snj code, ring->qid, ring->cur));
4398 1.75.2.2 snj
4399 1.75.2.2 snj /* Kick command ring. */
4400 1.75.2.2 snj ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
4401 1.75.2.2 snj IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
4402 1.75.2.2 snj
4403 1.75.2.2 snj if (!async) {
4404 1.75.2.2 snj int generation = sc->sc_generation;
4405 1.75.2.2 snj err = tsleep(desc, PCATCH, "iwmcmd", mstohz(2000));
4406 1.75.2.2 snj if (err == 0) {
4407 1.75.2.2 snj /* if hardware is no longer up, return error */
4408 1.75.2.2 snj if (generation != sc->sc_generation) {
4409 1.75.2.2 snj err = ENXIO;
4410 1.75.2.2 snj } else {
4411 1.75.2.2 snj hcmd->resp_pkt = (void *)sc->sc_cmd_resp;
4412 1.75.2.2 snj }
4413 1.75.2.2 snj }
4414 1.75.2.2 snj }
4415 1.75.2.2 snj out:
4416 1.75.2.2 snj if (wantresp && err) {
4417 1.75.2.2 snj iwm_free_resp(sc, hcmd);
4418 1.75.2.2 snj }
4419 1.75.2.2 snj splx(s);
4420 1.75.2.2 snj
4421 1.75.2.2 snj return err;
4422 1.75.2.2 snj }
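/*
 * Usage sketch (illustrative only; it mirrors iwm_send_cmd_status() below,
 * with `cmd' standing in for a caller-built payload): a synchronous caller
 * that sets IWM_CMD_WANT_SKB owns the response slot only when
 * iwm_send_cmd() returns 0, since the error path above already calls
 * iwm_free_resp():
 *
 *	struct iwm_host_cmd hcmd = {
 *		.id = IWM_BINDING_CONTEXT_CMD,
 *		.len = { sizeof(cmd), },
 *		.data = { &cmd, },
 *		.flags = IWM_CMD_WANT_SKB,
 *	};
 *	err = iwm_send_cmd(sc, &hcmd);
 *	if (err)
 *		return err;
 *	if (hcmd.resp_pkt != NULL)
 *		... examine hcmd.resp_pkt ...
 *	iwm_free_resp(sc, &hcmd);
 */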
4423 1.75.2.2 snj
4424 1.75.2.2 snj static int
4425 1.75.2.2 snj iwm_send_cmd_pdu(struct iwm_softc *sc, uint32_t id, uint32_t flags,
4426 1.75.2.2 snj uint16_t len, const void *data)
4427 1.75.2.2 snj {
4428 1.75.2.2 snj struct iwm_host_cmd cmd = {
4429 1.75.2.2 snj .id = id,
4430 1.75.2.2 snj .len = { len, },
4431 1.75.2.2 snj .data = { data, },
4432 1.75.2.2 snj .flags = flags,
4433 1.75.2.2 snj };
4434 1.75.2.2 snj
4435 1.75.2.2 snj return iwm_send_cmd(sc, &cmd);
4436 1.75.2.2 snj }
4437 1.75.2.2 snj
4438 1.75.2.2 snj static int
4439 1.75.2.2 snj iwm_send_cmd_status(struct iwm_softc *sc, struct iwm_host_cmd *cmd,
4440 1.75.2.2 snj uint32_t *status)
4441 1.75.2.2 snj {
4442 1.75.2.2 snj struct iwm_rx_packet *pkt;
4443 1.75.2.2 snj struct iwm_cmd_response *resp;
4444 1.75.2.2 snj int err, resp_len;
4445 1.75.2.2 snj
4446 1.75.2.2 snj KASSERT((cmd->flags & IWM_CMD_WANT_SKB) == 0);
4447 1.75.2.2 snj cmd->flags |= IWM_CMD_WANT_SKB;
4448 1.75.2.2 snj
4449 1.75.2.2 snj err = iwm_send_cmd(sc, cmd);
4450 1.75.2.2 snj if (err)
4451 1.75.2.2 snj return err;
4452 1.75.2.2 snj pkt = cmd->resp_pkt;
4453 1.75.2.2 snj
4454 1.75.2.2 snj /* Can happen if RFKILL is asserted */
4455 1.75.2.2 snj if (!pkt) {
4456 1.75.2.2 snj err = 0;
4457 1.75.2.2 snj goto out_free_resp;
4458 1.75.2.2 snj }
4459 1.75.2.2 snj
4460 1.75.2.2 snj if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
4461 1.75.2.2 snj err = EIO;
4462 1.75.2.2 snj goto out_free_resp;
4463 1.75.2.2 snj }
4464 1.75.2.2 snj
4465 1.75.2.2 snj resp_len = iwm_rx_packet_payload_len(pkt);
4466 1.75.2.2 snj if (resp_len != sizeof(*resp)) {
4467 1.75.2.2 snj err = EIO;
4468 1.75.2.2 snj goto out_free_resp;
4469 1.75.2.2 snj }
4470 1.75.2.2 snj
4471 1.75.2.2 snj resp = (void *)pkt->data;
4472 1.75.2.2 snj *status = le32toh(resp->status);
4473 1.75.2.2 snj out_free_resp:
4474 1.75.2.2 snj iwm_free_resp(sc, cmd);
4475 1.75.2.2 snj return err;
4476 1.75.2.2 snj }
4477 1.75.2.2 snj
4478 1.75.2.2 snj static int
4479 1.75.2.2 snj iwm_send_cmd_pdu_status(struct iwm_softc *sc, uint32_t id, uint16_t len,
4480 1.75.2.2 snj const void *data, uint32_t *status)
4481 1.75.2.2 snj {
4482 1.75.2.2 snj struct iwm_host_cmd cmd = {
4483 1.75.2.2 snj .id = id,
4484 1.75.2.2 snj .len = { len, },
4485 1.75.2.2 snj .data = { data, },
4486 1.75.2.2 snj };
4487 1.75.2.2 snj
4488 1.75.2.2 snj return iwm_send_cmd_status(sc, &cmd, status);
4489 1.75.2.2 snj }
4490 1.75.2.2 snj
4491 1.75.2.2 snj static void
4492 1.75.2.2 snj iwm_free_resp(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
4493 1.75.2.2 snj {
4494 1.75.2.2 snj KASSERT(sc->sc_wantresp != IWM_CMD_RESP_IDLE);
4495 1.75.2.2 snj KASSERT((hcmd->flags & IWM_CMD_WANT_SKB) == IWM_CMD_WANT_SKB);
4496 1.75.2.2 snj sc->sc_wantresp = IWM_CMD_RESP_IDLE;
4497 1.75.2.2 snj wakeup(&sc->sc_wantresp);
4498 1.75.2.2 snj }
4499 1.75.2.2 snj
4500 1.75.2.2 snj static void
4501 1.75.2.2 snj iwm_cmd_done(struct iwm_softc *sc, int qid, int idx)
4502 1.75.2.2 snj {
4503 1.75.2.2 snj struct iwm_tx_ring *ring = &sc->txq[IWM_CMD_QUEUE];
4504 1.75.2.2 snj struct iwm_tx_data *data;
4505 1.75.2.2 snj int s;
4506 1.75.2.2 snj
4507 1.75.2.2 snj if (qid != IWM_CMD_QUEUE) {
4508 1.75.2.2 snj return; /* Not a command ack. */
4509 1.75.2.2 snj }
4510 1.75.2.2 snj
4511 1.75.2.2 snj s = splnet();
4512 1.75.2.2 snj
4513 1.75.2.2 snj data = &ring->data[idx];
4514 1.75.2.2 snj
4515 1.75.2.2 snj if (data->m != NULL) {
4516 1.75.2.2 snj bus_dmamap_sync(sc->sc_dmat, data->map, 0,
4517 1.75.2.2 snj data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
4518 1.75.2.2 snj bus_dmamap_unload(sc->sc_dmat, data->map);
4519 1.75.2.2 snj m_freem(data->m);
4520 1.75.2.2 snj data->m = NULL;
4521 1.75.2.2 snj }
4522 1.75.2.2 snj wakeup(&ring->desc[idx]);
4523 1.75.2.2 snj
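/*
 * Explanatory note: commands occupy consecutive slots of the command
 * ring, so if completions arrive in order, idx plus the number of
 * still-outstanding commands should equal the write index modulo
 * IWM_TX_RING_COUNT; the check below warns when that invariant breaks.
 */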
4524 1.75.2.2 snj if (((idx + ring->queued) % IWM_TX_RING_COUNT) != ring->cur) {
4525 1.75.2.2 snj aprint_error_dev(sc->sc_dev,
4526 1.75.2.2 snj "Some HCMDs skipped?: idx=%d queued=%d cur=%d\n",
4527 1.75.2.2 snj idx, ring->queued, ring->cur);
4528 1.75.2.2 snj }
4529 1.75.2.2 snj
4530 1.75.2.2 snj KASSERT(ring->queued > 0);
4531 1.75.2.2 snj if (--ring->queued == 0)
4532 1.75.2.2 snj iwm_clear_cmd_in_flight(sc);
4533 1.75.2.2 snj
4534 1.75.2.2 snj splx(s);
4535 1.75.2.2 snj }
4536 1.75.2.2 snj
4537 1.75.2.2 snj #if 0
4538 1.75.2.2 snj /*
4539 1.75.2.2 snj * necessary only for block ack mode
4540 1.75.2.2 snj */
4541 1.75.2.2 snj void
4542 1.75.2.2 snj iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
4543 1.75.2.2 snj uint16_t len)
4544 1.75.2.2 snj {
4545 1.75.2.2 snj struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
4546 1.75.2.2 snj uint16_t w_val;
4547 1.75.2.2 snj
4548 1.75.2.2 snj scd_bc_tbl = sc->sched_dma.vaddr;
4549 1.75.2.2 snj
4550 1.75.2.2 snj len += 8; /* magic numbers came naturally from paris */
4551 1.75.2.2 snj if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DW_BC_TABLE)
4552 1.75.2.2 snj len = roundup(len, 4) / 4;
4553 1.75.2.2 snj
4554 1.75.2.2 snj w_val = htole16(sta_id << 12 | len);
4555 1.75.2.2 snj
4556 1.75.2.2 snj /* Update TX scheduler. */
4557 1.75.2.2 snj scd_bc_tbl[qid].tfd_offset[idx] = w_val;
4558 1.75.2.2 snj bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
4559 1.75.2.2 snj (char *)(void *)&scd_bc_tbl[qid].tfd_offset[idx] - (char *)(void *)sc->sched_dma.vaddr,
4560 1.75.2.2 snj sizeof(uint16_t), BUS_DMASYNC_PREWRITE);
4561 1.75.2.2 snj
4562 1.75.2.2 snj /* The first IWM_TFD_QUEUE_SIZE_BC_DUP entries are apparently duplicated past the ring end. */
4563 1.75.2.2 snj if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
4564 1.75.2.2 snj scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
4565 1.75.2.2 snj bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
4566 1.75.2.2 snj (char *)(void *)&scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] -
4567 1.75.2.2 snj (char *)(void *)sc->sched_dma.vaddr,
4568 1.75.2.2 snj sizeof (uint16_t), BUS_DMASYNC_PREWRITE);
4569 1.75.2.2 snj }
4570 1.75.2.2 snj }
4571 1.75.2.2 snj #endif
4572 1.75.2.2 snj
4573 1.75.2.2 snj /*
4574 1.75.2.2 snj * Fill in various bits for management frames, and leave them
4575 1.75.2.2 snj * unfilled for data frames (firmware takes care of that).
4576 1.75.2.2 snj * Return the selected TX rate.
4577 1.75.2.2 snj */
4578 1.75.2.2 snj static const struct iwm_rate *
4579 1.75.2.2 snj iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
4580 1.75.2.2 snj struct ieee80211_frame *wh, struct iwm_tx_cmd *tx)
4581 1.75.2.2 snj {
4582 1.75.2.2 snj struct ieee80211com *ic = &sc->sc_ic;
4583 1.75.2.2 snj struct ieee80211_node *ni = &in->in_ni;
4584 1.75.2.2 snj const struct iwm_rate *rinfo;
4585 1.75.2.2 snj int type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4586 1.75.2.2 snj int ridx, rate_flags, i, ind;
4587 1.75.2.2 snj int nrates = ni->ni_rates.rs_nrates;
4588 1.75.2.2 snj
4589 1.75.2.2 snj tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
4590 1.75.2.2 snj tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
4591 1.75.2.2 snj
4592 1.75.2.2 snj if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
4593 1.75.2.2 snj type != IEEE80211_FC0_TYPE_DATA) {
4594 1.75.2.2 snj /* for non-data, use the lowest supported rate */
4595 1.75.2.2 snj ridx = (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) ?
4596 1.75.2.2 snj IWM_RIDX_OFDM : IWM_RIDX_CCK;
4597 1.75.2.2 snj tx->data_retry_limit = IWM_MGMT_DFAULT_RETRY_LIMIT;
4598 1.75.2.2 snj #ifndef IEEE80211_NO_HT
4599 1.75.2.2 snj } else if (ic->ic_fixed_mcs != -1) {
4600 1.75.2.2 snj ridx = sc->sc_fixed_ridx;
4601 1.75.2.2 snj #endif
4602 1.75.2.2 snj } else if (ic->ic_fixed_rate != -1) {
4603 1.75.2.2 snj ridx = sc->sc_fixed_ridx;
4604 1.75.2.2 snj } else {
4605 1.75.2.2 snj /* for data frames, use RS table */
4606 1.75.2.2 snj tx->initial_rate_index = 0;
4607 1.75.2.2 snj tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
4608 1.75.2.2 snj DPRINTFN(12, ("start with txrate %d\n",
4609 1.75.2.2 snj tx->initial_rate_index));
4610 1.75.2.2 snj #ifndef IEEE80211_NO_HT
4611 1.75.2.2 snj if (ni->ni_flags & IEEE80211_NODE_HT) {
4612 1.75.2.2 snj ridx = iwm_mcs2ridx[ni->ni_txmcs];
4613 1.75.2.2 snj return &iwm_rates[ridx];
4614 1.75.2.2 snj }
4615 1.75.2.2 snj #endif
4616 1.75.2.2 snj ridx = (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) ?
4617 1.75.2.2 snj IWM_RIDX_OFDM : IWM_RIDX_CCK;
4618 1.75.2.2 snj for (i = 0; i < nrates; i++) {
4619 1.75.2.2 snj if (iwm_rates[i].rate == (ni->ni_txrate &
4620 1.75.2.2 snj IEEE80211_RATE_VAL)) {
4621 1.75.2.2 snj ridx = i;
4622 1.75.2.2 snj break;
4623 1.75.2.2 snj }
4624 1.75.2.2 snj }
4625 1.75.2.2 snj return &iwm_rates[ridx];
4626 1.75.2.2 snj }
4627 1.75.2.2 snj
4628 1.75.2.2 snj rinfo = &iwm_rates[ridx];
4629 1.75.2.2 snj for (i = 0, ind = sc->sc_mgmt_last_antenna;
4630 1.75.2.2 snj i < IWM_RATE_MCS_ANT_NUM; i++) {
4631 1.75.2.2 snj ind = (ind + 1) % IWM_RATE_MCS_ANT_NUM;
4632 1.75.2.2 snj if (iwm_fw_valid_tx_ant(sc) & (1 << ind)) {
4633 1.75.2.2 snj sc->sc_mgmt_last_antenna = ind;
4634 1.75.2.2 snj break;
4635 1.75.2.2 snj }
4636 1.75.2.2 snj }
4637 1.75.2.2 snj rate_flags = (1 << sc->sc_mgmt_last_antenna) << IWM_RATE_MCS_ANT_POS;
4638 1.75.2.2 snj if (IWM_RIDX_IS_CCK(ridx))
4639 1.75.2.2 snj rate_flags |= IWM_RATE_MCS_CCK_MSK;
4640 1.75.2.2 snj #ifndef IEEE80211_NO_HT
4641 1.75.2.2 snj if ((ni->ni_flags & IEEE80211_NODE_HT) &&
4642 1.75.2.2 snj rinfo->ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP) {
4643 1.75.2.2 snj rate_flags |= IWM_RATE_MCS_HT_MSK;
4644 1.75.2.2 snj tx->rate_n_flags = htole32(rate_flags | rinfo->ht_plcp);
4645 1.75.2.2 snj } else
4646 1.75.2.2 snj #endif
4647 1.75.2.2 snj tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
4648 1.75.2.2 snj
4649 1.75.2.2 snj return rinfo;
4650 1.75.2.2 snj }
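/*
 * Illustrative summary: multicast and management frames use the lowest
 * rate for the band (the IWM_RIDX_CCK/IWM_RIDX_OFDM entries of
 * iwm_rates, typically 1 Mbit/s CCK on 2 GHz and 6 Mbit/s OFDM on
 * 5 GHz), while unicast data frames let the firmware's rate-scaling
 * table choose via IWM_TX_CMD_FLG_STA_RATE and initial_rate_index 0.
 */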
4651 1.75.2.2 snj
4652 1.75.2.2 snj #define TB0_SIZE 16
4653 1.75.2.2 snj static int
4654 1.75.2.2 snj iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
4655 1.75.2.2 snj {
4656 1.75.2.2 snj struct ieee80211com *ic = &sc->sc_ic;
4657 1.75.2.2 snj struct iwm_node *in = (struct iwm_node *)ni;
4658 1.75.2.2 snj struct iwm_tx_ring *ring;
4659 1.75.2.2 snj struct iwm_tx_data *data;
4660 1.75.2.2 snj struct iwm_tfd *desc;
4661 1.75.2.2 snj struct iwm_device_cmd *cmd;
4662 1.75.2.2 snj struct iwm_tx_cmd *tx;
4663 1.75.2.2 snj struct ieee80211_frame *wh;
4664 1.75.2.2 snj struct ieee80211_key *k = NULL;
4665 1.75.2.2 snj struct mbuf *m1;
4666 1.75.2.2 snj const struct iwm_rate *rinfo;
4667 1.75.2.2 snj uint32_t flags;
4668 1.75.2.2 snj u_int hdrlen;
4669 1.75.2.2 snj bus_dma_segment_t *seg;
4670 1.75.2.2 snj uint8_t tid, type;
4671 1.75.2.2 snj int i, totlen, err, pad;
4672 1.75.2.2 snj
4673 1.75.2.2 snj wh = mtod(m, struct ieee80211_frame *);
4674 1.75.2.2 snj hdrlen = ieee80211_anyhdrsize(wh);
4675 1.75.2.2 snj type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
4676 1.75.2.2 snj
4677 1.75.2.2 snj tid = 0;
4678 1.75.2.2 snj
4679 1.75.2.2 snj ring = &sc->txq[ac];
4680 1.75.2.2 snj desc = &ring->desc[ring->cur];
4681 1.75.2.2 snj memset(desc, 0, sizeof(*desc));
4682 1.75.2.2 snj data = &ring->data[ring->cur];
4683 1.75.2.2 snj
4684 1.75.2.2 snj cmd = &ring->cmd[ring->cur];
4685 1.75.2.2 snj cmd->hdr.code = IWM_TX_CMD;
4686 1.75.2.2 snj cmd->hdr.flags = 0;
4687 1.75.2.2 snj cmd->hdr.qid = ring->qid;
4688 1.75.2.2 snj cmd->hdr.idx = ring->cur;
4689 1.75.2.2 snj
4690 1.75.2.2 snj tx = (void *)cmd->data;
4691 1.75.2.2 snj memset(tx, 0, sizeof(*tx));
4692 1.75.2.2 snj
4693 1.75.2.2 snj rinfo = iwm_tx_fill_cmd(sc, in, wh, tx);
4694 1.75.2.2 snj
4695 1.75.2.2 snj if (__predict_false(sc->sc_drvbpf != NULL)) {
4696 1.75.2.2 snj struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
4697 1.75.2.2 snj
4698 1.75.2.2 snj tap->wt_flags = 0;
4699 1.75.2.2 snj tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
4700 1.75.2.2 snj tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
4701 1.75.2.2 snj #ifndef IEEE80211_NO_HT
4702 1.75.2.2 snj if ((ni->ni_flags & IEEE80211_NODE_HT) &&
4703 1.75.2.2 snj !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
4704 1.75.2.2 snj type == IEEE80211_FC0_TYPE_DATA &&
4705 1.75.2.2 snj rinfo->plcp == IWM_RATE_INVM_PLCP) {
4706 1.75.2.2 snj tap->wt_rate = (0x80 | rinfo->ht_plcp);
4707 1.75.2.2 snj } else
4708 1.75.2.2 snj #endif
4709 1.75.2.2 snj tap->wt_rate = rinfo->rate;
4710 1.75.2.2 snj tap->wt_hwqueue = ac;
4711 1.75.2.2 snj if (wh->i_fc[1] & IEEE80211_FC1_WEP)
4712 1.75.2.2 snj tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
4713 1.75.2.2 snj
4714 1.75.2.2 snj bpf_mtap2(sc->sc_drvbpf, tap, sc->sc_txtap_len, m);
4715 1.75.2.2 snj }
4716 1.75.2.2 snj
4717 1.75.2.2 snj /* Encrypt the frame if need be. */
4718 1.75.2.2 snj if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
4719 1.75.2.2 snj k = ieee80211_crypto_encap(ic, ni, m);
4720 1.75.2.2 snj if (k == NULL) {
4721 1.75.2.2 snj m_freem(m);
4722 1.75.2.2 snj return ENOBUFS;
4723 1.75.2.2 snj }
4724 1.75.2.2 snj /* Packet header may have moved, reset our local pointer. */
4725 1.75.2.2 snj wh = mtod(m, struct ieee80211_frame *);
4726 1.75.2.2 snj }
4727 1.75.2.2 snj totlen = m->m_pkthdr.len;
4728 1.75.2.2 snj
4729 1.75.2.2 snj flags = 0;
4730 1.75.2.2 snj if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
4731 1.75.2.2 snj flags |= IWM_TX_CMD_FLG_ACK;
4732 1.75.2.2 snj }
4733 1.75.2.2 snj
4734 1.75.2.2 snj if (type == IEEE80211_FC0_TYPE_DATA &&
4735 1.75.2.2 snj !IEEE80211_IS_MULTICAST(wh->i_addr1) &&
4736 1.75.2.2 snj (totlen + IEEE80211_CRC_LEN > ic->ic_rtsthreshold ||
4737 1.75.2.2 snj (ic->ic_flags & IEEE80211_F_USEPROT)))
4738 1.75.2.2 snj flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
4739 1.75.2.2 snj
4740 1.75.2.2 snj if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
4741 1.75.2.2 snj type != IEEE80211_FC0_TYPE_DATA)
4742 1.75.2.2 snj tx->sta_id = IWM_AUX_STA_ID;
4743 1.75.2.2 snj else
4744 1.75.2.2 snj tx->sta_id = IWM_STATION_ID;
4745 1.75.2.2 snj
4746 1.75.2.2 snj if (type == IEEE80211_FC0_TYPE_MGT) {
4747 1.75.2.2 snj uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
4748 1.75.2.2 snj
4749 1.75.2.2 snj if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
4750 1.75.2.2 snj subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
4751 1.75.2.2 snj tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
4752 1.75.2.2 snj else
4753 1.75.2.2 snj tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
4754 1.75.2.2 snj } else {
4755 1.75.2.2 snj tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
4756 1.75.2.2 snj }
4757 1.75.2.2 snj
4758 1.75.2.2 snj if (hdrlen & 3) {
4759 1.75.2.2 snj /* First segment length must be a multiple of 4. */
4760 1.75.2.2 snj flags |= IWM_TX_CMD_FLG_MH_PAD;
4761 1.75.2.2 snj pad = 4 - (hdrlen & 3);
4762 1.75.2.2 snj } else
4763 1.75.2.2 snj pad = 0;
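/*
 * Example: a 24-byte non-QoS 802.11 header is already 4-byte aligned,
 * so pad stays 0; a 26-byte QoS data header gets pad = 2 so that the
 * header portion placed in the command ends on a 4-byte boundary.
 */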
4764 1.75.2.2 snj
4765 1.75.2.2 snj tx->driver_txop = 0;
4766 1.75.2.2 snj tx->next_frame_len = 0;
4767 1.75.2.2 snj
4768 1.75.2.2 snj tx->len = htole16(totlen);
4769 1.75.2.2 snj tx->tid_tspec = tid;
4770 1.75.2.2 snj tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
4771 1.75.2.2 snj
4772 1.75.2.2 snj /* Set physical address of "scratch area". */
4773 1.75.2.2 snj tx->dram_lsb_ptr = htole32(data->scratch_paddr);
4774 1.75.2.2 snj tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
4775 1.75.2.2 snj
4776 1.75.2.2 snj /* Copy 802.11 header in TX command. */
4777 1.75.2.2 snj memcpy(tx + 1, wh, hdrlen);
4778 1.75.2.2 snj
4779 1.75.2.2 snj flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;
4780 1.75.2.2 snj
4781 1.75.2.2 snj tx->sec_ctl = 0;
4782 1.75.2.2 snj tx->tx_flags |= htole32(flags);
4783 1.75.2.2 snj
4784 1.75.2.2 snj /* Trim 802.11 header. */
4785 1.75.2.2 snj m_adj(m, hdrlen);
4786 1.75.2.2 snj
4787 1.75.2.2 snj err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
4788 1.75.2.2 snj BUS_DMA_NOWAIT | BUS_DMA_WRITE);
4789 1.75.2.2 snj if (err) {
4790 1.75.2.2 snj if (err != EFBIG) {
4791 1.75.2.2 snj aprint_error_dev(sc->sc_dev,
4792 1.75.2.2 snj "can't map mbuf (error %d)\n", err);
4793 1.75.2.2 snj m_freem(m);
4794 1.75.2.2 snj return err;
4795 1.75.2.2 snj }
4796 1.75.2.2 snj /* Too many DMA segments, linearize mbuf. */
4797 1.75.2.2 snj MGETHDR(m1, M_DONTWAIT, MT_DATA);
4798 1.75.2.2 snj if (m1 == NULL) {
4799 1.75.2.2 snj m_freem(m);
4800 1.75.2.2 snj return ENOBUFS;
4801 1.75.2.2 snj }
4802 1.75.2.2 snj if (m->m_pkthdr.len > MHLEN) {
4803 1.75.2.2 snj MCLGET(m1, M_DONTWAIT);
4804 1.75.2.2 snj if (!(m1->m_flags & M_EXT)) {
4805 1.75.2.2 snj m_freem(m);
4806 1.75.2.2 snj m_freem(m1);
4807 1.75.2.2 snj return ENOBUFS;
4808 1.75.2.2 snj }
4809 1.75.2.2 snj }
4810 1.75.2.2 snj m_copydata(m, 0, m->m_pkthdr.len, mtod(m1, void *));
4811 1.75.2.2 snj m1->m_pkthdr.len = m1->m_len = m->m_pkthdr.len;
4812 1.75.2.2 snj m_freem(m);
4813 1.75.2.2 snj m = m1;
4814 1.75.2.2 snj
4815 1.75.2.2 snj err = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
4816 1.75.2.2 snj BUS_DMA_NOWAIT | BUS_DMA_WRITE);
4817 1.75.2.2 snj if (err) {
4818 1.75.2.2 snj aprint_error_dev(sc->sc_dev,
4819 1.75.2.2 snj "can't map mbuf (error %d)\n", err);
4820 1.75.2.2 snj m_freem(m);
4821 1.75.2.2 snj return err;
4822 1.75.2.2 snj }
4823 1.75.2.2 snj }
4824 1.75.2.2 snj data->m = m;
4825 1.75.2.2 snj data->in = in;
4826 1.75.2.2 snj data->done = 0;
4827 1.75.2.2 snj
4828 1.75.2.2 snj DPRINTFN(8, ("sending txd %p, in %p\n", data, data->in));
4829 1.75.2.2 snj KASSERT(data->in != NULL);
4830 1.75.2.2 snj
4831 1.75.2.2 snj DPRINTFN(8, ("sending data: qid=%d idx=%d len=%d nsegs=%d type=%d "
4832 1.75.2.2 snj "subtype=%x tx_flags=%08x init_rateidx=%08x rate_n_flags=%08x\n",
4833 1.75.2.2 snj ring->qid, ring->cur, totlen, data->map->dm_nsegs, type,
4834 1.75.2.2 snj (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) >> 4,
4835 1.75.2.2 snj le32toh(tx->tx_flags), le32toh(tx->initial_rate_index),
4836 1.75.2.2 snj le32toh(tx->rate_n_flags)));
4837 1.75.2.2 snj
4838 1.75.2.2 snj /* Fill TX descriptor. */
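/*
 * Descriptive note: the TFD filled in below follows the layout set up
 * above: TB0 covers the first TB0_SIZE (16) bytes of the command, TB1
 * covers the rest of the command header, the TX command and the copied
 * 802.11 header plus pad, and TB2 onward map the mbuf's payload
 * segments, hence num_tbs = 2 + dm_nsegs.
 */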
4839 1.75.2.2 snj desc->num_tbs = 2 + data->map->dm_nsegs;
4840 1.75.2.2 snj
4841 1.75.2.2 snj desc->tbs[0].lo = htole32(data->cmd_paddr);
4842 1.75.2.2 snj desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
4843 1.75.2.2 snj (TB0_SIZE << 4);
4844 1.75.2.2 snj desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
4845 1.75.2.2 snj desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr)) |
4846 1.75.2.2 snj ((sizeof(struct iwm_cmd_header) + sizeof(*tx)
4847 1.75.2.2 snj + hdrlen + pad - TB0_SIZE) << 4);
4848 1.75.2.2 snj
4849 1.75.2.2 snj /* Other DMA segments are for data payload. */
4850 1.75.2.2 snj seg = data->map->dm_segs;
4851 1.75.2.2 snj for (i = 0; i < data->map->dm_nsegs; i++, seg++) {
4852 1.75.2.2 snj desc->tbs[i+2].lo = htole32(seg->ds_addr);
4853 1.75.2.2 snj desc->tbs[i+2].hi_n_len =
4854 1.75.2.2 snj htole16(iwm_get_dma_hi_addr(seg->ds_addr))
4855 1.75.2.2 snj | ((seg->ds_len) << 4);
4856 1.75.2.2 snj }
4857 1.75.2.2 snj
4858 1.75.2.2 snj bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize,
4859 1.75.2.2 snj BUS_DMASYNC_PREWRITE);
4860 1.75.2.2 snj bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
4861 1.75.2.2 snj (uint8_t *)cmd - (uint8_t *)ring->cmd, sizeof(*cmd),
4862 1.75.2.2 snj BUS_DMASYNC_PREWRITE);
4863 1.75.2.2 snj bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
4864 1.75.2.2 snj (uint8_t *)desc - (uint8_t *)ring->desc, sizeof(*desc),
4865 1.75.2.2 snj BUS_DMASYNC_PREWRITE);
4866 1.75.2.2 snj
4867 1.75.2.2 snj #if 0
4868 1.75.2.2 snj iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id,
4869 1.75.2.2 snj le16toh(tx->len));
4870 1.75.2.2 snj #endif
4871 1.75.2.2 snj
4872 1.75.2.2 snj /* Kick TX ring. */
4873 1.75.2.2 snj ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
4874 1.75.2.2 snj IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
4875 1.75.2.2 snj
4876 1.75.2.2 snj /* Mark TX ring as full if we reach a certain threshold. */
4877 1.75.2.2 snj if (++ring->queued > IWM_TX_RING_HIMARK) {
4878 1.75.2.2 snj sc->qfullmsk |= 1 << ring->qid;
4879 1.75.2.2 snj }
4880 1.75.2.2 snj
4881 1.75.2.2 snj return 0;
4882 1.75.2.2 snj }
4883 1.75.2.2 snj
4884 1.75.2.2 snj #if 0
4885 1.75.2.2 snj /* not necessary? */
4886 1.75.2.2 snj static int
4887 1.75.2.2 snj iwm_flush_tx_path(struct iwm_softc *sc, int tfd_msk, int sync)
4888 1.75.2.2 snj {
4889 1.75.2.2 snj struct iwm_tx_path_flush_cmd flush_cmd = {
4890 1.75.2.2 snj .queues_ctl = htole32(tfd_msk),
4891 1.75.2.2 snj .flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
4892 1.75.2.2 snj };
4893 1.75.2.2 snj int err;
4894 1.75.2.2 snj
4895 1.75.2.2 snj err = iwm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, sync ? 0 : IWM_CMD_ASYNC,
4896 1.75.2.2 snj sizeof(flush_cmd), &flush_cmd);
4897 1.75.2.2 snj if (err)
4898 1.75.2.2 snj aprint_error_dev(sc->sc_dev, "Flushing tx queue failed: %d\n",
4899 1.75.2.2 snj err);
4900 1.75.2.2 snj return err;
4901 1.75.2.2 snj }
4902 1.75.2.2 snj #endif
4903 1.75.2.2 snj
4904 1.75.2.2 snj static void
4905 1.75.2.2 snj iwm_led_enable(struct iwm_softc *sc)
4906 1.75.2.2 snj {
4907 1.75.2.2 snj IWM_WRITE(sc, IWM_CSR_LED_REG, IWM_CSR_LED_REG_TURN_ON);
4908 1.75.2.2 snj }
4909 1.75.2.2 snj
4910 1.75.2.2 snj static void
4911 1.75.2.2 snj iwm_led_disable(struct iwm_softc *sc)
4912 1.75.2.2 snj {
4913 1.75.2.2 snj IWM_WRITE(sc, IWM_CSR_LED_REG, IWM_CSR_LED_REG_TURN_OFF);
4914 1.75.2.2 snj }
4915 1.75.2.2 snj
4916 1.75.2.2 snj static int
4917 1.75.2.2 snj iwm_led_is_enabled(struct iwm_softc *sc)
4918 1.75.2.2 snj {
4919 1.75.2.2 snj return (IWM_READ(sc, IWM_CSR_LED_REG) == IWM_CSR_LED_REG_TURN_ON);
4920 1.75.2.2 snj }
4921 1.75.2.2 snj
4922 1.75.2.2 snj static void
4923 1.75.2.2 snj iwm_led_blink_timeout(void *arg)
4924 1.75.2.2 snj {
4925 1.75.2.2 snj struct iwm_softc *sc = arg;
4926 1.75.2.2 snj
4927 1.75.2.2 snj if (iwm_led_is_enabled(sc))
4928 1.75.2.2 snj iwm_led_disable(sc);
4929 1.75.2.2 snj else
4930 1.75.2.2 snj iwm_led_enable(sc);
4931 1.75.2.2 snj
4932 1.75.2.2 snj callout_schedule(&sc->sc_led_blink_to, mstohz(200));
4933 1.75.2.2 snj }
4934 1.75.2.2 snj
4935 1.75.2.2 snj static void
4936 1.75.2.2 snj iwm_led_blink_start(struct iwm_softc *sc)
4937 1.75.2.2 snj {
4938 1.75.2.2 snj callout_schedule(&sc->sc_led_blink_to, mstohz(200));
4939 1.75.2.2 snj }
4940 1.75.2.2 snj
4941 1.75.2.2 snj static void
4942 1.75.2.2 snj iwm_led_blink_stop(struct iwm_softc *sc)
4943 1.75.2.2 snj {
4944 1.75.2.2 snj callout_stop(&sc->sc_led_blink_to);
4945 1.75.2.2 snj iwm_led_disable(sc);
4946 1.75.2.2 snj }
4947 1.75.2.2 snj
4948 1.75.2.2 snj #define IWM_POWER_KEEP_ALIVE_PERIOD_SEC 25
4949 1.75.2.2 snj
4950 1.75.2.2 snj static int
4951 1.75.2.2 snj iwm_beacon_filter_send_cmd(struct iwm_softc *sc,
4952 1.75.2.2 snj struct iwm_beacon_filter_cmd *cmd)
4953 1.75.2.2 snj {
4954 1.75.2.2 snj return iwm_send_cmd_pdu(sc, IWM_REPLY_BEACON_FILTERING_CMD,
4955 1.75.2.2 snj 0, sizeof(struct iwm_beacon_filter_cmd), cmd);
4956 1.75.2.2 snj }
4957 1.75.2.2 snj
4958 1.75.2.2 snj static void
4959 1.75.2.2 snj iwm_beacon_filter_set_cqm_params(struct iwm_softc *sc, struct iwm_node *in,
4960 1.75.2.2 snj struct iwm_beacon_filter_cmd *cmd)
4961 1.75.2.2 snj {
4962 1.75.2.2 snj cmd->ba_enable_beacon_abort = htole32(sc->sc_bf.ba_enabled);
4963 1.75.2.2 snj }
4964 1.75.2.2 snj
4965 1.75.2.2 snj static int
4966 1.75.2.2 snj iwm_update_beacon_abort(struct iwm_softc *sc, struct iwm_node *in, int enable)
4967 1.75.2.2 snj {
4968 1.75.2.2 snj struct iwm_beacon_filter_cmd cmd = {
4969 1.75.2.2 snj IWM_BF_CMD_CONFIG_DEFAULTS,
4970 1.75.2.2 snj .bf_enable_beacon_filter = htole32(1),
4971 1.75.2.2 snj .ba_enable_beacon_abort = htole32(enable),
4972 1.75.2.2 snj };
4973 1.75.2.2 snj
4974 1.75.2.2 snj if (!sc->sc_bf.bf_enabled)
4975 1.75.2.2 snj return 0;
4976 1.75.2.2 snj
4977 1.75.2.2 snj sc->sc_bf.ba_enabled = enable;
4978 1.75.2.2 snj iwm_beacon_filter_set_cqm_params(sc, in, &cmd);
4979 1.75.2.2 snj return iwm_beacon_filter_send_cmd(sc, &cmd);
4980 1.75.2.2 snj }
4981 1.75.2.2 snj
4982 1.75.2.2 snj static void
4983 1.75.2.2 snj iwm_power_build_cmd(struct iwm_softc *sc, struct iwm_node *in,
4984 1.75.2.2 snj struct iwm_mac_power_cmd *cmd)
4985 1.75.2.2 snj {
4986 1.75.2.2 snj struct ieee80211_node *ni = &in->in_ni;
4987 1.75.2.2 snj int dtim_period, dtim_msec, keep_alive;
4988 1.75.2.2 snj
4989 1.75.2.2 snj cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
4990 1.75.2.2 snj in->in_color));
4991 1.75.2.2 snj if (ni->ni_dtim_period)
4992 1.75.2.2 snj dtim_period = ni->ni_dtim_period;
4993 1.75.2.2 snj else
4994 1.75.2.2 snj dtim_period = 1;
4995 1.75.2.2 snj
4996 1.75.2.2 snj /*
4997 1.75.2.2 snj * Regardless of power management state the driver must set the
4998 1.75.2.2 snj * keep-alive period. The firmware will use it for sending keep-alive
4999 1.75.2.2 snj * NDPs immediately after association. Make sure the keep-alive period
5000 1.75.2.2 snj * is at least 3 * DTIM.
5001 1.75.2.2 snj */
5002 1.75.2.2 snj dtim_msec = dtim_period * ni->ni_intval;
5003 1.75.2.2 snj keep_alive = MAX(3 * dtim_msec, 1000 * IWM_POWER_KEEP_ALIVE_PERIOD_SEC);
5004 1.75.2.2 snj keep_alive = roundup(keep_alive, 1000) / 1000;
5005 1.75.2.2 snj cmd->keep_alive_seconds = htole16(keep_alive);
5006 1.75.2.2 snj
5007 1.75.2.2 snj #ifdef notyet
5008 1.75.2.2 snj cmd->flags = htole16(IWM_POWER_FLAGS_POWER_SAVE_ENA_MSK);
5009 1.75.2.2 snj cmd->rx_data_timeout = IWM_DEFAULT_PS_RX_DATA_TIMEOUT;
5010 1.75.2.2 snj cmd->tx_data_timeout = IWM_DEFAULT_PS_TX_DATA_TIMEOUT;
5011 1.75.2.2 snj #endif
5012 1.75.2.2 snj }
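/*
 * Worked example (illustrative values): with a DTIM period of 3 and a
 * beacon interval of 100, dtim_msec is 300, so keep_alive becomes
 * MAX(900, 25000) = 25000 and is rounded up to 25 seconds.
 */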
5013 1.75.2.2 snj
5014 1.75.2.2 snj static int
5015 1.75.2.2 snj iwm_power_mac_update_mode(struct iwm_softc *sc, struct iwm_node *in)
5016 1.75.2.2 snj {
5017 1.75.2.2 snj int err;
5018 1.75.2.2 snj int ba_enable;
5019 1.75.2.2 snj struct iwm_mac_power_cmd cmd;
5020 1.75.2.2 snj
5021 1.75.2.2 snj memset(&cmd, 0, sizeof(cmd));
5022 1.75.2.2 snj
5023 1.75.2.2 snj iwm_power_build_cmd(sc, in, &cmd);
5024 1.75.2.2 snj
5025 1.75.2.2 snj err = iwm_send_cmd_pdu(sc, IWM_MAC_PM_POWER_TABLE, 0,
5026 1.75.2.2 snj sizeof(cmd), &cmd);
5027 1.75.2.2 snj if (err)
5028 1.75.2.2 snj return err;
5029 1.75.2.2 snj
5030 1.75.2.2 snj ba_enable = !!(cmd.flags &
5031 1.75.2.2 snj htole16(IWM_POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
5032 1.75.2.2 snj return iwm_update_beacon_abort(sc, in, ba_enable);
5033 1.75.2.2 snj }
5034 1.75.2.2 snj
5035 1.75.2.2 snj static int
5036 1.75.2.2 snj iwm_power_update_device(struct iwm_softc *sc)
5037 1.75.2.2 snj {
5038 1.75.2.2 snj struct iwm_device_power_cmd cmd = {
5039 1.75.2.2 snj #ifdef notyet
5040 1.75.2.2 snj .flags = htole16(IWM_DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK),
5041 1.75.2.2 snj #else
5042 1.75.2.2 snj .flags = 0,
5043 1.75.2.2 snj #endif
5044 1.75.2.2 snj };
5045 1.75.2.2 snj
5046 1.75.2.2 snj if (!(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
5047 1.75.2.2 snj return 0;
5048 1.75.2.2 snj
5049 1.75.2.2 snj cmd.flags |= htole16(IWM_DEVICE_POWER_FLAGS_CAM_MSK);
5050 1.75.2.2 snj DPRINTF(("Sending device power command with flags = 0x%X\n",
5051 1.75.2.2 snj cmd.flags));
5052 1.75.2.2 snj
5053 1.75.2.2 snj return iwm_send_cmd_pdu(sc, IWM_POWER_TABLE_CMD, 0, sizeof(cmd), &cmd);
5054 1.75.2.2 snj }
5055 1.75.2.2 snj
5056 1.75.2.2 snj #ifdef notyet
5057 1.75.2.2 snj static int
5058 1.75.2.2 snj iwm_enable_beacon_filter(struct iwm_softc *sc, struct iwm_node *in)
5059 1.75.2.2 snj {
5060 1.75.2.2 snj struct iwm_beacon_filter_cmd cmd = {
5061 1.75.2.2 snj IWM_BF_CMD_CONFIG_DEFAULTS,
5062 1.75.2.2 snj .bf_enable_beacon_filter = htole32(1),
5063 1.75.2.2 snj };
5064 1.75.2.2 snj int err;
5065 1.75.2.2 snj
5066 1.75.2.2 snj iwm_beacon_filter_set_cqm_params(sc, in, &cmd);
5067 1.75.2.2 snj err = iwm_beacon_filter_send_cmd(sc, &cmd);
5068 1.75.2.2 snj
5069 1.75.2.2 snj if (err == 0)
5070 1.75.2.2 snj sc->sc_bf.bf_enabled = 1;
5071 1.75.2.2 snj
5072 1.75.2.2 snj return err;
5073 1.75.2.2 snj }
5074 1.75.2.2 snj #endif
5075 1.75.2.2 snj
5076 1.75.2.2 snj static int
5077 1.75.2.2 snj iwm_disable_beacon_filter(struct iwm_softc *sc)
5078 1.75.2.2 snj {
5079 1.75.2.2 snj struct iwm_beacon_filter_cmd cmd;
5080 1.75.2.2 snj int err;
5081 1.75.2.2 snj
5082 1.75.2.2 snj memset(&cmd, 0, sizeof(cmd));
5083 1.75.2.2 snj if ((sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_BF_UPDATED) == 0)
5084 1.75.2.2 snj return 0;
5085 1.75.2.2 snj
5086 1.75.2.2 snj err = iwm_beacon_filter_send_cmd(sc, &cmd);
5087 1.75.2.2 snj if (err == 0)
5088 1.75.2.2 snj sc->sc_bf.bf_enabled = 0;
5089 1.75.2.2 snj
5090 1.75.2.2 snj return err;
5091 1.75.2.2 snj }
5092 1.75.2.2 snj
5093 1.75.2.2 snj static int
5094 1.75.2.2 snj iwm_add_sta_cmd(struct iwm_softc *sc, struct iwm_node *in, int update)
5095 1.75.2.2 snj {
5096 1.75.2.2 snj struct iwm_add_sta_cmd_v7 add_sta_cmd;
5097 1.75.2.2 snj int err;
5098 1.75.2.2 snj uint32_t status;
5099 1.75.2.2 snj
5100 1.75.2.2 snj memset(&add_sta_cmd, 0, sizeof(add_sta_cmd));
5101 1.75.2.2 snj
5102 1.75.2.2 snj add_sta_cmd.sta_id = IWM_STATION_ID;
5103 1.75.2.2 snj add_sta_cmd.mac_id_n_color
5104 1.75.2.2 snj = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id, in->in_color));
5105 1.75.2.2 snj if (!update) {
5106 1.75.2.2 snj int ac;
5107 1.75.2.2 snj for (ac = 0; ac < WME_NUM_AC; ac++) {
5108 1.75.2.2 snj add_sta_cmd.tfd_queue_msk |=
5109 1.75.2.2 snj htole32(__BIT(iwm_ac_to_tx_fifo[ac]));
5110 1.75.2.2 snj }
5111 1.75.2.2 snj IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
5112 1.75.2.2 snj }
5113 1.75.2.2 snj add_sta_cmd.add_modify = update ? 1 : 0;
5114 1.75.2.2 snj add_sta_cmd.station_flags_msk
5115 1.75.2.2 snj |= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
5116 1.75.2.2 snj add_sta_cmd.tid_disable_tx = htole16(0xffff);
5117 1.75.2.2 snj if (update)
5118 1.75.2.2 snj add_sta_cmd.modify_mask |= (IWM_STA_MODIFY_TID_DISABLE_TX);
5119 1.75.2.2 snj
5120 1.75.2.2 snj #ifndef IEEE80211_NO_HT
5121 1.75.2.2 snj if (in->in_ni.ni_flags & IEEE80211_NODE_HT) {
5122 1.75.2.2 snj add_sta_cmd.station_flags_msk
5123 1.75.2.2 snj |= htole32(IWM_STA_FLG_MAX_AGG_SIZE_MSK |
5124 1.75.2.2 snj IWM_STA_FLG_AGG_MPDU_DENS_MSK);
5125 1.75.2.2 snj
5126 1.75.2.2 snj add_sta_cmd.station_flags
5127 1.75.2.2 snj |= htole32(IWM_STA_FLG_MAX_AGG_SIZE_64K);
5128 1.75.2.2 snj switch (sc->sc_ic.ic_ampdu_params & IEEE80211_AMPDU_PARAM_SS) {
5129 1.75.2.2 snj case IEEE80211_AMPDU_PARAM_SS_2:
5130 1.75.2.2 snj add_sta_cmd.station_flags
5131 1.75.2.2 snj |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_2US);
5132 1.75.2.2 snj break;
5133 1.75.2.2 snj case IEEE80211_AMPDU_PARAM_SS_4:
5134 1.75.2.2 snj add_sta_cmd.station_flags
5135 1.75.2.2 snj |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_4US);
5136 1.75.2.2 snj break;
5137 1.75.2.2 snj case IEEE80211_AMPDU_PARAM_SS_8:
5138 1.75.2.2 snj add_sta_cmd.station_flags
5139 1.75.2.2 snj |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_8US);
5140 1.75.2.2 snj break;
5141 1.75.2.2 snj case IEEE80211_AMPDU_PARAM_SS_16:
5142 1.75.2.2 snj add_sta_cmd.station_flags
5143 1.75.2.2 snj |= htole32(IWM_STA_FLG_AGG_MPDU_DENS_16US);
5144 1.75.2.2 snj break;
5145 1.75.2.2 snj default:
5146 1.75.2.2 snj break;
5147 1.75.2.2 snj }
5148 1.75.2.2 snj }
5149 1.75.2.2 snj #endif
5150 1.75.2.2 snj
5151 1.75.2.2 snj status = IWM_ADD_STA_SUCCESS;
5152 1.75.2.2 snj err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(add_sta_cmd),
5153 1.75.2.2 snj &add_sta_cmd, &status);
5154 1.75.2.2 snj if (err == 0 && status != IWM_ADD_STA_SUCCESS)
5155 1.75.2.2 snj err = EIO;
5156 1.75.2.2 snj
5157 1.75.2.2 snj return err;
5158 1.75.2.2 snj }
5159 1.75.2.2 snj
5160 1.75.2.2 snj static int
5161 1.75.2.2 snj iwm_add_aux_sta(struct iwm_softc *sc)
5162 1.75.2.2 snj {
5163 1.75.2.2 snj struct iwm_add_sta_cmd_v7 cmd;
5164 1.75.2.2 snj int err;
5165 1.75.2.2 snj uint32_t status;
5166 1.75.2.2 snj
5167 1.75.2.2 snj err = iwm_enable_txq(sc, 0, IWM_AUX_QUEUE, IWM_TX_FIFO_MCAST);
5168 1.75.2.2 snj if (err)
5169 1.75.2.2 snj return err;
5170 1.75.2.2 snj
5171 1.75.2.2 snj memset(&cmd, 0, sizeof(cmd));
5172 1.75.2.2 snj cmd.sta_id = IWM_AUX_STA_ID;
5173 1.75.2.2 snj cmd.mac_id_n_color =
5174 1.75.2.2 snj htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_MAC_INDEX_AUX, 0));
5175 1.75.2.2 snj cmd.tfd_queue_msk = htole32(1 << IWM_AUX_QUEUE);
5176 1.75.2.2 snj cmd.tid_disable_tx = htole16(0xffff);
5177 1.75.2.2 snj
5178 1.75.2.2 snj status = IWM_ADD_STA_SUCCESS;
5179 1.75.2.2 snj err = iwm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(cmd), &cmd,
5180 1.75.2.2 snj &status);
5181 1.75.2.2 snj if (err == 0 && status != IWM_ADD_STA_SUCCESS)
5182 1.75.2.2 snj err = EIO;
5183 1.75.2.2 snj
5184 1.75.2.2 snj return err;
5185 1.75.2.2 snj }
5186 1.75.2.2 snj
5187 1.75.2.2 snj #define IWM_PLCP_QUIET_THRESH 1
5188 1.75.2.2 snj #define IWM_ACTIVE_QUIET_TIME 10
5189 1.75.2.2 snj #define LONG_OUT_TIME_PERIOD 600
5190 1.75.2.2 snj #define SHORT_OUT_TIME_PERIOD 200
5191 1.75.2.2 snj #define SUSPEND_TIME_PERIOD 100
5192 1.75.2.2 snj
5193 1.75.2.2 snj static uint16_t
5194 1.75.2.2 snj iwm_scan_rx_chain(struct iwm_softc *sc)
5195 1.75.2.2 snj {
5196 1.75.2.2 snj uint16_t rx_chain;
5197 1.75.2.2 snj uint8_t rx_ant;
5198 1.75.2.2 snj
5199 1.75.2.2 snj rx_ant = iwm_fw_valid_rx_ant(sc);
5200 1.75.2.2 snj rx_chain = rx_ant << IWM_PHY_RX_CHAIN_VALID_POS;
5201 1.75.2.2 snj rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
5202 1.75.2.2 snj rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_SEL_POS;
5203 1.75.2.2 snj rx_chain |= 0x1 << IWM_PHY_RX_CHAIN_DRIVER_FORCE_POS;
5204 1.75.2.2 snj return htole16(rx_chain);
5205 1.75.2.2 snj }
5206 1.75.2.2 snj
5207 1.75.2.2 snj static uint32_t
5208 1.75.2.2 snj iwm_scan_rate_n_flags(struct iwm_softc *sc, int flags, int no_cck)
5209 1.75.2.2 snj {
5210 1.75.2.2 snj uint32_t tx_ant;
5211 1.75.2.2 snj int i, ind;
5212 1.75.2.2 snj
5213 1.75.2.2 snj for (i = 0, ind = sc->sc_scan_last_antenna;
5214 1.75.2.2 snj i < IWM_RATE_MCS_ANT_NUM; i++) {
5215 1.75.2.2 snj ind = (ind + 1) % IWM_RATE_MCS_ANT_NUM;
5216 1.75.2.2 snj if (iwm_fw_valid_tx_ant(sc) & (1 << ind)) {
5217 1.75.2.2 snj sc->sc_scan_last_antenna = ind;
5218 1.75.2.2 snj break;
5219 1.75.2.2 snj }
5220 1.75.2.2 snj }
5221 1.75.2.2 snj tx_ant = (1 << sc->sc_scan_last_antenna) << IWM_RATE_MCS_ANT_POS;
5222 1.75.2.2 snj
5223 1.75.2.2 snj if ((flags & IEEE80211_CHAN_2GHZ) && !no_cck)
5224 1.75.2.2 snj return htole32(IWM_RATE_1M_PLCP | IWM_RATE_MCS_CCK_MSK |
5225 1.75.2.2 snj tx_ant);
5226 1.75.2.2 snj else
5227 1.75.2.2 snj return htole32(IWM_RATE_6M_PLCP | tx_ant);
5228 1.75.2.2 snj }
5229 1.75.2.2 snj
5230 1.75.2.2 snj #ifdef notyet
5231 1.75.2.2 snj /*
5232 1.75.2.2 snj * If req->n_ssids > 0, it means we should do an active scan.
5233 1.75.2.2 snj * In case of active scan w/o directed scan, we receive a zero-length SSID
5234 1.75.2.2 snj * just to notify that this scan is active and not passive.
5235 1.75.2.2 snj * In order to notify the FW of the number of SSIDs we wish to scan (including
5236 1.75.2.2 snj * the zero-length one), we need to set the corresponding bits in chan->type,
5237 1.75.2.2 snj  * one for each SSID, and set the active bit (first). The first SSID is
5238 1.75.2.2 snj  * already included in the probe template, so we only need to set
5239 1.75.2.2 snj  * req->n_ssids - 1 bits in addition to the first bit.
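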
5240 1.75.2.2 snj */
5241 1.75.2.2 snj static uint16_t
5242 1.75.2.2 snj iwm_get_active_dwell(struct iwm_softc *sc, int flags, int n_ssids)
5243 1.75.2.2 snj {
5244 1.75.2.2 snj if (flags & IEEE80211_CHAN_2GHZ)
5245 1.75.2.2 snj return 30 + 3 * (n_ssids + 1);
5246 1.75.2.2 snj return 20 + 2 * (n_ssids + 1);
5247 1.75.2.2 snj }
5248 1.75.2.2 snj
5249 1.75.2.2 snj static uint16_t
5250 1.75.2.2 snj iwm_get_passive_dwell(struct iwm_softc *sc, int flags)
5251 1.75.2.2 snj {
5252 1.75.2.2 snj return (flags & IEEE80211_CHAN_2GHZ) ? 100 + 20 : 100 + 10;
5253 1.75.2.2 snj }
5254 1.75.2.2 snj #endif
5255 1.75.2.2 snj
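/*
 * Fill the LMAC scan command's channel array from the net80211
 * channel table.  Unconfigured channels (ic_flags == 0) are skipped
 * and the list is capped at the number of scan channels the firmware
 * supports.  Each channel is scanned once; active scanning is
 * requested only on non-passive channels and only when at least one
 * SSID was supplied.
 */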
5256 1.75.2.2 snj static uint8_t
5257 1.75.2.2 snj iwm_lmac_scan_fill_channels(struct iwm_softc *sc,
5258 1.75.2.2 snj struct iwm_scan_channel_cfg_lmac *chan, int n_ssids)
5259 1.75.2.2 snj {
5260 1.75.2.2 snj struct ieee80211com *ic = &sc->sc_ic;
5261 1.75.2.2 snj struct ieee80211_channel *c;
5262 1.75.2.2 snj uint8_t nchan;
5263 1.75.2.2 snj
5264 1.75.2.2 snj for (nchan = 0, c = &ic->ic_channels[1];
5265 1.75.2.2 snj c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
5266 1.75.2.2 snj nchan < sc->sc_capa_n_scan_channels;
5267 1.75.2.2 snj c++) {
5268 1.75.2.2 snj if (c->ic_flags == 0)
5269 1.75.2.2 snj continue;
5270 1.75.2.2 snj
5271 1.75.2.2 snj chan->channel_num = htole16(ieee80211_mhz2ieee(c->ic_freq, 0));
5272 1.75.2.2 snj chan->iter_count = htole16(1);
5273 1.75.2.2 snj chan->iter_interval = htole32(0);
5274 1.75.2.2 snj chan->flags = htole32(IWM_UNIFIED_SCAN_CHANNEL_PARTIAL);
5275 1.75.2.2 snj chan->flags |= htole32(IWM_SCAN_CHANNEL_NSSIDS(n_ssids));
5276 1.75.2.2 snj if (!IEEE80211_IS_CHAN_PASSIVE(c) && n_ssids != 0)
5277 1.75.2.2 snj chan->flags |= htole32(IWM_SCAN_CHANNEL_TYPE_ACTIVE);
5278 1.75.2.2 snj chan++;
5279 1.75.2.2 snj nchan++;
5280 1.75.2.2 snj }
5281 1.75.2.2 snj
5282 1.75.2.2 snj return nchan;
5283 1.75.2.2 snj }
5284 1.75.2.2 snj
5285 1.75.2.2 snj static uint8_t
5286 1.75.2.2 snj iwm_umac_scan_fill_channels(struct iwm_softc *sc,
5287 1.75.2.2 snj struct iwm_scan_channel_cfg_umac *chan, int n_ssids)
5288 1.75.2.2 snj {
5289 1.75.2.2 snj struct ieee80211com *ic = &sc->sc_ic;
5290 1.75.2.2 snj struct ieee80211_channel *c;
5291 1.75.2.2 snj uint8_t nchan;
5292 1.75.2.2 snj
5293 1.75.2.2 snj for (nchan = 0, c = &ic->ic_channels[1];
5294 1.75.2.2 snj c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
5295 1.75.2.2 snj nchan < sc->sc_capa_n_scan_channels;
5296 1.75.2.2 snj c++) {
5297 1.75.2.2 snj if (c->ic_flags == 0)
5298 1.75.2.2 snj continue;
5299 1.75.2.2 snj
5300 1.75.2.2 snj chan->channel_num = ieee80211_mhz2ieee(c->ic_freq, 0);
5301 1.75.2.2 snj chan->iter_count = 1;
5302 1.75.2.2 snj chan->iter_interval = htole16(0);
5303 1.75.2.2 snj chan->flags = htole32(IWM_SCAN_CHANNEL_UMAC_NSSIDS(n_ssids));
5304 1.75.2.2 snj chan++;
5305 1.75.2.2 snj nchan++;
5306 1.75.2.2 snj }
5307 1.75.2.2 snj
5308 1.75.2.2 snj return nchan;
5309 1.75.2.2 snj }
5310 1.75.2.2 snj
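/*
 * Build the probe request template that accompanies scan commands.
 * The template is an 802.11 management header plus an SSID element,
 * followed by band-specific rate elements (and a DS parameter set or
 * HT capabilities element where applicable).  preq->mac_header,
 * preq->band_data[] and preq->common_data tell the firmware where
 * each piece lives inside preq->buf; ENOBUFS is returned if the
 * template buffer is too small.
 */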
5311 1.75.2.2 snj static int
5312 1.75.2.2 snj iwm_fill_probe_req(struct iwm_softc *sc, struct iwm_scan_probe_req *preq)
5313 1.75.2.2 snj {
5314 1.75.2.2 snj struct ieee80211com *ic = &sc->sc_ic;
5315 1.75.2.2 snj struct ieee80211_frame *wh = (struct ieee80211_frame *)preq->buf;
5316 1.75.2.2 snj struct ieee80211_rateset *rs;
5317 1.75.2.2 snj size_t remain = sizeof(preq->buf);
5318 1.75.2.2 snj uint8_t *frm, *pos;
5319 1.75.2.2 snj
5320 1.75.2.2 snj memset(preq, 0, sizeof(*preq));
5321 1.75.2.2 snj
5322 1.75.2.2 snj if (remain < sizeof(*wh) + 2 + ic->ic_des_esslen)
5323 1.75.2.2 snj return ENOBUFS;
5324 1.75.2.2 snj
5325 1.75.2.2 snj /*
5326 1.75.2.2 snj * Build a probe request frame. Most of the following code is a
5327 1.75.2.2 snj * copy & paste of what is done in net80211.
5328 1.75.2.2 snj */
5329 1.75.2.2 snj wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
5330 1.75.2.2 snj IEEE80211_FC0_SUBTYPE_PROBE_REQ;
5331 1.75.2.2 snj wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
5332 1.75.2.2 snj IEEE80211_ADDR_COPY(wh->i_addr1, etherbroadcastaddr);
5333 1.75.2.2 snj IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_myaddr);
5334 1.75.2.2 snj IEEE80211_ADDR_COPY(wh->i_addr3, etherbroadcastaddr);
5335 1.75.2.2 snj *(uint16_t *)&wh->i_dur[0] = 0; /* filled by HW */
5336 1.75.2.2 snj *(uint16_t *)&wh->i_seq[0] = 0; /* filled by HW */
5337 1.75.2.2 snj
5338 1.75.2.2 snj frm = (uint8_t *)(wh + 1);
5339 1.75.2.2 snj frm = ieee80211_add_ssid(frm, ic->ic_des_essid, ic->ic_des_esslen);
5340 1.75.2.2 snj
5341 1.75.2.2 snj /* Tell the firmware where the MAC header is. */
5342 1.75.2.2 snj preq->mac_header.offset = 0;
5343 1.75.2.2 snj preq->mac_header.len = htole16(frm - (uint8_t *)wh);
5344 1.75.2.2 snj remain -= frm - (uint8_t *)wh;
5345 1.75.2.2 snj
5346 1.75.2.2 snj /* Fill in 2GHz IEs and tell firmware where they are. */
5347 1.75.2.2 snj rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
5348 1.75.2.2 snj if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
5349 1.75.2.2 snj if (remain < 4 + rs->rs_nrates)
5350 1.75.2.2 snj return ENOBUFS;
5351 1.75.2.2 snj } else if (remain < 2 + rs->rs_nrates)
5352 1.75.2.2 snj return ENOBUFS;
5353 1.75.2.2 snj preq->band_data[0].offset = htole16(frm - (uint8_t *)wh);
5354 1.75.2.2 snj pos = frm;
5355 1.75.2.2 snj frm = ieee80211_add_rates(frm, rs);
5356 1.75.2.2 snj if (rs->rs_nrates > IEEE80211_RATE_SIZE)
5357 1.75.2.2 snj frm = ieee80211_add_xrates(frm, rs);
5358 1.75.2.2 snj preq->band_data[0].len = htole16(frm - pos);
5359 1.75.2.2 snj remain -= frm - pos;
5360 1.75.2.2 snj
5361 1.75.2.2 snj if (isset(sc->sc_enabled_capa,
5362 1.75.2.2 snj IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)) {
5363 1.75.2.2 snj if (remain < 3)
5364 1.75.2.2 snj return ENOBUFS;
5365 1.75.2.2 snj *frm++ = IEEE80211_ELEMID_DSPARMS;
5366 1.75.2.2 snj *frm++ = 1;
5367 1.75.2.2 snj *frm++ = 0;
5368 1.75.2.2 snj remain -= 3;
5369 1.75.2.2 snj }
5370 1.75.2.2 snj
5371 1.75.2.2 snj if (sc->sc_nvm.sku_cap_band_52GHz_enable) {
5372 1.75.2.2 snj /* Fill in 5GHz IEs. */
5373 1.75.2.2 snj rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
5374 1.75.2.2 snj if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
5375 1.75.2.2 snj if (remain < 4 + rs->rs_nrates)
5376 1.75.2.2 snj return ENOBUFS;
5377 1.75.2.2 snj } else if (remain < 2 + rs->rs_nrates)
5378 1.75.2.2 snj return ENOBUFS;
5379 1.75.2.2 snj preq->band_data[1].offset = htole16(frm - (uint8_t *)wh);
5380 1.75.2.2 snj pos = frm;
5381 1.75.2.2 snj frm = ieee80211_add_rates(frm, rs);
5382 1.75.2.2 snj if (rs->rs_nrates > IEEE80211_RATE_SIZE)
5383 1.75.2.2 snj frm = ieee80211_add_xrates(frm, rs);
5384 1.75.2.2 snj preq->band_data[1].len = htole16(frm - pos);
5385 1.75.2.2 snj remain -= frm - pos;
5386 1.75.2.2 snj }
5387 1.75.2.2 snj
5388 1.75.2.2 snj #ifndef IEEE80211_NO_HT
5389 1.75.2.2 snj /* Send 11n IEs on both 2GHz and 5GHz bands. */
5390 1.75.2.2 snj preq->common_data.offset = htole16(frm - (uint8_t *)wh);
5391 1.75.2.2 snj pos = frm;
5392 1.75.2.2 snj if (ic->ic_flags & IEEE80211_F_HTON) {
5393 1.75.2.2 snj if (remain < 28)
5394 1.75.2.2 snj return ENOBUFS;
5395 1.75.2.2 snj frm = ieee80211_add_htcaps(frm, ic);
5396 1.75.2.2 snj /* XXX add WME info? */
5397 1.75.2.2 snj }
5398 1.75.2.2 snj #endif
5399 1.75.2.2 snj
5400 1.75.2.2 snj preq->common_data.len = htole16(frm - pos);
5401 1.75.2.2 snj
5402 1.75.2.2 snj return 0;
5403 1.75.2.2 snj }
5404 1.75.2.2 snj
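/*
 * Issue a scan request through the older LMAC scan API.  The command
 * payload is a struct iwm_scan_req_lmac followed by
 * sc_capa_n_scan_channels channel config entries and a probe request
 * template; it is allocated here, filled in, sent, and freed again.
 * Only a single scan iteration is scheduled and EBS is disabled.
 */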
5405 1.75.2.2 snj static int
5406 1.75.2.2 snj iwm_lmac_scan(struct iwm_softc *sc)
5407 1.75.2.2 snj {
5408 1.75.2.2 snj struct ieee80211com *ic = &sc->sc_ic;
5409 1.75.2.2 snj struct iwm_host_cmd hcmd = {
5410 1.75.2.2 snj .id = IWM_SCAN_OFFLOAD_REQUEST_CMD,
5411 1.75.2.2 snj .len = { 0, },
5412 1.75.2.2 snj .data = { NULL, },
5413 1.75.2.2 snj .flags = 0,
5414 1.75.2.2 snj };
5415 1.75.2.2 snj struct iwm_scan_req_lmac *req;
5416 1.75.2.2 snj size_t req_len;
5417 1.75.2.2 snj int err;
5418 1.75.2.2 snj
5419 1.75.2.2 snj DPRINTF(("%s: %s\n", DEVNAME(sc), __func__));
5420 1.75.2.2 snj
5421 1.75.2.2 snj req_len = sizeof(struct iwm_scan_req_lmac) +
5422 1.75.2.2 snj (sizeof(struct iwm_scan_channel_cfg_lmac) *
5423 1.75.2.2 snj sc->sc_capa_n_scan_channels) + sizeof(struct iwm_scan_probe_req);
5424 1.75.2.2 snj if (req_len > IWM_MAX_CMD_PAYLOAD_SIZE)
5425 1.75.2.2 snj return ENOMEM;
5426 1.75.2.2 snj req = kmem_zalloc(req_len, KM_SLEEP);
5427 1.75.2.2 snj if (req == NULL)
5428 1.75.2.2 snj return ENOMEM;
5429 1.75.2.2 snj
5430 1.75.2.2 snj hcmd.len[0] = (uint16_t)req_len;
5431 1.75.2.2 snj hcmd.data[0] = (void *)req;
5432 1.75.2.2 snj
5433 1.75.2.2 snj /* These timings correspond to iwlwifi's UNASSOC scan. */
5434 1.75.2.2 snj req->active_dwell = 10;
5435 1.75.2.2 snj req->passive_dwell = 110;
5436 1.75.2.2 snj req->fragmented_dwell = 44;
5437 1.75.2.2 snj req->extended_dwell = 90;
5438 1.75.2.2 snj req->max_out_time = 0;
5439 1.75.2.2 snj req->suspend_time = 0;
5440 1.75.2.2 snj
5441 1.75.2.2 snj req->scan_prio = htole32(IWM_SCAN_PRIORITY_HIGH);
5442 1.75.2.2 snj req->rx_chain_select = iwm_scan_rx_chain(sc);
5443 1.75.2.2 snj req->iter_num = htole32(1);
5444 1.75.2.2 snj req->delay = 0;
5445 1.75.2.2 snj
5446 1.75.2.2 snj req->scan_flags = htole32(IWM_LMAC_SCAN_FLAG_PASS_ALL |
5447 1.75.2.2 snj IWM_LMAC_SCAN_FLAG_ITER_COMPLETE |
5448 1.75.2.2 snj IWM_LMAC_SCAN_FLAG_EXTENDED_DWELL);
5449 1.75.2.2 snj if (ic->ic_des_esslen == 0)
5450 1.75.2.2 snj req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAG_PASSIVE);
5451 1.75.2.2 snj else
5452 1.75.2.2 snj req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAG_PRE_CONNECTION);
5453 1.75.2.2 snj if (isset(sc->sc_enabled_capa,
5454 1.75.2.2 snj IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
5455 1.75.2.2 snj req->scan_flags |= htole32(IWM_LMAC_SCAN_FLAGS_RRM_ENABLED);
5456 1.75.2.2 snj
5457 1.75.2.2 snj req->flags = htole32(IWM_PHY_BAND_24);
5458 1.75.2.2 snj if (sc->sc_nvm.sku_cap_band_52GHz_enable)
5459 1.75.2.2 snj req->flags |= htole32(IWM_PHY_BAND_5);
5460 1.75.2.2 snj req->filter_flags =
5461 1.75.2.2 snj htole32(IWM_MAC_FILTER_ACCEPT_GRP | IWM_MAC_FILTER_IN_BEACON);
5462 1.75.2.2 snj
5463 1.75.2.2 snj /* Tx flags 2 GHz. */
5464 1.75.2.2 snj req->tx_cmd[0].tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
5465 1.75.2.2 snj IWM_TX_CMD_FLG_BT_DIS);
5466 1.75.2.2 snj req->tx_cmd[0].rate_n_flags =
5467 1.75.2.2 snj iwm_scan_rate_n_flags(sc, IEEE80211_CHAN_2GHZ, 1/*XXX*/);
5468 1.75.2.2 snj req->tx_cmd[0].sta_id = IWM_AUX_STA_ID;
5469 1.75.2.2 snj
5470 1.75.2.2 snj /* Tx flags 5 GHz. */
5471 1.75.2.2 snj req->tx_cmd[1].tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
5472 1.75.2.2 snj IWM_TX_CMD_FLG_BT_DIS);
5473 1.75.2.2 snj req->tx_cmd[1].rate_n_flags =
5474 1.75.2.2 snj iwm_scan_rate_n_flags(sc, IEEE80211_CHAN_5GHZ, 1/*XXX*/);
5475 1.75.2.2 snj req->tx_cmd[1].sta_id = IWM_AUX_STA_ID;
5476 1.75.2.2 snj
5477 1.75.2.2 snj /* Check if we're doing an active directed scan. */
5478 1.75.2.2 snj if (ic->ic_des_esslen != 0) {
5479 1.75.2.2 snj req->direct_scan[0].id = IEEE80211_ELEMID_SSID;
5480 1.75.2.2 snj req->direct_scan[0].len = ic->ic_des_esslen;
5481 1.75.2.2 snj memcpy(req->direct_scan[0].ssid, ic->ic_des_essid,
5482 1.75.2.2 snj ic->ic_des_esslen);
5483 1.75.2.2 snj }
5484 1.75.2.2 snj
5485 1.75.2.2 snj req->n_channels = iwm_lmac_scan_fill_channels(sc,
5486 1.75.2.2 snj (struct iwm_scan_channel_cfg_lmac *)req->data,
5487 1.75.2.2 snj ic->ic_des_esslen != 0);
5488 1.75.2.2 snj
5489 1.75.2.2 snj err = iwm_fill_probe_req(sc,
5490 1.75.2.2 snj (struct iwm_scan_probe_req *)(req->data +
5491 1.75.2.2 snj (sizeof(struct iwm_scan_channel_cfg_lmac) *
5492 1.75.2.2 snj sc->sc_capa_n_scan_channels)));
5493 1.75.2.2 snj if (err) {
5494 1.75.2.2 snj kmem_free(req, req_len);
5495 1.75.2.2 snj return err;
5496 1.75.2.2 snj }
5497 1.75.2.2 snj
5498 1.75.2.2 snj /* Specify the scan plan: We'll do one iteration. */
5499 1.75.2.2 snj req->schedule[0].iterations = 1;
5500 1.75.2.2 snj req->schedule[0].full_scan_mul = 1;
5501 1.75.2.2 snj
5502 1.75.2.2 snj /* Disable EBS. */
5503 1.75.2.2 snj req->channel_opt[0].non_ebs_ratio = 1;
5504 1.75.2.2 snj req->channel_opt[1].non_ebs_ratio = 1;
5505 1.75.2.2 snj
5506 1.75.2.2 snj err = iwm_send_cmd(sc, &hcmd);
5507 1.75.2.2 snj kmem_free(req, req_len);
5508 1.75.2.2 snj return err;
5509 1.75.2.2 snj }
5510 1.75.2.2 snj
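/*
 * Send the one-time scan configuration used by the UMAC scan API:
 * valid TX/RX chains, the supported legacy rates, dwell times for the
 * unassociated case, our MAC address, the broadcast station ID, and
 * the list of channel numbers the firmware may scan.
 */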
5511 1.75.2.2 snj static int
5512 1.75.2.2 snj iwm_config_umac_scan(struct iwm_softc *sc)
5513 1.75.2.2 snj {
5514 1.75.2.2 snj struct ieee80211com *ic = &sc->sc_ic;
5515 1.75.2.2 snj struct iwm_scan_config *scan_config;
5516 1.75.2.2 snj int err, nchan;
5517 1.75.2.2 snj size_t cmd_size;
5518 1.75.2.2 snj struct ieee80211_channel *c;
5519 1.75.2.2 snj struct iwm_host_cmd hcmd = {
5520 1.75.2.2 snj .id = iwm_cmd_id(IWM_SCAN_CFG_CMD, IWM_ALWAYS_LONG_GROUP, 0),
5521 1.75.2.2 snj .flags = 0,
5522 1.75.2.2 snj };
5523 1.75.2.2 snj static const uint32_t rates = (IWM_SCAN_CONFIG_RATE_1M |
5524 1.75.2.2 snj IWM_SCAN_CONFIG_RATE_2M | IWM_SCAN_CONFIG_RATE_5M |
5525 1.75.2.2 snj IWM_SCAN_CONFIG_RATE_11M | IWM_SCAN_CONFIG_RATE_6M |
5526 1.75.2.2 snj IWM_SCAN_CONFIG_RATE_9M | IWM_SCAN_CONFIG_RATE_12M |
5527 1.75.2.2 snj IWM_SCAN_CONFIG_RATE_18M | IWM_SCAN_CONFIG_RATE_24M |
5528 1.75.2.2 snj IWM_SCAN_CONFIG_RATE_36M | IWM_SCAN_CONFIG_RATE_48M |
5529 1.75.2.2 snj IWM_SCAN_CONFIG_RATE_54M);
5530 1.75.2.2 snj
5531 1.75.2.2 snj cmd_size = sizeof(*scan_config) + sc->sc_capa_n_scan_channels;
5532 1.75.2.2 snj
5533 1.75.2.2 snj scan_config = kmem_zalloc(cmd_size, KM_SLEEP);
5534 1.75.2.2 snj if (scan_config == NULL)
5535 1.75.2.2 snj return ENOMEM;
5536 1.75.2.2 snj
5537 1.75.2.2 snj scan_config->tx_chains = htole32(iwm_fw_valid_tx_ant(sc));
5538 1.75.2.2 snj scan_config->rx_chains = htole32(iwm_fw_valid_rx_ant(sc));
5539 1.75.2.2 snj scan_config->legacy_rates = htole32(rates |
5540 1.75.2.2 snj IWM_SCAN_CONFIG_SUPPORTED_RATE(rates));
5541 1.75.2.2 snj
5542 1.75.2.2 snj /* These timings correspond to iwlwifi's UNASSOC scan. */
5543 1.75.2.2 snj scan_config->dwell_active = 10;
5544 1.75.2.2 snj scan_config->dwell_passive = 110;
5545 1.75.2.2 snj scan_config->dwell_fragmented = 44;
5546 1.75.2.2 snj scan_config->dwell_extended = 90;
5547 1.75.2.2 snj scan_config->out_of_channel_time = htole32(0);
5548 1.75.2.2 snj scan_config->suspend_time = htole32(0);
5549 1.75.2.2 snj
5550 1.75.2.2 snj IEEE80211_ADDR_COPY(scan_config->mac_addr, sc->sc_ic.ic_myaddr);
5551 1.75.2.2 snj
5552 1.75.2.2 snj scan_config->bcast_sta_id = IWM_AUX_STA_ID;
5553 1.75.2.2 snj scan_config->channel_flags = IWM_CHANNEL_FLAG_EBS |
5554 1.75.2.2 snj IWM_CHANNEL_FLAG_ACCURATE_EBS | IWM_CHANNEL_FLAG_EBS_ADD |
5555 1.75.2.2 snj IWM_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE;
5556 1.75.2.2 snj
5557 1.75.2.2 snj for (c = &ic->ic_channels[1], nchan = 0;
5558 1.75.2.2 snj c <= &ic->ic_channels[IEEE80211_CHAN_MAX] &&
5559 1.75.2.2 snj nchan < sc->sc_capa_n_scan_channels; c++) {
5560 1.75.2.2 snj if (c->ic_flags == 0)
5561 1.75.2.2 snj continue;
5562 1.75.2.2 snj scan_config->channel_array[nchan++] =
5563 1.75.2.2 snj ieee80211_mhz2ieee(c->ic_freq, 0);
5564 1.75.2.2 snj }
5565 1.75.2.2 snj
5566 1.75.2.2 snj scan_config->flags = htole32(IWM_SCAN_CONFIG_FLAG_ACTIVATE |
5567 1.75.2.2 snj IWM_SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
5568 1.75.2.2 snj IWM_SCAN_CONFIG_FLAG_SET_TX_CHAINS |
5569 1.75.2.2 snj IWM_SCAN_CONFIG_FLAG_SET_RX_CHAINS |
5570 1.75.2.2 snj IWM_SCAN_CONFIG_FLAG_SET_AUX_STA_ID |
5571 1.75.2.2 snj IWM_SCAN_CONFIG_FLAG_SET_ALL_TIMES |
5572 1.75.2.2 snj IWM_SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
5573 1.75.2.2 snj IWM_SCAN_CONFIG_FLAG_SET_MAC_ADDR |
5574 1.75.2.2 snj IWM_SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS|
5575 1.75.2.2 snj IWM_SCAN_CONFIG_N_CHANNELS(nchan) |
5576 1.75.2.2 snj IWM_SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED);
5577 1.75.2.2 snj
5578 1.75.2.2 snj hcmd.data[0] = scan_config;
5579 1.75.2.2 snj hcmd.len[0] = cmd_size;
5580 1.75.2.2 snj
5581 1.75.2.2 snj err = iwm_send_cmd(sc, &hcmd);
5582 1.75.2.2 snj kmem_free(scan_config, cmd_size);
5583 1.75.2.2 snj return err;
5584 1.75.2.2 snj }
5585 1.75.2.2 snj
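/*
 * Issue a scan request through the newer UMAC scan API.  The payload
 * is a struct iwm_scan_req_umac, followed by the channel config
 * entries, followed by a "tail" holding the directed-scan SSIDs, the
 * probe request template, and the (single-iteration) scan schedule.
 */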
5586 1.75.2.2 snj static int
5587 1.75.2.2 snj iwm_umac_scan(struct iwm_softc *sc)
5588 1.75.2.2 snj {
5589 1.75.2.2 snj struct ieee80211com *ic = &sc->sc_ic;
5590 1.75.2.2 snj struct iwm_host_cmd hcmd = {
5591 1.75.2.2 snj .id = iwm_cmd_id(IWM_SCAN_REQ_UMAC, IWM_ALWAYS_LONG_GROUP, 0),
5592 1.75.2.2 snj .len = { 0, },
5593 1.75.2.2 snj .data = { NULL, },
5594 1.75.2.2 snj .flags = 0,
5595 1.75.2.2 snj };
5596 1.75.2.2 snj struct iwm_scan_req_umac *req;
5597 1.75.2.2 snj struct iwm_scan_req_umac_tail *tail;
5598 1.75.2.2 snj size_t req_len;
5599 1.75.2.2 snj int err;
5600 1.75.2.2 snj
5601 1.75.2.2 snj DPRINTF(("%s: %s\n", DEVNAME(sc), __func__));
5602 1.75.2.2 snj
5603 1.75.2.2 snj req_len = sizeof(struct iwm_scan_req_umac) +
5604 1.75.2.2 snj (sizeof(struct iwm_scan_channel_cfg_umac) *
5605 1.75.2.2 snj sc->sc_capa_n_scan_channels) +
5606 1.75.2.2 snj sizeof(struct iwm_scan_req_umac_tail);
5607 1.75.2.2 snj if (req_len > IWM_MAX_CMD_PAYLOAD_SIZE)
5608 1.75.2.2 snj return ENOMEM;
5609 1.75.2.2 snj req = kmem_zalloc(req_len, KM_SLEEP);
5610 1.75.2.2 snj if (req == NULL)
5611 1.75.2.2 snj return ENOMEM;
5612 1.75.2.2 snj
5613 1.75.2.2 snj hcmd.len[0] = (uint16_t)req_len;
5614 1.75.2.2 snj hcmd.data[0] = (void *)req;
5615 1.75.2.2 snj
5616 1.75.2.2 snj /* These timings correspond to iwlwifi's UNASSOC scan. */
5617 1.75.2.2 snj req->active_dwell = 10;
5618 1.75.2.2 snj req->passive_dwell = 110;
5619 1.75.2.2 snj req->fragmented_dwell = 44;
5620 1.75.2.2 snj req->extended_dwell = 90;
5621 1.75.2.2 snj req->max_out_time = 0;
5622 1.75.2.2 snj req->suspend_time = 0;
5623 1.75.2.2 snj
5624 1.75.2.2 snj req->scan_priority = htole32(IWM_SCAN_PRIORITY_HIGH);
5625 1.75.2.2 snj req->ooc_priority = htole32(IWM_SCAN_PRIORITY_HIGH);
5626 1.75.2.2 snj
5627 1.75.2.2 snj req->n_channels = iwm_umac_scan_fill_channels(sc,
5628 1.75.2.2 snj (struct iwm_scan_channel_cfg_umac *)req->data,
5629 1.75.2.2 snj ic->ic_des_esslen != 0);
5630 1.75.2.2 snj
5631 1.75.2.2 snj req->general_flags = htole32(IWM_UMAC_SCAN_GEN_FLAGS_PASS_ALL |
5632 1.75.2.2 snj IWM_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE |
5633 1.75.2.2 snj IWM_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL);
5634 1.75.2.2 snj
5635 1.75.2.2 snj tail = (struct iwm_scan_req_umac_tail *)(req->data +
5636 1.75.2.2 snj sizeof(struct iwm_scan_channel_cfg_umac) *
5637 1.75.2.2 snj sc->sc_capa_n_scan_channels);
5638 1.75.2.2 snj
5639 1.75.2.2 snj /* Check if we're doing an active directed scan. */
5640 1.75.2.2 snj if (ic->ic_des_esslen != 0) {
5641 1.75.2.2 snj tail->direct_scan[0].id = IEEE80211_ELEMID_SSID;
5642 1.75.2.2 snj tail->direct_scan[0].len = ic->ic_des_esslen;
5643 1.75.2.2 snj memcpy(tail->direct_scan[0].ssid, ic->ic_des_essid,
5644 1.75.2.2 snj ic->ic_des_esslen);
5645 1.75.2.2 snj req->general_flags |=
5646 1.75.2.2 snj htole32(IWM_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT);
5647 1.75.2.2 snj } else
5648 1.75.2.2 snj req->general_flags |= htole32(IWM_UMAC_SCAN_GEN_FLAGS_PASSIVE);
5649 1.75.2.2 snj
5650 1.75.2.2 snj if (isset(sc->sc_enabled_capa,
5651 1.75.2.2 snj IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
5652 1.75.2.2 snj req->general_flags |=
5653 1.75.2.2 snj htole32(IWM_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED);
5654 1.75.2.2 snj
5655 1.75.2.2 snj err = iwm_fill_probe_req(sc, &tail->preq);
5656 1.75.2.2 snj if (err) {
5657 1.75.2.2 snj kmem_free(req, req_len);
5658 1.75.2.2 snj return err;
5659 1.75.2.2 snj }
5660 1.75.2.2 snj
5661 1.75.2.2 snj /* Specify the scan plan: We'll do one iteration. */
5662 1.75.2.2 snj tail->schedule[0].interval = 0;
5663 1.75.2.2 snj tail->schedule[0].iter_count = 1;
5664 1.75.2.2 snj
5665 1.75.2.2 snj err = iwm_send_cmd(sc, &hcmd);
5666 1.75.2.2 snj kmem_free(req, req_len);
5667 1.75.2.2 snj return err;
5668 1.75.2.2 snj }
5669 1.75.2.2 snj
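/*
 * Map a firmware rate index to the matching entry in the node's rate
 * set.  The returned byte keeps the IEEE80211_RATE_BASIC flag so
 * callers can test whether the rate is a basic rate; 0 means the rate
 * is not present in the rate set at all.
 */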
5670 1.75.2.2 snj static uint8_t
5671 1.75.2.2 snj iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
5672 1.75.2.2 snj {
5673 1.75.2.2 snj int i;
5674 1.75.2.2 snj uint8_t rval;
5675 1.75.2.2 snj
5676 1.75.2.2 snj for (i = 0; i < rs->rs_nrates; i++) {
5677 1.75.2.2 snj rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
5678 1.75.2.2 snj if (rval == iwm_rates[ridx].rate)
5679 1.75.2.2 snj return rs->rs_rates[i];
5680 1.75.2.2 snj }
5681 1.75.2.2 snj return 0;
5682 1.75.2.2 snj }
5683 1.75.2.2 snj
5684 1.75.2.2 snj static void
5685 1.75.2.2 snj iwm_ack_rates(struct iwm_softc *sc, struct iwm_node *in, int *cck_rates,
5686 1.75.2.2 snj int *ofdm_rates)
5687 1.75.2.2 snj {
5688 1.75.2.2 snj struct ieee80211_node *ni = &in->in_ni;
5689 1.75.2.2 snj struct ieee80211_rateset *rs = &ni->ni_rates;
5690 1.75.2.2 snj int lowest_present_ofdm = -1;
5691 1.75.2.2 snj int lowest_present_cck = -1;
5692 1.75.2.2 snj uint8_t cck = 0;
5693 1.75.2.2 snj uint8_t ofdm = 0;
5694 1.75.2.2 snj int i;
5695 1.75.2.2 snj
5696 1.75.2.2 snj if (ni->ni_chan == IEEE80211_CHAN_ANYC ||
5697 1.75.2.2 snj IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) {
5698 1.75.2.2 snj for (i = IWM_FIRST_CCK_RATE; i < IWM_FIRST_OFDM_RATE; i++) {
5699 1.75.2.2 snj if ((iwm_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
5700 1.75.2.2 snj continue;
5701 1.75.2.2 snj cck |= (1 << i);
5702 1.75.2.2 snj if (lowest_present_cck == -1 || lowest_present_cck > i)
5703 1.75.2.2 snj lowest_present_cck = i;
5704 1.75.2.2 snj }
5705 1.75.2.2 snj }
5706 1.75.2.2 snj for (i = IWM_FIRST_OFDM_RATE; i <= IWM_LAST_NON_HT_RATE; i++) {
5707 1.75.2.2 snj if ((iwm_ridx2rate(rs, i) & IEEE80211_RATE_BASIC) == 0)
5708 1.75.2.2 snj continue;
5709 1.75.2.2 snj ofdm |= (1 << (i - IWM_FIRST_OFDM_RATE));
5710 1.75.2.2 snj if (lowest_present_ofdm == -1 || lowest_present_ofdm > i)
5711 1.75.2.2 snj lowest_present_ofdm = i;
5712 1.75.2.2 snj }
5713 1.75.2.2 snj
5714 1.75.2.2 snj /*
5715 1.75.2.2 snj * Now we've got the basic rates as bitmaps in the ofdm and cck
5716 1.75.2.2 snj * variables. This isn't sufficient though, as there might not
5717 1.75.2.2 snj * be all the right rates in the bitmap. E.g. if the only basic
5718 1.75.2.2 snj * rates are 5.5 Mbps and 11 Mbps, we still need to add 1 Mbps
5719 1.75.2.2 snj * and 6 Mbps because the 802.11-2007 standard says in 9.6:
5720 1.75.2.2 snj *
5721 1.75.2.2 snj * [...] a STA responding to a received frame shall transmit
5722 1.75.2.2 snj * its Control Response frame [...] at the highest rate in the
5723 1.75.2.2 snj * BSSBasicRateSet parameter that is less than or equal to the
5724 1.75.2.2 snj * rate of the immediately previous frame in the frame exchange
5725 1.75.2.2 snj * sequence ([...]) and that is of the same modulation class
5726 1.75.2.2 snj * ([...]) as the received frame. If no rate contained in the
5727 1.75.2.2 snj * BSSBasicRateSet parameter meets these conditions, then the
5728 1.75.2.2 snj * control frame sent in response to a received frame shall be
5729 1.75.2.2 snj * transmitted at the highest mandatory rate of the PHY that is
5730 1.75.2.2 snj * less than or equal to the rate of the received frame, and
5731 1.75.2.2 snj * that is of the same modulation class as the received frame.
5732 1.75.2.2 snj *
5733 1.75.2.2 snj * As a consequence, we need to add all mandatory rates that are
5734 1.75.2.2 snj * lower than all of the basic rates to these bitmaps.
5735 1.75.2.2 snj */
5736 1.75.2.2 snj
5737 1.75.2.2 snj if (IWM_RATE_24M_INDEX < lowest_present_ofdm)
5738 1.75.2.2 snj ofdm |= IWM_RATE_BIT_MSK(24) >> IWM_FIRST_OFDM_RATE;
5739 1.75.2.2 snj if (IWM_RATE_12M_INDEX < lowest_present_ofdm)
5740 1.75.2.2 snj ofdm |= IWM_RATE_BIT_MSK(12) >> IWM_FIRST_OFDM_RATE;
5741 1.75.2.2 snj /* 6M already there or needed so always add */
5742 1.75.2.2 snj ofdm |= IWM_RATE_BIT_MSK(6) >> IWM_FIRST_OFDM_RATE;
5743 1.75.2.2 snj
5744 1.75.2.2 snj /*
5745 1.75.2.2 snj * CCK is a bit more complex with DSSS vs. HR/DSSS vs. ERP.
5746 1.75.2.2 snj * Note, however:
5747 1.75.2.2 snj * - if no CCK rates are basic, it must be ERP since there must
5748 1.75.2.2 snj * be some basic rates at all, so they're OFDM => ERP PHY
5749 1.75.2.2 snj * (or we're in 5 GHz, and the cck bitmap will never be used)
5750 1.75.2.2 snj * - if 11M is a basic rate, it must be ERP as well, so add 5.5M
5751 1.75.2.2 snj * - if 5.5M is basic, 1M and 2M are mandatory
5752 1.75.2.2 snj * - if 2M is basic, 1M is mandatory
5753 1.75.2.2 snj * - if 1M is basic, that's the only valid ACK rate.
5754 1.75.2.2 snj * As a consequence, it's not as complicated as it sounds, just add
5755 1.75.2.2 snj * any lower rates to the ACK rate bitmap.
5756 1.75.2.2 snj */
5757 1.75.2.2 snj if (IWM_RATE_11M_INDEX < lowest_present_cck)
5758 1.75.2.2 snj cck |= IWM_RATE_BIT_MSK(11) >> IWM_FIRST_CCK_RATE;
5759 1.75.2.2 snj if (IWM_RATE_5M_INDEX < lowest_present_cck)
5760 1.75.2.2 snj cck |= IWM_RATE_BIT_MSK(5) >> IWM_FIRST_CCK_RATE;
5761 1.75.2.2 snj if (IWM_RATE_2M_INDEX < lowest_present_cck)
5762 1.75.2.2 snj cck |= IWM_RATE_BIT_MSK(2) >> IWM_FIRST_CCK_RATE;
5763 1.75.2.2 snj /* 1M already there or needed so always add */
5764 1.75.2.2 snj cck |= IWM_RATE_BIT_MSK(1) >> IWM_FIRST_CCK_RATE;
5765 1.75.2.2 snj
5766 1.75.2.2 snj *cck_rates = cck;
5767 1.75.2.2 snj *ofdm_rates = ofdm;
5768 1.75.2.2 snj }
5769 1.75.2.2 snj
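/*
 * Fill the parts of the MAC context command common to all actions:
 * BSS/station addresses, ACK rates, slot time and preamble flags, and
 * per-AC EDCA parameters.  net80211 stores CWmin/CWmax as exponents,
 * so IWM_EXP2() converts them to the actual contention window sizes
 * the firmware expects (e.g. ECWmin 4 -> CWmin 15).
 */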
5770 1.75.2.2 snj static void
5771 1.75.2.2 snj iwm_mac_ctxt_cmd_common(struct iwm_softc *sc, struct iwm_node *in,
5772 1.75.2.2 snj struct iwm_mac_ctx_cmd *cmd, uint32_t action, int assoc)
5773 1.75.2.2 snj {
5774 1.75.2.2 snj #define IWM_EXP2(x) ((1 << (x)) - 1) /* CWmin = 2^ECWmin - 1 */
5775 1.75.2.2 snj struct ieee80211com *ic = &sc->sc_ic;
5776 1.75.2.2 snj struct ieee80211_node *ni = ic->ic_bss;
5777 1.75.2.2 snj int cck_ack_rates, ofdm_ack_rates;
5778 1.75.2.2 snj int i;
5779 1.75.2.2 snj
5780 1.75.2.2 snj cmd->id_and_color = htole32(IWM_FW_CMD_ID_AND_COLOR(in->in_id,
5781 1.75.2.2 snj in->in_color));
5782 1.75.2.2 snj cmd->action = htole32(action);
5783 1.75.2.2 snj
5784 1.75.2.2 snj cmd->mac_type = htole32(IWM_FW_MAC_TYPE_BSS_STA);
5785 1.75.2.2 snj cmd->tsf_id = htole32(IWM_TSF_ID_A);
5786 1.75.2.2 snj
5787 1.75.2.2 snj IEEE80211_ADDR_COPY(cmd->node_addr, ic->ic_myaddr);
5788 1.75.2.2 snj IEEE80211_ADDR_COPY(cmd->bssid_addr, ni->ni_bssid);
5789 1.75.2.2 snj
5790 1.75.2.2 snj iwm_ack_rates(sc, in, &cck_ack_rates, &ofdm_ack_rates);
5791 1.75.2.2 snj cmd->cck_rates = htole32(cck_ack_rates);
5792 1.75.2.2 snj cmd->ofdm_rates = htole32(ofdm_ack_rates);
5793 1.75.2.2 snj
5794 1.75.2.2 snj cmd->cck_short_preamble
5795 1.75.2.2 snj = htole32((ic->ic_flags & IEEE80211_F_SHPREAMBLE)
5796 1.75.2.2 snj ? IWM_MAC_FLG_SHORT_PREAMBLE : 0);
5797 1.75.2.2 snj cmd->short_slot
5798 1.75.2.2 snj = htole32((ic->ic_flags & IEEE80211_F_SHSLOT)
5799 1.75.2.2 snj ? IWM_MAC_FLG_SHORT_SLOT : 0);
5800 1.75.2.2 snj
5801 1.75.2.2 snj for (i = 0; i < WME_NUM_AC; i++) {
5802 1.75.2.2 snj struct wmeParams *wmep = &ic->ic_wme.wme_params[i];
5803 1.75.2.2 snj int txf = iwm_ac_to_tx_fifo[i];
5804 1.75.2.2 snj
5805 1.75.2.2 snj cmd->ac[txf].cw_min = htole16(IWM_EXP2(wmep->wmep_logcwmin));
5806 1.75.2.2 snj cmd->ac[txf].cw_max = htole16(IWM_EXP2(wmep->wmep_logcwmax));
5807 1.75.2.2 snj cmd->ac[txf].aifsn = wmep->wmep_aifsn;
5808 1.75.2.2 snj cmd->ac[txf].fifos_mask = (1 << txf);
5809 1.75.2.2 snj cmd->ac[txf].edca_txop = htole16(wmep->wmep_txopLimit * 32);
5810 1.75.2.2 snj }
5811 1.75.2.2 snj if (ni->ni_flags & IEEE80211_NODE_QOS)
5812 1.75.2.2 snj cmd->qos_flags |= htole32(IWM_MAC_QOS_FLG_UPDATE_EDCA);
5813 1.75.2.2 snj
5814 1.75.2.2 snj #ifndef IEEE80211_NO_HT
5815 1.75.2.2 snj if (ni->ni_flags & IEEE80211_NODE_HT) {
5816 1.75.2.2 snj enum ieee80211_htprot htprot =
5817 1.75.2.2 snj (ni->ni_htop1 & IEEE80211_HTOP1_PROT_MASK);
5818 1.75.2.2 snj switch (htprot) {
5819 1.75.2.2 snj case IEEE80211_HTPROT_NONE:
5820 1.75.2.2 snj break;
5821 1.75.2.2 snj case IEEE80211_HTPROT_NONMEMBER:
5822 1.75.2.2 snj case IEEE80211_HTPROT_NONHT_MIXED:
5823 1.75.2.2 snj cmd->protection_flags |=
5824 1.75.2.2 snj htole32(IWM_MAC_PROT_FLG_HT_PROT);
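			/*
			 * FALLTHROUGH: non-member/mixed protection also
			 * picks up the FAT (40MHz) protection flag set
			 * for the 20MHz case below.
			 */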
5825 1.75.2.2 snj case IEEE80211_HTPROT_20MHZ:
5826 1.75.2.2 snj cmd->protection_flags |=
5827 1.75.2.2 snj htole32(IWM_MAC_PROT_FLG_HT_PROT |
5828 1.75.2.2 snj IWM_MAC_PROT_FLG_FAT_PROT);
5829 1.75.2.2 snj break;
5830 1.75.2.2 snj default:
5831 1.75.2.2 snj break;
5832 1.75.2.2 snj }
5833 1.75.2.2 snj
5834 1.75.2.2 snj cmd->qos_flags |= htole32(IWM_MAC_QOS_FLG_TGN);
5835 1.75.2.2 snj }
5836 1.75.2.2 snj #endif
5837 1.75.2.2 snj
5838 1.75.2.2 snj if (ic->ic_flags & IEEE80211_F_USEPROT)
5839 1.75.2.2 snj cmd->protection_flags |= htole32(IWM_MAC_PROT_FLG_TGG_PROTECT);
5840 1.75.2.2 snj
5841 1.75.2.2 snj cmd->filter_flags = htole32(IWM_MAC_FILTER_ACCEPT_GRP);
5842 1.75.2.2 snj #undef IWM_EXP2
5843 1.75.2.2 snj }
5844 1.75.2.2 snj
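/*
 * Fill the station-specific part of the MAC context command.  The
 * next DTIM is predicted from ni_rstamp (timestamp of the last
 * received beacon) plus ni_dtim_count beacon intervals; ni_intval is
 * in TUs, hence the IEEE80211_DUR_TU scaling.  The prediction is
 * given both in device time and in TSF units.
 */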
5845 1.75.2.2 snj static void
5846 1.75.2.2 snj iwm_mac_ctxt_cmd_fill_sta(struct iwm_softc *sc, struct iwm_node *in,
5847 1.75.2.2 snj struct iwm_mac_data_sta *sta, int assoc)
5848 1.75.2.2 snj {
5849 1.75.2.2 snj struct ieee80211_node *ni = &in->in_ni;
5850 1.75.2.2 snj uint32_t dtim_off;
5851 1.75.2.2 snj uint64_t tsf;
5852 1.75.2.2 snj
5853 1.75.2.2 snj dtim_off = ni->ni_dtim_count * ni->ni_intval * IEEE80211_DUR_TU;
5854 1.75.2.2 snj tsf = le64toh(ni->ni_tstamp.tsf);
5855 1.75.2.2 snj
5856 1.75.2.2 snj sta->is_assoc = htole32(assoc);
5857 1.75.2.2 snj sta->dtim_time = htole32(ni->ni_rstamp + dtim_off);
5858 1.75.2.2 snj sta->dtim_tsf = htole64(tsf + dtim_off);
5859 1.75.2.2 snj sta->bi = htole32(ni->ni_intval);
5860 1.75.2.2 snj sta->bi_reciprocal = htole32(iwm_reciprocal(ni->ni_intval));
5861 1.75.2.2 snj sta->dtim_interval = htole32(ni->ni_intval * ni->ni_dtim_period);
5862 1.75.2.2 snj sta->dtim_reciprocal = htole32(iwm_reciprocal(sta->dtim_interval));
5863 1.75.2.2 snj sta->listen_interval = htole32(10);
5864 1.75.2.2 snj sta->assoc_id = htole32(ni->ni_associd);
5865 1.75.2.2 snj sta->assoc_beacon_arrive_time = htole32(ni->ni_rstamp);
5866 1.75.2.2 snj }
5867 1.75.2.2 snj
5868 1.75.2.2 snj static int
5869 1.75.2.2 snj iwm_mac_ctxt_cmd(struct iwm_softc *sc, struct iwm_node *in, uint32_t action,
5870 1.75.2.2 snj int assoc)
5871 1.75.2.2 snj {
5872 1.75.2.2 snj struct ieee80211_node *ni = &in->in_ni;
5873 1.75.2.2 snj struct iwm_mac_ctx_cmd cmd;
5874 1.75.2.2 snj
5875 1.75.2.2 snj memset(&cmd, 0, sizeof(cmd));
5876 1.75.2.2 snj
5877 1.75.2.2 snj iwm_mac_ctxt_cmd_common(sc, in, &cmd, action, assoc);
5878 1.75.2.2 snj
5879 1.75.2.2 snj /* Allow beacons to pass through as long as we are not associated or we
5880 1.75.2.2 snj * do not have dtim period information */
5881 1.75.2.2 snj if (!assoc || !ni->ni_associd || !ni->ni_dtim_period)
5882 1.75.2.2 snj cmd.filter_flags |= htole32(IWM_MAC_FILTER_IN_BEACON);
5883 1.75.2.2 snj else
5884 1.75.2.2 snj iwm_mac_ctxt_cmd_fill_sta(sc, in, &cmd.sta, assoc);
5885 1.75.2.2 snj
5886 1.75.2.2 snj return iwm_send_cmd_pdu(sc, IWM_MAC_CONTEXT_CMD, 0, sizeof(cmd), &cmd);
5887 1.75.2.2 snj }
5888 1.75.2.2 snj
5889 1.75.2.2 snj #define IWM_MISSED_BEACONS_THRESHOLD 8
5890 1.75.2.2 snj
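/*
 * Handle a missed-beacons notification from the firmware.  Once more
 * than IWM_MISSED_BEACONS_THRESHOLD consecutive beacons have been
 * missed since the last RX, notify net80211 via
 * ieee80211_beacon_miss().
 */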
5891 1.75.2.2 snj static void
5892 1.75.2.2 snj iwm_rx_missed_beacons_notif(struct iwm_softc *sc,
5893 1.75.2.2 snj struct iwm_rx_packet *pkt, struct iwm_rx_data *data)
5894 1.75.2.2 snj {
5895 1.75.2.2 snj struct iwm_missed_beacons_notif *mb = (void *)pkt->data;
5896 1.75.2.2 snj int s;
5897 1.75.2.2 snj
5898 1.75.2.2 snj DPRINTF(("missed bcn mac_id=%u, consecutive=%u (%u, %u, %u)\n",
5899 1.75.2.2 snj le32toh(mb->mac_id),
5900 1.75.2.2 snj le32toh(mb->consec_missed_beacons),
5901 1.75.2.2 snj le32toh(mb->consec_missed_beacons_since_last_rx),
5902 1.75.2.2 snj le32toh(mb->num_recvd_beacons),
5903 1.75.2.2 snj le32toh(mb->num_expected_beacons)));
5904 1.75.2.2 snj
5905 1.75.2.2 snj /*
5906 1.75.2.2 snj * TODO: the threshold should be adjusted based on latency conditions,
5907 1.75.2.2 snj * and/or in case of a CS flow on one of the other AP vifs.
5908 1.75.2.2 snj */
5909 1.75.2.2 snj if (le32toh(mb->consec_missed_beacons_since_last_rx) >
5910 1.75.2.2 snj IWM_MISSED_BEACONS_THRESHOLD) {
5911 1.75.2.2 snj s = splnet();
5912 1.75.2.2 snj ieee80211_beacon_miss(&sc->sc_ic);
5913 1.75.2.2 snj splx(s);
5914 1.75.2.2 snj }
5915 1.75.2.2 snj }
5916 1.75.2.2 snj
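/*
 * Program the firmware's time quota for each active binding.  The
 * scheduling session of IWM_MAX_QUOTA fragments is split evenly among
 * all bindings with active interfaces; since this driver only ever
 * uses a single binding, that binding gets the whole quota, and any
 * division remainder (quota_rem) is credited to the first binding.
 */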
5917 1.75.2.2 snj static int
5918 1.75.2.2 snj iwm_update_quotas(struct iwm_softc *sc, struct iwm_node *in)
5919 1.75.2.2 snj {
5920 1.75.2.2 snj struct iwm_time_quota_cmd cmd;
5921 1.75.2.2 snj int i, idx, num_active_macs, quota, quota_rem;
5922 1.75.2.2 snj int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
5923 1.75.2.2 snj int n_ifs[IWM_MAX_BINDINGS] = {0, };
5924 1.75.2.2 snj uint16_t id;
5925 1.75.2.2 snj
5926 1.75.2.2 snj memset(&cmd, 0, sizeof(cmd));
5927 1.75.2.2 snj
5928 1.75.2.2 snj /* currently, PHY ID == binding ID */
5929 1.75.2.2 snj if (in) {
5930 1.75.2.2 snj id = in->in_phyctxt->id;
5931 1.75.2.2 snj KASSERT(id < IWM_MAX_BINDINGS);
5932 1.75.2.2 snj colors[id] = in->in_phyctxt->color;
5933 1.75.2.2 snj
5935 1.75.2.2 snj 		n_ifs[id] = 1;
5936 1.75.2.2 snj }
5937 1.75.2.2 snj
5938 1.75.2.2 snj /*
5939 1.75.2.2 snj * The FW's scheduling session consists of
5940 1.75.2.2 snj * IWM_MAX_QUOTA fragments. Divide these fragments
5941 1.75.2.2 snj * equally between all the bindings that require quota
5942 1.75.2.2 snj */
5943 1.75.2.2 snj num_active_macs = 0;
5944 1.75.2.2 snj for (i = 0; i < IWM_MAX_BINDINGS; i++) {
5945 1.75.2.2 snj cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
5946 1.75.2.2 snj num_active_macs += n_ifs[i];
5947 1.75.2.2 snj }
5948 1.75.2.2 snj
5949 1.75.2.2 snj quota = 0;
5950 1.75.2.2 snj quota_rem = 0;
5951 1.75.2.2 snj if (num_active_macs) {
5952 1.75.2.2 snj quota = IWM_MAX_QUOTA / num_active_macs;
5953 1.75.2.2 snj quota_rem = IWM_MAX_QUOTA % num_active_macs;
5954 1.75.2.2 snj }
5955 1.75.2.2 snj
5956 1.75.2.2 snj for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
5957 1.75.2.2 snj if (colors[i] < 0)
5958 1.75.2.2 snj continue;
5959 1.75.2.2 snj
5960 1.75.2.2 snj cmd.quotas[idx].id_and_color =
5961 1.75.2.2 snj htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
5962 1.75.2.2 snj
5963 1.75.2.2 snj if (n_ifs[i] <= 0) {
5964 1.75.2.2 snj cmd.quotas[idx].quota = htole32(0);
5965 1.75.2.2 snj cmd.quotas[idx].max_duration = htole32(0);
5966 1.75.2.2 snj } else {
5967 1.75.2.2 snj cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
5968 1.75.2.2 snj cmd.quotas[idx].max_duration = htole32(0);
5969 1.75.2.2 snj }
5970 1.75.2.2 snj idx++;
5971 1.75.2.2 snj }
5972 1.75.2.2 snj
5973 1.75.2.2 snj /* Give the remainder of the session to the first binding */
5974 1.75.2.2 snj cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
5975 1.75.2.2 snj
5976 1.75.2.2 snj return iwm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, 0, sizeof(cmd), &cmd);
5977 1.75.2.2 snj }
5978 1.75.2.2 snj
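/*
 * Prepare firmware state for authentication: switch the smart fifo to
 * full-on, update the multicast filter, point PHY context 0 at the
 * AP's channel, add and then update the MAC context, bind it to the
 * PHY context, and add the firmware station entry.  Finally, a time
 * event "protects" the session for roughly two beacon intervals so
 * the firmware does not wander off channel while we associate.
 */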
5979 1.75.2.2 snj static int
5980 1.75.2.2 snj iwm_auth(struct iwm_softc *sc)
5981 1.75.2.2 snj {
5982 1.75.2.2 snj struct ieee80211com *ic = &sc->sc_ic;
5983 1.75.2.2 snj struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
5984 1.75.2.2 snj uint32_t duration;
5985 1.75.2.2 snj int err;
5986 1.75.2.2 snj
5987 1.75.2.2 snj err = iwm_sf_config(sc, IWM_SF_FULL_ON);
5988 1.75.2.2 snj if (err)
5989 1.75.2.2 snj return err;
5990 1.75.2.2 snj
5991 1.75.2.2 snj err = iwm_allow_mcast(sc);
5992 1.75.2.2 snj if (err)
5993 1.75.2.2 snj return err;
5994 1.75.2.2 snj
5995 1.75.2.2 snj sc->sc_phyctxt[0].channel = in->in_ni.ni_chan;
5996 1.75.2.2 snj err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[0], 1, 1,
5997 1.75.2.2 snj IWM_FW_CTXT_ACTION_MODIFY, 0);
5998 1.75.2.2 snj if (err)
5999 1.75.2.2 snj return err;
6000 1.75.2.2 snj in->in_phyctxt = &sc->sc_phyctxt[0];
6001 1.75.2.2 snj
6002 1.75.2.2 snj err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD, 0);
6003 1.75.2.2 snj if (err) {
6004 1.75.2.2 snj aprint_error_dev(sc->sc_dev,
6005 1.75.2.2 snj "could not add MAC context (error %d)\n", err);
6006 1.75.2.2 snj return err;
6007 1.75.2.2 snj }
6008 1.75.2.2 snj
6009 1.75.2.2 snj err = iwm_binding_cmd(sc, in, IWM_FW_CTXT_ACTION_ADD);
6010 1.75.2.2 snj if (err)
6011 1.75.2.2 snj return err;
6012 1.75.2.2 snj
6013 1.75.2.2 snj err = iwm_add_sta_cmd(sc, in, 0);
6014 1.75.2.2 snj if (err)
6015 1.75.2.2 snj return err;
6016 1.75.2.2 snj
6017 1.75.2.2 snj err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 0);
6018 1.75.2.2 snj if (err) {
6019 1.75.2.2 snj aprint_error_dev(sc->sc_dev, "failed to update MAC\n");
6020 1.75.2.2 snj return err;
6021 1.75.2.2 snj }
6022 1.75.2.2 snj
6023 1.75.2.2 snj /*
6024 1.75.2.2 snj * Prevent the FW from wandering off channel during association
6025 1.75.2.2 snj * by "protecting" the session with a time event.
6026 1.75.2.2 snj */
6027 1.75.2.2 snj if (in->in_ni.ni_intval)
6028 1.75.2.2 snj duration = in->in_ni.ni_intval * 2;
6029 1.75.2.2 snj else
6030 1.75.2.2 snj duration = IEEE80211_DUR_TU;
6031 1.75.2.2 snj iwm_protect_session(sc, in, duration, in->in_ni.ni_intval / 2);
6032 1.75.2.2 snj DELAY(100);
6033 1.75.2.2 snj
6034 1.75.2.2 snj return 0;
6035 1.75.2.2 snj }
6036 1.75.2.2 snj
6037 1.75.2.2 snj static int
6038 1.75.2.2 snj iwm_assoc(struct iwm_softc *sc)
6039 1.75.2.2 snj {
6040 1.75.2.2 snj struct ieee80211com *ic = &sc->sc_ic;
6041 1.75.2.2 snj struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
6042 1.75.2.2 snj int err;
6043 1.75.2.2 snj
6044 1.75.2.2 snj err = iwm_add_sta_cmd(sc, in, 1);
6045 1.75.2.2 snj if (err)
6046 1.75.2.2 snj return err;
6047 1.75.2.2 snj
6048 1.75.2.2 snj return 0;
6049 1.75.2.2 snj }
6050 1.75.2.2 snj
6051 1.75.2.2 snj static struct ieee80211_node *
6052 1.75.2.2 snj iwm_node_alloc(struct ieee80211_node_table *nt)
6053 1.75.2.2 snj {
6054 1.75.2.2 snj return malloc(sizeof(struct iwm_node), M_80211_NODE, M_NOWAIT | M_ZERO);
6055 1.75.2.2 snj }
6056 1.75.2.2 snj
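/*
 * Periodic AMRR rate adaptation pass, rescheduled every 500ms via
 * sc_calib_to.  It only runs while we are in STA mode with a BSS and
 * no fixed rate/MCS is configured; with HT enabled, a TX rate change
 * additionally triggers the setrates softint so the firmware LQ table
 * can be updated from process context.
 */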
6057 1.75.2.2 snj static void
6058 1.75.2.2 snj iwm_calib_timeout(void *arg)
6059 1.75.2.2 snj {
6060 1.75.2.2 snj struct iwm_softc *sc = arg;
6061 1.75.2.2 snj struct ieee80211com *ic = &sc->sc_ic;
6062 1.75.2.2 snj struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
6063 1.75.2.2 snj #ifndef IEEE80211_NO_HT
6064 1.75.2.2 snj struct ieee80211_node *ni = &in->in_ni;
6065 1.75.2.2 snj int otxrate;
6066 1.75.2.2 snj #endif
6067 1.75.2.2 snj int s;
6068 1.75.2.2 snj
6069 1.75.2.2 snj s = splnet();
6070 1.75.2.2 snj if ((ic->ic_fixed_rate == -1
6071 1.75.2.2 snj #ifndef IEEE80211_NO_HT
6072 1.75.2.2 snj || ic->ic_fixed_mcs == -1
6073 1.75.2.2 snj #endif
6074 1.75.2.2 snj ) &&
6075 1.75.2.2 snj ic->ic_opmode == IEEE80211_M_STA && ic->ic_bss) {
6076 1.75.2.2 snj #ifndef IEEE80211_NO_HT
6077 1.75.2.2 snj if (ni->ni_flags & IEEE80211_NODE_HT)
6078 1.75.2.2 snj otxrate = ni->ni_txmcs;
6079 1.75.2.2 snj else
6080 1.75.2.2 snj otxrate = ni->ni_txrate;
6081 1.75.2.2 snj #endif
6082 1.75.2.2 snj ieee80211_amrr_choose(&sc->sc_amrr, &in->in_ni, &in->in_amn);
6083 1.75.2.2 snj
6084 1.75.2.2 snj #ifndef IEEE80211_NO_HT
6085 1.75.2.2 snj /*
6086 1.75.2.2 snj * If AMRR has chosen a new TX rate we must update
6087 1.75.2.2 snj 	 * the firmware's LQ rate table from process context.
6088 1.75.2.2 snj */
6089 1.75.2.2 snj if ((ni->ni_flags & IEEE80211_NODE_HT) &&
6090 1.75.2.2 snj otxrate != ni->ni_txmcs)
6091 1.75.2.2 snj softint_schedule(sc->setrates_task);
6092 1.75.2.2 snj else if (otxrate != ni->ni_txrate)
6093 1.75.2.2 snj softint_schedule(sc->setrates_task);
6094 1.75.2.2 snj #endif
6095 1.75.2.2 snj }
6096 1.75.2.2 snj splx(s);
6097 1.75.2.2 snj
6098 1.75.2.2 snj callout_schedule(&sc->sc_calib_to, mstohz(500));
6099 1.75.2.2 snj }
6100 1.75.2.2 snj
6101 1.75.2.2 snj #ifndef IEEE80211_NO_HT
6102 1.75.2.2 snj static void
6103 1.75.2.2 snj iwm_setrates_task(void *arg)
6104 1.75.2.2 snj {
6105 1.75.2.2 snj struct iwm_softc *sc = arg;
6106 1.75.2.2 snj struct ieee80211com *ic = &sc->sc_ic;
6107 1.75.2.2 snj struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
6108 1.75.2.2 snj
6109 1.75.2.2 snj /* Update rates table based on new TX rate determined by AMRR. */
6110 1.75.2.2 snj iwm_setrates(in);
6111 1.75.2.2 snj }
6112 1.75.2.2 snj
6113 1.75.2.2 snj static int
6114 1.75.2.2 snj iwm_setrates(struct iwm_node *in)
6115 1.75.2.2 snj {
6116 1.75.2.2 snj struct ieee80211_node *ni = &in->in_ni;
6117 1.75.2.2 snj struct ieee80211com *ic = ni->ni_ic;
6118 1.75.2.2 snj struct iwm_softc *sc = IC2IFP(ic)->if_softc;
6119 1.75.2.2 snj struct iwm_lq_cmd *lq = &in->in_lq;
6120 1.75.2.2 snj struct ieee80211_rateset *rs = &ni->ni_rates;
6121 1.75.2.2 snj int i, j, ridx, ridx_min, tab = 0;
6122 1.75.2.2 snj #ifndef IEEE80211_NO_HT
6123 1.75.2.2 snj int sgi_ok;
6124 1.75.2.2 snj #endif
6125 1.75.2.2 snj struct iwm_host_cmd cmd = {
6126 1.75.2.2 snj .id = IWM_LQ_CMD,
6127 1.75.2.2 snj .len = { sizeof(in->in_lq), },
6128 1.75.2.2 snj };
6129 1.75.2.2 snj
6130 1.75.2.2 snj memset(lq, 0, sizeof(*lq));
6131 1.75.2.2 snj lq->sta_id = IWM_STATION_ID;
6132 1.75.2.2 snj
6133 1.75.2.2 snj if (ic->ic_flags & IEEE80211_F_USEPROT)
6134 1.75.2.2 snj lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;
6135 1.75.2.2 snj
6136 1.75.2.2 snj #ifndef IEEE80211_NO_HT
6137 1.75.2.2 snj sgi_ok = ((ni->ni_flags & IEEE80211_NODE_HT) &&
6138 1.75.2.2 snj (ni->ni_htcaps & IEEE80211_HTCAP_SGI20));
6139 1.75.2.2 snj #endif
6140 1.75.2.2 snj
6141 1.75.2.2 snj
6142 1.75.2.2 snj /*
6143 1.75.2.2 snj * Fill the LQ rate selection table with legacy and/or HT rates
6144 1.75.2.2 snj * in descending order, i.e. with the node's current TX rate first.
6145 1.75.2.2 snj * In cases where throughput of an HT rate corresponds to a legacy
6146 1.75.2.2 snj * rate it makes no sense to add both. We rely on the fact that
6147 1.75.2.2 snj * iwm_rates is laid out such that equivalent HT/legacy rates share
6148 1.75.2.2 snj * the same IWM_RATE_*_INDEX value. Also, rates not applicable to
6149 1.75.2.2 snj * legacy/HT are assumed to be marked with an 'invalid' PLCP value.
6150 1.75.2.2 snj */
6151 1.75.2.2 snj j = 0;
6152 1.75.2.2 snj ridx_min = (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) ?
6153 1.75.2.2 snj IWM_RIDX_OFDM : IWM_RIDX_CCK;
6154 1.75.2.2 snj for (ridx = IWM_RIDX_MAX; ridx >= ridx_min; ridx--) {
6155 1.75.2.2 snj if (j >= __arraycount(lq->rs_table))
6156 1.75.2.2 snj break;
6157 1.75.2.2 snj tab = 0;
6158 1.75.2.2 snj #ifndef IEEE80211_NO_HT
6159 1.75.2.2 snj if ((ni->ni_flags & IEEE80211_NODE_HT) &&
6160 1.75.2.2 snj iwm_rates[ridx].ht_plcp != IWM_RATE_HT_SISO_MCS_INV_PLCP) {
6161 1.75.2.2 snj for (i = ni->ni_txmcs; i >= 0; i--) {
6162 1.75.2.2 snj if (isclr(ni->ni_rxmcs, i))
6163 1.75.2.2 snj continue;
6164 1.75.2.2 snj if (ridx == iwm_mcs2ridx[i]) {
6165 1.75.2.2 snj tab = iwm_rates[ridx].ht_plcp;
6166 1.75.2.2 snj tab |= IWM_RATE_MCS_HT_MSK;
6167 1.75.2.2 snj if (sgi_ok)
6168 1.75.2.2 snj tab |= IWM_RATE_MCS_SGI_MSK;
6169 1.75.2.2 snj break;
6170 1.75.2.2 snj }
6171 1.75.2.2 snj }
6172 1.75.2.2 snj }
6173 1.75.2.2 snj #endif
6174 1.75.2.2 snj if (tab == 0 && iwm_rates[ridx].plcp != IWM_RATE_INVM_PLCP) {
6175 1.75.2.2 snj for (i = ni->ni_txrate; i >= 0; i--) {
6176 1.75.2.2 snj if (iwm_rates[ridx].rate == (rs->rs_rates[i] &
6177 1.75.2.2 snj IEEE80211_RATE_VAL)) {
6178 1.75.2.2 snj tab = iwm_rates[ridx].plcp;
6179 1.75.2.2 snj break;
6180 1.75.2.2 snj }
6181 1.75.2.2 snj }
6182 1.75.2.2 snj }
6183 1.75.2.2 snj
6184 1.75.2.2 snj if (tab == 0)
6185 1.75.2.2 snj continue;
6186 1.75.2.2 snj
6187 1.75.2.2 snj tab |= 1 << IWM_RATE_MCS_ANT_POS;
6188 1.75.2.2 snj if (IWM_RIDX_IS_CCK(ridx))
6189 1.75.2.2 snj tab |= IWM_RATE_MCS_CCK_MSK;
6190 1.75.2.2 snj DPRINTFN(2, ("station rate %d %x\n", i, tab));
6191 1.75.2.2 snj lq->rs_table[j++] = htole32(tab);
6192 1.75.2.2 snj }
6193 1.75.2.2 snj
6194 1.75.2.2 snj /* Fill the rest with the lowest possible rate */
6195 1.75.2.2 snj i = j > 0 ? j - 1 : 0;
6196 1.75.2.2 snj while (j < __arraycount(lq->rs_table))
6197 1.75.2.2 snj lq->rs_table[j++] = lq->rs_table[i];
6198 1.75.2.2 snj
6199 1.75.2.2 snj lq->single_stream_ant_msk = IWM_ANT_A;
6200 1.75.2.2 snj lq->dual_stream_ant_msk = IWM_ANT_AB;
6201 1.75.2.2 snj
6202 1.75.2.2 snj lq->agg_time_limit = htole16(4000); /* 4ms */
6203 1.75.2.2 snj lq->agg_disable_start_th = 3;
6204 1.75.2.2 snj #ifdef notyet
6205 1.75.2.2 snj lq->agg_frame_cnt_limit = 0x3f;
6206 1.75.2.2 snj #else
6207 1.75.2.2 snj lq->agg_frame_cnt_limit = 1; /* tx agg disabled */
6208 1.75.2.2 snj #endif
6209 1.75.2.2 snj
6210 1.75.2.2 snj cmd.data[0] = &in->in_lq;
6211 1.75.2.2 snj return iwm_send_cmd(sc, &cmd);
6212 1.75.2.2 snj }
6213 1.75.2.2 snj #endif
6214 1.75.2.2 snj
6215 1.75.2.2 snj static int
6216 1.75.2.2 snj iwm_media_change(struct ifnet *ifp)
6217 1.75.2.2 snj {
6218 1.75.2.2 snj struct iwm_softc *sc = ifp->if_softc;
6219 1.75.2.2 snj struct ieee80211com *ic = &sc->sc_ic;
6220 1.75.2.2 snj uint8_t rate, ridx;
6221 1.75.2.2 snj int err;
6222 1.75.2.2 snj
6223 1.75.2.2 snj err = ieee80211_media_change(ifp);
6224 1.75.2.2 snj if (err != ENETRESET)
6225 1.75.2.2 snj return err;
6226 1.75.2.2 snj
6227 1.75.2.2 snj #ifndef IEEE80211_NO_HT
6228 1.75.2.2 snj if (ic->ic_fixed_mcs != -1)
6229 1.75.2.2 snj sc->sc_fixed_ridx = iwm_mcs2ridx[ic->ic_fixed_mcs];
6230 1.75.2.2 snj else
6231 1.75.2.2 snj #endif
6232 1.75.2.2 snj if (ic->ic_fixed_rate != -1) {
6233 1.75.2.2 snj rate = ic->ic_sup_rates[ic->ic_curmode].
6234 1.75.2.2 snj rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL;
6235 1.75.2.2 snj /* Map 802.11 rate to HW rate index. */
6236 1.75.2.2 snj for (ridx = 0; ridx <= IWM_RIDX_MAX; ridx++)
6237 1.75.2.2 snj if (iwm_rates[ridx].rate == rate)
6238 1.75.2.2 snj break;
6239 1.75.2.2 snj sc->sc_fixed_ridx = ridx;
6240 1.75.2.2 snj }
6241 1.75.2.2 snj
6242 1.75.2.2 snj if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
6243 1.75.2.2 snj (IFF_UP | IFF_RUNNING)) {
6244 1.75.2.2 snj iwm_stop(ifp, 0);
6245 1.75.2.2 snj err = iwm_init(ifp);
6246 1.75.2.2 snj }
6247 1.75.2.2 snj return err;
6248 1.75.2.2 snj }
6249 1.75.2.2 snj
6250 1.75.2.2 snj static int
6251 1.75.2.2 snj iwm_do_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
6252 1.75.2.2 snj {
6253 1.75.2.2 snj struct ifnet *ifp = IC2IFP(ic);
6254 1.75.2.2 snj struct iwm_softc *sc = ifp->if_softc;
6255 1.75.2.2 snj enum ieee80211_state ostate = ic->ic_state;
6256 1.75.2.2 snj struct iwm_node *in;
6257 1.75.2.2 snj int err;
6258 1.75.2.2 snj
6259 1.75.2.2 snj DPRINTF(("switching state %s->%s\n", ieee80211_state_name[ostate],
6260 1.75.2.2 snj ieee80211_state_name[nstate]));
6261 1.75.2.2 snj
6262 1.75.2.2 snj if (ostate == IEEE80211_S_SCAN && nstate != ostate)
6263 1.75.2.2 snj iwm_led_blink_stop(sc);
6264 1.75.2.2 snj
6265 1.75.2.2 snj if (ostate == IEEE80211_S_RUN && nstate != ostate)
6266 1.75.2.2 snj iwm_disable_beacon_filter(sc);
6267 1.75.2.2 snj
6268 1.75.2.2 snj /* Reset the device if moving out of AUTH, ASSOC, or RUN. */
6269 1.75.2.2 snj /* XXX Is there a way to switch states without a full reset? */
6270 1.75.2.2 snj if (ostate > IEEE80211_S_SCAN && nstate < ostate) {
6271 1.75.2.2 snj /*
6272 1.75.2.2 snj * Upon receiving a deauth frame from AP the net80211 stack
6273 1.75.2.2 snj * puts the driver into AUTH state. This will fail with this
6274 1.75.2.2 snj * driver so bring the FSM from RUN to SCAN in this case.
6275 1.75.2.2 snj */
6276 1.75.2.2 snj if (nstate != IEEE80211_S_INIT) {
6277 1.75.2.2 snj DPRINTF(("Force transition to INIT; MGT=%d\n", arg));
6278 1.75.2.2 snj /* Always pass arg as -1 since we can't Tx right now. */
6279 1.75.2.2 snj sc->sc_newstate(ic, IEEE80211_S_INIT, -1);
6280 1.75.2.2 snj iwm_stop(ifp, 0);
6281 1.75.2.2 snj iwm_init(ifp);
6282 1.75.2.2 snj return 0;
6283 1.75.2.2 snj }
6284 1.75.2.2 snj
6285 1.75.2.2 snj iwm_stop_device(sc);
6286 1.75.2.2 snj iwm_init_hw(sc);
6287 1.75.2.2 snj }
6288 1.75.2.2 snj
6289 1.75.2.2 snj switch (nstate) {
6290 1.75.2.2 snj case IEEE80211_S_INIT:
6291 1.75.2.2 snj break;
6292 1.75.2.2 snj
6293 1.75.2.2 snj case IEEE80211_S_SCAN:
6294 1.75.2.2 snj if (ostate == nstate &&
6295 1.75.2.2 snj ISSET(sc->sc_flags, IWM_FLAG_SCANNING))
6296 1.75.2.2 snj return 0;
6297 1.75.2.2 snj if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
6298 1.75.2.2 snj err = iwm_umac_scan(sc);
6299 1.75.2.2 snj else
6300 1.75.2.2 snj err = iwm_lmac_scan(sc);
6301 1.75.2.2 snj if (err) {
6302 1.75.2.2 snj DPRINTF(("%s: could not initiate scan: %d\n",
6303 1.75.2.2 snj DEVNAME(sc), err));
6304 1.75.2.2 snj return err;
6305 1.75.2.2 snj }
6306 1.75.2.2 snj SET(sc->sc_flags, IWM_FLAG_SCANNING);
6307 1.75.2.2 snj ic->ic_state = nstate;
6308 1.75.2.2 snj iwm_led_blink_start(sc);
6309 1.75.2.2 snj return 0;
6310 1.75.2.2 snj
6311 1.75.2.2 snj case IEEE80211_S_AUTH:
6312 1.75.2.2 snj err = iwm_auth(sc);
6313 1.75.2.2 snj if (err) {
6314 1.75.2.2 snj DPRINTF(("%s: could not move to auth state: %d\n",
6315 1.75.2.2 snj DEVNAME(sc), err));
6316 1.75.2.2 snj return err;
6317 1.75.2.2 snj }
6318 1.75.2.2 snj break;
6319 1.75.2.2 snj
6320 1.75.2.2 snj case IEEE80211_S_ASSOC:
6321 1.75.2.2 snj err = iwm_assoc(sc);
6322 1.75.2.2 snj if (err) {
6323 1.75.2.2 snj DPRINTF(("%s: failed to associate: %d\n", DEVNAME(sc),
6324 1.75.2.2 snj err));
6325 1.75.2.2 snj return err;
6326 1.75.2.2 snj }
6327 1.75.2.2 snj break;
6328 1.75.2.2 snj
6329 1.75.2.2 snj case IEEE80211_S_RUN:
6330 1.75.2.2 snj in = (struct iwm_node *)ic->ic_bss;
6331 1.75.2.2 snj
6332 1.75.2.2 snj /* We have now been assigned an associd by the AP. */
6333 1.75.2.2 snj err = iwm_mac_ctxt_cmd(sc, in, IWM_FW_CTXT_ACTION_MODIFY, 1);
6334 1.75.2.2 snj if (err) {
6335 1.75.2.2 snj aprint_error_dev(sc->sc_dev, "failed to update MAC\n");
6336 1.75.2.2 snj return err;
6337 1.75.2.2 snj }
6338 1.75.2.2 snj
6339 1.75.2.2 snj err = iwm_power_update_device(sc);
6340 1.75.2.2 snj if (err) {
6341 1.75.2.2 snj aprint_error_dev(sc->sc_dev,
6342 1.75.2.2 snj 			    "could not send power command (error %d)\n", err);
6343 1.75.2.2 snj return err;
6344 1.75.2.2 snj }
6345 1.75.2.2 snj #ifdef notyet
6346 1.75.2.2 snj /*
6347 1.75.2.2 snj * Disabled for now. Default beacon filter settings
6348 1.75.2.2 snj * prevent net80211 from getting ERP and HT protection
6349 1.75.2.2 snj * updates from beacons.
6350 1.75.2.2 snj */
6351 1.75.2.2 snj err = iwm_enable_beacon_filter(sc, in);
6352 1.75.2.2 snj if (err) {
6353 1.75.2.2 snj aprint_error_dev(sc->sc_dev,
6354 1.75.2.2 snj "could not enable beacon filter\n");
6355 1.75.2.2 snj return err;
6356 1.75.2.2 snj }
6357 1.75.2.2 snj #endif
6358 1.75.2.2 snj err = iwm_power_mac_update_mode(sc, in);
6359 1.75.2.2 snj if (err) {
6360 1.75.2.2 snj aprint_error_dev(sc->sc_dev,
6361 1.75.2.2 snj "could not update MAC power (error %d)\n", err);
6362 1.75.2.2 snj return err;
6363 1.75.2.2 snj }
6364 1.75.2.2 snj
6365 1.75.2.2 snj err = iwm_update_quotas(sc, in);
6366 1.75.2.2 snj if (err) {
6367 1.75.2.2 snj aprint_error_dev(sc->sc_dev,
6368 1.75.2.2 snj "could not update quotas (error %d)\n", err);
6369 1.75.2.2 snj return err;
6370 1.75.2.2 snj }
6371 1.75.2.2 snj
6372 1.75.2.2 snj ieee80211_amrr_node_init(&sc->sc_amrr, &in->in_amn);
6373 1.75.2.2 snj
6374 1.75.2.2 snj /* Start at lowest available bit-rate, AMRR will raise. */
6375 1.75.2.2 snj in->in_ni.ni_txrate = 0;
6376 1.75.2.2 snj #ifndef IEEE80211_NO_HT
6377 1.75.2.2 snj in->in_ni.ni_txmcs = 0;
6378 1.75.2.2 snj iwm_setrates(in);
6379 1.75.2.2 snj #endif
6380 1.75.2.2 snj
6381 1.75.2.2 snj callout_schedule(&sc->sc_calib_to, mstohz(500));
6382 1.75.2.2 snj iwm_led_enable(sc);
6383 1.75.2.2 snj break;
6384 1.75.2.2 snj
6385 1.75.2.2 snj default:
6386 1.75.2.2 snj break;
6387 1.75.2.2 snj }
6388 1.75.2.2 snj
6389 1.75.2.2 snj return sc->sc_newstate(ic, nstate, arg);
6390 1.75.2.2 snj }
6391 1.75.2.2 snj
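/*
 * net80211 state changes are deferred to a workqueue: iwm_newstate()
 * queues an iwm_newstate_state work item and iwm_newstate_cb() later
 * performs the actual transition at splnet via iwm_do_newstate().
 * The generation number guards against transitions queued before a
 * device reset; such stale requests are dropped, except that a
 * pending INIT transition is still passed on to net80211.
 */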
6392 1.75.2.2 snj static void
6393 1.75.2.2 snj iwm_newstate_cb(struct work *wk, void *v)
6394 1.75.2.2 snj {
6395 1.75.2.2 snj struct iwm_softc *sc = v;
6396 1.75.2.2 snj struct ieee80211com *ic = &sc->sc_ic;
6397 1.75.2.2 snj struct iwm_newstate_state *iwmns = (struct iwm_newstate_state *)wk;
6398 1.75.2.2 snj enum ieee80211_state nstate = iwmns->ns_nstate;
6399 1.75.2.2 snj int generation = iwmns->ns_generation;
6400 1.75.2.2 snj int arg = iwmns->ns_arg;
6401 1.75.2.2 snj int s;
6402 1.75.2.2 snj
6403 1.75.2.2 snj kmem_free(iwmns, sizeof(*iwmns));
6404 1.75.2.2 snj
6405 1.75.2.2 snj s = splnet();
6406 1.75.2.2 snj
6407 1.75.2.2 snj DPRINTF(("Prepare to switch state %d->%d\n", ic->ic_state, nstate));
6408 1.75.2.2 snj if (sc->sc_generation != generation) {
6409 1.75.2.2 snj DPRINTF(("newstate_cb: someone pulled the plug meanwhile\n"));
6410 1.75.2.2 snj if (nstate == IEEE80211_S_INIT) {
6411 1.75.2.2 snj DPRINTF(("newstate_cb: nstate == IEEE80211_S_INIT: "
6412 1.75.2.2 snj "calling sc_newstate()\n"));
6413 1.75.2.2 snj (void) sc->sc_newstate(ic, nstate, arg);
6414 1.75.2.2 snj }
6415 1.75.2.2 snj } else
6416 1.75.2.2 snj (void) iwm_do_newstate(ic, nstate, arg);
6417 1.75.2.2 snj
6418 1.75.2.2 snj splx(s);
6419 1.75.2.2 snj }
6420 1.75.2.2 snj
6421 1.75.2.2 snj static int
6422 1.75.2.2 snj iwm_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
6423 1.75.2.2 snj {
6424 1.75.2.2 snj struct iwm_newstate_state *iwmns;
6425 1.75.2.2 snj struct ifnet *ifp = IC2IFP(ic);
6426 1.75.2.2 snj struct iwm_softc *sc = ifp->if_softc;
6427 1.75.2.2 snj
6428 1.75.2.2 snj callout_stop(&sc->sc_calib_to);
6429 1.75.2.2 snj
6430 1.75.2.2 snj iwmns = kmem_intr_alloc(sizeof(*iwmns), KM_NOSLEEP);
6431 1.75.2.2 snj if (!iwmns) {
6432 1.75.2.2 snj DPRINTF(("%s: allocating state cb mem failed\n", DEVNAME(sc)));
6433 1.75.2.2 snj return ENOMEM;
6434 1.75.2.2 snj }
6435 1.75.2.2 snj
6436 1.75.2.2 snj iwmns->ns_nstate = nstate;
6437 1.75.2.2 snj iwmns->ns_arg = arg;
6438 1.75.2.2 snj iwmns->ns_generation = sc->sc_generation;
6439 1.75.2.2 snj
6440 1.75.2.2 snj workqueue_enqueue(sc->sc_nswq, &iwmns->ns_wk, NULL);
6441 1.75.2.2 snj
6442 1.75.2.2 snj return 0;
6443 1.75.2.2 snj }
6444 1.75.2.2 snj
6445 1.75.2.2 snj static void
6446 1.75.2.2 snj iwm_endscan(struct iwm_softc *sc)
6447 1.75.2.2 snj {
6448 1.75.2.2 snj struct ieee80211com *ic = &sc->sc_ic;
6449 1.75.2.2 snj int s;
6450 1.75.2.2 snj
6451 1.75.2.2 snj DPRINTF(("%s: scan ended\n", DEVNAME(sc)));
6452 1.75.2.2 snj
6453 1.75.2.2 snj s = splnet();
6454 1.75.2.2 snj if (ic->ic_state == IEEE80211_S_SCAN)
6455 1.75.2.2 snj ieee80211_end_scan(ic);
6456 1.75.2.2 snj splx(s);
6457 1.75.2.2 snj }
6458 1.75.2.2 snj
6459 1.75.2.2 snj /*
6460 1.75.2.2 snj * Aging and idle timeouts for the different possible scenarios
6461 1.75.2.2 snj * in default configuration
6462 1.75.2.2 snj */
6463 1.75.2.2 snj static const uint32_t
6464 1.75.2.2 snj iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
6465 1.75.2.2 snj {
6466 1.75.2.2 snj htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
6467 1.75.2.2 snj htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
6468 1.75.2.2 snj },
6469 1.75.2.2 snj {
6470 1.75.2.2 snj htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF),
6471 1.75.2.2 snj htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)
6472 1.75.2.2 snj },
6473 1.75.2.2 snj {
6474 1.75.2.2 snj htole32(IWM_SF_MCAST_AGING_TIMER_DEF),
6475 1.75.2.2 snj htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)
6476 1.75.2.2 snj },
6477 1.75.2.2 snj {
6478 1.75.2.2 snj htole32(IWM_SF_BA_AGING_TIMER_DEF),
6479 1.75.2.2 snj htole32(IWM_SF_BA_IDLE_TIMER_DEF)
6480 1.75.2.2 snj },
6481 1.75.2.2 snj {
6482 1.75.2.2 snj htole32(IWM_SF_TX_RE_AGING_TIMER_DEF),
6483 1.75.2.2 snj htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)
6484 1.75.2.2 snj },
6485 1.75.2.2 snj };
6486 1.75.2.2 snj
6487 1.75.2.2 snj /*
6488 1.75.2.2 snj * Aging and idle timeouts for the different possible scenarios
6489 1.75.2.2 snj * in single BSS MAC configuration.
6490 1.75.2.2 snj */
6491 1.75.2.2 snj static const uint32_t
6492 1.75.2.2 snj iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
6493 1.75.2.2 snj {
6494 1.75.2.2 snj htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER),
6495 1.75.2.2 snj htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)
6496 1.75.2.2 snj },
6497 1.75.2.2 snj {
6498 1.75.2.2 snj htole32(IWM_SF_AGG_UNICAST_AGING_TIMER),
6499 1.75.2.2 snj htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)
6500 1.75.2.2 snj },
6501 1.75.2.2 snj {
6502 1.75.2.2 snj htole32(IWM_SF_MCAST_AGING_TIMER),
6503 1.75.2.2 snj htole32(IWM_SF_MCAST_IDLE_TIMER)
6504 1.75.2.2 snj },
6505 1.75.2.2 snj {
6506 1.75.2.2 snj htole32(IWM_SF_BA_AGING_TIMER),
6507 1.75.2.2 snj htole32(IWM_SF_BA_IDLE_TIMER)
6508 1.75.2.2 snj },
6509 1.75.2.2 snj {
6510 1.75.2.2 snj htole32(IWM_SF_TX_RE_AGING_TIMER),
6511 1.75.2.2 snj htole32(IWM_SF_TX_RE_IDLE_TIMER)
6512 1.75.2.2 snj },
6513 1.75.2.2 snj };
6514 1.75.2.2 snj
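/*
 * Fill a smart fifo (SF) configuration command.  The long-delay
 * watermark is always the scan watermark; the full-on watermark
 * depends on the AP's antenna capabilities while associating (SISO or
 * legacy here, the MIMO variants are still "notyet"), and defaults to
 * the MIMO2 value while unassociated.
 */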
6515 1.75.2.2 snj static void
6516 1.75.2.2 snj iwm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
6517 1.75.2.2 snj struct ieee80211_node *ni)
6518 1.75.2.2 snj {
6519 1.75.2.2 snj int i, j, watermark;
6520 1.75.2.2 snj
6521 1.75.2.2 snj sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN);
6522 1.75.2.2 snj
6523 1.75.2.2 snj /*
6524 1.75.2.2 snj 	 * If we are in the association flow, check the antenna configuration
6525 1.75.2.2 snj 	 * capabilities of the AP and choose the watermark accordingly.
6526 1.75.2.2 snj */
6527 1.75.2.2 snj if (ni) {
6528 1.75.2.2 snj #ifndef IEEE80211_NO_HT
6529 1.75.2.2 snj if (ni->ni_flags & IEEE80211_NODE_HT) {
6530 1.75.2.2 snj #ifdef notyet
6531 1.75.2.2 snj if (ni->ni_rxmcs[2] != 0)
6532 1.75.2.2 snj watermark = IWM_SF_W_MARK_MIMO3;
6533 1.75.2.2 snj else if (ni->ni_rxmcs[1] != 0)
6534 1.75.2.2 snj watermark = IWM_SF_W_MARK_MIMO2;
6535 1.75.2.2 snj else
6536 1.75.2.2 snj #endif
6537 1.75.2.2 snj watermark = IWM_SF_W_MARK_SISO;
6538 1.75.2.2 snj } else
6539 1.75.2.2 snj #endif
6540 1.75.2.2 snj watermark = IWM_SF_W_MARK_LEGACY;
6541 1.75.2.2 snj /* default watermark value for unassociated mode. */
6542 1.75.2.2 snj } else {
6543 1.75.2.2 snj watermark = IWM_SF_W_MARK_MIMO2;
6544 1.75.2.2 snj }
6545 1.75.2.2 snj sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark);
6546 1.75.2.2 snj
6547 1.75.2.2 snj for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) {
6548 1.75.2.2 snj for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) {
6549 1.75.2.2 snj sf_cmd->long_delay_timeouts[i][j] =
6550 1.75.2.2 snj htole32(IWM_SF_LONG_DELAY_AGING_TIMER);
6551 1.75.2.2 snj }
6552 1.75.2.2 snj }
6553 1.75.2.2 snj
6554 1.75.2.2 snj if (ni) {
6555 1.75.2.2 snj memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
6556 1.75.2.2 snj sizeof(iwm_sf_full_timeout));
6557 1.75.2.2 snj } else {
6558 1.75.2.2 snj memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
6559 1.75.2.2 snj sizeof(iwm_sf_full_timeout_def));
6560 1.75.2.2 snj }
6561 1.75.2.2 snj }
6562 1.75.2.2 snj
6563 1.75.2.2 snj static int
6564 1.75.2.2 snj iwm_sf_config(struct iwm_softc *sc, int new_state)
6565 1.75.2.2 snj {
6566 1.75.2.2 snj struct ieee80211com *ic = &sc->sc_ic;
6567 1.75.2.2 snj struct iwm_sf_cfg_cmd sf_cmd = {
6568 1.75.2.2 snj .state = htole32(IWM_SF_FULL_ON),
6569 1.75.2.2 snj };
6570 1.75.2.2 snj
6571 1.75.2.2 snj if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
6572 1.75.2.2 snj sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF);
6573 1.75.2.2 snj
6574 1.75.2.2 snj switch (new_state) {
6575 1.75.2.2 snj case IWM_SF_UNINIT:
6576 1.75.2.2 snj case IWM_SF_INIT_OFF:
6577 1.75.2.2 snj iwm_fill_sf_command(sc, &sf_cmd, NULL);
6578 1.75.2.2 snj break;
6579 1.75.2.2 snj case IWM_SF_FULL_ON:
6580 1.75.2.2 snj iwm_fill_sf_command(sc, &sf_cmd, ic->ic_bss);
6581 1.75.2.2 snj break;
6582 1.75.2.2 snj default:
6583 1.75.2.2 snj return EINVAL;
6584 1.75.2.2 snj }
6585 1.75.2.2 snj
6586 1.75.2.2 snj return iwm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
6587 1.75.2.2 snj sizeof(sf_cmd), &sf_cmd);
6588 1.75.2.2 snj }
6589 1.75.2.2 snj
6590 1.75.2.2 snj static int
6591 1.75.2.2 snj iwm_send_bt_init_conf(struct iwm_softc *sc)
6592 1.75.2.2 snj {
6593 1.75.2.2 snj struct iwm_bt_coex_cmd bt_cmd;
6594 1.75.2.2 snj
6595 1.75.2.2 snj bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
6596 1.75.2.2 snj bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
6597 1.75.2.2 snj
6598 1.75.2.2 snj return iwm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd), &bt_cmd);
6599 1.75.2.2 snj }
6600 1.75.2.2 snj
6601 1.75.2.2 snj static bool
6602 1.75.2.2 snj iwm_is_lar_supported(struct iwm_softc *sc)
6603 1.75.2.2 snj {
6604 1.75.2.2 snj bool nvm_lar = sc->sc_nvm.lar_enabled;
6605 1.75.2.2 snj bool tlv_lar = isset(sc->sc_enabled_capa,
6606 1.75.2.2 snj IWM_UCODE_TLV_CAPA_LAR_SUPPORT);
6607 1.75.2.2 snj
6608 1.75.2.2 snj if (iwm_lar_disable)
6609 1.75.2.2 snj return false;
6610 1.75.2.2 snj
6611 1.75.2.2 snj /*
6612 1.75.2.2 snj 	 * On 8000-family devices LAR must be supported by the firmware (TLV)
6613 1.75.2.2 snj 	 * and enabled in the NVM; older devices only need the firmware TLV.
6614 1.75.2.2 snj */
6615 1.75.2.2 snj if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
6616 1.75.2.2 snj return nvm_lar && tlv_lar;
6617 1.75.2.2 snj else
6618 1.75.2.2 snj return tlv_lar;
6619 1.75.2.2 snj }
6620 1.75.2.2 snj
6621 1.75.2.2 snj static int
6622 1.75.2.2 snj iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
6623 1.75.2.2 snj {
6624 1.75.2.2 snj struct iwm_mcc_update_cmd mcc_cmd;
6625 1.75.2.2 snj struct iwm_host_cmd hcmd = {
6626 1.75.2.2 snj .id = IWM_MCC_UPDATE_CMD,
6627 1.75.2.2 snj .flags = IWM_CMD_WANT_SKB,
6628 1.75.2.2 snj .data = { &mcc_cmd },
6629 1.75.2.2 snj };
6630 1.75.2.2 snj int err;
6631 1.75.2.2 snj int resp_v2 = isset(sc->sc_enabled_capa,
6632 1.75.2.2 snj IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
6633 1.75.2.2 snj
6634 1.75.2.2 snj if (!iwm_is_lar_supported(sc)) {
6635 1.75.2.2 snj DPRINTF(("%s: no LAR support\n", __func__));
6636 1.75.2.2 snj return 0;
6637 1.75.2.2 snj }
6638 1.75.2.2 snj
6639 1.75.2.2 snj memset(&mcc_cmd, 0, sizeof(mcc_cmd));
6640 1.75.2.2 snj mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
6641 1.75.2.2 snj if (isset(sc->sc_ucode_api, IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
6642 1.75.2.2 snj isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC))
6643 1.75.2.2 snj mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
6644 1.75.2.2 snj else
6645 1.75.2.2 snj mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
6646 1.75.2.2 snj
6647 1.75.2.2 snj if (resp_v2)
6648 1.75.2.2 snj hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
6649 1.75.2.2 snj else
6650 1.75.2.2 snj hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
6651 1.75.2.2 snj
6652 1.75.2.2 snj err = iwm_send_cmd(sc, &hcmd);
6653 1.75.2.2 snj if (err)
6654 1.75.2.2 snj return err;
6655 1.75.2.2 snj
6656 1.75.2.2 snj iwm_free_resp(sc, &hcmd);
6657 1.75.2.2 snj
6658 1.75.2.2 snj return 0;
6659 1.75.2.2 snj }
6660 1.75.2.2 snj
6661 1.75.2.2 snj static void
6662 1.75.2.2 snj iwm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
6663 1.75.2.2 snj {
6664 1.75.2.2 snj struct iwm_host_cmd cmd = {
6665 1.75.2.2 snj .id = IWM_REPLY_THERMAL_MNG_BACKOFF,
6666 1.75.2.2 snj .len = { sizeof(uint32_t), },
6667 1.75.2.2 snj .data = { &backoff, },
6668 1.75.2.2 snj };
6669 1.75.2.2 snj
6670 1.75.2.2 snj iwm_send_cmd(sc, &cmd);
6671 1.75.2.2 snj }
6672 1.75.2.2 snj
6673 1.75.2.2 snj static int
6674 1.75.2.2 snj iwm_init_hw(struct iwm_softc *sc)
6675 1.75.2.2 snj {
6676 1.75.2.2 snj struct ieee80211com *ic = &sc->sc_ic;
6677 1.75.2.2 snj int err, i, ac;
6678 1.75.2.2 snj
6679 1.75.2.2 snj err = iwm_preinit(sc);
6680 1.75.2.2 snj if (err)
6681 1.75.2.2 snj return err;
6682 1.75.2.2 snj
6683 1.75.2.2 snj err = iwm_start_hw(sc);
6684 1.75.2.2 snj if (err) {
6685 1.75.2.2 snj aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
6686 1.75.2.2 snj return err;
6687 1.75.2.2 snj }
6688 1.75.2.2 snj
6689 1.75.2.2 snj err = iwm_run_init_mvm_ucode(sc, 0);
6690 1.75.2.2 snj if (err)
6691 1.75.2.2 snj return err;
6692 1.75.2.2 snj
6693 1.75.2.2 snj 	/* Stop and restart the hardware since the INIT image was just loaded. */
6694 1.75.2.2 snj iwm_stop_device(sc);
6695 1.75.2.2 snj err = iwm_start_hw(sc);
6696 1.75.2.2 snj if (err) {
6697 1.75.2.2 snj aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
6698 1.75.2.2 snj return err;
6699 1.75.2.2 snj }
6700 1.75.2.2 snj
6701 1.75.2.2 snj /* Restart, this time with the regular firmware */
6702 1.75.2.2 snj err = iwm_load_ucode_wait_alive(sc, IWM_UCODE_TYPE_REGULAR);
6703 1.75.2.2 snj if (err) {
6704 1.75.2.2 snj aprint_error_dev(sc->sc_dev,
6705 1.75.2.2 snj "could not load firmware (error %d)\n", err);
6706 1.75.2.2 snj goto err;
6707 1.75.2.2 snj }
6708 1.75.2.2 snj
6709 1.75.2.2 snj err = iwm_send_bt_init_conf(sc);
6710 1.75.2.2 snj if (err) {
6711 1.75.2.2 snj aprint_error_dev(sc->sc_dev,
6712 1.75.2.2 snj "could not init bt coex (error %d)\n", err);
6713 1.75.2.2 snj goto err;
6714 1.75.2.2 snj }
6715 1.75.2.2 snj
6716 1.75.2.2 snj err = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc));
6717 1.75.2.2 snj if (err) {
6718 1.75.2.2 snj aprint_error_dev(sc->sc_dev,
6719 1.75.2.2 snj "could not init tx ant config (error %d)\n", err);
6720 1.75.2.2 snj goto err;
6721 1.75.2.2 snj }
6722 1.75.2.2 snj
6723 1.75.2.2 snj 	/* Send phy db control command and then phy db calibration */
6724 1.75.2.2 snj err = iwm_send_phy_db_data(sc);
6725 1.75.2.2 snj if (err) {
6726 1.75.2.2 snj aprint_error_dev(sc->sc_dev,
6727 1.75.2.2 snj "could not init phy db (error %d)\n", err);
6728 1.75.2.2 snj goto err;
6729 1.75.2.2 snj }
6730 1.75.2.2 snj
6731 1.75.2.2 snj err = iwm_send_phy_cfg_cmd(sc);
6732 1.75.2.2 snj if (err) {
6733 1.75.2.2 snj aprint_error_dev(sc->sc_dev,
6734 1.75.2.2 snj "could not send phy config (error %d)\n", err);
6735 1.75.2.2 snj goto err;
6736 1.75.2.2 snj }
6737 1.75.2.2 snj
6738 1.75.2.2 snj /* Add auxiliary station for scanning */
6739 1.75.2.2 snj err = iwm_add_aux_sta(sc);
6740 1.75.2.2 snj if (err) {
6741 1.75.2.2 snj aprint_error_dev(sc->sc_dev,
6742 1.75.2.2 snj "could not add aux station (error %d)\n", err);
6743 1.75.2.2 snj goto err;
6744 1.75.2.2 snj }
6745 1.75.2.2 snj
6746 1.75.2.2 snj for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
6747 1.75.2.2 snj /*
6748 1.75.2.2 snj * The channel used here isn't relevant as it's
6749 1.75.2.2 snj * going to be overwritten in the other flows.
6750 1.75.2.2 snj * For now use the first channel we have.
6751 1.75.2.2 snj */
6752 1.75.2.2 snj sc->sc_phyctxt[i].channel = &ic->ic_channels[1];
6753 1.75.2.2 snj err = iwm_phy_ctxt_cmd(sc, &sc->sc_phyctxt[i], 1, 1,
6754 1.75.2.2 snj IWM_FW_CTXT_ACTION_ADD, 0);
6755 1.75.2.2 snj if (err) {
6756 1.75.2.2 snj aprint_error_dev(sc->sc_dev,
6757 1.75.2.2 snj "could not add phy context %d (error %d)\n",
6758 1.75.2.2 snj i, err);
6759 1.75.2.2 snj goto err;
6760 1.75.2.2 snj }
6761 1.75.2.2 snj }
6762 1.75.2.2 snj
6763 1.75.2.2 snj /* Initialize tx backoffs to the minimum. */
6764 1.75.2.2 snj if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
6765 1.75.2.2 snj iwm_tt_tx_backoff(sc, 0);
6766 1.75.2.2 snj
6767 1.75.2.2 snj err = iwm_power_update_device(sc);
6768 1.75.2.2 snj if (err) {
6769 1.75.2.2 snj aprint_error_dev(sc->sc_dev,
6770 1.75.2.2 snj 	    "could not send power command (error %d)\n", err);
6771 1.75.2.2 snj goto err;
6772 1.75.2.2 snj }
6773 1.75.2.2 snj
6774 1.75.2.2 snj err = iwm_send_update_mcc_cmd(sc, iwm_default_mcc);
6775 1.75.2.2 snj if (err) {
6776 1.75.2.2 snj aprint_error_dev(sc->sc_dev,
6777 1.75.2.2 snj "could not init LAR (error %d)\n", err);
6778 1.75.2.2 snj goto err;
6779 1.75.2.2 snj }
6780 1.75.2.2 snj
6781 1.75.2.2 snj if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
6782 1.75.2.2 snj err = iwm_config_umac_scan(sc);
6783 1.75.2.2 snj if (err) {
6784 1.75.2.2 snj aprint_error_dev(sc->sc_dev,
6785 1.75.2.2 snj "could not configure scan (error %d)\n", err);
6786 1.75.2.2 snj goto err;
6787 1.75.2.2 snj }
6788 1.75.2.2 snj }
6789 1.75.2.2 snj
6790 1.75.2.2 snj for (ac = 0; ac < WME_NUM_AC; ac++) {
6791 1.75.2.2 snj err = iwm_enable_txq(sc, IWM_STATION_ID, ac,
6792 1.75.2.2 snj iwm_ac_to_tx_fifo[ac]);
6793 1.75.2.2 snj if (err) {
6794 1.75.2.2 snj aprint_error_dev(sc->sc_dev,
6795 1.75.2.2 snj "could not enable Tx queue %d (error %d)\n",
6796 1.75.2.2 snj 			    ac, err);
6797 1.75.2.2 snj goto err;
6798 1.75.2.2 snj }
6799 1.75.2.2 snj }
6800 1.75.2.2 snj
6801 1.75.2.2 snj err = iwm_disable_beacon_filter(sc);
6802 1.75.2.2 snj if (err) {
6803 1.75.2.2 snj aprint_error_dev(sc->sc_dev,
6804 1.75.2.2 snj "could not disable beacon filter (error %d)\n", err);
6805 1.75.2.2 snj goto err;
6806 1.75.2.2 snj }
6807 1.75.2.2 snj
6808 1.75.2.2 snj return 0;
6809 1.75.2.2 snj
6810 1.75.2.2 snj err:
6811 1.75.2.2 snj iwm_stop_device(sc);
6812 1.75.2.2 snj return err;
6813 1.75.2.2 snj }
6814 1.75.2.2 snj
6815 1.75.2.2 snj /* Allow multicast from our BSSID. */
6816 1.75.2.2 snj static int
6817 1.75.2.2 snj iwm_allow_mcast(struct iwm_softc *sc)
6818 1.75.2.2 snj {
6819 1.75.2.2 snj struct ieee80211com *ic = &sc->sc_ic;
6820 1.75.2.2 snj struct ieee80211_node *ni = ic->ic_bss;
6821 1.75.2.2 snj struct iwm_mcast_filter_cmd *cmd;
6822 1.75.2.2 snj size_t size;
6823 1.75.2.2 snj int err;
6824 1.75.2.2 snj
6825 1.75.2.2 snj size = roundup(sizeof(*cmd), 4);
6826 1.75.2.2 snj cmd = kmem_intr_zalloc(size, KM_NOSLEEP);
6827 1.75.2.2 snj if (cmd == NULL)
6828 1.75.2.2 snj return ENOMEM;
6829 1.75.2.2 snj cmd->filter_own = 1;
6830 1.75.2.2 snj cmd->port_id = 0;
6831 1.75.2.2 snj cmd->count = 0;
6832 1.75.2.2 snj cmd->pass_all = 1;
6833 1.75.2.2 snj IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
6834 1.75.2.2 snj
6835 1.75.2.2 snj err = iwm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD, 0, size, cmd);
6836 1.75.2.2 snj kmem_intr_free(cmd, size);
6837 1.75.2.2 snj return err;
6838 1.75.2.2 snj }
6839 1.75.2.2 snj
6840 1.75.2.2 snj static int
6841 1.75.2.2 snj iwm_init(struct ifnet *ifp)
6842 1.75.2.2 snj {
6843 1.75.2.2 snj struct iwm_softc *sc = ifp->if_softc;
6844 1.75.2.2 snj int err;
6845 1.75.2.2 snj
6846 1.75.2.2 snj if (ISSET(sc->sc_flags, IWM_FLAG_HW_INITED))
6847 1.75.2.2 snj return 0;
6848 1.75.2.2 snj
6849 1.75.2.2 snj sc->sc_generation++;
6850 1.75.2.2 snj sc->sc_flags &= ~IWM_FLAG_STOPPED;
6851 1.75.2.2 snj
6852 1.75.2.2 snj err = iwm_init_hw(sc);
6853 1.75.2.2 snj if (err) {
6854 1.75.2.2 snj iwm_stop(ifp, 1);
6855 1.75.2.2 snj return err;
6856 1.75.2.2 snj }
6857 1.75.2.2 snj
6858 1.75.2.2 snj ifp->if_flags &= ~IFF_OACTIVE;
6859 1.75.2.2 snj ifp->if_flags |= IFF_RUNNING;
6860 1.75.2.2 snj
6861 1.75.2.2 snj ieee80211_begin_scan(&sc->sc_ic, 0);
6862 1.75.2.2 snj SET(sc->sc_flags, IWM_FLAG_HW_INITED);
6863 1.75.2.2 snj
6864 1.75.2.2 snj return 0;
6865 1.75.2.2 snj }
6866 1.75.2.2 snj
6867 1.75.2.2 snj static void
6868 1.75.2.2 snj iwm_start(struct ifnet *ifp)
6869 1.75.2.2 snj {
6870 1.75.2.2 snj struct iwm_softc *sc = ifp->if_softc;
6871 1.75.2.2 snj struct ieee80211com *ic = &sc->sc_ic;
6872 1.75.2.2 snj struct ieee80211_node *ni;
6873 1.75.2.2 snj struct ether_header *eh;
6874 1.75.2.2 snj struct mbuf *m;
6875 1.75.2.2 snj int ac;
6876 1.75.2.2 snj
6877 1.75.2.2 snj if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
6878 1.75.2.2 snj return;
6879 1.75.2.2 snj
6880 1.75.2.2 snj for (;;) {
6881 1.75.2.2 snj /* why isn't this done per-queue? */
6882 1.75.2.2 snj if (sc->qfullmsk != 0) {
6883 1.75.2.2 snj ifp->if_flags |= IFF_OACTIVE;
6884 1.75.2.2 snj break;
6885 1.75.2.2 snj }
6886 1.75.2.2 snj
6887 1.75.2.2 snj /* need to send management frames even if we're not RUNning */
6888 1.75.2.2 snj IF_DEQUEUE(&ic->ic_mgtq, m);
6889 1.75.2.2 snj if (m) {
6890 1.75.2.2 snj ni = M_GETCTX(m, struct ieee80211_node *);
6891 1.75.2.2 snj m->m_pkthdr.rcvif = NULL;
6892 1.75.2.2 snj ac = WME_AC_BE;
6893 1.75.2.2 snj goto sendit;
6894 1.75.2.2 snj }
6895 1.75.2.2 snj if (ic->ic_state != IEEE80211_S_RUN) {
6896 1.75.2.2 snj break;
6897 1.75.2.2 snj }
6898 1.75.2.2 snj
6899 1.75.2.2 snj IFQ_DEQUEUE(&ifp->if_snd, m);
6900 1.75.2.2 snj if (m == NULL)
6901 1.75.2.2 snj break;
6902 1.75.2.2 snj
6903 1.75.2.2 snj if (m->m_len < sizeof (*eh) &&
6904 1.75.2.2 snj (m = m_pullup(m, sizeof (*eh))) == NULL) {
6905 1.75.2.2 snj ifp->if_oerrors++;
6906 1.75.2.2 snj continue;
6907 1.75.2.2 snj }
6908 1.75.2.2 snj
6909 1.75.2.2 snj eh = mtod(m, struct ether_header *);
6910 1.75.2.2 snj ni = ieee80211_find_txnode(ic, eh->ether_dhost);
6911 1.75.2.2 snj if (ni == NULL) {
6912 1.75.2.2 snj m_freem(m);
6913 1.75.2.2 snj ifp->if_oerrors++;
6914 1.75.2.2 snj continue;
6915 1.75.2.2 snj }
6916 1.75.2.2 snj
6917 1.75.2.2 snj /* classify mbuf so we can find which tx ring to use */
6918 1.75.2.2 snj if (ieee80211_classify(ic, m, ni) != 0) {
6919 1.75.2.2 snj m_freem(m);
6920 1.75.2.2 snj ieee80211_free_node(ni);
6921 1.75.2.2 snj ifp->if_oerrors++;
6922 1.75.2.2 snj continue;
6923 1.75.2.2 snj }
6924 1.75.2.2 snj
6925 1.75.2.2 snj /* No QoS encapsulation for EAPOL frames. */
6926 1.75.2.2 snj ac = (eh->ether_type != htons(ETHERTYPE_PAE)) ?
6927 1.75.2.2 snj M_WME_GETAC(m) : WME_AC_BE;
6928 1.75.2.2 snj
6929 1.75.2.2 snj bpf_mtap(ifp, m);
6930 1.75.2.2 snj
6931 1.75.2.2 snj if ((m = ieee80211_encap(ic, m, ni)) == NULL) {
6932 1.75.2.2 snj ieee80211_free_node(ni);
6933 1.75.2.2 snj ifp->if_oerrors++;
6934 1.75.2.2 snj continue;
6935 1.75.2.2 snj }
6936 1.75.2.2 snj
6937 1.75.2.2 snj sendit:
6938 1.75.2.2 snj bpf_mtap3(ic->ic_rawbpf, m);
6939 1.75.2.2 snj
6940 1.75.2.2 snj if (iwm_tx(sc, m, ni, ac) != 0) {
6941 1.75.2.2 snj ieee80211_free_node(ni);
6942 1.75.2.2 snj ifp->if_oerrors++;
6943 1.75.2.2 snj continue;
6944 1.75.2.2 snj }
6945 1.75.2.2 snj
6946 1.75.2.2 snj if (ifp->if_flags & IFF_UP) {
6947 1.75.2.2 snj sc->sc_tx_timer = 15;
6948 1.75.2.2 snj ifp->if_timer = 1;
6949 1.75.2.2 snj }
6950 1.75.2.2 snj }
6951 1.75.2.2 snj }
6952 1.75.2.2 snj
6953 1.75.2.2 snj static void
6954 1.75.2.2 snj iwm_stop(struct ifnet *ifp, int disable)
6955 1.75.2.2 snj {
6956 1.75.2.2 snj struct iwm_softc *sc = ifp->if_softc;
6957 1.75.2.2 snj struct ieee80211com *ic = &sc->sc_ic;
6958 1.75.2.2 snj struct iwm_node *in = (struct iwm_node *)ic->ic_bss;
6959 1.75.2.2 snj
6960 1.75.2.2 snj sc->sc_flags &= ~IWM_FLAG_HW_INITED;
6961 1.75.2.2 snj sc->sc_flags |= IWM_FLAG_STOPPED;
6962 1.75.2.2 snj sc->sc_generation++;
6963 1.75.2.2 snj ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
6964 1.75.2.2 snj
6965 1.75.2.2 snj if (in)
6966 1.75.2.2 snj in->in_phyctxt = NULL;
6967 1.75.2.2 snj
6968 1.75.2.2 snj if (ic->ic_state != IEEE80211_S_INIT)
6969 1.75.2.2 snj ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
6970 1.75.2.2 snj
6971 1.75.2.2 snj callout_stop(&sc->sc_calib_to);
6972 1.75.2.2 snj iwm_led_blink_stop(sc);
6973 1.75.2.2 snj ifp->if_timer = sc->sc_tx_timer = 0;
6974 1.75.2.2 snj iwm_stop_device(sc);
6975 1.75.2.2 snj }
6976 1.75.2.2 snj
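/*
 * if_watchdog hook: sc_tx_timer is armed in iwm_start() and counts down
 * here once per second; if it expires, the firmware error log is dumped
 * (under IWM_DEBUG) and the device is stopped.
 */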
6977 1.75.2.2 snj static void
6978 1.75.2.2 snj iwm_watchdog(struct ifnet *ifp)
6979 1.75.2.2 snj {
6980 1.75.2.2 snj struct iwm_softc *sc = ifp->if_softc;
6981 1.75.2.2 snj
6982 1.75.2.2 snj ifp->if_timer = 0;
6983 1.75.2.2 snj if (sc->sc_tx_timer > 0) {
6984 1.75.2.2 snj if (--sc->sc_tx_timer == 0) {
6985 1.75.2.2 snj aprint_error_dev(sc->sc_dev, "device timeout\n");
6986 1.75.2.2 snj #ifdef IWM_DEBUG
6987 1.75.2.2 snj iwm_nic_error(sc);
6988 1.75.2.2 snj #endif
6989 1.75.2.2 snj ifp->if_flags &= ~IFF_UP;
6990 1.75.2.2 snj iwm_stop(ifp, 1);
6991 1.75.2.2 snj ifp->if_oerrors++;
6992 1.75.2.2 snj return;
6993 1.75.2.2 snj }
6994 1.75.2.2 snj ifp->if_timer = 1;
6995 1.75.2.2 snj }
6996 1.75.2.2 snj
6997 1.75.2.2 snj ieee80211_watchdog(&sc->sc_ic);
6998 1.75.2.2 snj }
6999 1.75.2.2 snj
7000 1.75.2.2 snj static int
7001 1.75.2.2 snj iwm_ioctl(struct ifnet *ifp, u_long cmd, void *data)
7002 1.75.2.2 snj {
7003 1.75.2.2 snj struct iwm_softc *sc = ifp->if_softc;
7004 1.75.2.2 snj struct ieee80211com *ic = &sc->sc_ic;
7005 1.75.2.2 snj const struct sockaddr *sa;
7006 1.75.2.2 snj int s, err = 0;
7007 1.75.2.2 snj
7008 1.75.2.2 snj s = splnet();
7009 1.75.2.2 snj
7010 1.75.2.2 snj switch (cmd) {
7011 1.75.2.2 snj case SIOCSIFADDR:
7012 1.75.2.2 snj ifp->if_flags |= IFF_UP;
7013 1.75.2.2 snj /* FALLTHROUGH */
7014 1.75.2.2 snj case SIOCSIFFLAGS:
7015 1.75.2.2 snj err = ifioctl_common(ifp, cmd, data);
7016 1.75.2.2 snj if (err)
7017 1.75.2.2 snj break;
7018 1.75.2.2 snj if (ifp->if_flags & IFF_UP) {
7019 1.75.2.2 snj if (!(ifp->if_flags & IFF_RUNNING)) {
7020 1.75.2.2 snj err = iwm_init(ifp);
7021 1.75.2.2 snj if (err)
7022 1.75.2.2 snj ifp->if_flags &= ~IFF_UP;
7023 1.75.2.2 snj }
7024 1.75.2.2 snj } else {
7025 1.75.2.2 snj if (ifp->if_flags & IFF_RUNNING)
7026 1.75.2.2 snj iwm_stop(ifp, 1);
7027 1.75.2.2 snj }
7028 1.75.2.2 snj break;
7029 1.75.2.2 snj
7030 1.75.2.2 snj case SIOCADDMULTI:
7031 1.75.2.2 snj case SIOCDELMULTI:
7032 1.75.2.2 snj if (!ISSET(sc->sc_flags, IWM_FLAG_ATTACHED)) {
7033 1.75.2.2 snj err = ENXIO;
7034 1.75.2.2 snj break;
7035 1.75.2.2 snj }
7036 1.75.2.2 snj sa = ifreq_getaddr(SIOCADDMULTI, (struct ifreq *)data);
7037 1.75.2.2 snj err = (cmd == SIOCADDMULTI) ?
7038 1.75.2.2 snj ether_addmulti(sa, &sc->sc_ec) :
7039 1.75.2.2 snj ether_delmulti(sa, &sc->sc_ec);
7040 1.75.2.2 snj if (err == ENETRESET)
7041 1.75.2.2 snj err = 0;
7042 1.75.2.2 snj break;
7043 1.75.2.2 snj
7044 1.75.2.2 snj default:
7045 1.75.2.2 snj if (!ISSET(sc->sc_flags, IWM_FLAG_ATTACHED)) {
7046 1.75.2.2 snj err = ether_ioctl(ifp, cmd, data);
7047 1.75.2.2 snj break;
7048 1.75.2.2 snj }
7049 1.75.2.2 snj err = ieee80211_ioctl(ic, cmd, data);
7050 1.75.2.2 snj break;
7051 1.75.2.2 snj }
7052 1.75.2.2 snj
7053 1.75.2.2 snj if (err == ENETRESET) {
7054 1.75.2.2 snj err = 0;
7055 1.75.2.2 snj if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
7056 1.75.2.2 snj (IFF_UP | IFF_RUNNING)) {
7057 1.75.2.2 snj iwm_stop(ifp, 0);
7058 1.75.2.2 snj err = iwm_init(ifp);
7059 1.75.2.2 snj }
7060 1.75.2.2 snj }
7061 1.75.2.2 snj
7062 1.75.2.2 snj splx(s);
7063 1.75.2.2 snj return err;
7064 1.75.2.2 snj }
7065 1.75.2.2 snj
7066 1.75.2.2 snj /*
7067 1.75.2.2 snj * Note: This structure is read from the device with IO accesses,
7068 1.75.2.2 snj * and the reading already does the endian conversion. As it is
7069 1.75.2.2 snj * read with uint32_t-sized accesses, any members with a different size
7070 1.75.2.2 snj * need to be ordered correctly though!
7071 1.75.2.2 snj */
7072 1.75.2.2 snj struct iwm_error_event_table {
7073 1.75.2.2 snj uint32_t valid; /* (nonzero) valid, (0) log is empty */
7074 1.75.2.2 snj uint32_t error_id; /* type of error */
7075 1.75.2.2 snj uint32_t trm_hw_status0; /* TRM HW status */
7076 1.75.2.2 snj uint32_t trm_hw_status1; /* TRM HW status */
7077 1.75.2.2 snj uint32_t blink2; /* branch link */
7078 1.75.2.2 snj uint32_t ilink1; /* interrupt link */
7079 1.75.2.2 snj uint32_t ilink2; /* interrupt link */
7080 1.75.2.2 snj uint32_t data1; /* error-specific data */
7081 1.75.2.2 snj uint32_t data2; /* error-specific data */
7082 1.75.2.2 snj uint32_t data3; /* error-specific data */
7083 1.75.2.2 snj uint32_t bcon_time; /* beacon timer */
7084 1.75.2.2 snj uint32_t tsf_low; /* network timestamp function timer */
7085 1.75.2.2 snj uint32_t tsf_hi; /* network timestamp function timer */
7086 1.75.2.2 snj uint32_t gp1; /* GP1 timer register */
7087 1.75.2.2 snj uint32_t gp2; /* GP2 timer register */
7088 1.75.2.2 snj uint32_t fw_rev_type; /* firmware revision type */
7089 1.75.2.2 snj uint32_t major; /* uCode version major */
7090 1.75.2.2 snj uint32_t minor; /* uCode version minor */
7091 1.75.2.2 snj uint32_t hw_ver; /* HW Silicon version */
7092 1.75.2.2 snj uint32_t brd_ver; /* HW board version */
7093 1.75.2.2 snj uint32_t log_pc; /* log program counter */
7094 1.75.2.2 snj uint32_t frame_ptr; /* frame pointer */
7095 1.75.2.2 snj uint32_t stack_ptr; /* stack pointer */
7096 1.75.2.2 snj uint32_t hcmd; /* last host command header */
7097 1.75.2.2 snj uint32_t isr0; /* isr status register LMPM_NIC_ISR0:
7098 1.75.2.2 snj * rxtx_flag */
7099 1.75.2.2 snj uint32_t isr1; /* isr status register LMPM_NIC_ISR1:
7100 1.75.2.2 snj * host_flag */
7101 1.75.2.2 snj uint32_t isr2; /* isr status register LMPM_NIC_ISR2:
7102 1.75.2.2 snj * enc_flag */
7103 1.75.2.2 snj uint32_t isr3; /* isr status register LMPM_NIC_ISR3:
7104 1.75.2.2 snj * time_flag */
7105 1.75.2.2 snj uint32_t isr4; /* isr status register LMPM_NIC_ISR4:
7106 1.75.2.2 snj * wico interrupt */
7107 1.75.2.2 snj uint32_t last_cmd_id; /* last HCMD id handled by the firmware */
7108 1.75.2.2 snj uint32_t wait_event; /* wait event() caller address */
7109 1.75.2.2 snj uint32_t l2p_control; /* L2pControlField */
7110 1.75.2.2 snj uint32_t l2p_duration; /* L2pDurationField */
7111 1.75.2.2 snj uint32_t l2p_mhvalid; /* L2pMhValidBits */
7112 1.75.2.2 snj uint32_t l2p_addr_match; /* L2pAddrMatchStat */
7113 1.75.2.2 snj uint32_t lmpm_pmg_sel; /* indicate which clocks are turned on
7114 1.75.2.2 snj * (LMPM_PMG_SEL) */
7115 1.75.2.2 snj 	uint32_t u_timestamp;	/* date and time of the
7116 1.75.2.2 snj 				 * compilation */
7117 1.75.2.2 snj uint32_t flow_handler; /* FH read/write pointers, RX credit */
7118 1.75.2.2 snj } __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
7119 1.75.2.2 snj
7120 1.75.2.2 snj /*
7121 1.75.2.2 snj * UMAC error struct - relevant starting from family 8000 chip.
7122 1.75.2.2 snj * Note: This structure is read from the device with IO accesses,
7123 1.75.2.2 snj * and the reading already does the endian conversion. As it is
7124 1.75.2.2 snj * read with u32-sized accesses, any members with a different size
7125 1.75.2.2 snj * need to be ordered correctly though!
7126 1.75.2.2 snj */
7127 1.75.2.2 snj struct iwm_umac_error_event_table {
7128 1.75.2.2 snj uint32_t valid; /* (nonzero) valid, (0) log is empty */
7129 1.75.2.2 snj uint32_t error_id; /* type of error */
7130 1.75.2.2 snj uint32_t blink1; /* branch link */
7131 1.75.2.2 snj uint32_t blink2; /* branch link */
7132 1.75.2.2 snj uint32_t ilink1; /* interrupt link */
7133 1.75.2.2 snj uint32_t ilink2; /* interrupt link */
7134 1.75.2.2 snj uint32_t data1; /* error-specific data */
7135 1.75.2.2 snj uint32_t data2; /* error-specific data */
7136 1.75.2.2 snj uint32_t data3; /* error-specific data */
7137 1.75.2.2 snj uint32_t umac_major;
7138 1.75.2.2 snj uint32_t umac_minor;
7139 1.75.2.2 snj uint32_t frame_pointer; /* core register 27 */
7140 1.75.2.2 snj uint32_t stack_pointer; /* core register 28 */
7141 1.75.2.2 snj uint32_t cmd_header; /* latest host cmd sent to UMAC */
7142 1.75.2.2 snj uint32_t nic_isr_pref; /* ISR status register */
7143 1.75.2.2 snj } __packed;
7144 1.75.2.2 snj
7145 1.75.2.2 snj #define ERROR_START_OFFSET (1 * sizeof(uint32_t))
7146 1.75.2.2 snj #define ERROR_ELEM_SIZE (7 * sizeof(uint32_t))
7147 1.75.2.2 snj
7148 1.75.2.2 snj #ifdef IWM_DEBUG
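/*
 * Names for the firmware SW error codes reported in the error event
 * table.  iwm_desc_lookup() falls back to the final ADVANCED_SYSASSERT
 * entry, so it must remain last.
 */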
7149 1.75.2.2 snj static const struct {
7150 1.75.2.2 snj const char *name;
7151 1.75.2.2 snj uint8_t num;
7152 1.75.2.2 snj } advanced_lookup[] = {
7153 1.75.2.2 snj { "NMI_INTERRUPT_WDG", 0x34 },
7154 1.75.2.2 snj { "SYSASSERT", 0x35 },
7155 1.75.2.2 snj { "UCODE_VERSION_MISMATCH", 0x37 },
7156 1.75.2.2 snj { "BAD_COMMAND", 0x38 },
7157 1.75.2.2 snj { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
7158 1.75.2.2 snj { "FATAL_ERROR", 0x3D },
7159 1.75.2.2 snj { "NMI_TRM_HW_ERR", 0x46 },
7160 1.75.2.2 snj { "NMI_INTERRUPT_TRM", 0x4C },
7161 1.75.2.2 snj { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
7162 1.75.2.2 snj { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
7163 1.75.2.2 snj { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
7164 1.75.2.2 snj { "NMI_INTERRUPT_HOST", 0x66 },
7165 1.75.2.2 snj { "NMI_INTERRUPT_ACTION_PT", 0x7C },
7166 1.75.2.2 snj { "NMI_INTERRUPT_UNKNOWN", 0x84 },
7167 1.75.2.2 snj { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
7168 1.75.2.2 snj { "ADVANCED_SYSASSERT", 0 },
7169 1.75.2.2 snj };
7170 1.75.2.2 snj
7171 1.75.2.2 snj static const char *
7172 1.75.2.2 snj iwm_desc_lookup(uint32_t num)
7173 1.75.2.2 snj {
7174 1.75.2.2 snj int i;
7175 1.75.2.2 snj
7176 1.75.2.2 snj for (i = 0; i < __arraycount(advanced_lookup) - 1; i++)
7177 1.75.2.2 snj if (advanced_lookup[i].num == num)
7178 1.75.2.2 snj return advanced_lookup[i].name;
7179 1.75.2.2 snj
7180 1.75.2.2 snj /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
7181 1.75.2.2 snj return advanced_lookup[i].name;
7182 1.75.2.2 snj }
7183 1.75.2.2 snj
7184 1.75.2.2 snj /*
7185 1.75.2.2 snj * Support for dumping the error log seemed like a good idea ...
7186 1.75.2.2 snj * but it's mostly hex junk and the only sensible thing is the
7187 1.75.2.2 snj * hw/ucode revision (which we know anyway). Since it's here,
7188 1.75.2.2 snj * I'll just leave it in, just in case e.g. the Intel guys want to
7189 1.75.2.2 snj * help us decipher some "ADVANCED_SYSASSERT" later.
7190 1.75.2.2 snj */
7191 1.75.2.2 snj static void
7192 1.75.2.2 snj iwm_nic_error(struct iwm_softc *sc)
7193 1.75.2.2 snj {
7194 1.75.2.2 snj struct iwm_error_event_table t;
7195 1.75.2.2 snj uint32_t base;
7196 1.75.2.2 snj
7197 1.75.2.2 snj aprint_error_dev(sc->sc_dev, "dumping device error log\n");
7198 1.75.2.2 snj base = sc->sc_uc.uc_error_event_table;
7199 1.75.2.2 snj if (base < 0x800000) {
7200 1.75.2.2 snj aprint_error_dev(sc->sc_dev,
7201 1.75.2.2 snj "Invalid error log pointer 0x%08x\n", base);
7202 1.75.2.2 snj return;
7203 1.75.2.2 snj }
7204 1.75.2.2 snj
7205 1.75.2.2 snj if (iwm_read_mem(sc, base, &t, sizeof(t)/sizeof(uint32_t))) {
7206 1.75.2.2 snj aprint_error_dev(sc->sc_dev, "reading errlog failed\n");
7207 1.75.2.2 snj return;
7208 1.75.2.2 snj }
7209 1.75.2.2 snj
7210 1.75.2.2 snj if (!t.valid) {
7211 1.75.2.2 snj aprint_error_dev(sc->sc_dev, "errlog not found, skipping\n");
7212 1.75.2.2 snj return;
7213 1.75.2.2 snj }
7214 1.75.2.2 snj
7215 1.75.2.2 snj if (ERROR_START_OFFSET <= t.valid * ERROR_ELEM_SIZE) {
7216 1.75.2.2 snj aprint_error_dev(sc->sc_dev, "Start Error Log Dump:\n");
7217 1.75.2.2 snj aprint_error_dev(sc->sc_dev, "Status: 0x%x, count: %d\n",
7218 1.75.2.2 snj sc->sc_flags, t.valid);
7219 1.75.2.2 snj }
7220 1.75.2.2 snj
7221 1.75.2.2 snj aprint_error_dev(sc->sc_dev, "%08X | %-28s\n", t.error_id,
7222 1.75.2.2 snj iwm_desc_lookup(t.error_id));
7223 1.75.2.2 snj aprint_error_dev(sc->sc_dev, "%08X | trm_hw_status0\n",
7224 1.75.2.2 snj t.trm_hw_status0);
7225 1.75.2.2 snj aprint_error_dev(sc->sc_dev, "%08X | trm_hw_status1\n",
7226 1.75.2.2 snj t.trm_hw_status1);
7227 1.75.2.2 snj aprint_error_dev(sc->sc_dev, "%08X | branchlink2\n", t.blink2);
7228 1.75.2.2 snj aprint_error_dev(sc->sc_dev, "%08X | interruptlink1\n", t.ilink1);
7229 1.75.2.2 snj aprint_error_dev(sc->sc_dev, "%08X | interruptlink2\n", t.ilink2);
7230 1.75.2.2 snj aprint_error_dev(sc->sc_dev, "%08X | data1\n", t.data1);
7231 1.75.2.2 snj aprint_error_dev(sc->sc_dev, "%08X | data2\n", t.data2);
7232 1.75.2.2 snj aprint_error_dev(sc->sc_dev, "%08X | data3\n", t.data3);
7233 1.75.2.2 snj aprint_error_dev(sc->sc_dev, "%08X | beacon time\n", t.bcon_time);
7234 1.75.2.2 snj aprint_error_dev(sc->sc_dev, "%08X | tsf low\n", t.tsf_low);
7235 1.75.2.2 snj aprint_error_dev(sc->sc_dev, "%08X | tsf hi\n", t.tsf_hi);
7236 1.75.2.2 snj aprint_error_dev(sc->sc_dev, "%08X | time gp1\n", t.gp1);
7237 1.75.2.2 snj aprint_error_dev(sc->sc_dev, "%08X | time gp2\n", t.gp2);
7238 1.75.2.2 snj aprint_error_dev(sc->sc_dev, "%08X | uCode revision type\n",
7239 1.75.2.2 snj t.fw_rev_type);
7240 1.75.2.2 snj aprint_error_dev(sc->sc_dev, "%08X | uCode version major\n",
7241 1.75.2.2 snj t.major);
7242 1.75.2.2 snj aprint_error_dev(sc->sc_dev, "%08X | uCode version minor\n",
7243 1.75.2.2 snj t.minor);
7244 1.75.2.2 snj aprint_error_dev(sc->sc_dev, "%08X | hw version\n", t.hw_ver);
7245 1.75.2.2 snj aprint_error_dev(sc->sc_dev, "%08X | board version\n", t.brd_ver);
7246 1.75.2.2 snj aprint_error_dev(sc->sc_dev, "%08X | hcmd\n", t.hcmd);
7247 1.75.2.2 snj aprint_error_dev(sc->sc_dev, "%08X | isr0\n", t.isr0);
7248 1.75.2.2 snj aprint_error_dev(sc->sc_dev, "%08X | isr1\n", t.isr1);
7249 1.75.2.2 snj aprint_error_dev(sc->sc_dev, "%08X | isr2\n", t.isr2);
7250 1.75.2.2 snj aprint_error_dev(sc->sc_dev, "%08X | isr3\n", t.isr3);
7251 1.75.2.2 snj aprint_error_dev(sc->sc_dev, "%08X | isr4\n", t.isr4);
7252 1.75.2.2 snj aprint_error_dev(sc->sc_dev, "%08X | last cmd Id\n", t.last_cmd_id);
7253 1.75.2.2 snj aprint_error_dev(sc->sc_dev, "%08X | wait_event\n", t.wait_event);
7254 1.75.2.2 snj aprint_error_dev(sc->sc_dev, "%08X | l2p_control\n", t.l2p_control);
7255 1.75.2.2 snj aprint_error_dev(sc->sc_dev, "%08X | l2p_duration\n", t.l2p_duration);
7256 1.75.2.2 snj aprint_error_dev(sc->sc_dev, "%08X | l2p_mhvalid\n", t.l2p_mhvalid);
7257 1.75.2.2 snj aprint_error_dev(sc->sc_dev, "%08X | l2p_addr_match\n",
7258 1.75.2.2 snj t.l2p_addr_match);
7259 1.75.2.2 snj aprint_error_dev(sc->sc_dev, "%08X | lmpm_pmg_sel\n", t.lmpm_pmg_sel);
7260 1.75.2.2 snj aprint_error_dev(sc->sc_dev, "%08X | timestamp\n", t.u_timestamp);
7261 1.75.2.2 snj aprint_error_dev(sc->sc_dev, "%08X | flow_handler\n", t.flow_handler);
7262 1.75.2.2 snj
7263 1.75.2.2 snj if (sc->sc_uc.uc_umac_error_event_table)
7264 1.75.2.2 snj iwm_nic_umac_error(sc);
7265 1.75.2.2 snj }
7266 1.75.2.2 snj
7267 1.75.2.2 snj static void
7268 1.75.2.2 snj iwm_nic_umac_error(struct iwm_softc *sc)
7269 1.75.2.2 snj {
7270 1.75.2.2 snj struct iwm_umac_error_event_table t;
7271 1.75.2.2 snj uint32_t base;
7272 1.75.2.2 snj
7273 1.75.2.2 snj base = sc->sc_uc.uc_umac_error_event_table;
7274 1.75.2.2 snj
7275 1.75.2.2 snj if (base < 0x800000) {
7276 1.75.2.2 snj aprint_error_dev(sc->sc_dev,
7277 1.75.2.2 snj "Invalid error log pointer 0x%08x\n", base);
7278 1.75.2.2 snj return;
7279 1.75.2.2 snj }
7280 1.75.2.2 snj
7281 1.75.2.2 snj if (iwm_read_mem(sc, base, &t, sizeof(t)/sizeof(uint32_t))) {
7282 1.75.2.2 snj aprint_error_dev(sc->sc_dev, "reading errlog failed\n");
7283 1.75.2.2 snj return;
7284 1.75.2.2 snj }
7285 1.75.2.2 snj
7286 1.75.2.2 snj if (ERROR_START_OFFSET <= t.valid * ERROR_ELEM_SIZE) {
7287 1.75.2.2 snj aprint_error_dev(sc->sc_dev, "Start UMAC Error Log Dump:\n");
7288 1.75.2.2 snj aprint_error_dev(sc->sc_dev, "Status: 0x%x, count: %d\n",
7289 1.75.2.2 snj sc->sc_flags, t.valid);
7290 1.75.2.2 snj }
7291 1.75.2.2 snj
7292 1.75.2.2 snj aprint_error_dev(sc->sc_dev, "0x%08X | %s\n", t.error_id,
7293 1.75.2.2 snj iwm_desc_lookup(t.error_id));
7294 1.75.2.2 snj aprint_error_dev(sc->sc_dev, "0x%08X | umac branchlink1\n", t.blink1);
7295 1.75.2.2 snj aprint_error_dev(sc->sc_dev, "0x%08X | umac branchlink2\n", t.blink2);
7296 1.75.2.2 snj aprint_error_dev(sc->sc_dev, "0x%08X | umac interruptlink1\n",
7297 1.75.2.2 snj t.ilink1);
7298 1.75.2.2 snj aprint_error_dev(sc->sc_dev, "0x%08X | umac interruptlink2\n",
7299 1.75.2.2 snj t.ilink2);
7300 1.75.2.2 snj aprint_error_dev(sc->sc_dev, "0x%08X | umac data1\n", t.data1);
7301 1.75.2.2 snj aprint_error_dev(sc->sc_dev, "0x%08X | umac data2\n", t.data2);
7302 1.75.2.2 snj aprint_error_dev(sc->sc_dev, "0x%08X | umac data3\n", t.data3);
7303 1.75.2.2 snj aprint_error_dev(sc->sc_dev, "0x%08X | umac major\n", t.umac_major);
7304 1.75.2.2 snj aprint_error_dev(sc->sc_dev, "0x%08X | umac minor\n", t.umac_minor);
7305 1.75.2.2 snj aprint_error_dev(sc->sc_dev, "0x%08X | frame pointer\n",
7306 1.75.2.2 snj t.frame_pointer);
7307 1.75.2.2 snj aprint_error_dev(sc->sc_dev, "0x%08X | stack pointer\n",
7308 1.75.2.2 snj t.stack_pointer);
7309 1.75.2.2 snj aprint_error_dev(sc->sc_dev, "0x%08X | last host cmd\n", t.cmd_header);
7310 1.75.2.2 snj aprint_error_dev(sc->sc_dev, "0x%08X | isr status reg\n",
7311 1.75.2.2 snj t.nic_isr_pref);
7312 1.75.2.2 snj }
7313 1.75.2.2 snj #endif
7314 1.75.2.2 snj
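/*
 * Helpers for iwm_notif_intr(): sync the part of the RX buffer that holds
 * a response payload (the data following the iwm_rx_packet header) and
 * point at it, and advance the RX ring index.
 */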
7315 1.75.2.2 snj #define SYNC_RESP_STRUCT(_var_, _pkt_) \
7316 1.75.2.2 snj do { \
7317 1.75.2.2 snj bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)), \
7318 1.75.2.2 snj sizeof(*(_var_)), BUS_DMASYNC_POSTREAD); \
7319 1.75.2.2 snj _var_ = (void *)((_pkt_)+1); \
7320 1.75.2.2 snj } while (/*CONSTCOND*/0)
7321 1.75.2.2 snj
7322 1.75.2.2 snj #define SYNC_RESP_PTR(_ptr_, _len_, _pkt_) \
7323 1.75.2.2 snj do { \
7324 1.75.2.2 snj bus_dmamap_sync(sc->sc_dmat, data->map, sizeof(*(_pkt_)), \
7325 1.75.2.2 snj 	    (_len_), BUS_DMASYNC_POSTREAD);				\
7326 1.75.2.2 snj _ptr_ = (void *)((_pkt_)+1); \
7327 1.75.2.2 snj } while (/*CONSTCOND*/0)
7328 1.75.2.2 snj
7329 1.75.2.2 snj #define ADVANCE_RXQ(sc) (sc->rxq.cur = (sc->rxq.cur + 1) % IWM_RX_RING_COUNT)
7330 1.75.2.2 snj
7331 1.75.2.2 snj static void
7332 1.75.2.2 snj iwm_notif_intr(struct iwm_softc *sc)
7333 1.75.2.2 snj {
7334 1.75.2.2 snj uint16_t hw;
7335 1.75.2.2 snj
7336 1.75.2.2 snj bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
7337 1.75.2.2 snj 0, sc->rxq.stat_dma.size, BUS_DMASYNC_POSTREAD);
7338 1.75.2.2 snj
7339 1.75.2.2 snj hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
7340 1.75.2.2 snj while (sc->rxq.cur != hw) {
7341 1.75.2.2 snj struct iwm_rx_data *data = &sc->rxq.data[sc->rxq.cur];
7342 1.75.2.2 snj struct iwm_rx_packet *pkt;
7343 1.75.2.2 snj struct iwm_cmd_response *cresp;
7344 1.75.2.2 snj int orig_qid, qid, idx, code;
7345 1.75.2.2 snj
7346 1.75.2.2 snj bus_dmamap_sync(sc->sc_dmat, data->map, 0, sizeof(*pkt),
7347 1.75.2.2 snj BUS_DMASYNC_POSTREAD);
7348 1.75.2.2 snj pkt = mtod(data->m, struct iwm_rx_packet *);
7349 1.75.2.2 snj
7350 1.75.2.2 snj orig_qid = pkt->hdr.qid;
7351 1.75.2.2 snj qid = orig_qid & ~0x80;
7352 1.75.2.2 snj idx = pkt->hdr.idx;
7353 1.75.2.2 snj
7354 1.75.2.2 snj code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
7355 1.75.2.2 snj
7356 1.75.2.2 snj /*
7357 1.75.2.2 snj * randomly get these from the firmware, no idea why.
7358 1.75.2.2 snj * they at least seem harmless, so just ignore them for now
7359 1.75.2.2 snj */
7360 1.75.2.2 snj if (__predict_false((pkt->hdr.code == 0 && qid == 0 && idx == 0)
7361 1.75.2.2 snj || pkt->len_n_flags == htole32(0x55550000))) {
7362 1.75.2.2 snj ADVANCE_RXQ(sc);
7363 1.75.2.2 snj continue;
7364 1.75.2.2 snj }
7365 1.75.2.2 snj
7366 1.75.2.2 snj switch (code) {
7367 1.75.2.2 snj case IWM_REPLY_RX_PHY_CMD:
7368 1.75.2.2 snj iwm_rx_rx_phy_cmd(sc, pkt, data);
7369 1.75.2.2 snj break;
7370 1.75.2.2 snj
7371 1.75.2.2 snj case IWM_REPLY_RX_MPDU_CMD:
7372 1.75.2.2 snj iwm_rx_rx_mpdu(sc, pkt, data);
7373 1.75.2.2 snj break;
7374 1.75.2.2 snj
7375 1.75.2.2 snj case IWM_TX_CMD:
7376 1.75.2.2 snj iwm_rx_tx_cmd(sc, pkt, data);
7377 1.75.2.2 snj break;
7378 1.75.2.2 snj
7379 1.75.2.2 snj case IWM_MISSED_BEACONS_NOTIFICATION:
7380 1.75.2.2 snj iwm_rx_missed_beacons_notif(sc, pkt, data);
7381 1.75.2.2 snj break;
7382 1.75.2.2 snj
7383 1.75.2.2 snj case IWM_MFUART_LOAD_NOTIFICATION:
7384 1.75.2.2 snj break;
7385 1.75.2.2 snj
7386 1.75.2.2 snj case IWM_ALIVE: {
7387 1.75.2.2 snj struct iwm_alive_resp_v1 *resp1;
7388 1.75.2.2 snj struct iwm_alive_resp_v2 *resp2;
7389 1.75.2.2 snj struct iwm_alive_resp_v3 *resp3;
7390 1.75.2.2 snj
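			/*
			 * The ALIVE response layout differs between firmware
			 * versions; use the payload length to tell which
			 * version was received.
			 */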
7391 1.75.2.2 snj if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp1)) {
7392 1.75.2.2 snj SYNC_RESP_STRUCT(resp1, pkt);
7393 1.75.2.2 snj sc->sc_uc.uc_error_event_table
7394 1.75.2.2 snj = le32toh(resp1->error_event_table_ptr);
7395 1.75.2.2 snj sc->sc_uc.uc_log_event_table
7396 1.75.2.2 snj = le32toh(resp1->log_event_table_ptr);
7397 1.75.2.2 snj sc->sched_base = le32toh(resp1->scd_base_ptr);
7398 1.75.2.2 snj if (resp1->status == IWM_ALIVE_STATUS_OK)
7399 1.75.2.2 snj sc->sc_uc.uc_ok = 1;
7400 1.75.2.2 snj else
7401 1.75.2.2 snj sc->sc_uc.uc_ok = 0;
7402 1.75.2.2 snj }
7403 1.75.2.2 snj if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp2)) {
7404 1.75.2.2 snj SYNC_RESP_STRUCT(resp2, pkt);
7405 1.75.2.2 snj sc->sc_uc.uc_error_event_table
7406 1.75.2.2 snj = le32toh(resp2->error_event_table_ptr);
7407 1.75.2.2 snj sc->sc_uc.uc_log_event_table
7408 1.75.2.2 snj = le32toh(resp2->log_event_table_ptr);
7409 1.75.2.2 snj sc->sched_base = le32toh(resp2->scd_base_ptr);
7410 1.75.2.2 snj sc->sc_uc.uc_umac_error_event_table
7411 1.75.2.2 snj = le32toh(resp2->error_info_addr);
7412 1.75.2.2 snj if (resp2->status == IWM_ALIVE_STATUS_OK)
7413 1.75.2.2 snj sc->sc_uc.uc_ok = 1;
7414 1.75.2.2 snj else
7415 1.75.2.2 snj sc->sc_uc.uc_ok = 0;
7416 1.75.2.2 snj }
7417 1.75.2.2 snj if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp3)) {
7418 1.75.2.2 snj SYNC_RESP_STRUCT(resp3, pkt);
7419 1.75.2.2 snj sc->sc_uc.uc_error_event_table
7420 1.75.2.2 snj = le32toh(resp3->error_event_table_ptr);
7421 1.75.2.2 snj sc->sc_uc.uc_log_event_table
7422 1.75.2.2 snj = le32toh(resp3->log_event_table_ptr);
7423 1.75.2.2 snj sc->sched_base = le32toh(resp3->scd_base_ptr);
7424 1.75.2.2 snj sc->sc_uc.uc_umac_error_event_table
7425 1.75.2.2 snj = le32toh(resp3->error_info_addr);
7426 1.75.2.2 snj if (resp3->status == IWM_ALIVE_STATUS_OK)
7427 1.75.2.2 snj sc->sc_uc.uc_ok = 1;
7428 1.75.2.2 snj else
7429 1.75.2.2 snj sc->sc_uc.uc_ok = 0;
7430 1.75.2.2 snj }
7431 1.75.2.2 snj
7432 1.75.2.2 snj sc->sc_uc.uc_intr = 1;
7433 1.75.2.2 snj wakeup(&sc->sc_uc);
7434 1.75.2.2 snj break;
7435 1.75.2.2 snj }
7436 1.75.2.2 snj
7437 1.75.2.2 snj case IWM_CALIB_RES_NOTIF_PHY_DB: {
7438 1.75.2.2 snj struct iwm_calib_res_notif_phy_db *phy_db_notif;
7439 1.75.2.2 snj SYNC_RESP_STRUCT(phy_db_notif, pkt);
7440 1.75.2.2 snj uint16_t size = le16toh(phy_db_notif->length);
7441 1.75.2.2 snj bus_dmamap_sync(sc->sc_dmat, data->map,
7442 1.75.2.2 snj sizeof(*pkt) + sizeof(*phy_db_notif),
7443 1.75.2.2 snj size, BUS_DMASYNC_POSTREAD);
7444 1.75.2.2 snj iwm_phy_db_set_section(sc, phy_db_notif, size);
7445 1.75.2.2 snj break;
7446 1.75.2.2 snj }
7447 1.75.2.2 snj
7448 1.75.2.2 snj case IWM_STATISTICS_NOTIFICATION: {
7449 1.75.2.2 snj struct iwm_notif_statistics *stats;
7450 1.75.2.2 snj SYNC_RESP_STRUCT(stats, pkt);
7451 1.75.2.2 snj memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
7452 1.75.2.2 snj sc->sc_noise = iwm_get_noise(&stats->rx.general);
7453 1.75.2.2 snj break;
7454 1.75.2.2 snj }
7455 1.75.2.2 snj
7456 1.75.2.2 snj case IWM_NVM_ACCESS_CMD:
7457 1.75.2.2 snj case IWM_MCC_UPDATE_CMD:
7458 1.75.2.2 snj if (sc->sc_wantresp == ((qid << 16) | idx)) {
7459 1.75.2.2 snj bus_dmamap_sync(sc->sc_dmat, data->map, 0,
7460 1.75.2.2 snj sizeof(sc->sc_cmd_resp),
7461 1.75.2.2 snj BUS_DMASYNC_POSTREAD);
7462 1.75.2.2 snj memcpy(sc->sc_cmd_resp,
7463 1.75.2.2 snj pkt, sizeof(sc->sc_cmd_resp));
7464 1.75.2.2 snj }
7465 1.75.2.2 snj break;
7466 1.75.2.2 snj
7467 1.75.2.2 snj case IWM_MCC_CHUB_UPDATE_CMD: {
7468 1.75.2.2 snj struct iwm_mcc_chub_notif *notif;
7469 1.75.2.2 snj SYNC_RESP_STRUCT(notif, pkt);
7470 1.75.2.2 snj
7471 1.75.2.2 snj sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
7472 1.75.2.2 snj sc->sc_fw_mcc[1] = notif->mcc & 0xff;
7473 1.75.2.2 snj sc->sc_fw_mcc[2] = '\0';
7474 1.75.2.2 snj break;
7475 1.75.2.2 snj }
7476 1.75.2.2 snj
7477 1.75.2.2 snj case IWM_DTS_MEASUREMENT_NOTIFICATION:
7478 1.75.2.2 snj case IWM_WIDE_ID(IWM_PHY_OPS_GROUP,
7479 1.75.2.2 snj IWM_DTS_MEASUREMENT_NOTIF_WIDE): {
7480 1.75.2.2 snj struct iwm_dts_measurement_notif_v1 *notif1;
7481 1.75.2.2 snj struct iwm_dts_measurement_notif_v2 *notif2;
7482 1.75.2.2 snj
7483 1.75.2.2 snj if (iwm_rx_packet_payload_len(pkt) == sizeof(*notif1)) {
7484 1.75.2.2 snj SYNC_RESP_STRUCT(notif1, pkt);
7485 1.75.2.2 snj DPRINTF(("%s: DTS temp=%d \n",
7486 1.75.2.2 snj DEVNAME(sc), notif1->temp));
7487 1.75.2.2 snj break;
7488 1.75.2.2 snj }
7489 1.75.2.2 snj if (iwm_rx_packet_payload_len(pkt) == sizeof(*notif2)) {
7490 1.75.2.2 snj SYNC_RESP_STRUCT(notif2, pkt);
7491 1.75.2.2 snj DPRINTF(("%s: DTS temp=%d \n",
7492 1.75.2.2 snj DEVNAME(sc), notif2->temp));
7493 1.75.2.2 snj break;
7494 1.75.2.2 snj }
7495 1.75.2.2 snj break;
7496 1.75.2.2 snj }
7497 1.75.2.2 snj
7498 1.75.2.2 snj case IWM_PHY_CONFIGURATION_CMD:
7499 1.75.2.2 snj case IWM_TX_ANT_CONFIGURATION_CMD:
7500 1.75.2.2 snj case IWM_ADD_STA:
7501 1.75.2.2 snj case IWM_MAC_CONTEXT_CMD:
7502 1.75.2.2 snj case IWM_REPLY_SF_CFG_CMD:
7503 1.75.2.2 snj case IWM_POWER_TABLE_CMD:
7504 1.75.2.2 snj case IWM_PHY_CONTEXT_CMD:
7505 1.75.2.2 snj case IWM_BINDING_CONTEXT_CMD:
7506 1.75.2.2 snj case IWM_TIME_EVENT_CMD:
7507 1.75.2.2 snj case IWM_SCAN_REQUEST_CMD:
7508 1.75.2.2 snj case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
7509 1.75.2.2 snj case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
7510 1.75.2.2 snj case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_ABORT_UMAC):
7511 1.75.2.2 snj case IWM_SCAN_OFFLOAD_REQUEST_CMD:
7512 1.75.2.2 snj case IWM_SCAN_OFFLOAD_ABORT_CMD:
7513 1.75.2.2 snj case IWM_REPLY_BEACON_FILTERING_CMD:
7514 1.75.2.2 snj case IWM_MAC_PM_POWER_TABLE:
7515 1.75.2.2 snj case IWM_TIME_QUOTA_CMD:
7516 1.75.2.2 snj case IWM_REMOVE_STA:
7517 1.75.2.2 snj case IWM_TXPATH_FLUSH:
7518 1.75.2.2 snj case IWM_LQ_CMD:
7519 1.75.2.2 snj case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_FW_PAGING_BLOCK_CMD):
7520 1.75.2.2 snj case IWM_BT_CONFIG:
7521 1.75.2.2 snj case IWM_REPLY_THERMAL_MNG_BACKOFF:
7522 1.75.2.2 snj SYNC_RESP_STRUCT(cresp, pkt);
7523 1.75.2.2 snj if (sc->sc_wantresp == ((qid << 16) | idx)) {
7524 1.75.2.2 snj memcpy(sc->sc_cmd_resp,
7525 1.75.2.2 snj pkt, sizeof(*pkt) + sizeof(*cresp));
7526 1.75.2.2 snj }
7527 1.75.2.2 snj break;
7528 1.75.2.2 snj
7529 1.75.2.2 snj /* ignore */
7530 1.75.2.2 snj case IWM_PHY_DB_CMD:
7531 1.75.2.2 snj break;
7532 1.75.2.2 snj
7533 1.75.2.2 snj case IWM_INIT_COMPLETE_NOTIF:
7534 1.75.2.2 snj sc->sc_init_complete = 1;
7535 1.75.2.2 snj wakeup(&sc->sc_init_complete);
7536 1.75.2.2 snj break;
7537 1.75.2.2 snj
7538 1.75.2.2 snj case IWM_SCAN_OFFLOAD_COMPLETE: {
7539 1.75.2.2 snj struct iwm_periodic_scan_complete *notif;
7540 1.75.2.2 snj SYNC_RESP_STRUCT(notif, pkt);
7541 1.75.2.2 snj break;
7542 1.75.2.2 snj }
7543 1.75.2.2 snj
7544 1.75.2.2 snj case IWM_SCAN_ITERATION_COMPLETE: {
7545 1.75.2.2 snj struct iwm_lmac_scan_complete_notif *notif;
7546 1.75.2.2 snj SYNC_RESP_STRUCT(notif, pkt);
7547 1.75.2.2 snj if (ISSET(sc->sc_flags, IWM_FLAG_SCANNING)) {
7548 1.75.2.2 snj CLR(sc->sc_flags, IWM_FLAG_SCANNING);
7549 1.75.2.2 snj iwm_endscan(sc);
7550 1.75.2.2 snj }
7551 1.75.2.2 snj break;
7552 1.75.2.2 snj }
7553 1.75.2.2 snj
7554 1.75.2.2 snj case IWM_SCAN_COMPLETE_UMAC: {
7555 1.75.2.2 snj struct iwm_umac_scan_complete *notif;
7556 1.75.2.2 snj SYNC_RESP_STRUCT(notif, pkt);
7557 1.75.2.2 snj if (ISSET(sc->sc_flags, IWM_FLAG_SCANNING)) {
7558 1.75.2.2 snj CLR(sc->sc_flags, IWM_FLAG_SCANNING);
7559 1.75.2.2 snj iwm_endscan(sc);
7560 1.75.2.2 snj }
7561 1.75.2.2 snj break;
7562 1.75.2.2 snj }
7563 1.75.2.2 snj
7564 1.75.2.2 snj case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
7565 1.75.2.2 snj struct iwm_umac_scan_iter_complete_notif *notif;
7566 1.75.2.2 snj SYNC_RESP_STRUCT(notif, pkt);
7567 1.75.2.2 snj if (ISSET(sc->sc_flags, IWM_FLAG_SCANNING)) {
7568 1.75.2.2 snj CLR(sc->sc_flags, IWM_FLAG_SCANNING);
7569 1.75.2.2 snj iwm_endscan(sc);
7570 1.75.2.2 snj }
7571 1.75.2.2 snj break;
7572 1.75.2.2 snj }
7573 1.75.2.2 snj
7574 1.75.2.2 snj case IWM_REPLY_ERROR: {
7575 1.75.2.2 snj struct iwm_error_resp *resp;
7576 1.75.2.2 snj SYNC_RESP_STRUCT(resp, pkt);
7577 1.75.2.2 snj aprint_error_dev(sc->sc_dev,
7578 1.75.2.2 snj "firmware error 0x%x, cmd 0x%x\n",
7579 1.75.2.2 snj le32toh(resp->error_type), resp->cmd_id);
7580 1.75.2.2 snj break;
7581 1.75.2.2 snj }
7582 1.75.2.2 snj
7583 1.75.2.2 snj case IWM_TIME_EVENT_NOTIFICATION: {
7584 1.75.2.2 snj struct iwm_time_event_notif *notif;
7585 1.75.2.2 snj SYNC_RESP_STRUCT(notif, pkt);
7586 1.75.2.2 snj break;
7587 1.75.2.2 snj }
7588 1.75.2.2 snj
7589 1.75.2.2 snj case IWM_DEBUG_LOG_MSG:
7590 1.75.2.2 snj break;
7591 1.75.2.2 snj
7592 1.75.2.2 snj case IWM_MCAST_FILTER_CMD:
7593 1.75.2.2 snj break;
7594 1.75.2.2 snj
7595 1.75.2.2 snj case IWM_SCD_QUEUE_CFG: {
7596 1.75.2.2 snj struct iwm_scd_txq_cfg_rsp *rsp;
7597 1.75.2.2 snj SYNC_RESP_STRUCT(rsp, pkt);
7598 1.75.2.2 snj break;
7599 1.75.2.2 snj }
7600 1.75.2.2 snj
7601 1.75.2.2 snj default:
7602 1.75.2.2 snj aprint_error_dev(sc->sc_dev,
7603 1.75.2.2 snj "unhandled firmware response 0x%x 0x%x/0x%x "
7604 1.75.2.2 snj "rx ring %d[%d]\n",
7605 1.75.2.2 snj code, pkt->hdr.code, pkt->len_n_flags, qid, idx);
7606 1.75.2.2 snj break;
7607 1.75.2.2 snj }
7608 1.75.2.2 snj
7609 1.75.2.2 snj /*
7610 1.75.2.2 snj * uCode sets bit 0x80 when it originates the notification,
7611 1.75.2.2 snj * i.e. when the notification is not a direct response to a
7612 1.75.2.2 snj * command sent by the driver.
7613 1.75.2.2 snj * For example, uCode issues IWM_REPLY_RX when it sends a
7614 1.75.2.2 snj * received frame to the driver.
7615 1.75.2.2 snj */
7616 1.75.2.2 snj if (!(orig_qid & (1 << 7))) {
7617 1.75.2.2 snj iwm_cmd_done(sc, qid, idx);
7618 1.75.2.2 snj }
7619 1.75.2.2 snj
7620 1.75.2.2 snj ADVANCE_RXQ(sc);
7621 1.75.2.2 snj }
7622 1.75.2.2 snj
7623 1.75.2.2 snj /*
7624 1.75.2.2 snj * Seems like the hardware gets upset unless we align the write by 8??
7625 1.75.2.2 snj */
7626 1.75.2.2 snj hw = (hw == 0) ? IWM_RX_RING_COUNT - 1 : hw - 1;
7627 1.75.2.2 snj IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, hw & ~7);
7628 1.75.2.2 snj }
7629 1.75.2.2 snj
7630 1.75.2.2 snj static int
7631 1.75.2.2 snj iwm_intr(void *arg)
7632 1.75.2.2 snj {
7633 1.75.2.2 snj struct iwm_softc *sc = arg;
7634 1.75.2.2 snj
7635 1.75.2.2 snj /* Disable interrupts */
7636 1.75.2.2 snj IWM_WRITE(sc, IWM_CSR_INT_MASK, 0);
7637 1.75.2.2 snj
7638 1.75.2.2 snj iwm_softintr(arg);
7639 1.75.2.2 snj return 1;
7640 1.75.2.2 snj }
7641 1.75.2.2 snj
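/*
 * Interrupt service body.  Runs with the interrupt mask cleared by
 * iwm_intr() and normally re-enables interrupts via
 * iwm_restore_interrupts() before returning; on fatal errors the device
 * is stopped and the mask is left cleared.
 */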
7642 1.75.2.2 snj static void
7643 1.75.2.2 snj iwm_softintr(void *arg)
7644 1.75.2.2 snj {
7645 1.75.2.2 snj struct iwm_softc *sc = arg;
7646 1.75.2.2 snj struct ifnet *ifp = IC2IFP(&sc->sc_ic);
7647 1.75.2.2 snj uint32_t r1, r2;
7648 1.75.2.2 snj int isperiodic = 0, s;
7649 1.75.2.2 snj
7650 1.75.2.2 snj if (__predict_true(sc->sc_flags & IWM_FLAG_USE_ICT)) {
7651 1.75.2.2 snj uint32_t *ict = sc->ict_dma.vaddr;
7652 1.75.2.2 snj int tmp;
7653 1.75.2.2 snj
7654 1.75.2.2 snj bus_dmamap_sync(sc->sc_dmat, sc->ict_dma.map,
7655 1.75.2.2 snj 0, sc->ict_dma.size, BUS_DMASYNC_POSTREAD);
7656 1.75.2.2 snj tmp = htole32(ict[sc->ict_cur]);
7657 1.75.2.2 snj if (tmp == 0)
7658 1.75.2.2 snj goto out_ena; /* Interrupt not for us. */
7659 1.75.2.2 snj
7660 1.75.2.2 snj /*
7661 1.75.2.2 snj * ok, there was something. keep plowing until we have all.
7662 1.75.2.2 snj */
7663 1.75.2.2 snj r1 = r2 = 0;
7664 1.75.2.2 snj while (tmp) {
7665 1.75.2.2 snj r1 |= tmp;
7666 1.75.2.2 snj ict[sc->ict_cur] = 0; /* Acknowledge. */
7667 1.75.2.2 snj sc->ict_cur = (sc->ict_cur + 1) % IWM_ICT_COUNT;
7668 1.75.2.2 snj tmp = htole32(ict[sc->ict_cur]);
7669 1.75.2.2 snj }
7670 1.75.2.2 snj
7671 1.75.2.2 snj bus_dmamap_sync(sc->sc_dmat, sc->ict_dma.map,
7672 1.75.2.2 snj 0, sc->ict_dma.size, BUS_DMASYNC_PREWRITE);
7673 1.75.2.2 snj
7674 1.75.2.2 snj /* this is where the fun begins. don't ask */
7675 1.75.2.2 snj if (r1 == 0xffffffff)
7676 1.75.2.2 snj r1 = 0;
7677 1.75.2.2 snj
7678 1.75.2.2 snj /* i am not expected to understand this */
7679 1.75.2.2 snj if (r1 & 0xc0000)
7680 1.75.2.2 snj r1 |= 0x8000;
7681 1.75.2.2 snj r1 = (0xff & r1) | ((0xff00 & r1) << 16);
7682 1.75.2.2 snj } else {
7683 1.75.2.2 snj r1 = IWM_READ(sc, IWM_CSR_INT);
7684 1.75.2.2 snj if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
7685 1.75.2.2 snj return; /* Hardware gone! */
7686 1.75.2.2 snj r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
7687 1.75.2.2 snj }
7688 1.75.2.2 snj if (r1 == 0 && r2 == 0) {
7689 1.75.2.2 snj goto out_ena; /* Interrupt not for us. */
7690 1.75.2.2 snj }
7691 1.75.2.2 snj
7692 1.75.2.2 snj /* Acknowledge interrupts. */
7693 1.75.2.2 snj IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
7694 1.75.2.2 snj if (__predict_false(!(sc->sc_flags & IWM_FLAG_USE_ICT)))
7695 1.75.2.2 snj IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, r2);
7696 1.75.2.2 snj
7697 1.75.2.2 snj if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
7698 1.75.2.2 snj #ifdef IWM_DEBUG
7699 1.75.2.2 snj int i;
7700 1.75.2.2 snj
7701 1.75.2.2 snj iwm_nic_error(sc);
7702 1.75.2.2 snj
7703 1.75.2.2 snj /* Dump driver status (TX and RX rings) while we're here. */
7704 1.75.2.2 snj DPRINTF(("driver status:\n"));
7705 1.75.2.2 snj for (i = 0; i < IWM_MAX_QUEUES; i++) {
7706 1.75.2.2 snj struct iwm_tx_ring *ring = &sc->txq[i];
7707 1.75.2.2 snj DPRINTF((" tx ring %2d: qid=%-2d cur=%-3d "
7708 1.75.2.2 snj "queued=%-3d\n",
7709 1.75.2.2 snj i, ring->qid, ring->cur, ring->queued));
7710 1.75.2.2 snj }
7711 1.75.2.2 snj DPRINTF((" rx ring: cur=%d\n", sc->rxq.cur));
7712 1.75.2.2 snj DPRINTF((" 802.11 state %s\n",
7713 1.75.2.2 snj ieee80211_state_name[sc->sc_ic.ic_state]));
7714 1.75.2.2 snj #endif
7715 1.75.2.2 snj
7716 1.75.2.2 snj aprint_error_dev(sc->sc_dev, "fatal firmware error\n");
7717 1.75.2.2 snj fatal:
7718 1.75.2.2 snj s = splnet();
7719 1.75.2.2 snj ifp->if_flags &= ~IFF_UP;
7720 1.75.2.2 snj iwm_stop(ifp, 1);
7721 1.75.2.2 snj splx(s);
7722 1.75.2.2 snj /* Don't restore interrupt mask */
7723 1.75.2.2 snj return;
7724 1.75.2.2 snj
7725 1.75.2.2 snj }
7726 1.75.2.2 snj
7727 1.75.2.2 snj if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
7728 1.75.2.2 snj aprint_error_dev(sc->sc_dev,
7729 1.75.2.2 snj "hardware error, stopping device\n");
7730 1.75.2.2 snj goto fatal;
7731 1.75.2.2 snj }
7732 1.75.2.2 snj
7733 1.75.2.2 snj /* firmware chunk loaded */
7734 1.75.2.2 snj if (r1 & IWM_CSR_INT_BIT_FH_TX) {
7735 1.75.2.2 snj IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_TX_MASK);
7736 1.75.2.2 snj sc->sc_fw_chunk_done = 1;
7737 1.75.2.2 snj wakeup(&sc->sc_fw);
7738 1.75.2.2 snj }
7739 1.75.2.2 snj
7740 1.75.2.2 snj if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
7741 1.75.2.2 snj if (iwm_check_rfkill(sc) && (ifp->if_flags & IFF_UP))
7742 1.75.2.2 snj goto fatal;
7743 1.75.2.2 snj }
7744 1.75.2.2 snj
7745 1.75.2.2 snj if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
7746 1.75.2.2 snj IWM_WRITE(sc, IWM_CSR_INT, IWM_CSR_INT_BIT_RX_PERIODIC);
7747 1.75.2.2 snj if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
7748 1.75.2.2 snj IWM_WRITE_1(sc,
7749 1.75.2.2 snj IWM_CSR_INT_PERIODIC_REG, IWM_CSR_INT_PERIODIC_DIS);
7750 1.75.2.2 snj isperiodic = 1;
7751 1.75.2.2 snj }
7752 1.75.2.2 snj
7753 1.75.2.2 snj if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) ||
7754 1.75.2.2 snj isperiodic) {
7755 1.75.2.2 snj IWM_WRITE(sc, IWM_CSR_FH_INT_STATUS, IWM_CSR_FH_INT_RX_MASK);
7756 1.75.2.2 snj
7757 1.75.2.2 snj iwm_notif_intr(sc);
7758 1.75.2.2 snj
7759 1.75.2.2 snj /* enable periodic interrupt, see above */
7760 1.75.2.2 snj if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) &&
7761 1.75.2.2 snj !isperiodic)
7762 1.75.2.2 snj IWM_WRITE_1(sc, IWM_CSR_INT_PERIODIC_REG,
7763 1.75.2.2 snj IWM_CSR_INT_PERIODIC_ENA);
7764 1.75.2.2 snj }
7765 1.75.2.2 snj
7766 1.75.2.2 snj out_ena:
7767 1.75.2.2 snj iwm_restore_interrupts(sc);
7768 1.75.2.2 snj }
7769 1.75.2.2 snj
7770 1.75.2.2 snj /*
7771 1.75.2.2 snj * Autoconf glue-sniffing
7772 1.75.2.2 snj */
7773 1.75.2.2 snj
7774 1.75.2.2 snj static const pci_product_id_t iwm_devices[] = {
7775 1.75.2.2 snj PCI_PRODUCT_INTEL_WIFI_LINK_7260_1,
7776 1.75.2.2 snj PCI_PRODUCT_INTEL_WIFI_LINK_7260_2,
7777 1.75.2.2 snj PCI_PRODUCT_INTEL_WIFI_LINK_3160_1,
7778 1.75.2.2 snj PCI_PRODUCT_INTEL_WIFI_LINK_3160_2,
7779 1.75.2.2 snj PCI_PRODUCT_INTEL_WIFI_LINK_7265_1,
7780 1.75.2.2 snj PCI_PRODUCT_INTEL_WIFI_LINK_7265_2,
7781 1.75.2.2 snj PCI_PRODUCT_INTEL_WIFI_LINK_3165_1,
7782 1.75.2.2 snj PCI_PRODUCT_INTEL_WIFI_LINK_3165_2,
7783 1.75.2.2 snj PCI_PRODUCT_INTEL_WIFI_LINK_8260_1,
7784 1.75.2.2 snj PCI_PRODUCT_INTEL_WIFI_LINK_8260_2,
7785 1.75.2.2 snj PCI_PRODUCT_INTEL_WIFI_LINK_4165_1,
7786 1.75.2.2 snj PCI_PRODUCT_INTEL_WIFI_LINK_4165_2,
7787 1.75.2.2 snj };
7788 1.75.2.2 snj
7789 1.75.2.2 snj static int
7790 1.75.2.2 snj iwm_match(device_t parent, cfdata_t match __unused, void *aux)
7791 1.75.2.2 snj {
7792 1.75.2.2 snj struct pci_attach_args *pa = aux;
7793 1.75.2.2 snj
7794 1.75.2.2 snj if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_INTEL)
7795 1.75.2.2 snj return 0;
7796 1.75.2.2 snj
7797 1.75.2.2 snj for (size_t i = 0; i < __arraycount(iwm_devices); i++)
7798 1.75.2.2 snj if (PCI_PRODUCT(pa->pa_id) == iwm_devices[i])
7799 1.75.2.2 snj return 1;
7800 1.75.2.2 snj
7801 1.75.2.2 snj return 0;
7802 1.75.2.2 snj }
7803 1.75.2.2 snj
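/*
 * One-time setup (subsequent calls return immediately once
 * IWM_FLAG_ATTACHED is set): bring the hardware up, run the INIT
 * firmware once to read the NVM, then attach and announce the 802.11
 * layer based on the capabilities found there.
 */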
7804 1.75.2.2 snj static int
7805 1.75.2.2 snj iwm_preinit(struct iwm_softc *sc)
7806 1.75.2.2 snj {
7807 1.75.2.2 snj struct ieee80211com *ic = &sc->sc_ic;
7808 1.75.2.2 snj int err;
7809 1.75.2.2 snj
7810 1.75.2.2 snj if (ISSET(sc->sc_flags, IWM_FLAG_ATTACHED))
7811 1.75.2.2 snj return 0;
7812 1.75.2.2 snj
7813 1.75.2.2 snj err = iwm_start_hw(sc);
7814 1.75.2.2 snj if (err) {
7815 1.75.2.2 snj aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
7816 1.75.2.2 snj return err;
7817 1.75.2.2 snj }
7818 1.75.2.2 snj
7819 1.75.2.2 snj err = iwm_run_init_mvm_ucode(sc, 1);
7820 1.75.2.2 snj iwm_stop_device(sc);
7821 1.75.2.2 snj if (err)
7822 1.75.2.2 snj return err;
7823 1.75.2.2 snj
7824 1.75.2.2 snj sc->sc_flags |= IWM_FLAG_ATTACHED;
7825 1.75.2.2 snj
7826 1.75.2.2 snj aprint_normal_dev(sc->sc_dev, "hw rev 0x%x, fw ver %s, address %s\n",
7827 1.75.2.2 snj sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK, sc->sc_fwver,
7828 1.75.2.2 snj ether_sprintf(sc->sc_nvm.hw_addr));
7829 1.75.2.2 snj
7830 1.75.2.2 snj #ifndef IEEE80211_NO_HT
7831 1.75.2.2 snj if (sc->sc_nvm.sku_cap_11n_enable)
7832 1.75.2.2 snj iwm_setup_ht_rates(sc);
7833 1.75.2.2 snj #endif
7834 1.75.2.2 snj
7835 1.75.2.2 snj /* not all hardware can do 5GHz band */
7836 1.75.2.2 snj if (sc->sc_nvm.sku_cap_band_52GHz_enable)
7837 1.75.2.2 snj ic->ic_sup_rates[IEEE80211_MODE_11A] = ieee80211_std_rateset_11a;
7838 1.75.2.2 snj
7839 1.75.2.2 snj ieee80211_ifattach(ic);
7840 1.75.2.2 snj
7841 1.75.2.2 snj ic->ic_node_alloc = iwm_node_alloc;
7842 1.75.2.2 snj
7843 1.75.2.2 snj /* Override 802.11 state transition machine. */
7844 1.75.2.2 snj sc->sc_newstate = ic->ic_newstate;
7845 1.75.2.2 snj ic->ic_newstate = iwm_newstate;
7846 1.75.2.2 snj ieee80211_media_init(ic, iwm_media_change, ieee80211_media_status);
7847 1.75.2.2 snj ieee80211_announce(ic);
7848 1.75.2.2 snj
7849 1.75.2.2 snj iwm_radiotap_attach(sc);
7850 1.75.2.2 snj
7851 1.75.2.2 snj return 0;
7852 1.75.2.2 snj }
7853 1.75.2.2 snj
7854 1.75.2.2 snj static void
7855 1.75.2.2 snj iwm_attach_hook(device_t dev)
7856 1.75.2.2 snj {
7857 1.75.2.2 snj struct iwm_softc *sc = device_private(dev);
7858 1.75.2.2 snj
7859 1.75.2.2 snj iwm_preinit(sc);
7860 1.75.2.2 snj }
7861 1.75.2.2 snj
7862 1.75.2.2 snj static void
7863 1.75.2.2 snj iwm_attach(device_t parent, device_t self, void *aux)
7864 1.75.2.2 snj {
7865 1.75.2.2 snj struct iwm_softc *sc = device_private(self);
7866 1.75.2.2 snj struct pci_attach_args *pa = aux;
7867 1.75.2.2 snj struct ieee80211com *ic = &sc->sc_ic;
7868 1.75.2.2 snj struct ifnet *ifp = &sc->sc_ec.ec_if;
7869 1.75.2.2 snj pcireg_t reg, memtype;
7870 1.75.2.2 snj pci_intr_handle_t ih;
7871 1.75.2.2 snj char intrbuf[PCI_INTRSTR_LEN];
7872 1.75.2.2 snj const char *intrstr;
7873 1.75.2.2 snj int err;
7874 1.75.2.2 snj int txq_i;
7875 1.75.2.2 snj const struct sysctlnode *node;
7876 1.75.2.2 snj
7877 1.75.2.2 snj sc->sc_dev = self;
7878 1.75.2.2 snj sc->sc_pct = pa->pa_pc;
7879 1.75.2.2 snj sc->sc_pcitag = pa->pa_tag;
7880 1.75.2.2 snj sc->sc_dmat = pa->pa_dmat;
7881 1.75.2.2 snj sc->sc_pciid = pa->pa_id;
7882 1.75.2.2 snj
7883 1.75.2.2 snj pci_aprint_devinfo(pa, NULL);
7884 1.75.2.2 snj
7885 1.75.2.2 snj if (workqueue_create(&sc->sc_nswq, "iwmns",
7886 1.75.2.2 snj iwm_newstate_cb, sc, PRI_NONE, IPL_NET, 0))
7887 1.75.2.2 snj panic("%s: could not create workqueue: newstate",
7888 1.75.2.2 snj device_xname(self));
7889 1.75.2.2 snj
7890 1.75.2.2 snj /*
7891 1.75.2.2 snj * Get the offset of the PCI Express Capability Structure in PCI
7892 1.75.2.2 snj * Configuration Space.
7893 1.75.2.2 snj */
7894 1.75.2.2 snj err = pci_get_capability(sc->sc_pct, sc->sc_pcitag,
7895 1.75.2.2 snj PCI_CAP_PCIEXPRESS, &sc->sc_cap_off, NULL);
7896 1.75.2.2 snj if (err == 0) {
7897 1.75.2.2 snj aprint_error_dev(self,
7898 1.75.2.2 snj "PCIe capability structure not found!\n");
7899 1.75.2.2 snj return;
7900 1.75.2.2 snj }
7901 1.75.2.2 snj
7902 1.75.2.2 snj /* Clear device-specific "PCI retry timeout" register (41h). */
7903 1.75.2.2 snj reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
7904 1.75.2.2 snj pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
7905 1.75.2.2 snj
7906 1.75.2.2 snj /* Enable bus-mastering */
7907 1.75.2.2 snj reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
7908 1.75.2.2 snj reg |= PCI_COMMAND_MASTER_ENABLE;
7909 1.75.2.2 snj pci_conf_write(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, reg);
7910 1.75.2.2 snj
7911 1.75.2.2 snj memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START);
7912 1.75.2.2 snj err = pci_mapreg_map(pa, PCI_MAPREG_START, memtype, 0,
7913 1.75.2.2 snj &sc->sc_st, &sc->sc_sh, NULL, &sc->sc_sz);
7914 1.75.2.2 snj if (err) {
7915 1.75.2.2 snj aprint_error_dev(self, "can't map mem space\n");
7916 1.75.2.2 snj return;
7917 1.75.2.2 snj }
7918 1.75.2.2 snj
7919 1.75.2.2 snj /* Install interrupt handler. */
7920 1.75.2.2 snj err = pci_intr_map(pa, &ih);
7921 1.75.2.2 snj if (err) {
7922 1.75.2.2 snj aprint_error_dev(self, "can't allocate interrupt\n");
7923 1.75.2.2 snj return;
7924 1.75.2.2 snj }
7925 1.75.2.2 snj reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
7926 1.75.2.2 snj CLR(reg, PCI_COMMAND_INTERRUPT_DISABLE);
7927 1.75.2.2 snj pci_conf_write(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, reg);
7928 1.75.2.2 snj intrstr = pci_intr_string(sc->sc_pct, ih, intrbuf, sizeof(intrbuf));
7929 1.75.2.2 snj sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET, iwm_intr, sc);
7930 1.75.2.2 snj if (sc->sc_ih == NULL) {
7931 1.75.2.2 snj aprint_error_dev(self, "can't establish interrupt");
7932 1.75.2.2 snj if (intrstr != NULL)
7933 1.75.2.2 snj aprint_error(" at %s", intrstr);
7934 1.75.2.2 snj aprint_error("\n");
7935 1.75.2.2 snj return;
7936 1.75.2.2 snj }
7937 1.75.2.2 snj aprint_normal_dev(self, "interrupting at %s\n", intrstr);
7938 1.75.2.2 snj
7939 1.75.2.2 snj sc->sc_wantresp = IWM_CMD_RESP_IDLE;
7940 1.75.2.2 snj
7941 1.75.2.2 snj sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
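/*
 * Per-product configuration: firmware image, device family, firmware
 * DMA segment size and interrupt workaround flags.  Note that the 3168
 * and 8265 entries below are not listed in iwm_devices[] above, so
 * iwm_match() will not currently match those devices.
 */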
7942 1.75.2.2 snj switch (PCI_PRODUCT(sc->sc_pciid)) {
7943 1.75.2.2 snj case PCI_PRODUCT_INTEL_WIFI_LINK_3160_1:
7944 1.75.2.2 snj case PCI_PRODUCT_INTEL_WIFI_LINK_3160_2:
7945 1.75.2.2 snj sc->sc_fwname = "iwlwifi-3160-17.ucode";
7946 1.75.2.2 snj sc->host_interrupt_operation_mode = 1;
7947 1.75.2.2 snj sc->apmg_wake_up_wa = 1;
7948 1.75.2.2 snj sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
7949 1.75.2.2 snj sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
7950 1.75.2.2 snj break;
7951 1.75.2.2 snj case PCI_PRODUCT_INTEL_WIFI_LINK_3165_1:
7952 1.75.2.2 snj case PCI_PRODUCT_INTEL_WIFI_LINK_3165_2:
7953 1.75.2.2 snj sc->sc_fwname = "iwlwifi-7265D-22.ucode";
7954 1.75.2.2 snj sc->host_interrupt_operation_mode = 0;
7955 1.75.2.2 snj sc->apmg_wake_up_wa = 1;
7956 1.75.2.2 snj sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
7957 1.75.2.2 snj sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
7958 1.75.2.2 snj break;
7959 1.75.2.2 snj case PCI_PRODUCT_INTEL_WIFI_LINK_3168:
7960 1.75.2.2 snj sc->sc_fwname = "iwlwifi-3168-22.ucode";
7961 1.75.2.2 snj sc->host_interrupt_operation_mode = 0;
7962 1.75.2.2 snj sc->apmg_wake_up_wa = 1;
7963 1.75.2.2 snj sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
7964 1.75.2.2 snj sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
7965 1.75.2.2 snj break;
7966 1.75.2.2 snj case PCI_PRODUCT_INTEL_WIFI_LINK_7260_1:
7967 1.75.2.2 snj case PCI_PRODUCT_INTEL_WIFI_LINK_7260_2:
7968 1.75.2.2 snj sc->sc_fwname = "iwlwifi-7260-17.ucode";
7969 1.75.2.2 snj sc->host_interrupt_operation_mode = 1;
7970 1.75.2.2 snj sc->apmg_wake_up_wa = 1;
7971 1.75.2.2 snj sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
7972 1.75.2.2 snj sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
7973 1.75.2.2 snj break;
7974 1.75.2.2 snj case PCI_PRODUCT_INTEL_WIFI_LINK_7265_1:
7975 1.75.2.2 snj case PCI_PRODUCT_INTEL_WIFI_LINK_7265_2:
7976 1.75.2.2 snj sc->sc_fwname = (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) ==
7977 1.75.2.2 snj IWM_CSR_HW_REV_TYPE_7265D ?
7978 1.75.2.2 snj "iwlwifi-7265D-22.ucode": "iwlwifi-7265-17.ucode";
7979 1.75.2.2 snj sc->host_interrupt_operation_mode = 0;
7980 1.75.2.2 snj sc->apmg_wake_up_wa = 1;
7981 1.75.2.2 snj sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
7982 1.75.2.2 snj sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
7983 1.75.2.2 snj break;
7984 1.75.2.2 snj case PCI_PRODUCT_INTEL_WIFI_LINK_8260_1:
7985 1.75.2.2 snj case PCI_PRODUCT_INTEL_WIFI_LINK_8260_2:
7986 1.75.2.2 snj case PCI_PRODUCT_INTEL_WIFI_LINK_4165_1:
7987 1.75.2.2 snj case PCI_PRODUCT_INTEL_WIFI_LINK_4165_2:
7988 1.75.2.2 snj sc->sc_fwname = "iwlwifi-8000C-22.ucode";
7989 1.75.2.2 snj sc->host_interrupt_operation_mode = 0;
7990 1.75.2.2 snj sc->apmg_wake_up_wa = 0;
7991 1.75.2.2 snj sc->sc_device_family = IWM_DEVICE_FAMILY_8000;
7992 1.75.2.2 snj sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
7993 1.75.2.2 snj break;
7994 1.75.2.2 snj case PCI_PRODUCT_INTEL_WIFI_LINK_8265:
7995 1.75.2.2 snj sc->sc_fwname = "iwlwifi-8265-22.ucode";
7996 1.75.2.2 snj sc->host_interrupt_operation_mode = 0;
7997 1.75.2.2 snj sc->apmg_wake_up_wa = 0;
7998 1.75.2.2 snj sc->sc_device_family = IWM_DEVICE_FAMILY_8000;
7999 1.75.2.2 snj sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
8000 1.75.2.2 snj break;
8001 1.75.2.2 snj default:
8002 1.75.2.2 snj aprint_error_dev(self, "unknown product %#x\n",
8003 1.75.2.2 snj PCI_PRODUCT(sc->sc_pciid));
8004 1.75.2.2 snj return;
8005 1.75.2.2 snj }
8006 1.75.2.2 snj DPRINTF(("%s: firmware=%s\n", DEVNAME(sc), sc->sc_fwname));
8007 1.75.2.2 snj
8008 1.75.2.2 snj /*
8009 1.75.2.2 snj * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV has
8010 1.75.2.2 snj * changed, and the revision step now also includes bits 0-1 (there is
8011 1.75.2.2 snj * no more "dash" value). To keep hw_rev backwards compatible, we
8012 1.75.2.2 snj * store it in the old format.
8013 1.75.2.2 snj */
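/*
 * Concretely, the statement below keeps bits 4-15 of CSR_HW_REV and
 * moves the two low-order step bits into bits 2-3, the position the
 * older layout used for its step ("dash") value.  (This assumes
 * IWM_CSR_HW_REV_STEP() extracts bits 2-3 of its argument.)
 */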
8014 1.75.2.2 snj
8015 1.75.2.2 snj if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
8016 1.75.2.2 snj sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
8017 1.75.2.2 snj (IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
8018 1.75.2.2 snj
8019 1.75.2.2 snj if (iwm_prepare_card_hw(sc) != 0) {
8020 1.75.2.2 snj aprint_error_dev(sc->sc_dev, "could not initialize hardware\n");
8021 1.75.2.2 snj return;
8022 1.75.2.2 snj }
8023 1.75.2.2 snj
8024 1.75.2.2 snj if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
8025 1.75.2.2 snj uint32_t hw_step;
8026 1.75.2.2 snj
8027 1.75.2.2 snj /*
8028 1.75.2.2 snj * To recognize C-step silicon the driver must read the chip
8029 1.75.2.2 snj * version ID located at the AUX bus MISC address.
8030 1.75.2.2 snj */
8031 1.75.2.2 snj IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
8032 1.75.2.2 snj IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
8033 1.75.2.2 snj DELAY(2);
8034 1.75.2.2 snj
8035 1.75.2.2 snj err = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
8036 1.75.2.2 snj IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
8037 1.75.2.2 snj IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
8038 1.75.2.2 snj 25000);
8039 1.75.2.2 snj if (!err) {
8040 1.75.2.2 snj aprint_error_dev(sc->sc_dev,
8041 1.75.2.2 snj "failed to wake up the nic\n");
8042 1.75.2.2 snj return;
8043 1.75.2.2 snj }
8044 1.75.2.2 snj
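/*
 * With the NIC awake, set IWM_ENABLE_WFPM in the WFPM control register
 * and read the hardware step out of the AUX MISC register; a step
 * value of 0x3 indicates C-step silicon and is folded back into
 * sc_hw_rev.
 */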
8045 1.75.2.2 snj if (iwm_nic_lock(sc)) {
8046 1.75.2.2 snj hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
8047 1.75.2.2 snj hw_step |= IWM_ENABLE_WFPM;
8048 1.75.2.2 snj iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
8049 1.75.2.2 snj hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
8050 1.75.2.2 snj hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
8051 1.75.2.2 snj if (hw_step == 0x3)
8052 1.75.2.2 snj sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
8053 1.75.2.2 snj (IWM_SILICON_C_STEP << 2);
8054 1.75.2.2 snj iwm_nic_unlock(sc);
8055 1.75.2.2 snj } else {
8056 1.75.2.2 snj aprint_error_dev(sc->sc_dev,
8057 1.75.2.2 snj "failed to lock the nic\n");
8058 1.75.2.2 snj return;
8059 1.75.2.2 snj }
8060 1.75.2.2 snj }
8061 1.75.2.2 snj
8062 1.75.2.2 snj /*
8063 1.75.2.2 snj * Allocate DMA memory for firmware transfers.
8064 1.75.2.2 snj * Must be aligned on a 16-byte boundary.
8065 1.75.2.2 snj */
8066 1.75.2.2 snj err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma, sc->sc_fwdmasegsz,
8067 1.75.2.2 snj 16);
8068 1.75.2.2 snj if (err) {
8069 1.75.2.2 snj aprint_error_dev(sc->sc_dev,
8070 1.75.2.2 snj "could not allocate memory for firmware\n");
8071 1.75.2.2 snj return;
8072 1.75.2.2 snj }
8073 1.75.2.2 snj
8074 1.75.2.2 snj /* Allocate "Keep Warm" page, used internally by the card. */
8075 1.75.2.2 snj err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
8076 1.75.2.2 snj if (err) {
8077 1.75.2.2 snj aprint_error_dev(sc->sc_dev,
8078 1.75.2.2 snj "could not allocate keep warm page\n");
8079 1.75.2.2 snj goto fail1;
8080 1.75.2.2 snj }
8081 1.75.2.2 snj
8082 1.75.2.2 snj /* Allocate interrupt cause table (ICT). */
8083 1.75.2.2 snj err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma, IWM_ICT_SIZE,
8084 1.75.2.2 snj 1 << IWM_ICT_PADDR_SHIFT);
8085 1.75.2.2 snj if (err) {
8086 1.75.2.2 snj aprint_error_dev(sc->sc_dev, "could not allocate ICT table\n");
8087 1.75.2.2 snj goto fail2;
8088 1.75.2.2 snj }
8089 1.75.2.2 snj
8090 1.75.2.2 snj /* TX scheduler rings must be aligned on a 1KB boundary. */
8091 1.75.2.2 snj err = iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
8092 1.75.2.2 snj __arraycount(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
8093 1.75.2.2 snj if (err) {
8094 1.75.2.2 snj aprint_error_dev(sc->sc_dev,
8095 1.75.2.2 snj "could not allocate TX scheduler rings\n");
8096 1.75.2.2 snj goto fail3;
8097 1.75.2.2 snj }
8098 1.75.2.2 snj
8099 1.75.2.2 snj for (txq_i = 0; txq_i < __arraycount(sc->txq); txq_i++) {
8100 1.75.2.2 snj err = iwm_alloc_tx_ring(sc, &sc->txq[txq_i], txq_i);
8101 1.75.2.2 snj if (err) {
8102 1.75.2.2 snj aprint_error_dev(sc->sc_dev,
8103 1.75.2.2 snj "could not allocate TX ring %d\n", txq_i);
8104 1.75.2.2 snj goto fail4;
8105 1.75.2.2 snj }
8106 1.75.2.2 snj }
8107 1.75.2.2 snj
8108 1.75.2.2 snj err = iwm_alloc_rx_ring(sc, &sc->rxq);
8109 1.75.2.2 snj if (err) {
8110 1.75.2.2 snj aprint_error_dev(sc->sc_dev, "could not allocate RX ring\n");
8111 1.75.2.2 snj goto fail4;
8112 1.75.2.2 snj }
8113 1.75.2.2 snj
8114 1.75.2.2 snj /* Clear pending interrupts. */
8115 1.75.2.2 snj IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
8116 1.75.2.2 snj
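/*
 * Per-device sysctl subtree: hw.iwm.<device> with a writable
 * "fw_loaded" node (see iwm_sysctl_fw_loaded_handler below).
 */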
8117 1.75.2.2 snj if ((err = sysctl_createv(&sc->sc_clog, 0, NULL, &node,
8118 1.75.2.2 snj 0, CTLTYPE_NODE, device_xname(sc->sc_dev),
8119 1.75.2.2 snj SYSCTL_DESCR("iwm per-controller controls"),
8120 1.75.2.2 snj NULL, 0, NULL, 0,
8121 1.75.2.2 snj CTL_HW, iwm_sysctl_root_num, CTL_CREATE,
8122 1.75.2.2 snj CTL_EOL)) != 0) {
8123 1.75.2.2 snj aprint_error_dev(sc->sc_dev,
8124 1.75.2.2 snj "couldn't create iwm per-controller sysctl node\n");
8125 1.75.2.2 snj }
8126 1.75.2.2 snj if (err == 0) {
8127 1.75.2.2 snj int iwm_nodenum = node->sysctl_num;
8128 1.75.2.2 snj
8129 1.75.2.2 snj /* Reload firmware sysctl node */
8130 1.75.2.2 snj if ((err = sysctl_createv(&sc->sc_clog, 0, NULL, &node,
8131 1.75.2.2 snj CTLFLAG_READWRITE, CTLTYPE_INT, "fw_loaded",
8132 1.75.2.2 snj SYSCTL_DESCR("Reload firmware"),
8133 1.75.2.2 snj iwm_sysctl_fw_loaded_handler, 0, (void *)sc, 0,
8134 1.75.2.2 snj CTL_HW, iwm_sysctl_root_num, iwm_nodenum, CTL_CREATE,
8135 1.75.2.2 snj CTL_EOL)) != 0) {
8136 1.75.2.2 snj aprint_error_dev(sc->sc_dev,
8137 1.75.2.2 snj "couldn't create fw_loaded sysctl node\n");
8138 1.75.2.2 snj }
8139 1.75.2.2 snj }
8140 1.75.2.2 snj
8141 1.75.2.2 snj /*
8142 1.75.2.2 snj * Attach interface
8143 1.75.2.2 snj */
8144 1.75.2.2 snj ic->ic_ifp = ifp;
8145 1.75.2.2 snj ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */
8146 1.75.2.2 snj ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */
8147 1.75.2.2 snj ic->ic_state = IEEE80211_S_INIT;
8148 1.75.2.2 snj
8149 1.75.2.2 snj /* Set device capabilities. */
8150 1.75.2.2 snj ic->ic_caps =
8151 1.75.2.2 snj IEEE80211_C_WEP | /* WEP */
8152 1.75.2.2 snj IEEE80211_C_WPA | /* 802.11i */
8153 1.75.2.2 snj #ifdef notyet
8154 1.75.2.2 snj IEEE80211_C_SCANALL | /* device scans all channels at once */
8155 1.75.2.2 snj IEEE80211_C_SCANALLBAND | /* device scans all bands at once */
8156 1.75.2.2 snj #endif
8157 1.75.2.2 snj IEEE80211_C_SHSLOT | /* short slot time supported */
8158 1.75.2.2 snj IEEE80211_C_SHPREAMBLE; /* short preamble supported */
8159 1.75.2.2 snj
8160 1.75.2.2 snj #ifndef IEEE80211_NO_HT
8161 1.75.2.2 snj ic->ic_htcaps = IEEE80211_HTCAP_SGI20;
8162 1.75.2.2 snj ic->ic_htxcaps = 0;
8163 1.75.2.2 snj ic->ic_txbfcaps = 0;
8164 1.75.2.2 snj ic->ic_aselcaps = 0;
8165 1.75.2.2 snj ic->ic_ampdu_params = (IEEE80211_AMPDU_PARAM_SS_4 | 0x3 /* 64k */);
8166 1.75.2.2 snj #endif
8167 1.75.2.2 snj
8168 1.75.2.2 snj /* all hardware can do 2.4GHz band */
8169 1.75.2.2 snj ic->ic_sup_rates[IEEE80211_MODE_11B] = ieee80211_std_rateset_11b;
8170 1.75.2.2 snj ic->ic_sup_rates[IEEE80211_MODE_11G] = ieee80211_std_rateset_11g;
8171 1.75.2.2 snj
8172 1.75.2.2 snj for (int i = 0; i < __arraycount(sc->sc_phyctxt); i++) {
8173 1.75.2.2 snj sc->sc_phyctxt[i].id = i;
8174 1.75.2.2 snj }
8175 1.75.2.2 snj
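/* Adaptive multi-rate retry (AMRR) success thresholds. */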
8176 1.75.2.2 snj sc->sc_amrr.amrr_min_success_threshold = 1;
8177 1.75.2.2 snj sc->sc_amrr.amrr_max_success_threshold = 15;
8178 1.75.2.2 snj
8179 1.75.2.2 snj /* IBSS channel undefined for now. */
8180 1.75.2.2 snj ic->ic_ibss_chan = &ic->ic_channels[1];
8181 1.75.2.2 snj
8182 1.75.2.2 snj #if 0
8183 1.75.2.2 snj ic->ic_max_rssi = IWM_MAX_DBM - IWM_MIN_DBM;
8184 1.75.2.2 snj #endif
8185 1.75.2.2 snj
8186 1.75.2.2 snj ifp->if_softc = sc;
8187 1.75.2.2 snj ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
8188 1.75.2.2 snj ifp->if_init = iwm_init;
8189 1.75.2.2 snj ifp->if_stop = iwm_stop;
8190 1.75.2.2 snj ifp->if_ioctl = iwm_ioctl;
8191 1.75.2.2 snj ifp->if_start = iwm_start;
8192 1.75.2.2 snj ifp->if_watchdog = iwm_watchdog;
8193 1.75.2.2 snj IFQ_SET_READY(&ifp->if_snd);
8194 1.75.2.2 snj memcpy(ifp->if_xname, DEVNAME(sc), IFNAMSIZ);
8195 1.75.2.2 snj
8196 1.75.2.2 snj if_attach(ifp);
8197 1.75.2.2 snj #if 0
8198 1.75.2.2 snj ieee80211_ifattach(ic);
8199 1.75.2.2 snj #else
8200 1.75.2.2 snj ether_ifattach(ifp, ic->ic_myaddr); /* XXX */
8201 1.75.2.2 snj #endif
8202 1.75.2.2 snj
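/* Periodic calibration timer and LED blink timer. */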
8203 1.75.2.2 snj callout_init(&sc->sc_calib_to, 0);
8204 1.75.2.2 snj callout_setfunc(&sc->sc_calib_to, iwm_calib_timeout, sc);
8205 1.75.2.2 snj callout_init(&sc->sc_led_blink_to, 0);
8206 1.75.2.2 snj callout_setfunc(&sc->sc_led_blink_to, iwm_led_blink_timeout, sc);
8207 1.75.2.2 snj #ifndef IEEE80211_NO_HT
8208 1.75.2.2 snj if (workqueue_create(&sc->sc_setratewq, "iwmsr",
8209 1.75.2.2 snj iwm_setrates_task, sc, PRI_NONE, IPL_NET, 0))
8210 1.75.2.2 snj panic("%s: could not create workqueue: setrates",
8211 1.75.2.2 snj device_xname(self));
8212 1.75.2.2 snj if (workqueue_create(&sc->sc_bawq, "iwmba",
8213 1.75.2.2 snj iwm_ba_task, sc, PRI_NONE, IPL_NET, 0))
8214 1.75.2.2 snj panic("%s: could not create workqueue: blockack",
8215 1.75.2.2 snj device_xname(self));
8216 1.75.2.2 snj if (workqueue_create(&sc->sc_htprowq, "iwmhtpro",
8217 1.75.2.2 snj iwm_htprot_task, sc, PRI_NONE, IPL_NET, 0))
8218 1.75.2.2 snj panic("%s: could not create workqueue: htprot",
8219 1.75.2.2 snj device_xname(self));
8220 1.75.2.2 snj #endif
8221 1.75.2.2 snj
8222 1.75.2.2 snj if (pmf_device_register(self, NULL, NULL))
8223 1.75.2.2 snj pmf_class_network_register(self, ifp);
8224 1.75.2.2 snj else
8225 1.75.2.2 snj aprint_error_dev(self, "couldn't establish power handler\n");
8226 1.75.2.2 snj
8227 1.75.2.2 snj /*
8228 1.75.2.2 snj * We can't do normal attach before the file system is mounted
8229 1.75.2.2 snj * because we cannot read the MAC address without loading the
8230 1.75.2.2 snj * firmware from disk. So we postpone until mountroot is done.
8231 1.75.2.2 snj * Note that a full driver unload/load cycle (or a reboot) is
8232 1.75.2.2 snj * required if the firmware is not present when the hook runs.
8234 1.75.2.2 snj */
8235 1.75.2.2 snj config_mountroot(self, iwm_attach_hook);
8236 1.75.2.2 snj
8237 1.75.2.2 snj return;
8238 1.75.2.2 snj
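/*
 * Error unwind: release the TX rings, the RX ring and the DMA areas
 * that were already allocated, in reverse order of allocation.
 */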
8239 1.75.2.2 snj fail4: while (--txq_i >= 0)
8240 1.75.2.2 snj iwm_free_tx_ring(sc, &sc->txq[txq_i]);
8241 1.75.2.2 snj iwm_free_rx_ring(sc, &sc->rxq);
8242 1.75.2.2 snj iwm_dma_contig_free(&sc->sched_dma);
8243 1.75.2.2 snj fail3: if (sc->ict_dma.vaddr != NULL)
8244 1.75.2.2 snj iwm_dma_contig_free(&sc->ict_dma);
8245 1.75.2.2 snj fail2: iwm_dma_contig_free(&sc->kw_dma);
8246 1.75.2.2 snj fail1: iwm_dma_contig_free(&sc->fw_dma);
8247 1.75.2.2 snj }
8248 1.75.2.2 snj
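/*
 * Attach the radiotap BPF tap used for 802.11 monitoring and
 * pre-initialize the constant parts of the RX and TX radiotap headers.
 */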
8249 1.75.2.2 snj void
8250 1.75.2.2 snj iwm_radiotap_attach(struct iwm_softc *sc)
8251 1.75.2.2 snj {
8252 1.75.2.2 snj struct ifnet *ifp = IC2IFP(&sc->sc_ic);
8253 1.75.2.2 snj
8254 1.75.2.2 snj bpf_attach2(ifp, DLT_IEEE802_11_RADIO,
8255 1.75.2.2 snj sizeof (struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN,
8256 1.75.2.2 snj &sc->sc_drvbpf);
8257 1.75.2.2 snj
8258 1.75.2.2 snj sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
8259 1.75.2.2 snj sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
8260 1.75.2.2 snj sc->sc_rxtap.wr_ihdr.it_present = htole32(IWM_RX_RADIOTAP_PRESENT);
8261 1.75.2.2 snj
8262 1.75.2.2 snj sc->sc_txtap_len = sizeof sc->sc_txtapu;
8263 1.75.2.2 snj sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
8264 1.75.2.2 snj sc->sc_txtap.wt_ihdr.it_present = htole32(IWM_TX_RADIOTAP_PRESENT);
8265 1.75.2.2 snj }
8266 1.75.2.2 snj
8267 1.75.2.2 snj #if 0
8268 1.75.2.2 snj static void
8269 1.75.2.2 snj iwm_init_task(void *arg)
8270 1.75.2.2 snj {
8271 1.75.2.2 snj struct iwm_softc *sc = arg;
8272 1.75.2.2 snj struct ifnet *ifp = IC2IFP(&sc->sc_ic);
8273 1.75.2.2 snj int s;
8274 1.75.2.2 snj
8275 1.75.2.2 snj rw_enter_write(&sc->ioctl_rwl);
8276 1.75.2.2 snj s = splnet();
8277 1.75.2.2 snj
8278 1.75.2.2 snj iwm_stop(ifp, 0);
8279 1.75.2.2 snj if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) == IFF_UP)
8280 1.75.2.2 snj iwm_init(ifp);
8281 1.75.2.2 snj
8282 1.75.2.2 snj splx(s);
8283 1.75.2.2 snj rw_exit(&sc->ioctl_rwl);
8284 1.75.2.2 snj }
8285 1.75.2.2 snj
8286 1.75.2.2 snj static void
8287 1.75.2.2 snj iwm_wakeup(struct iwm_softc *sc)
8288 1.75.2.2 snj {
8289 1.75.2.2 snj pcireg_t reg;
8290 1.75.2.2 snj
8291 1.75.2.2 snj /* Clear device-specific "PCI retry timeout" register (41h). */
8292 1.75.2.2 snj reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
8293 1.75.2.2 snj pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
8294 1.75.2.2 snj
8295 1.75.2.2 snj iwm_init_task(sc);
8296 1.75.2.2 snj }
8297 1.75.2.2 snj
8298 1.75.2.2 snj static int
8299 1.75.2.2 snj iwm_activate(device_t self, enum devact act)
8300 1.75.2.2 snj {
8301 1.75.2.2 snj struct iwm_softc *sc = device_private(self);
8302 1.75.2.2 snj struct ifnet *ifp = IC2IFP(&sc->sc_ic);
8303 1.75.2.2 snj
8304 1.75.2.2 snj switch (act) {
8305 1.75.2.2 snj case DVACT_DEACTIVATE:
8306 1.75.2.2 snj if (ifp->if_flags & IFF_RUNNING)
8307 1.75.2.2 snj iwm_stop(ifp, 0);
8308 1.75.2.2 snj return 0;
8309 1.75.2.2 snj default:
8310 1.75.2.2 snj return EOPNOTSUPP;
8311 1.75.2.2 snj }
8312 1.75.2.2 snj }
8313 1.75.2.2 snj #endif
8314 1.75.2.2 snj
8315 1.75.2.2 snj CFATTACH_DECL_NEW(iwm, sizeof(struct iwm_softc), iwm_match, iwm_attach,
8316 1.75.2.2 snj NULL, NULL);
8317 1.75.2.2 snj
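/*
 * Handler for the per-device "fw_loaded" sysctl: reading it reports
 * whether firmware is currently loaded; writing 0 clears
 * IWM_FLAG_FW_LOADED so the firmware file is read again on the next
 * init.
 */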
8318 1.75.2.2 snj static int
8319 1.75.2.2 snj iwm_sysctl_fw_loaded_handler(SYSCTLFN_ARGS)
8320 1.75.2.2 snj {
8321 1.75.2.2 snj struct sysctlnode node;
8322 1.75.2.2 snj struct iwm_softc *sc;
8323 1.75.2.2 snj int err, t;
8324 1.75.2.2 snj
8325 1.75.2.2 snj node = *rnode;
8326 1.75.2.2 snj sc = node.sysctl_data;
8327 1.75.2.2 snj t = ISSET(sc->sc_flags, IWM_FLAG_FW_LOADED) ? 1 : 0;
8328 1.75.2.2 snj node.sysctl_data = &t;
8329 1.75.2.2 snj err = sysctl_lookup(SYSCTLFN_CALL(&node));
8330 1.75.2.2 snj if (err || newp == NULL)
8331 1.75.2.2 snj return err;
8332 1.75.2.2 snj
8333 1.75.2.2 snj if (t == 0)
8334 1.75.2.2 snj CLR(sc->sc_flags, IWM_FLAG_FW_LOADED);
8335 1.75.2.2 snj return 0;
8336 1.75.2.2 snj }
8337 1.75.2.2 snj
8338 1.75.2.2 snj SYSCTL_SETUP(sysctl_iwm, "sysctl iwm(4) subtree setup")
8339 1.75.2.2 snj {
8340 1.75.2.2 snj const struct sysctlnode *rnode;
8341 1.75.2.2 snj #ifdef IWM_DEBUG
8342 1.75.2.2 snj const struct sysctlnode *cnode;
8343 1.75.2.2 snj #endif /* IWM_DEBUG */
8344 1.75.2.2 snj int rc;
8345 1.75.2.2 snj
8346 1.75.2.2 snj if ((rc = sysctl_createv(clog, 0, NULL, &rnode,
8347 1.75.2.2 snj CTLFLAG_PERMANENT, CTLTYPE_NODE, "iwm",
8348 1.75.2.2 snj SYSCTL_DESCR("iwm global controls"),
8349 1.75.2.2 snj NULL, 0, NULL, 0, CTL_HW, CTL_CREATE, CTL_EOL)) != 0)
8350 1.75.2.2 snj goto err;
8351 1.75.2.2 snj
8352 1.75.2.2 snj iwm_sysctl_root_num = rnode->sysctl_num;
8353 1.75.2.2 snj
8354 1.75.2.2 snj #ifdef IWM_DEBUG
8355 1.75.2.2 snj /* control debugging printfs */
8356 1.75.2.2 snj if ((rc = sysctl_createv(clog, 0, &rnode, &cnode,
8357 1.75.2.2 snj CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT,
8358 1.75.2.2 snj "debug", SYSCTL_DESCR("Enable debugging output"),
8359 1.75.2.2 snj NULL, 0, &iwm_debug, 0, CTL_CREATE, CTL_EOL)) != 0)
8360 1.75.2.2 snj goto err;
8361 1.75.2.2 snj #endif /* IWM_DEBUG */
8362 1.75.2.2 snj
8363 1.75.2.2 snj return;
8364 1.75.2.2 snj
8365 1.75.2.2 snj err:
8366 1.75.2.2 snj aprint_error("%s: sysctl_createv failed (rc = %d)\n", __func__, rc);
8367 1.75.2.2 snj }
8368