1 /* $NetBSD: if_iwn.c,v 1.62.6.2 2014/08/20 00:03:42 tls Exp $ */
2 /* $OpenBSD: if_iwn.c,v 1.119 2013/05/29 23:16:52 yuo Exp $ */
3
4 /*-
5 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini (at) free.fr>
6 *
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 */
19
20 /*
21 * Driver for Intel WiFi Link 4965 and 1000/5000/6000 Series 802.11 network
22 * adapters.
23 */
24 #include <sys/cdefs.h>
25 __KERNEL_RCSID(0, "$NetBSD: if_iwn.c,v 1.62.6.2 2014/08/20 00:03:42 tls Exp $");
26
27 #define IWN_USE_RBUF /* Use local storage for RX */
28 #undef IWN_HWCRYPTO /* XXX does not even compile yet */
29
30 #include <sys/param.h>
31 #include <sys/sockio.h>
32 #include <sys/proc.h>
33 #include <sys/mbuf.h>
34 #include <sys/kernel.h>
35 #include <sys/socket.h>
36 #include <sys/systm.h>
37 #include <sys/malloc.h>
38 #ifdef notyetMODULE
39 #include <sys/module.h>
40 #endif
41 #include <sys/mutex.h>
42 #include <sys/conf.h>
43 #include <sys/kauth.h>
44 #include <sys/callout.h>
45
46 #include <dev/sysmon/sysmonvar.h>
47
48 #include <sys/bus.h>
49 #include <machine/endian.h>
50 #include <machine/intr.h>
51
52 #include <dev/pci/pcireg.h>
53 #include <dev/pci/pcivar.h>
54 #include <dev/pci/pcidevs.h>
55
56 #include <net/bpf.h>
57 #include <net/if.h>
58 #include <net/if_arp.h>
59 #include <net/if_dl.h>
60 #include <net/if_media.h>
61 #include <net/if_types.h>
62
63 #include <netinet/in.h>
64 #include <netinet/in_systm.h>
65 #include <netinet/in_var.h>
66 #include <net/if_ether.h>
67 #include <netinet/ip.h>
68
69 #include <net80211/ieee80211_var.h>
70 #include <net80211/ieee80211_amrr.h>
71 #include <net80211/ieee80211_radiotap.h>
72
73 #include <dev/firmload.h>
74
75 #include <dev/pci/if_iwnreg.h>
76 #include <dev/pci/if_iwnvar.h>
77
78 static const pci_product_id_t iwn_devices[] = {
79 PCI_PRODUCT_INTEL_WIFI_LINK_1030_1,
80 PCI_PRODUCT_INTEL_WIFI_LINK_1030_2,
81 PCI_PRODUCT_INTEL_WIFI_LINK_4965_1,
82 PCI_PRODUCT_INTEL_WIFI_LINK_4965_2,
83 PCI_PRODUCT_INTEL_WIFI_LINK_4965_3,
84 PCI_PRODUCT_INTEL_WIFI_LINK_4965_4,
85 PCI_PRODUCT_INTEL_WIFI_LINK_5100_1,
86 PCI_PRODUCT_INTEL_WIFI_LINK_5100_2,
87 PCI_PRODUCT_INTEL_WIFI_LINK_5150_1,
88 PCI_PRODUCT_INTEL_WIFI_LINK_5150_2,
89 PCI_PRODUCT_INTEL_WIFI_LINK_5300_1,
90 PCI_PRODUCT_INTEL_WIFI_LINK_5300_2,
91 PCI_PRODUCT_INTEL_WIFI_LINK_5350_1,
92 PCI_PRODUCT_INTEL_WIFI_LINK_5350_2,
93 PCI_PRODUCT_INTEL_WIFI_LINK_1000_1,
94 PCI_PRODUCT_INTEL_WIFI_LINK_1000_2,
95 PCI_PRODUCT_INTEL_WIFI_LINK_6000_3X3_1,
96 PCI_PRODUCT_INTEL_WIFI_LINK_6000_3X3_2,
97 PCI_PRODUCT_INTEL_WIFI_LINK_6000_IPA_1,
98 PCI_PRODUCT_INTEL_WIFI_LINK_6000_IPA_2,
99 PCI_PRODUCT_INTEL_WIFI_LINK_6050_2X2_1,
100 PCI_PRODUCT_INTEL_WIFI_LINK_6050_2X2_2,
101 PCI_PRODUCT_INTEL_WIFI_LINK_6005_2X2_1,
102 PCI_PRODUCT_INTEL_WIFI_LINK_6005_2X2_2,
103 PCI_PRODUCT_INTEL_WIFI_LINK_6230_1,
104 PCI_PRODUCT_INTEL_WIFI_LINK_6230_2,
105 PCI_PRODUCT_INTEL_WIFI_LINK_6235,
106 };
107
108 /*
109 * Supported rates for 802.11a/b/g modes (in units of 500 kb/s).
110 */
111 static const struct ieee80211_rateset iwn_rateset_11a =
112 { 8, { 12, 18, 24, 36, 48, 72, 96, 108 } };
113
114 static const struct ieee80211_rateset iwn_rateset_11b =
115 { 4, { 2, 4, 11, 22 } };
116
117 static const struct ieee80211_rateset iwn_rateset_11g =
118 { 12, { 2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108 } };
119
120 static int iwn_match(device_t , struct cfdata *, void *);
121 static void iwn_attach(device_t , device_t , void *);
122 static int iwn4965_attach(struct iwn_softc *, pci_product_id_t);
123 static int iwn5000_attach(struct iwn_softc *, pci_product_id_t);
124 static void iwn_radiotap_attach(struct iwn_softc *);
125 static int iwn_detach(device_t , int);
126 #if 0
127 static void iwn_power(int, void *);
128 #endif
129 static bool iwn_resume(device_t, const pmf_qual_t *);
130 static int iwn_nic_lock(struct iwn_softc *);
131 static int iwn_eeprom_lock(struct iwn_softc *);
132 static int iwn_init_otprom(struct iwn_softc *);
133 static int iwn_read_prom_data(struct iwn_softc *, uint32_t, void *, int);
134 static int iwn_dma_contig_alloc(bus_dma_tag_t, struct iwn_dma_info *,
135 void **, bus_size_t, bus_size_t);
136 static void iwn_dma_contig_free(struct iwn_dma_info *);
137 static int iwn_alloc_sched(struct iwn_softc *);
138 static void iwn_free_sched(struct iwn_softc *);
139 static int iwn_alloc_kw(struct iwn_softc *);
140 static void iwn_free_kw(struct iwn_softc *);
141 static int iwn_alloc_ict(struct iwn_softc *);
142 static void iwn_free_ict(struct iwn_softc *);
143 static int iwn_alloc_fwmem(struct iwn_softc *);
144 static void iwn_free_fwmem(struct iwn_softc *);
145 static int iwn_alloc_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
146 static void iwn_reset_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
147 static void iwn_free_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
148 static int iwn_alloc_tx_ring(struct iwn_softc *, struct iwn_tx_ring *,
149 int);
150 static void iwn_reset_tx_ring(struct iwn_softc *, struct iwn_tx_ring *);
151 static void iwn_free_tx_ring(struct iwn_softc *, struct iwn_tx_ring *);
152 static void iwn5000_ict_reset(struct iwn_softc *);
153 static int iwn_read_eeprom(struct iwn_softc *);
154 static void iwn4965_read_eeprom(struct iwn_softc *);
155
156 #ifdef IWN_DEBUG
157 static void iwn4965_print_power_group(struct iwn_softc *, int);
158 #endif
159 static void iwn5000_read_eeprom(struct iwn_softc *);
160 static void iwn_read_eeprom_channels(struct iwn_softc *, int, uint32_t);
161 static void iwn_read_eeprom_enhinfo(struct iwn_softc *);
162 static struct ieee80211_node *iwn_node_alloc(struct ieee80211_node_table *);
163 static void iwn_newassoc(struct ieee80211_node *, int);
164 static int iwn_media_change(struct ifnet *);
165 static int iwn_newstate(struct ieee80211com *, enum ieee80211_state, int);
166 static void iwn_iter_func(void *, struct ieee80211_node *);
167 static void iwn_calib_timeout(void *);
168 static void iwn_rx_phy(struct iwn_softc *, struct iwn_rx_desc *,
169 struct iwn_rx_data *);
170 static void iwn_rx_done(struct iwn_softc *, struct iwn_rx_desc *,
171 struct iwn_rx_data *);
172 #ifndef IEEE80211_NO_HT
173 static void iwn_rx_compressed_ba(struct iwn_softc *, struct iwn_rx_desc *,
174 struct iwn_rx_data *);
175 #endif
176 static void iwn5000_rx_calib_results(struct iwn_softc *,
177 struct iwn_rx_desc *, struct iwn_rx_data *);
178 static void iwn_rx_statistics(struct iwn_softc *, struct iwn_rx_desc *,
179 struct iwn_rx_data *);
180 static void iwn4965_tx_done(struct iwn_softc *, struct iwn_rx_desc *,
181 struct iwn_rx_data *);
182 static void iwn5000_tx_done(struct iwn_softc *, struct iwn_rx_desc *,
183 struct iwn_rx_data *);
184 static void iwn_tx_done(struct iwn_softc *, struct iwn_rx_desc *, int,
185 uint8_t);
186 static void iwn_cmd_done(struct iwn_softc *, struct iwn_rx_desc *);
187 static void iwn_notif_intr(struct iwn_softc *);
188 static void iwn_wakeup_intr(struct iwn_softc *);
189 static void iwn_fatal_intr(struct iwn_softc *);
190 static int iwn_intr(void *);
191 static void iwn4965_update_sched(struct iwn_softc *, int, int, uint8_t,
192 uint16_t);
193 static void iwn5000_update_sched(struct iwn_softc *, int, int, uint8_t,
194 uint16_t);
195 #ifdef notyet
196 static void iwn5000_reset_sched(struct iwn_softc *, int, int);
197 #endif
198 static int iwn_tx(struct iwn_softc *, struct mbuf *,
199 struct ieee80211_node *, int);
200 static void iwn_start(struct ifnet *);
201 static void iwn_watchdog(struct ifnet *);
202 static int iwn_ioctl(struct ifnet *, u_long, void *);
203 static int iwn_cmd(struct iwn_softc *, int, const void *, int, int);
204 static int iwn4965_add_node(struct iwn_softc *, struct iwn_node_info *,
205 int);
206 static int iwn5000_add_node(struct iwn_softc *, struct iwn_node_info *,
207 int);
208 static int iwn_set_link_quality(struct iwn_softc *,
209 struct ieee80211_node *);
210 static int iwn_add_broadcast_node(struct iwn_softc *, int);
211 static void iwn_set_led(struct iwn_softc *, uint8_t, uint8_t, uint8_t);
212 static int iwn_set_critical_temp(struct iwn_softc *);
213 static int iwn_set_timing(struct iwn_softc *, struct ieee80211_node *);
214 static void iwn4965_power_calibration(struct iwn_softc *, int);
215 static int iwn4965_set_txpower(struct iwn_softc *, int);
216 static int iwn5000_set_txpower(struct iwn_softc *, int);
217 static int iwn4965_get_rssi(const struct iwn_rx_stat *);
218 static int iwn5000_get_rssi(const struct iwn_rx_stat *);
219 static int iwn_get_noise(const struct iwn_rx_general_stats *);
220 static int iwn4965_get_temperature(struct iwn_softc *);
221 static int iwn5000_get_temperature(struct iwn_softc *);
222 static int iwn_init_sensitivity(struct iwn_softc *);
223 static void iwn_collect_noise(struct iwn_softc *,
224 const struct iwn_rx_general_stats *);
225 static int iwn4965_init_gains(struct iwn_softc *);
226 static int iwn5000_init_gains(struct iwn_softc *);
227 static int iwn4965_set_gains(struct iwn_softc *);
228 static int iwn5000_set_gains(struct iwn_softc *);
229 static void iwn_tune_sensitivity(struct iwn_softc *,
230 const struct iwn_rx_stats *);
231 static int iwn_send_sensitivity(struct iwn_softc *);
232 static int iwn_set_pslevel(struct iwn_softc *, int, int, int);
233 static int iwn5000_runtime_calib(struct iwn_softc *);
234
235 static int iwn_config_bt_coex_bluetooth(struct iwn_softc *);
236 static int iwn_config_bt_coex_prio_table(struct iwn_softc *);
237 static int iwn_config_bt_coex_adv1(struct iwn_softc *);
238
239 static int iwn_config(struct iwn_softc *);
240 static int iwn_scan(struct iwn_softc *, uint16_t);
241 static int iwn_auth(struct iwn_softc *);
242 static int iwn_run(struct iwn_softc *);
243 #ifdef IWN_HWCRYPTO
244 static int iwn_set_key(struct ieee80211com *, struct ieee80211_node *,
245 struct ieee80211_key *);
246 static void iwn_delete_key(struct ieee80211com *, struct ieee80211_node *,
247 struct ieee80211_key *);
248 #endif
249 static int iwn_wme_update(struct ieee80211com *);
250 #ifndef IEEE80211_NO_HT
251 static int iwn_ampdu_rx_start(struct ieee80211com *,
252 struct ieee80211_node *, uint8_t);
253 static void iwn_ampdu_rx_stop(struct ieee80211com *,
254 struct ieee80211_node *, uint8_t);
255 static int iwn_ampdu_tx_start(struct ieee80211com *,
256 struct ieee80211_node *, uint8_t);
257 static void iwn_ampdu_tx_stop(struct ieee80211com *,
258 struct ieee80211_node *, uint8_t);
259 static void iwn4965_ampdu_tx_start(struct iwn_softc *,
260 struct ieee80211_node *, uint8_t, uint16_t);
261 static void iwn4965_ampdu_tx_stop(struct iwn_softc *,
262 uint8_t, uint16_t);
263 static void iwn5000_ampdu_tx_start(struct iwn_softc *,
264 struct ieee80211_node *, uint8_t, uint16_t);
265 static void iwn5000_ampdu_tx_stop(struct iwn_softc *,
266 uint8_t, uint16_t);
267 #endif
268 static int iwn5000_query_calibration(struct iwn_softc *);
269 static int iwn5000_send_calibration(struct iwn_softc *);
270 static int iwn5000_send_wimax_coex(struct iwn_softc *);
271 static int iwn4965_post_alive(struct iwn_softc *);
272 static int iwn5000_post_alive(struct iwn_softc *);
273 static int iwn4965_load_bootcode(struct iwn_softc *, const uint8_t *,
274 int);
275 static int iwn4965_load_firmware(struct iwn_softc *);
276 static int iwn5000_load_firmware_section(struct iwn_softc *, uint32_t,
277 const uint8_t *, int);
278 static int iwn5000_load_firmware(struct iwn_softc *);
279 static int iwn_read_firmware_leg(struct iwn_softc *,
280 struct iwn_fw_info *);
281 static int iwn_read_firmware_tlv(struct iwn_softc *,
282 struct iwn_fw_info *, uint16_t);
283 static int iwn_read_firmware(struct iwn_softc *);
284 static int iwn_clock_wait(struct iwn_softc *);
285 static int iwn_apm_init(struct iwn_softc *);
286 static void iwn_apm_stop_master(struct iwn_softc *);
287 static void iwn_apm_stop(struct iwn_softc *);
288 static int iwn4965_nic_config(struct iwn_softc *);
289 static int iwn5000_nic_config(struct iwn_softc *);
290 static int iwn_hw_prepare(struct iwn_softc *);
291 static int iwn_hw_init(struct iwn_softc *);
292 static void iwn_hw_stop(struct iwn_softc *);
293 static int iwn_init(struct ifnet *);
294 static void iwn_stop(struct ifnet *, int);
295
296 /* XXX MCLGETI alternative */
297 static struct mbuf *MCLGETIalt(struct iwn_softc *, int,
298 struct ifnet *, u_int);
299 #ifdef IWN_USE_RBUF
300 static struct iwn_rbuf *iwn_alloc_rbuf(struct iwn_softc *);
301 static void iwn_free_rbuf(struct mbuf *, void *, size_t, void *);
302 static int iwn_alloc_rpool(struct iwn_softc *);
303 static void iwn_free_rpool(struct iwn_softc *);
304 #endif
305
306 /* XXX needed by iwn_scan */
307 static u_int8_t *ieee80211_add_ssid(u_int8_t *, const u_int8_t *, u_int);
308 static u_int8_t *ieee80211_add_rates(u_int8_t *,
309 const struct ieee80211_rateset *);
310 static u_int8_t *ieee80211_add_xrates(u_int8_t *,
311 const struct ieee80211_rateset *);
312
313 static void iwn_fix_channel(struct ieee80211com *, struct mbuf *);
314
315 #ifdef IWN_DEBUG
316 #define DPRINTF(x) do { if (iwn_debug > 0) printf x; } while (0)
317 #define DPRINTFN(n, x) do { if (iwn_debug >= (n)) printf x; } while (0)
318 int iwn_debug = 0;
319 #else
320 #define DPRINTF(x)
321 #define DPRINTFN(n, x)
322 #endif
323
324 CFATTACH_DECL_NEW(iwn, sizeof(struct iwn_softc), iwn_match, iwn_attach,
325 iwn_detach, NULL);
326
327 static int
328 iwn_match(device_t parent, cfdata_t match __unused, void *aux)
329 {
330 struct pci_attach_args *pa = aux;
331 size_t i;
332
333 if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_INTEL)
334 return 0;
335
336 for (i = 0; i < __arraycount(iwn_devices); i++)
337 if (PCI_PRODUCT(pa->pa_id) == iwn_devices[i])
338 return 1;
339
340 return 0;
341 }
342
343 static void
344 iwn_attach(device_t parent __unused, device_t self, void *aux)
345 {
346 struct iwn_softc *sc = device_private(self);
347 struct ieee80211com *ic = &sc->sc_ic;
348 struct ifnet *ifp = &sc->sc_ec.ec_if;
349 struct pci_attach_args *pa = aux;
350 const char *intrstr;
351 pci_intr_handle_t ih;
352 pcireg_t memtype, reg;
353 int i, error;
354 char intrbuf[PCI_INTRSTR_LEN];
355
356 sc->sc_dev = self;
357 sc->sc_pct = pa->pa_pc;
358 sc->sc_pcitag = pa->pa_tag;
359 sc->sc_dmat = pa->pa_dmat;
360 mutex_init(&sc->sc_mtx, MUTEX_DEFAULT, IPL_NONE);
361
362 callout_init(&sc->calib_to, 0);
363 callout_setfunc(&sc->calib_to, iwn_calib_timeout, sc);
364
365 pci_aprint_devinfo(pa, NULL);
366
367 /*
368 * Get the offset of the PCI Express Capability Structure in PCI
369 * Configuration Space.
370 */
371 error = pci_get_capability(sc->sc_pct, sc->sc_pcitag,
372 PCI_CAP_PCIEXPRESS, &sc->sc_cap_off, NULL);
373 if (error == 0) {
374 aprint_error(": PCIe capability structure not found!\n");
375 return;
376 }
377
378 /* Clear device-specific "PCI retry timeout" register (41h). */
379 reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
380 if (reg & 0xff00)
381 pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
382
383 /* Enable bus-mastering and hardware bug workaround. */
384 /* XXX verify the bus-mastering is really needed (not in OpenBSD) */
385 reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
386 reg |= PCI_COMMAND_MASTER_ENABLE;
387 if (reg & PCI_COMMAND_INTERRUPT_DISABLE) {
388 DPRINTF(("PCIe INTx Disable set\n"));
389 reg &= ~PCI_COMMAND_INTERRUPT_DISABLE;
390 }
391 pci_conf_write(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, reg);
392
393 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, IWN_PCI_BAR0);
394 error = pci_mapreg_map(pa, IWN_PCI_BAR0, memtype, 0, &sc->sc_st,
395 &sc->sc_sh, NULL, &sc->sc_sz);
396 if (error != 0) {
397 aprint_error(": can't map mem space\n");
398 return;
399 }
400
401 /* Install interrupt handler. */
402 if (pci_intr_map(pa, &ih) != 0) {
403 aprint_error(": can't map interrupt\n");
404 return;
405 }
406 intrstr = pci_intr_string(sc->sc_pct, ih, intrbuf, sizeof(intrbuf));
407 sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET, iwn_intr, sc);
408 if (sc->sc_ih == NULL) {
409 aprint_error(": can't establish interrupt");
410 if (intrstr != NULL)
411 aprint_error(" at %s", intrstr);
412 aprint_error("\n");
413 return;
414 }
415 aprint_normal_dev(self, "interrupting at %s\n", intrstr);
416
417 /* Read hardware revision and attach. */
418 sc->hw_type = (IWN_READ(sc, IWN_HW_REV) >> 4) & 0xf;
419 if (sc->hw_type == IWN_HW_REV_TYPE_4965)
420 error = iwn4965_attach(sc, PCI_PRODUCT(pa->pa_id));
421 else
422 error = iwn5000_attach(sc, PCI_PRODUCT(pa->pa_id));
423 if (error != 0) {
424 aprint_error(": could not attach device\n");
425 return;
426 }
427
428 if ((error = iwn_hw_prepare(sc)) != 0) {
429 aprint_error(": hardware not ready\n");
430 return;
431 }
432
433 /* Read MAC address, channels, etc from EEPROM. */
434 if ((error = iwn_read_eeprom(sc)) != 0) {
435 aprint_error(": could not read EEPROM\n");
436 return;
437 }
438
439 /* Allocate DMA memory for firmware transfers. */
440 if ((error = iwn_alloc_fwmem(sc)) != 0) {
441 aprint_error(": could not allocate memory for firmware\n");
442 return;
443 }
444
445 /* Allocate "Keep Warm" page. */
446 if ((error = iwn_alloc_kw(sc)) != 0) {
447 aprint_error(": could not allocate keep warm page\n");
448 goto fail1;
449 }
450
451 /* Allocate ICT table for 5000 Series. */
452 if (sc->hw_type != IWN_HW_REV_TYPE_4965 &&
453 (error = iwn_alloc_ict(sc)) != 0) {
454 aprint_error(": could not allocate ICT table\n");
455 goto fail2;
456 }
457
458 /* Allocate TX scheduler "rings". */
459 if ((error = iwn_alloc_sched(sc)) != 0) {
460 aprint_error(": could not allocate TX scheduler rings\n");
461 goto fail3;
462 }
463
464 #ifdef IWN_USE_RBUF
465 /* Allocate RX buffers. */
466 if ((error = iwn_alloc_rpool(sc)) != 0) {
467 aprint_error_dev(self, "could not allocate RX buffers\n");
468 goto fail3;
469 }
470 #endif
471
472 /* Allocate TX rings (16 on 4965AGN, 20 on >=5000). */
473 for (i = 0; i < sc->ntxqs; i++) {
474 if ((error = iwn_alloc_tx_ring(sc, &sc->txq[i], i)) != 0) {
475 aprint_error(": could not allocate TX ring %d\n", i);
476 goto fail4;
477 }
478 }
479
480 /* Allocate RX ring. */
481 if ((error = iwn_alloc_rx_ring(sc, &sc->rxq)) != 0) {
482 aprint_error(": could not allocate RX ring\n");
483 goto fail4;
484 }
485
486 /* Clear pending interrupts. */
487 IWN_WRITE(sc, IWN_INT, 0xffffffff);
488
489 /* Count the number of available chains. */
490 sc->ntxchains =
491 ((sc->txchainmask >> 2) & 1) +
492 ((sc->txchainmask >> 1) & 1) +
493 ((sc->txchainmask >> 0) & 1);
494 sc->nrxchains =
495 ((sc->rxchainmask >> 2) & 1) +
496 ((sc->rxchainmask >> 1) & 1) +
497 ((sc->rxchainmask >> 0) & 1);
498 aprint_normal_dev(self, "MIMO %dT%dR, %.4s, address %s\n",
499 sc->ntxchains, sc->nrxchains, sc->eeprom_domain,
500 ether_sprintf(ic->ic_myaddr));
501
502 ic->ic_ifp = ifp;
503 ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */
504 ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */
505 ic->ic_state = IEEE80211_S_INIT;
506
507 /* Set device capabilities. */
508 /* XXX OpenBSD has IEEE80211_C_WEP, IEEE80211_C_RSN,
509 * and IEEE80211_C_PMGT too. */
510 ic->ic_caps =
511 IEEE80211_C_IBSS | /* IBSS mode support */
512 IEEE80211_C_WPA | /* 802.11i */
513 IEEE80211_C_MONITOR | /* monitor mode supported */
514 IEEE80211_C_TXPMGT | /* tx power management */
515 IEEE80211_C_SHSLOT | /* short slot time supported */
516 IEEE80211_C_SHPREAMBLE | /* short preamble supported */
517 IEEE80211_C_WME; /* 802.11e */
518
519 #ifndef IEEE80211_NO_HT
520 if (sc->sc_flags & IWN_FLAG_HAS_11N) {
521 /* Set HT capabilities. */
522 ic->ic_htcaps =
523 #if IWN_RBUF_SIZE == 8192
524 IEEE80211_HTCAP_AMSDU7935 |
525 #endif
526 IEEE80211_HTCAP_CBW20_40 |
527 IEEE80211_HTCAP_SGI20 |
528 IEEE80211_HTCAP_SGI40;
529 if (sc->hw_type != IWN_HW_REV_TYPE_4965)
530 ic->ic_htcaps |= IEEE80211_HTCAP_GF;
531 if (sc->hw_type == IWN_HW_REV_TYPE_6050)
532 ic->ic_htcaps |= IEEE80211_HTCAP_SMPS_DYN;
533 else
534 ic->ic_htcaps |= IEEE80211_HTCAP_SMPS_DIS;
535 }
536 #endif /* !IEEE80211_NO_HT */
537
538 /* Set supported legacy rates. */
539 ic->ic_sup_rates[IEEE80211_MODE_11B] = iwn_rateset_11b;
540 ic->ic_sup_rates[IEEE80211_MODE_11G] = iwn_rateset_11g;
541 if (sc->sc_flags & IWN_FLAG_HAS_5GHZ) {
542 ic->ic_sup_rates[IEEE80211_MODE_11A] = iwn_rateset_11a;
543 }
544 #ifndef IEEE80211_NO_HT
545 if (sc->sc_flags & IWN_FLAG_HAS_11N) {
546 /* Set supported HT rates. */
547 ic->ic_sup_mcs[0] = 0xff; /* MCS 0-7 */
548 if (sc->nrxchains > 1)
549 ic->ic_sup_mcs[1] = 0xff; /* MCS 8-15 */
550 if (sc->nrxchains > 2)
551 ic->ic_sup_mcs[2] = 0xff; /* MCS 16-23 */
552 }
553 #endif
554
555 /* IBSS channel undefined for now. */
556 ic->ic_ibss_chan = &ic->ic_channels[0];
557
558 ifp->if_softc = sc;
559 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
560 ifp->if_init = iwn_init;
561 ifp->if_ioctl = iwn_ioctl;
562 ifp->if_start = iwn_start;
563 ifp->if_stop = iwn_stop;
564 ifp->if_watchdog = iwn_watchdog;
565 IFQ_SET_READY(&ifp->if_snd);
566 memcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
567
568 if_attach(ifp);
569 ieee80211_ifattach(ic);
570 ic->ic_node_alloc = iwn_node_alloc;
571 ic->ic_newassoc = iwn_newassoc;
572 #ifdef IWN_HWCRYPTO
573 ic->ic_crypto.cs_key_set = iwn_set_key;
574 ic->ic_crypto.cs_key_delete = iwn_delete_key;
575 #endif
576 ic->ic_wme.wme_update = iwn_wme_update;
577 #ifndef IEEE80211_NO_HT
578 ic->ic_ampdu_rx_start = iwn_ampdu_rx_start;
579 ic->ic_ampdu_rx_stop = iwn_ampdu_rx_stop;
580 ic->ic_ampdu_tx_start = iwn_ampdu_tx_start;
581 ic->ic_ampdu_tx_stop = iwn_ampdu_tx_stop;
582 #endif
583
584 /* Override 802.11 state transition machine. */
585 sc->sc_newstate = ic->ic_newstate;
586 ic->ic_newstate = iwn_newstate;
587 ieee80211_media_init(ic, iwn_media_change, ieee80211_media_status);
588
589 sc->amrr.amrr_min_success_threshold = 1;
590 sc->amrr.amrr_max_success_threshold = 15;
591
592 iwn_radiotap_attach(sc);
593
594 /*
595 * XXX On NetBSD, OpenBSD's timeout_set() is replaced by the
596 * callout_init() and callout_setfunc() calls above.
597 */
598
599 if (pmf_device_register(self, NULL, iwn_resume))
600 pmf_class_network_register(self, ifp);
601 else
602 aprint_error_dev(self, "couldn't establish power handler\n");
603
604 /* XXX NetBSD add call to ieee80211_announce for dmesg. */
605 ieee80211_announce(ic);
606
607 return;
608
609 /* Free allocated memory if something failed during attachment. */
610 fail4: while (--i >= 0)
611 iwn_free_tx_ring(sc, &sc->txq[i]);
612 #ifdef IWN_USE_RBUF
613 iwn_free_rpool(sc);
614 #endif
615 iwn_free_sched(sc);
616 fail3: if (sc->ict != NULL)
617 iwn_free_ict(sc);
618 fail2: iwn_free_kw(sc);
619 fail1: iwn_free_fwmem(sc);
620 }
621
622 int
623 iwn4965_attach(struct iwn_softc *sc, pci_product_id_t pid)
624 {
625 struct iwn_ops *ops = &sc->ops;
626
627 ops->load_firmware = iwn4965_load_firmware;
628 ops->read_eeprom = iwn4965_read_eeprom;
629 ops->post_alive = iwn4965_post_alive;
630 ops->nic_config = iwn4965_nic_config;
631 ops->config_bt_coex = iwn_config_bt_coex_bluetooth;
632 ops->update_sched = iwn4965_update_sched;
633 ops->get_temperature = iwn4965_get_temperature;
634 ops->get_rssi = iwn4965_get_rssi;
635 ops->set_txpower = iwn4965_set_txpower;
636 ops->init_gains = iwn4965_init_gains;
637 ops->set_gains = iwn4965_set_gains;
638 ops->add_node = iwn4965_add_node;
639 ops->tx_done = iwn4965_tx_done;
640 #ifndef IEEE80211_NO_HT
641 ops->ampdu_tx_start = iwn4965_ampdu_tx_start;
642 ops->ampdu_tx_stop = iwn4965_ampdu_tx_stop;
643 #endif
644 sc->ntxqs = IWN4965_NTXQUEUES;
645 sc->ndmachnls = IWN4965_NDMACHNLS;
646 sc->broadcast_id = IWN4965_ID_BROADCAST;
647 sc->rxonsz = IWN4965_RXONSZ;
648 sc->schedsz = IWN4965_SCHEDSZ;
649 sc->fw_text_maxsz = IWN4965_FW_TEXT_MAXSZ;
650 sc->fw_data_maxsz = IWN4965_FW_DATA_MAXSZ;
651 sc->fwsz = IWN4965_FWSZ;
652 sc->sched_txfact_addr = IWN4965_SCHED_TXFACT;
653 sc->limits = &iwn4965_sensitivity_limits;
654 sc->fwname = "iwlwifi-4965-2.ucode";
655 /* Override chains masks, ROM is known to be broken. */
656 sc->txchainmask = IWN_ANT_AB;
657 sc->rxchainmask = IWN_ANT_ABC;
658
659 return 0;
660 }
661
662 int
663 iwn5000_attach(struct iwn_softc *sc, pci_product_id_t pid)
664 {
665 struct iwn_ops *ops = &sc->ops;
666
667 ops->load_firmware = iwn5000_load_firmware;
668 ops->read_eeprom = iwn5000_read_eeprom;
669 ops->post_alive = iwn5000_post_alive;
670 ops->nic_config = iwn5000_nic_config;
671 ops->config_bt_coex = iwn_config_bt_coex_bluetooth;
672 ops->update_sched = iwn5000_update_sched;
673 ops->get_temperature = iwn5000_get_temperature;
674 ops->get_rssi = iwn5000_get_rssi;
675 ops->set_txpower = iwn5000_set_txpower;
676 ops->init_gains = iwn5000_init_gains;
677 ops->set_gains = iwn5000_set_gains;
678 ops->add_node = iwn5000_add_node;
679 ops->tx_done = iwn5000_tx_done;
680 #ifndef IEEE80211_NO_HT
681 ops->ampdu_tx_start = iwn5000_ampdu_tx_start;
682 ops->ampdu_tx_stop = iwn5000_ampdu_tx_stop;
683 #endif
684 sc->ntxqs = IWN5000_NTXQUEUES;
685 sc->ndmachnls = IWN5000_NDMACHNLS;
686 sc->broadcast_id = IWN5000_ID_BROADCAST;
687 sc->rxonsz = IWN5000_RXONSZ;
688 sc->schedsz = IWN5000_SCHEDSZ;
689 sc->fw_text_maxsz = IWN5000_FW_TEXT_MAXSZ;
690 sc->fw_data_maxsz = IWN5000_FW_DATA_MAXSZ;
691 sc->fwsz = IWN5000_FWSZ;
692 sc->sched_txfact_addr = IWN5000_SCHED_TXFACT;
693
694 switch (sc->hw_type) {
695 case IWN_HW_REV_TYPE_5100:
696 sc->limits = &iwn5000_sensitivity_limits;
697 sc->fwname = "iwlwifi-5000-2.ucode";
698 /* Override chains masks, ROM is known to be broken. */
699 sc->txchainmask = IWN_ANT_B;
700 sc->rxchainmask = IWN_ANT_AB;
701 break;
702 case IWN_HW_REV_TYPE_5150:
703 sc->limits = &iwn5150_sensitivity_limits;
704 sc->fwname = "iwlwifi-5150-2.ucode";
705 break;
706 case IWN_HW_REV_TYPE_5300:
707 case IWN_HW_REV_TYPE_5350:
708 sc->limits = &iwn5000_sensitivity_limits;
709 sc->fwname = "iwlwifi-5000-2.ucode";
710 break;
711 case IWN_HW_REV_TYPE_1000:
712 sc->limits = &iwn1000_sensitivity_limits;
713 sc->fwname = "iwlwifi-1000-3.ucode";
714 break;
715 case IWN_HW_REV_TYPE_6000:
716 sc->limits = &iwn6000_sensitivity_limits;
717 sc->fwname = "iwlwifi-6000-4.ucode";
718 if (pid == PCI_PRODUCT_INTEL_WIFI_LINK_6000_IPA_1 ||
719 pid == PCI_PRODUCT_INTEL_WIFI_LINK_6000_IPA_2) {
720 sc->sc_flags |= IWN_FLAG_INTERNAL_PA;
721 /* Override chains masks, ROM is known to be broken. */
722 sc->txchainmask = IWN_ANT_BC;
723 sc->rxchainmask = IWN_ANT_BC;
724 }
725 break;
726 case IWN_HW_REV_TYPE_6050:
727 sc->limits = &iwn6000_sensitivity_limits;
728 sc->fwname = "iwlwifi-6050-5.ucode";
729 break;
730 case IWN_HW_REV_TYPE_6005:
731 sc->limits = &iwn6000_sensitivity_limits;
732 /* Type 6030 cards return IWN_HW_REV_TYPE_6005 */
733 if (pid == PCI_PRODUCT_INTEL_WIFI_LINK_1030_1 ||
734 pid == PCI_PRODUCT_INTEL_WIFI_LINK_1030_2 ||
735 pid == PCI_PRODUCT_INTEL_WIFI_LINK_6230_1 ||
736 pid == PCI_PRODUCT_INTEL_WIFI_LINK_6230_2 ||
737 pid == PCI_PRODUCT_INTEL_WIFI_LINK_6235) {
738 sc->fwname = "iwlwifi-6000g2b-6.ucode";
739 ops->config_bt_coex = iwn_config_bt_coex_adv1;
740 }
741 else
742 sc->fwname = "iwlwifi-6000g2a-5.ucode";
743 break;
744 default:
745 aprint_normal(": adapter type %d not supported\n", sc->hw_type);
746 return ENOTSUP;
747 }
748 return 0;
749 }
750
751 /*
752 * Attach the interface to 802.11 radiotap.
753 */
754 static void
755 iwn_radiotap_attach(struct iwn_softc *sc)
756 {
757 struct ifnet *ifp = sc->sc_ic.ic_ifp;
758
759 bpf_attach2(ifp, DLT_IEEE802_11_RADIO,
760 sizeof (struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN,
761 &sc->sc_drvbpf);
762
763 sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
764 sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
765 sc->sc_rxtap.wr_ihdr.it_present = htole32(IWN_RX_RADIOTAP_PRESENT);
766
767 sc->sc_txtap_len = sizeof sc->sc_txtapu;
768 sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
769 sc->sc_txtap.wt_ihdr.it_present = htole32(IWN_TX_RADIOTAP_PRESENT);
770 }
771
772 static int
773 iwn_detach(device_t self, int flags __unused)
774 {
775 struct iwn_softc *sc = device_private(self);
776 struct ifnet *ifp = sc->sc_ic.ic_ifp;
777 int qid;
778
779 callout_stop(&sc->calib_to);
780
781 /* Uninstall interrupt handler. */
782 if (sc->sc_ih != NULL)
783 pci_intr_disestablish(sc->sc_pct, sc->sc_ih);
784
785 /* Free DMA resources. */
786 iwn_free_rx_ring(sc, &sc->rxq);
787 for (qid = 0; qid < sc->ntxqs; qid++)
788 iwn_free_tx_ring(sc, &sc->txq[qid]);
789 #ifdef IWN_USE_RBUF
790 iwn_free_rpool(sc);
791 #endif
792 iwn_free_sched(sc);
793 iwn_free_kw(sc);
794 if (sc->ict != NULL)
795 iwn_free_ict(sc);
796 iwn_free_fwmem(sc);
797
798 bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_sz);
799
800 ieee80211_ifdetach(&sc->sc_ic);
801 if_detach(ifp);
802
803 return 0;
804 }
805
806 #if 0
807 /*
808 * XXX Investigate whether clearing the PCI retry timeout could
809 * eliminate the repeated scan calls. Also, the calls to if_init and
810 * if_start are similar in effect to adding a call to ifioctl_common().
811 */
812 static void
813 iwn_power(int why, void *arg)
814 {
815 struct iwn_softc *sc = arg;
816 struct ifnet *ifp;
817 pcireg_t reg;
818 int s;
819
820 if (why != PWR_RESUME)
821 return;
822
823 /* Clear device-specific "PCI retry timeout" register (41h). */
824 reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
825 if (reg & 0xff00)
826 pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
827
828 s = splnet();
829 ifp = &sc->sc_ic.ic_if;
830 if (ifp->if_flags & IFF_UP) {
831 ifp->if_init(ifp);
832 if (ifp->if_flags & IFF_RUNNING)
833 ifp->if_start(ifp);
834 }
835 splx(s);
836 }
837 #endif
838
839 static bool
840 iwn_resume(device_t dv, const pmf_qual_t *qual)
841 {
842 return true;
843 }
844
845 static int
846 iwn_nic_lock(struct iwn_softc *sc)
847 {
848 int ntries;
849
850 /* Request exclusive access to NIC. */
851 IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ);
852
853 /* Spin until we actually get the lock. */
854 for (ntries = 0; ntries < 1000; ntries++) {
855 if ((IWN_READ(sc, IWN_GP_CNTRL) &
856 (IWN_GP_CNTRL_MAC_ACCESS_ENA | IWN_GP_CNTRL_SLEEP)) ==
857 IWN_GP_CNTRL_MAC_ACCESS_ENA)
858 return 0;
859 DELAY(10);
860 }
861 return ETIMEDOUT;
862 }
863
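/* Release exclusive access to the NIC (counterpart of iwn_nic_lock()). */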
864 static __inline void
865 iwn_nic_unlock(struct iwn_softc *sc)
866 {
867 IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ);
868 }
869
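/*
 * Indirect accessors for the NIC's peripheral (PRPH) registers: the target
 * address is written to a dedicated address register and the data is then
 * transferred through the matching data register.
 */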
870 static __inline uint32_t
871 iwn_prph_read(struct iwn_softc *sc, uint32_t addr)
872 {
873 IWN_WRITE(sc, IWN_PRPH_RADDR, IWN_PRPH_DWORD | addr);
874 IWN_BARRIER_READ_WRITE(sc);
875 return IWN_READ(sc, IWN_PRPH_RDATA);
876 }
877
878 static __inline void
879 iwn_prph_write(struct iwn_softc *sc, uint32_t addr, uint32_t data)
880 {
881 IWN_WRITE(sc, IWN_PRPH_WADDR, IWN_PRPH_DWORD | addr);
882 IWN_BARRIER_WRITE(sc);
883 IWN_WRITE(sc, IWN_PRPH_WDATA, data);
884 }
885
886 static __inline void
887 iwn_prph_setbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask)
888 {
889 iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) | mask);
890 }
891
892 static __inline void
893 iwn_prph_clrbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask)
894 {
895 iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) & ~mask);
896 }
897
898 static __inline void
899 iwn_prph_write_region_4(struct iwn_softc *sc, uint32_t addr,
900 const uint32_t *data, int count)
901 {
902 for (; count > 0; count--, data++, addr += 4)
903 iwn_prph_write(sc, addr, *data);
904 }
905
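/*
 * Indirect accessors for the NIC's internal memory, using the same
 * address/data register scheme as the PRPH accessors above.
 */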
906 static __inline uint32_t
907 iwn_mem_read(struct iwn_softc *sc, uint32_t addr)
908 {
909 IWN_WRITE(sc, IWN_MEM_RADDR, addr);
910 IWN_BARRIER_READ_WRITE(sc);
911 return IWN_READ(sc, IWN_MEM_RDATA);
912 }
913
914 static __inline void
915 iwn_mem_write(struct iwn_softc *sc, uint32_t addr, uint32_t data)
916 {
917 IWN_WRITE(sc, IWN_MEM_WADDR, addr);
918 IWN_BARRIER_WRITE(sc);
919 IWN_WRITE(sc, IWN_MEM_WDATA, data);
920 }
921
922 #ifndef IEEE80211_NO_HT
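/* Write a 16-bit value into NIC memory with a 32-bit read-modify-write. */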
923 static __inline void
924 iwn_mem_write_2(struct iwn_softc *sc, uint32_t addr, uint16_t data)
925 {
926 uint32_t tmp;
927
928 tmp = iwn_mem_read(sc, addr & ~3);
929 if (addr & 3)
930 tmp = (tmp & 0x0000ffff) | data << 16;
931 else
932 tmp = (tmp & 0xffff0000) | data;
933 iwn_mem_write(sc, addr & ~3, tmp);
934 }
935 #endif
936
937 static __inline void
938 iwn_mem_read_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t *data,
939 int count)
940 {
941 for (; count > 0; count--, addr += 4)
942 *data++ = iwn_mem_read(sc, addr);
943 }
944
945 static __inline void
946 iwn_mem_set_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t val,
947 int count)
948 {
949 for (; count > 0; count--, addr += 4)
950 iwn_mem_write(sc, addr, val);
951 }
952
953 static int
954 iwn_eeprom_lock(struct iwn_softc *sc)
955 {
956 int i, ntries;
957
958 for (i = 0; i < 100; i++) {
959 /* Request exclusive access to EEPROM. */
960 IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
961 IWN_HW_IF_CONFIG_EEPROM_LOCKED);
962
963 /* Spin until we actually get the lock. */
964 for (ntries = 0; ntries < 100; ntries++) {
965 if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
966 IWN_HW_IF_CONFIG_EEPROM_LOCKED)
967 return 0;
968 DELAY(10);
969 }
970 }
971 return ETIMEDOUT;
972 }
973
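/* Release exclusive access to the EEPROM. */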
974 static __inline void
975 iwn_eeprom_unlock(struct iwn_softc *sc)
976 {
977 IWN_CLRBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_EEPROM_LOCKED);
978 }
979
980 /*
981 * Initialize access by host to One Time Programmable ROM.
982 * NB: This kind of ROM can be found on 1000 or 6000 Series only.
983 */
984 static int
985 iwn_init_otprom(struct iwn_softc *sc)
986 {
987 uint16_t prev = 0, base, next;
988 int count, error;
989
990 /* Wait for clock stabilization before accessing prph. */
991 if ((error = iwn_clock_wait(sc)) != 0)
992 return error;
993
994 if ((error = iwn_nic_lock(sc)) != 0)
995 return error;
996 iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ);
997 DELAY(5);
998 iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ);
999 iwn_nic_unlock(sc);
1000
1001 /* Set auto clock gate disable bit for HW with OTP shadow RAM. */
1002 if (sc->hw_type != IWN_HW_REV_TYPE_1000) {
1003 IWN_SETBITS(sc, IWN_DBG_LINK_PWR_MGMT,
1004 IWN_RESET_LINK_PWR_MGMT_DIS);
1005 }
1006 IWN_CLRBITS(sc, IWN_EEPROM_GP, IWN_EEPROM_GP_IF_OWNER);
1007 /* Clear ECC status. */
1008 IWN_SETBITS(sc, IWN_OTP_GP,
1009 IWN_OTP_GP_ECC_CORR_STTS | IWN_OTP_GP_ECC_UNCORR_STTS);
1010
1011 /*
1012 * Find the block before the last block (which contains the EEPROM
1013 * image) for HW without OTP shadow RAM.
1014 */
1015 if (sc->hw_type == IWN_HW_REV_TYPE_1000) {
1016 /* Switch to absolute addressing mode. */
1017 IWN_CLRBITS(sc, IWN_OTP_GP, IWN_OTP_GP_RELATIVE_ACCESS);
1018 base = 0;
1019 for (count = 0; count < IWN1000_OTP_NBLOCKS; count++) {
1020 error = iwn_read_prom_data(sc, base, &next, 2);
1021 if (error != 0)
1022 return error;
1023 if (next == 0) /* End of linked-list. */
1024 break;
1025 prev = base;
1026 base = le16toh(next);
1027 }
1028 if (count == 0 || count == IWN1000_OTP_NBLOCKS)
1029 return EIO;
1030 /* Skip "next" word. */
1031 sc->prom_base = prev + 1;
1032 }
1033 return 0;
1034 }
1035
1036 static int
1037 iwn_read_prom_data(struct iwn_softc *sc, uint32_t addr, void *data, int count)
1038 {
1039 uint8_t *out = data;
1040 uint32_t val, tmp;
1041 int ntries;
1042
1043 addr += sc->prom_base;
1044 for (; count > 0; count -= 2, addr++) {
1045 IWN_WRITE(sc, IWN_EEPROM, addr << 2);
1046 for (ntries = 0; ntries < 10; ntries++) {
1047 val = IWN_READ(sc, IWN_EEPROM);
1048 if (val & IWN_EEPROM_READ_VALID)
1049 break;
1050 DELAY(5);
1051 }
1052 if (ntries == 10) {
1053 aprint_error_dev(sc->sc_dev,
1054 "timeout reading ROM at 0x%x\n", addr);
1055 return ETIMEDOUT;
1056 }
1057 if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) {
1058 /* OTPROM, check for ECC errors. */
1059 tmp = IWN_READ(sc, IWN_OTP_GP);
1060 if (tmp & IWN_OTP_GP_ECC_UNCORR_STTS) {
1061 aprint_error_dev(sc->sc_dev,
1062 "OTPROM ECC error at 0x%x\n", addr);
1063 return EIO;
1064 }
1065 if (tmp & IWN_OTP_GP_ECC_CORR_STTS) {
1066 /* Correctable ECC error, clear bit. */
1067 IWN_SETBITS(sc, IWN_OTP_GP,
1068 IWN_OTP_GP_ECC_CORR_STTS);
1069 }
1070 }
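/* ROM data is returned in the upper 16 bits of the EEPROM register. */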
1071 *out++ = val >> 16;
1072 if (count > 1)
1073 *out++ = val >> 24;
1074 }
1075 return 0;
1076 }
1077
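/*
 * Allocate and map a physically contiguous DMA buffer; the kernel virtual
 * address is optionally returned through "kvap".
 */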
1078 static int
1079 iwn_dma_contig_alloc(bus_dma_tag_t tag, struct iwn_dma_info *dma, void **kvap,
1080 bus_size_t size, bus_size_t alignment)
1081 {
1082 int nsegs, error;
1083
1084 dma->tag = tag;
1085 dma->size = size;
1086
1087 error = bus_dmamap_create(tag, size, 1, size, 0, BUS_DMA_NOWAIT,
1088 &dma->map);
1089 if (error != 0)
1090 goto fail;
1091
1092 error = bus_dmamem_alloc(tag, size, alignment, 0, &dma->seg, 1, &nsegs,
1093 BUS_DMA_NOWAIT); /* XXX OpenBSD adds BUS_DMA_ZERO */
1094 if (error != 0)
1095 goto fail;
1096
1097 error = bus_dmamem_map(tag, &dma->seg, 1, size, &dma->vaddr,
1098 BUS_DMA_NOWAIT); /* XXX OpenBSD adds BUS_DMA_COHERENT */
1099 if (error != 0)
1100 goto fail;
1101
1102 error = bus_dmamap_load(tag, dma->map, dma->vaddr, size, NULL,
1103 BUS_DMA_NOWAIT);
1104 if (error != 0)
1105 goto fail;
1106
1107 /* XXX Presumably needed because of missing BUS_DMA_ZERO, above. */
1108 memset(dma->vaddr, 0, size);
1109 bus_dmamap_sync(tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE);
1110
1111 dma->paddr = dma->map->dm_segs[0].ds_addr;
1112 if (kvap != NULL)
1113 *kvap = dma->vaddr;
1114
1115 return 0;
1116
1117 fail: iwn_dma_contig_free(dma);
1118 return error;
1119 }
1120
1121 static void
1122 iwn_dma_contig_free(struct iwn_dma_info *dma)
1123 {
1124 if (dma->map != NULL) {
1125 if (dma->vaddr != NULL) {
1126 bus_dmamap_sync(dma->tag, dma->map, 0, dma->size,
1127 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1128 bus_dmamap_unload(dma->tag, dma->map);
1129 bus_dmamem_unmap(dma->tag, dma->vaddr, dma->size);
1130 bus_dmamem_free(dma->tag, &dma->seg, 1);
1131 dma->vaddr = NULL;
1132 }
1133 bus_dmamap_destroy(dma->tag, dma->map);
1134 dma->map = NULL;
1135 }
1136 }
1137
1138 static int
1139 iwn_alloc_sched(struct iwn_softc *sc)
1140 {
1141 /* TX scheduler rings must be aligned on a 1KB boundary. */
1142 return iwn_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
1143 (void **)&sc->sched, sc->schedsz, 1024);
1144 }
1145
1146 static void
1147 iwn_free_sched(struct iwn_softc *sc)
1148 {
1149 iwn_dma_contig_free(&sc->sched_dma);
1150 }
1151
1152 static int
1153 iwn_alloc_kw(struct iwn_softc *sc)
1154 {
1155 /* "Keep Warm" page must be aligned on a 4KB boundary. */
1156 return iwn_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, NULL, 4096,
1157 4096);
1158 }
1159
1160 static void
1161 iwn_free_kw(struct iwn_softc *sc)
1162 {
1163 iwn_dma_contig_free(&sc->kw_dma);
1164 }
1165
1166 static int
1167 iwn_alloc_ict(struct iwn_softc *sc)
1168 {
1169 /* ICT table must be aligned on a 4KB boundary. */
1170 return iwn_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
1171 (void **)&sc->ict, IWN_ICT_SIZE, 4096);
1172 }
1173
1174 static void
1175 iwn_free_ict(struct iwn_softc *sc)
1176 {
1177 iwn_dma_contig_free(&sc->ict_dma);
1178 }
1179
1180 static int
1181 iwn_alloc_fwmem(struct iwn_softc *sc)
1182 {
1183 /* Must be aligned on a 16-byte boundary. */
1184 return iwn_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma, NULL,
1185 sc->fwsz, 16);
1186 }
1187
1188 static void
1189 iwn_free_fwmem(struct iwn_softc *sc)
1190 {
1191 iwn_dma_contig_free(&sc->fw_dma);
1192 }
1193
1194 static int
1195 iwn_alloc_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
1196 {
1197 bus_size_t size;
1198 int i, error;
1199
1200 ring->cur = 0;
1201
1202 /* Allocate RX descriptors (256-byte aligned). */
1203 size = IWN_RX_RING_COUNT * sizeof (uint32_t);
1204 error = iwn_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma,
1205 (void **)&ring->desc, size, 256);
1206 if (error != 0) {
1207 aprint_error_dev(sc->sc_dev,
1208 "could not allocate RX ring DMA memory\n");
1209 goto fail;
1210 }
1211
1212 /* Allocate RX status area (16-byte aligned). */
1213 error = iwn_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
1214 (void **)&ring->stat, sizeof (struct iwn_rx_status), 16);
1215 if (error != 0) {
1216 aprint_error_dev(sc->sc_dev,
1217 "could not allocate RX status DMA memory\n");
1218 goto fail;
1219 }
1220
1221 /*
1222 * Allocate and map RX buffers.
1223 */
1224 for (i = 0; i < IWN_RX_RING_COUNT; i++) {
1225 struct iwn_rx_data *data = &ring->data[i];
1226
1227 error = bus_dmamap_create(sc->sc_dmat, IWN_RBUF_SIZE, 1,
1228 IWN_RBUF_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1229 &data->map);
1230 if (error != 0) {
1231 aprint_error_dev(sc->sc_dev,
1232 "could not create RX buf DMA map\n");
1233 goto fail;
1234 }
1235
1236 data->m = MCLGETIalt(sc, M_DONTWAIT, NULL, IWN_RBUF_SIZE);
1237 if (data->m == NULL) {
1238 aprint_error_dev(sc->sc_dev,
1239 "could not allocate RX mbuf\n");
1240 error = ENOBUFS;
1241 goto fail;
1242 }
1243
1244 error = bus_dmamap_load(sc->sc_dmat, data->map,
1245 mtod(data->m, void *), IWN_RBUF_SIZE, NULL,
1246 BUS_DMA_NOWAIT | BUS_DMA_READ);
1247 if (error != 0) {
1248 aprint_error_dev(sc->sc_dev,
1249 "can't not map mbuf (error %d)\n", error);
1250 goto fail;
1251 }
1252
1253 /* Set physical address of RX buffer (256-byte aligned). */
1254 ring->desc[i] = htole32(data->map->dm_segs[0].ds_addr >> 8);
1255 }
1256
1257 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0, size,
1258 BUS_DMASYNC_PREWRITE);
1259
1260 return 0;
1261
1262 fail: iwn_free_rx_ring(sc, ring);
1263 return error;
1264 }
1265
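/* Disable RX DMA, wait for the unit to go idle, and reset the ring index. */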
1266 static void
1267 iwn_reset_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
1268 {
1269 int ntries;
1270
1271 if (iwn_nic_lock(sc) == 0) {
1272 IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0);
1273 for (ntries = 0; ntries < 1000; ntries++) {
1274 if (IWN_READ(sc, IWN_FH_RX_STATUS) &
1275 IWN_FH_RX_STATUS_IDLE)
1276 break;
1277 DELAY(10);
1278 }
1279 iwn_nic_unlock(sc);
1280 }
1281 ring->cur = 0;
1282 sc->last_rx_valid = 0;
1283 }
1284
1285 static void
1286 iwn_free_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
1287 {
1288 int i;
1289
1290 iwn_dma_contig_free(&ring->desc_dma);
1291 iwn_dma_contig_free(&ring->stat_dma);
1292
1293 for (i = 0; i < IWN_RX_RING_COUNT; i++) {
1294 struct iwn_rx_data *data = &ring->data[i];
1295
1296 if (data->m != NULL) {
1297 bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1298 data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1299 bus_dmamap_unload(sc->sc_dmat, data->map);
1300 m_freem(data->m);
1301 }
1302 if (data->map != NULL)
1303 bus_dmamap_destroy(sc->sc_dmat, data->map);
1304 }
1305 }
1306
1307 static int
1308 iwn_alloc_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring, int qid)
1309 {
1310 bus_addr_t paddr;
1311 bus_size_t size;
1312 int i, error;
1313
1314 ring->qid = qid;
1315 ring->queued = 0;
1316 ring->cur = 0;
1317
1318 /* Allocate TX descriptors (256-byte aligned). */
1319 size = IWN_TX_RING_COUNT * sizeof (struct iwn_tx_desc);
1320 error = iwn_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma,
1321 (void **)&ring->desc, size, 256);
1322 if (error != 0) {
1323 aprint_error_dev(sc->sc_dev,
1324 "could not allocate TX ring DMA memory\n");
1325 goto fail;
1326 }
1327 /*
1328 * We only use rings 0 through 4 (4 EDCA + cmd) so there is no need
1329 * to allocate command space for the other rings.
1330 * XXX Do we really need to allocate descriptors for other rings?
1331 */
1332 if (qid > 4)
1333 return 0;
1334
1335 size = IWN_TX_RING_COUNT * sizeof (struct iwn_tx_cmd);
1336 error = iwn_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma,
1337 (void **)&ring->cmd, size, 4);
1338 if (error != 0) {
1339 aprint_error_dev(sc->sc_dev,
1340 "could not allocate TX cmd DMA memory\n");
1341 goto fail;
1342 }
1343
1344 paddr = ring->cmd_dma.paddr;
1345 for (i = 0; i < IWN_TX_RING_COUNT; i++) {
1346 struct iwn_tx_data *data = &ring->data[i];
1347
1348 data->cmd_paddr = paddr;
1349 data->scratch_paddr = paddr + 12;
1350 paddr += sizeof (struct iwn_tx_cmd);
1351
1352 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
1353 IWN_MAX_SCATTER - 1, MCLBYTES, 0, BUS_DMA_NOWAIT,
1354 &data->map);
1355 if (error != 0) {
1356 aprint_error_dev(sc->sc_dev,
1357 "could not create TX buf DMA map\n");
1358 goto fail;
1359 }
1360 }
1361 return 0;
1362
1363 fail: iwn_free_tx_ring(sc, ring);
1364 return error;
1365 }
1366
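/* Release any mbufs still attached to the ring and clear its descriptors. */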
1367 static void
1368 iwn_reset_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring)
1369 {
1370 int i;
1371
1372 for (i = 0; i < IWN_TX_RING_COUNT; i++) {
1373 struct iwn_tx_data *data = &ring->data[i];
1374
1375 if (data->m != NULL) {
1376 bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1377 data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1378 bus_dmamap_unload(sc->sc_dmat, data->map);
1379 m_freem(data->m);
1380 data->m = NULL;
1381 }
1382 }
1383 /* Clear TX descriptors. */
1384 memset(ring->desc, 0, ring->desc_dma.size);
1385 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0,
1386 ring->desc_dma.size, BUS_DMASYNC_PREWRITE);
1387 sc->qfullmsk &= ~(1 << ring->qid);
1388 ring->queued = 0;
1389 ring->cur = 0;
1390 }
1391
1392 static void
1393 iwn_free_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring)
1394 {
1395 int i;
1396
1397 iwn_dma_contig_free(&ring->desc_dma);
1398 iwn_dma_contig_free(&ring->cmd_dma);
1399
1400 for (i = 0; i < IWN_TX_RING_COUNT; i++) {
1401 struct iwn_tx_data *data = &ring->data[i];
1402
1403 if (data->m != NULL) {
1404 bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1405 data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1406 bus_dmamap_unload(sc->sc_dmat, data->map);
1407 m_freem(data->m);
1408 }
1409 if (data->map != NULL)
1410 bus_dmamap_destroy(sc->sc_dmat, data->map);
1411 }
1412 }
1413
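/* Reset and re-enable the ICT (interrupt cause table) used by 5000 Series. */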
1414 static void
1415 iwn5000_ict_reset(struct iwn_softc *sc)
1416 {
1417 /* Disable interrupts. */
1418 IWN_WRITE(sc, IWN_INT_MASK, 0);
1419
1420 /* Reset ICT table. */
1421 memset(sc->ict, 0, IWN_ICT_SIZE);
1422 sc->ict_cur = 0;
1423
1424 /* Set physical address of ICT table (4KB aligned). */
1425 DPRINTF(("enabling ICT\n"));
1426 IWN_WRITE(sc, IWN_DRAM_INT_TBL, IWN_DRAM_INT_TBL_ENABLE |
1427 IWN_DRAM_INT_TBL_WRAP_CHECK | sc->ict_dma.paddr >> 12);
1428
1429 /* Enable periodic RX interrupt. */
1430 sc->int_mask |= IWN_INT_RX_PERIODIC;
1431 /* Switch to ICT interrupt mode in driver. */
1432 sc->sc_flags |= IWN_FLAG_USE_ICT;
1433
1434 /* Re-enable interrupts. */
1435 IWN_WRITE(sc, IWN_INT, 0xffffffff);
1436 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
1437 }
1438
1439 static int
1440 iwn_read_eeprom(struct iwn_softc *sc)
1441 {
1442 struct iwn_ops *ops = &sc->ops;
1443 struct ieee80211com *ic = &sc->sc_ic;
1444 uint16_t val;
1445 int error;
1446
1447 /* Check whether adapter has an EEPROM or an OTPROM. */
1448 if (sc->hw_type >= IWN_HW_REV_TYPE_1000 &&
1449 (IWN_READ(sc, IWN_OTP_GP) & IWN_OTP_GP_DEV_SEL_OTP))
1450 sc->sc_flags |= IWN_FLAG_HAS_OTPROM;
1451 DPRINTF(("%s found\n", (sc->sc_flags & IWN_FLAG_HAS_OTPROM) ?
1452 "OTPROM" : "EEPROM"));
1453
1454 /* Adapter has to be powered on for EEPROM access to work. */
1455 if ((error = iwn_apm_init(sc)) != 0) {
1456 aprint_error_dev(sc->sc_dev,
1457 "could not power ON adapter\n");
1458 return error;
1459 }
1460
1461 if ((IWN_READ(sc, IWN_EEPROM_GP) & 0x7) == 0) {
1462 aprint_error_dev(sc->sc_dev,
1463 "bad ROM signature\n");
1464 return EIO;
1465 }
1466 if ((error = iwn_eeprom_lock(sc)) != 0) {
1467 aprint_error_dev(sc->sc_dev,
1468 "could not lock ROM (error=%d)\n", error);
1469 return error;
1470 }
1471 if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) {
1472 if ((error = iwn_init_otprom(sc)) != 0) {
1473 aprint_error_dev(sc->sc_dev,
1474 "could not initialize OTPROM\n");
1475 return error;
1476 }
1477 }
1478
1479 iwn_read_prom_data(sc, IWN_EEPROM_SKU_CAP, &val, 2);
1480 DPRINTF(("SKU capabilities=0x%04x\n", le16toh(val)));
1481 /* Check if HT support is bonded out. */
1482 if (val & htole16(IWN_EEPROM_SKU_CAP_11N))
1483 sc->sc_flags |= IWN_FLAG_HAS_11N;
1484
1485 iwn_read_prom_data(sc, IWN_EEPROM_RFCFG, &val, 2);
1486 sc->rfcfg = le16toh(val);
1487 DPRINTF(("radio config=0x%04x\n", sc->rfcfg));
1488 /* Read Tx/Rx chains from ROM unless it's known to be broken. */
1489 if (sc->txchainmask == 0)
1490 sc->txchainmask = IWN_RFCFG_TXANTMSK(sc->rfcfg);
1491 if (sc->rxchainmask == 0)
1492 sc->rxchainmask = IWN_RFCFG_RXANTMSK(sc->rfcfg);
1493
1494 /* Read MAC address. */
1495 iwn_read_prom_data(sc, IWN_EEPROM_MAC, ic->ic_myaddr, 6);
1496
1497 /* Read adapter-specific information from EEPROM. */
1498 ops->read_eeprom(sc);
1499
1500 iwn_apm_stop(sc); /* Power OFF adapter. */
1501
1502 iwn_eeprom_unlock(sc);
1503 return 0;
1504 }
1505
1506 static void
1507 iwn4965_read_eeprom(struct iwn_softc *sc)
1508 {
1509 uint32_t addr;
1510 uint16_t val;
1511 int i;
1512
1513 /* Read regulatory domain (4 ASCII characters). */
1514 iwn_read_prom_data(sc, IWN4965_EEPROM_DOMAIN, sc->eeprom_domain, 4);
1515
1516 /* Read the list of authorized channels (20MHz ones only). */
1517 for (i = 0; i < 5; i++) {
1518 addr = iwn4965_regulatory_bands[i];
1519 iwn_read_eeprom_channels(sc, i, addr);
1520 }
1521
1522 /* Read maximum allowed TX power for 2GHz and 5GHz bands. */
1523 iwn_read_prom_data(sc, IWN4965_EEPROM_MAXPOW, &val, 2);
1524 sc->maxpwr2GHz = val & 0xff;
1525 sc->maxpwr5GHz = val >> 8;
1526 /* Check that EEPROM values are within valid range. */
1527 if (sc->maxpwr5GHz < 20 || sc->maxpwr5GHz > 50)
1528 sc->maxpwr5GHz = 38;
1529 if (sc->maxpwr2GHz < 20 || sc->maxpwr2GHz > 50)
1530 sc->maxpwr2GHz = 38;
1531 DPRINTF(("maxpwr 2GHz=%d 5GHz=%d\n", sc->maxpwr2GHz, sc->maxpwr5GHz));
1532
1533 /* Read samples for each TX power group. */
1534 iwn_read_prom_data(sc, IWN4965_EEPROM_BANDS, sc->bands,
1535 sizeof sc->bands);
1536
1537 /* Read voltage at which samples were taken. */
1538 iwn_read_prom_data(sc, IWN4965_EEPROM_VOLTAGE, &val, 2);
1539 sc->eeprom_voltage = (int16_t)le16toh(val);
1540 DPRINTF(("voltage=%d (in 0.3V)\n", sc->eeprom_voltage));
1541
1542 #ifdef IWN_DEBUG
1543 /* Print samples. */
1544 if (iwn_debug > 0) {
1545 for (i = 0; i < IWN_NBANDS; i++)
1546 iwn4965_print_power_group(sc, i);
1547 }
1548 #endif
1549 }
1550
1551 #ifdef IWN_DEBUG
1552 static void
1553 iwn4965_print_power_group(struct iwn_softc *sc, int i)
1554 {
1555 struct iwn4965_eeprom_band *band = &sc->bands[i];
1556 struct iwn4965_eeprom_chan_samples *chans = band->chans;
1557 int j, c;
1558
1559 aprint_normal("===band %d===\n", i);
1560 aprint_normal("chan lo=%d, chan hi=%d\n", band->lo, band->hi);
1561 aprint_normal("chan1 num=%d\n", chans[0].num);
1562 for (c = 0; c < 2; c++) {
1563 for (j = 0; j < IWN_NSAMPLES; j++) {
1564 aprint_normal("chain %d, sample %d: temp=%d gain=%d "
1565 "power=%d pa_det=%d\n", c, j,
1566 chans[0].samples[c][j].temp,
1567 chans[0].samples[c][j].gain,
1568 chans[0].samples[c][j].power,
1569 chans[0].samples[c][j].pa_det);
1570 }
1571 }
1572 aprint_normal("chan2 num=%d\n", chans[1].num);
1573 for (c = 0; c < 2; c++) {
1574 for (j = 0; j < IWN_NSAMPLES; j++) {
1575 aprint_normal("chain %d, sample %d: temp=%d gain=%d "
1576 "power=%d pa_det=%d\n", c, j,
1577 chans[1].samples[c][j].temp,
1578 chans[1].samples[c][j].gain,
1579 chans[1].samples[c][j].power,
1580 chans[1].samples[c][j].pa_det);
1581 }
1582 }
1583 }
1584 #endif
1585
1586 static void
1587 iwn5000_read_eeprom(struct iwn_softc *sc)
1588 {
1589 struct iwn5000_eeprom_calib_hdr hdr;
1590 int32_t volt;
1591 uint32_t base, addr;
1592 uint16_t val;
1593 int i;
1594
1595 /* Read regulatory domain (4 ASCII characters). */
1596 iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2);
1597 base = le16toh(val);
1598 iwn_read_prom_data(sc, base + IWN5000_EEPROM_DOMAIN,
1599 sc->eeprom_domain, 4);
1600
1601 /* Read the list of authorized channels (20MHz ones only). */
1602 for (i = 0; i < 5; i++) {
1603 addr = base + iwn5000_regulatory_bands[i];
1604 iwn_read_eeprom_channels(sc, i, addr);
1605 }
1606
1607 /* Read enhanced TX power information for 6000 Series. */
1608 if (sc->hw_type >= IWN_HW_REV_TYPE_6000)
1609 iwn_read_eeprom_enhinfo(sc);
1610
1611 iwn_read_prom_data(sc, IWN5000_EEPROM_CAL, &val, 2);
1612 base = le16toh(val);
1613 iwn_read_prom_data(sc, base, &hdr, sizeof hdr);
1614 DPRINTF(("calib version=%u pa type=%u voltage=%u\n",
1615 hdr.version, hdr.pa_type, le16toh(hdr.volt)));
1616 sc->calib_ver = hdr.version;
1617
1618 if (sc->hw_type == IWN_HW_REV_TYPE_5150) {
1619 /* Compute temperature offset. */
1620 iwn_read_prom_data(sc, base + IWN5000_EEPROM_TEMP, &val, 2);
1621 sc->eeprom_temp = le16toh(val);
1622 iwn_read_prom_data(sc, base + IWN5000_EEPROM_VOLT, &val, 2);
1623 volt = le16toh(val);
1624 sc->temp_off = sc->eeprom_temp - (volt / -5);
1625 DPRINTF(("temp=%d volt=%d offset=%dK\n",
1626 sc->eeprom_temp, volt, sc->temp_off));
1627 } else {
1628 /* Read crystal calibration. */
1629 iwn_read_prom_data(sc, base + IWN5000_EEPROM_CRYSTAL,
1630 &sc->eeprom_crystal, sizeof (uint32_t));
1631 DPRINTF(("crystal calibration 0x%08x\n",
1632 le32toh(sc->eeprom_crystal)));
1633 }
1634 }
1635
1636 static void
1637 iwn_read_eeprom_channels(struct iwn_softc *sc, int n, uint32_t addr)
1638 {
1639 struct ieee80211com *ic = &sc->sc_ic;
1640 const struct iwn_chan_band *band = &iwn_bands[n];
1641 struct iwn_eeprom_chan channels[IWN_MAX_CHAN_PER_BAND];
1642 uint8_t chan;
1643 int i;
1644
1645 iwn_read_prom_data(sc, addr, channels,
1646 band->nchan * sizeof (struct iwn_eeprom_chan));
1647
1648 for (i = 0; i < band->nchan; i++) {
1649 if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID))
1650 continue;
1651
1652 chan = band->chan[i];
1653
1654 if (n == 0) { /* 2GHz band */
1655 ic->ic_channels[chan].ic_freq =
1656 ieee80211_ieee2mhz(chan, IEEE80211_CHAN_2GHZ);
1657 ic->ic_channels[chan].ic_flags =
1658 IEEE80211_CHAN_CCK | IEEE80211_CHAN_OFDM |
1659 IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ;
1660
1661 } else { /* 5GHz band */
1662 /*
1663 * Some adapters support channels 7, 8, 11 and 12
1664 * both in the 2GHz and 4.9GHz bands.
1665 * Because of limitations in our net80211 layer,
1666 * we don't support them in the 4.9GHz band.
1667 */
1668 if (chan <= 14)
1669 continue;
1670
1671 ic->ic_channels[chan].ic_freq =
1672 ieee80211_ieee2mhz(chan, IEEE80211_CHAN_5GHZ);
1673 ic->ic_channels[chan].ic_flags = IEEE80211_CHAN_A;
1674 /* We have at least one valid 5GHz channel. */
1675 sc->sc_flags |= IWN_FLAG_HAS_5GHZ;
1676 }
1677
1678 /* Is active scan allowed on this channel? */
1679 if (!(channels[i].flags & IWN_EEPROM_CHAN_ACTIVE)) {
1680 ic->ic_channels[chan].ic_flags |=
1681 IEEE80211_CHAN_PASSIVE;
1682 }
1683
1684 /* Save maximum allowed TX power for this channel. */
1685 sc->maxpwr[chan] = channels[i].maxpwr;
1686
1687 DPRINTF(("adding chan %d flags=0x%x maxpwr=%d\n",
1688 chan, channels[i].flags, sc->maxpwr[chan]));
1689 }
1690 }
1691
1692 static void
1693 iwn_read_eeprom_enhinfo(struct iwn_softc *sc)
1694 {
1695 struct iwn_eeprom_enhinfo enhinfo[35];
1696 uint16_t val, base;
1697 int8_t maxpwr;
1698 int i;
1699
1700 iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2);
1701 base = le16toh(val);
1702 iwn_read_prom_data(sc, base + IWN6000_EEPROM_ENHINFO,
1703 enhinfo, sizeof enhinfo);
1704
1705 memset(sc->enh_maxpwr, 0, sizeof sc->enh_maxpwr);
1706 for (i = 0; i < __arraycount(enhinfo); i++) {
1707 if (enhinfo[i].chan == 0 || enhinfo[i].reserved != 0)
1708 continue; /* Skip invalid entries. */
1709
1710 maxpwr = 0;
1711 if (sc->txchainmask & IWN_ANT_A)
1712 maxpwr = MAX(maxpwr, enhinfo[i].chain[0]);
1713 if (sc->txchainmask & IWN_ANT_B)
1714 maxpwr = MAX(maxpwr, enhinfo[i].chain[1]);
1715 if (sc->txchainmask & IWN_ANT_C)
1716 maxpwr = MAX(maxpwr, enhinfo[i].chain[2]);
1717 if (sc->ntxchains == 2)
1718 maxpwr = MAX(maxpwr, enhinfo[i].mimo2);
1719 else if (sc->ntxchains == 3)
1720 maxpwr = MAX(maxpwr, enhinfo[i].mimo3);
1721 maxpwr /= 2; /* Convert half-dBm to dBm. */
1722
1723 DPRINTF(("enhinfo %d, maxpwr=%d\n", i, maxpwr));
1724 sc->enh_maxpwr[i] = maxpwr;
1725 }
1726 }
1727
1728 static struct ieee80211_node *
1729 iwn_node_alloc(struct ieee80211_node_table *ic __unused)
1730 {
1731 return malloc(sizeof (struct iwn_node), M_80211_NODE, M_NOWAIT | M_ZERO);
1732 }
1733
1734 static void
1735 iwn_newassoc(struct ieee80211_node *ni, int isnew)
1736 {
1737 struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc;
1738 struct iwn_node *wn = (void *)ni;
1739 uint8_t rate;
1740 int ridx, i;
1741
1742 ieee80211_amrr_node_init(&sc->amrr, &wn->amn);
1743 /* Start at the lowest available bit-rate; AMRR will raise it. */
1744 ni->ni_txrate = 0;
1745
1746 for (i = 0; i < ni->ni_rates.rs_nrates; i++) {
1747 rate = ni->ni_rates.rs_rates[i] & IEEE80211_RATE_VAL;
1748 /* Map 802.11 rate to HW rate index. */
1749 for (ridx = 0; ridx <= IWN_RIDX_MAX; ridx++)
1750 if (iwn_rates[ridx].rate == rate)
1751 break;
1752 wn->ridx[i] = ridx;
1753 }
1754 }
1755
1756 static int
1757 iwn_media_change(struct ifnet *ifp)
1758 {
1759 struct iwn_softc *sc = ifp->if_softc;
1760 struct ieee80211com *ic = &sc->sc_ic;
1761 uint8_t rate, ridx;
1762 int error;
1763
1764 error = ieee80211_media_change(ifp);
1765 if (error != ENETRESET)
1766 return error;
1767
1768 if (ic->ic_fixed_rate != -1) {
1769 rate = ic->ic_sup_rates[ic->ic_curmode].
1770 rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL;
1771 /* Map 802.11 rate to HW rate index. */
1772 for (ridx = 0; ridx <= IWN_RIDX_MAX; ridx++)
1773 if (iwn_rates[ridx].rate == rate)
1774 break;
1775 sc->fixed_ridx = ridx;
1776 }
1777
1778 if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
1779 (IFF_UP | IFF_RUNNING)) {
1780 iwn_stop(ifp, 0);
1781 error = iwn_init(ifp);
1782 }
1783 return error;
1784 }
1785
1786 static int
1787 iwn_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
1788 {
1789 struct ifnet *ifp = ic->ic_ifp;
1790 struct iwn_softc *sc = ifp->if_softc;
1791 int error;
1792
1793 callout_stop(&sc->calib_to);
1794
1795 switch (nstate) {
1796 case IEEE80211_S_SCAN:
1797 /* XXX Do not abort a running scan. */
1798 if (sc->sc_flags & IWN_FLAG_SCANNING) {
1799 if (ic->ic_state != nstate)
1800 aprint_error_dev(sc->sc_dev, "scan request(%d) "
1801 "while scanning(%d) ignored\n", nstate,
1802 ic->ic_state);
1803 break;
1804 }
1805
1806 /* XXX Not sure if call and flags are needed. */
1807 ieee80211_node_table_reset(&ic->ic_scan);
1808 ic->ic_flags |= IEEE80211_F_SCAN | IEEE80211_F_ASCAN;
1809 sc->sc_flags |= IWN_FLAG_SCANNING;
1810
1811 /* Make the link LED blink while we're scanning. */
1812 iwn_set_led(sc, IWN_LED_LINK, 10, 10);
1813
1814 if ((error = iwn_scan(sc, IEEE80211_CHAN_2GHZ)) != 0) {
1815 aprint_error_dev(sc->sc_dev,
1816 "could not initiate scan\n");
1817 return error;
1818 }
1819 ic->ic_state = nstate;
1820 return 0;
1821
1822 case IEEE80211_S_ASSOC:
1823 if (ic->ic_state != IEEE80211_S_RUN)
1824 break;
1825 /* FALLTHROUGH */
1826 case IEEE80211_S_AUTH:
1827 /* Reset state to handle reassociations correctly. */
1828 sc->rxon.associd = 0;
1829 sc->rxon.filter &= ~htole32(IWN_FILTER_BSS);
1830 sc->calib.state = IWN_CALIB_STATE_INIT;
1831
1832 if ((error = iwn_auth(sc)) != 0) {
1833 aprint_error_dev(sc->sc_dev,
1834 "could not move to auth state\n");
1835 return error;
1836 }
1837 break;
1838
1839 case IEEE80211_S_RUN:
1840 if ((error = iwn_run(sc)) != 0) {
1841 aprint_error_dev(sc->sc_dev,
1842 "could not move to run state\n");
1843 return error;
1844 }
1845 break;
1846
1847 case IEEE80211_S_INIT:
1848 sc->sc_flags &= ~IWN_FLAG_SCANNING;
1849 sc->calib.state = IWN_CALIB_STATE_INIT;
1850 break;
1851 }
1852
1853 return sc->sc_newstate(ic, nstate, arg);
1854 }
1855
1856 static void
1857 iwn_iter_func(void *arg, struct ieee80211_node *ni)
1858 {
1859 struct iwn_softc *sc = arg;
1860 struct iwn_node *wn = (struct iwn_node *)ni;
1861
1862 ieee80211_amrr_choose(&sc->amrr, ni, &wn->amn);
1863 }
1864
1865 static void
1866 iwn_calib_timeout(void *arg)
1867 {
1868 struct iwn_softc *sc = arg;
1869 struct ieee80211com *ic = &sc->sc_ic;
1870 int s;
1871
1872 s = splnet();
1873 if (ic->ic_fixed_rate == -1) {
1874 if (ic->ic_opmode == IEEE80211_M_STA)
1875 iwn_iter_func(sc, ic->ic_bss);
1876 else
1877 ieee80211_iterate_nodes(&ic->ic_sta, iwn_iter_func, sc);
1878 }
1879 /* Force automatic TX power calibration every 60 secs (120 ticks of the 500 ms callout below). */
1880 if (++sc->calib_cnt >= 120) {
1881 uint32_t flags = 0;
1882
1883 DPRINTF(("sending request for statistics\n"));
1884 (void)iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags,
1885 sizeof flags, 1);
1886 sc->calib_cnt = 0;
1887 }
1888 splx(s);
1889
1890 /* Automatic rate control triggered every 500ms. */
1891 callout_schedule(&sc->calib_to, hz/2);
1892 }
1893
1894 /*
1895 * Process an RX_PHY firmware notification. This is usually immediately
1896 * followed by an MPDU_RX_DONE notification.
1897 */
1898 static void
1899 iwn_rx_phy(struct iwn_softc *sc, struct iwn_rx_desc *desc,
1900 struct iwn_rx_data *data)
1901 {
1902 struct iwn_rx_stat *stat = (struct iwn_rx_stat *)(desc + 1);
1903
1904 DPRINTFN(2, ("received PHY stats\n"));
1905 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc),
1906 sizeof (*stat), BUS_DMASYNC_POSTREAD);
1907
1908 /* Save RX statistics, they will be used on MPDU_RX_DONE. */
1909 memcpy(&sc->last_rx_stat, stat, sizeof (*stat));
1910 sc->last_rx_valid = 1;
1911 }
1912
1913 /*
1914 * Process an RX_DONE (4965AGN only) or MPDU_RX_DONE firmware notification.
1915 * Each MPDU_RX_DONE notification must be preceded by an RX_PHY one.
1916 */
1917 static void
1918 iwn_rx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
1919 struct iwn_rx_data *data)
1920 {
1921 struct iwn_ops *ops = &sc->ops;
1922 struct ieee80211com *ic = &sc->sc_ic;
1923 struct ifnet *ifp = ic->ic_ifp;
1924 struct iwn_rx_ring *ring = &sc->rxq;
1925 struct ieee80211_frame *wh;
1926 struct ieee80211_node *ni;
1927 struct mbuf *m, *m1;
1928 struct iwn_rx_stat *stat;
1929 char *head;
1930 uint32_t flags;
1931 int error, len, rssi;
1932
1933 if (desc->type == IWN_MPDU_RX_DONE) {
1934 /* Check for prior RX_PHY notification. */
1935 if (!sc->last_rx_valid) {
1936 DPRINTF(("missing RX_PHY\n"));
1937 return;
1938 }
1939 sc->last_rx_valid = 0;
1940 stat = &sc->last_rx_stat;
1941 } else
1942 stat = (struct iwn_rx_stat *)(desc + 1);
1943
1944 bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWN_RBUF_SIZE,
1945 BUS_DMASYNC_POSTREAD);
1946
1947 if (stat->cfg_phy_len > IWN_STAT_MAXLEN) {
1948 aprint_error_dev(sc->sc_dev,
1949 "invalid RX statistic header\n");
1950 return;
1951 }
1952 if (desc->type == IWN_MPDU_RX_DONE) {
1953 struct iwn_rx_mpdu *mpdu = (struct iwn_rx_mpdu *)(desc + 1);
1954 head = (char *)(mpdu + 1);
1955 len = le16toh(mpdu->len);
1956 } else {
1957 head = (char *)(stat + 1) + stat->cfg_phy_len;
1958 len = le16toh(stat->len);
1959 }
1960
1961 flags = le32toh(*(uint32_t *)(head + len));
1962
1963 /* Discard frames with a bad FCS early. */
1964 if ((flags & IWN_RX_NOERROR) != IWN_RX_NOERROR) {
1965 DPRINTFN(2, ("RX flags error %x\n", flags));
1966 ifp->if_ierrors++;
1967 return;
1968 }
1969 /* Discard frames that are too short. */
1970 if (len < sizeof (*wh)) {
1971 DPRINTF(("frame too short: %d\n", len));
1972 ic->ic_stats.is_rx_tooshort++;
1973 ifp->if_ierrors++;
1974 return;
1975 }
1976
1977 m1 = MCLGETIalt(sc, M_DONTWAIT, NULL, IWN_RBUF_SIZE);
1978 if (m1 == NULL) {
1979 ic->ic_stats.is_rx_nobuf++;
1980 ifp->if_ierrors++;
1981 return;
1982 }
1983 bus_dmamap_unload(sc->sc_dmat, data->map);
1984
1985 error = bus_dmamap_load(sc->sc_dmat, data->map, mtod(m1, void *),
1986 IWN_RBUF_SIZE, NULL, BUS_DMA_NOWAIT | BUS_DMA_READ);
1987 if (error != 0) {
1988 m_freem(m1);
1989
1990 /* Try to reload the old mbuf. */
1991 error = bus_dmamap_load(sc->sc_dmat, data->map,
1992 mtod(data->m, void *), IWN_RBUF_SIZE, NULL,
1993 BUS_DMA_NOWAIT | BUS_DMA_READ);
1994 if (error != 0) {
1995 panic("%s: could not load old RX mbuf",
1996 device_xname(sc->sc_dev));
1997 }
1998 /* Physical address may have changed. */
1999 ring->desc[ring->cur] =
2000 htole32(data->map->dm_segs[0].ds_addr >> 8);
2001 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
2002 ring->cur * sizeof (uint32_t), sizeof (uint32_t),
2003 BUS_DMASYNC_PREWRITE);
2004 ifp->if_ierrors++;
2005 return;
2006 }
2007
2008 m = data->m;
2009 data->m = m1;
2010 /* Update RX descriptor. */
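/*
 * NB: the entry seems to hold the buffer's physical address shifted
 * right by 8, i.e. in 256-byte units, which suggests RX buffers must
 * be 256-byte aligned.
 */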
2011 ring->desc[ring->cur] = htole32(data->map->dm_segs[0].ds_addr >> 8);
2012 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
2013 ring->cur * sizeof (uint32_t), sizeof (uint32_t),
2014 BUS_DMASYNC_PREWRITE);
2015
2016 /* Finalize mbuf. */
2017 m->m_pkthdr.rcvif = ifp;
2018 m->m_data = head;
2019 m->m_pkthdr.len = m->m_len = len;
2020
2021 /* Grab a reference to the source node. */
2022 wh = mtod(m, struct ieee80211_frame *);
2023 ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
2024
2025 /* XXX OpenBSD adds decryption here (see also comments in iwn_tx). */
2026 /* NetBSD does decryption in ieee80211_input. */
2027
2028 rssi = ops->get_rssi(stat);
2029
2030 /* XXX Added for NetBSD: scans never stop without it */
2031 if (ic->ic_state == IEEE80211_S_SCAN)
2032 iwn_fix_channel(ic, m);
2033
2034 if (sc->sc_drvbpf != NULL) {
2035 struct iwn_rx_radiotap_header *tap = &sc->sc_rxtap;
2036
2037 tap->wr_flags = 0;
2038 if (stat->flags & htole16(IWN_STAT_FLAG_SHPREAMBLE))
2039 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
2040 tap->wr_chan_freq =
2041 htole16(ic->ic_channels[stat->chan].ic_freq);
2042 tap->wr_chan_flags =
2043 htole16(ic->ic_channels[stat->chan].ic_flags);
2044 tap->wr_dbm_antsignal = (int8_t)rssi;
2045 tap->wr_dbm_antnoise = (int8_t)sc->noise;
2046 tap->wr_tsft = stat->tstamp;
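/*
 * NB: stat->rate apparently holds the raw hardware rate code: for CCK
 * it looks like the bit-rate in 100 kb/s units (e.g. 10 -> 1 Mb/s),
 * for OFDM a PLCP signal value (e.g. 0xd -> 6 Mb/s).  The switch
 * below converts it to the radiotap rate field, which is expressed
 * in 500 kb/s units.
 */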
2047 switch (stat->rate) {
2048 /* CCK rates. */
2049 case 10: tap->wr_rate = 2; break;
2050 case 20: tap->wr_rate = 4; break;
2051 case 55: tap->wr_rate = 11; break;
2052 case 110: tap->wr_rate = 22; break;
2053 /* OFDM rates. */
2054 case 0xd: tap->wr_rate = 12; break;
2055 case 0xf: tap->wr_rate = 18; break;
2056 case 0x5: tap->wr_rate = 24; break;
2057 case 0x7: tap->wr_rate = 36; break;
2058 case 0x9: tap->wr_rate = 48; break;
2059 case 0xb: tap->wr_rate = 72; break;
2060 case 0x1: tap->wr_rate = 96; break;
2061 case 0x3: tap->wr_rate = 108; break;
2062 /* Unknown rate: should not happen. */
2063 default: tap->wr_rate = 0;
2064 }
2065
2066 bpf_mtap2(sc->sc_drvbpf, tap, sc->sc_rxtap_len, m);
2067 }
2068
2069 /* Send the frame to the 802.11 layer. */
2070 ieee80211_input(ic, m, ni, rssi, 0);
2071
2072 /* Node is no longer needed. */
2073 ieee80211_free_node(ni);
2074 }
2075
2076 #ifndef IEEE80211_NO_HT
2077 /* Process an incoming Compressed BlockAck. */
2078 static void
2079 iwn_rx_compressed_ba(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2080 struct iwn_rx_data *data)
2081 {
2082 struct iwn_compressed_ba *ba = (struct iwn_compressed_ba *)(desc + 1);
2083 struct iwn_tx_ring *txq;
2084
2085 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), sizeof (*ba),
2086 BUS_DMASYNC_POSTREAD);
2087
2088 txq = &sc->txq[le16toh(ba->qid)];
2089 /* XXX TBD */
2090 }
2091 #endif
2092
2093 /*
2094 * Process a CALIBRATION_RESULT notification sent by the initialization
2095 * firmware in response to a CMD_CALIB_CONFIG command (5000 only).
2096 */
2097 static void
2098 iwn5000_rx_calib_results(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2099 struct iwn_rx_data *data)
2100 {
2101 struct iwn_phy_calib *calib = (struct iwn_phy_calib *)(desc + 1);
2102 int len, idx = -1;
2103
2104 /* Runtime firmware should not send such a notification. */
2105 if (sc->sc_flags & IWN_FLAG_CALIB_DONE)
2106 return;
2107
2108 len = (le32toh(desc->len) & 0x3fff) - 4;
2109 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), len,
2110 BUS_DMASYNC_POSTREAD);
2111
2112 switch (calib->code) {
2113 case IWN5000_PHY_CALIB_DC:
2114 if (sc->hw_type == IWN_HW_REV_TYPE_5150)
2115 idx = 0;
2116 break;
2117 case IWN5000_PHY_CALIB_LO:
2118 idx = 1;
2119 break;
2120 case IWN5000_PHY_CALIB_TX_IQ:
2121 idx = 2;
2122 break;
2123 case IWN5000_PHY_CALIB_TX_IQ_PERIODIC:
2124 if (sc->hw_type < IWN_HW_REV_TYPE_6000 &&
2125 sc->hw_type != IWN_HW_REV_TYPE_5150)
2126 idx = 3;
2127 break;
2128 case IWN5000_PHY_CALIB_BASE_BAND:
2129 idx = 4;
2130 break;
2131 }
2132 if (idx == -1) /* Ignore other results. */
2133 return;
2134
2135 /* Save calibration result. */
2136 if (sc->calibcmd[idx].buf != NULL)
2137 free(sc->calibcmd[idx].buf, M_DEVBUF);
2138 sc->calibcmd[idx].buf = malloc(len, M_DEVBUF, M_NOWAIT);
2139 if (sc->calibcmd[idx].buf == NULL) {
2140 DPRINTF(("not enough memory for calibration result %d\n",
2141 calib->code));
2142 return;
2143 }
2144 DPRINTF(("saving calibration result code=%d len=%d\n",
2145 calib->code, len));
2146 sc->calibcmd[idx].len = len;
2147 memcpy(sc->calibcmd[idx].buf, calib, len);
2148 }
2149
2150 /*
2151 * Process an RX_STATISTICS or BEACON_STATISTICS firmware notification.
2152 * The latter is sent by the firmware after each received beacon.
2153 */
2154 static void
2155 iwn_rx_statistics(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2156 struct iwn_rx_data *data)
2157 {
2158 struct iwn_ops *ops = &sc->ops;
2159 struct ieee80211com *ic = &sc->sc_ic;
2160 struct iwn_calib_state *calib = &sc->calib;
2161 struct iwn_stats *stats = (struct iwn_stats *)(desc + 1);
2162 int temp;
2163
2164 /* Ignore statistics received during a scan. */
2165 if (ic->ic_state != IEEE80211_S_RUN)
2166 return;
2167
2168 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc),
2169 sizeof (*stats), BUS_DMASYNC_POSTREAD);
2170
2171 DPRINTFN(3, ("received statistics (cmd=%d)\n", desc->type));
2172 sc->calib_cnt = 0; /* Reset TX power calibration timeout. */
2173
2174 /* Test if temperature has changed. */
2175 if (stats->general.temp != sc->rawtemp) {
2176 /* Convert "raw" temperature to degC. */
2177 sc->rawtemp = stats->general.temp;
2178 temp = ops->get_temperature(sc);
2179 DPRINTFN(2, ("temperature=%dC\n", temp));
2180
2181 /* Update TX power if need be (4965AGN only). */
2182 if (sc->hw_type == IWN_HW_REV_TYPE_4965)
2183 iwn4965_power_calibration(sc, temp);
2184 }
2185
2186 if (desc->type != IWN_BEACON_STATISTICS)
2187 return; /* Reply to a statistics request. */
2188
2189 sc->noise = iwn_get_noise(&stats->rx.general);
2190
2191 /* Test that RSSI and noise are present in stats report. */
2192 if (le32toh(stats->rx.general.flags) != 1) {
2193 DPRINTF(("received statistics without RSSI\n"));
2194 return;
2195 }
2196
2197 /*
2198 * XXX Differential gain calibration makes the 6005 firmware
2199 * crap out, so skip it for now. This effectively disables
2200 * sensitivity tuning as well.
2201 */
2202 if (sc->hw_type == IWN_HW_REV_TYPE_6005)
2203 return;
2204
2205 if (calib->state == IWN_CALIB_STATE_ASSOC)
2206 iwn_collect_noise(sc, &stats->rx.general);
2207 else if (calib->state == IWN_CALIB_STATE_RUN)
2208 iwn_tune_sensitivity(sc, &stats->rx);
2209 }
2210
2211 /*
2212 * Process a TX_DONE firmware notification. Unfortunately, the 4965AGN
2213 * and 5000 adapters use different, incompatible TX status formats.
2214 */
2215 static void
2216 iwn4965_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2217 struct iwn_rx_data *data)
2218 {
2219 struct iwn4965_tx_stat *stat = (struct iwn4965_tx_stat *)(desc + 1);
2220
2221 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc),
2222 sizeof (*stat), BUS_DMASYNC_POSTREAD);
2223 iwn_tx_done(sc, desc, stat->ackfailcnt, le32toh(stat->status) & 0xff);
2224 }
2225
2226 static void
2227 iwn5000_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2228 struct iwn_rx_data *data)
2229 {
2230 struct iwn5000_tx_stat *stat = (struct iwn5000_tx_stat *)(desc + 1);
2231
2232 #ifdef notyet
2233 /* Reset TX scheduler slot. */
2234 iwn5000_reset_sched(sc, desc->qid & 0xf, desc->idx);
2235 #endif
2236
2237 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc),
2238 sizeof (*stat), BUS_DMASYNC_POSTREAD);
2239 iwn_tx_done(sc, desc, stat->ackfailcnt, le16toh(stat->status) & 0xff);
2240 }
2241
2242 /*
2243 * Adapter-independent backend for TX_DONE firmware notifications.
2244 */
2245 static void
2246 iwn_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, int ackfailcnt,
2247 uint8_t status)
2248 {
2249 struct ieee80211com *ic = &sc->sc_ic;
2250 struct ifnet *ifp = ic->ic_ifp;
2251 struct iwn_tx_ring *ring = &sc->txq[desc->qid & 0xf];
2252 struct iwn_tx_data *data = &ring->data[desc->idx];
2253 struct iwn_node *wn = (struct iwn_node *)data->ni;
2254
2255 /* Update rate control statistics. */
2256 wn->amn.amn_txcnt++;
2257 if (ackfailcnt > 0)
2258 wn->amn.amn_retrycnt++;
2259
2260 if (status != 1 && status != 2)
2261 ifp->if_oerrors++;
2262 else
2263 ifp->if_opackets++;
2264
2265 /* Unmap and free mbuf. */
2266 bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize,
2267 BUS_DMASYNC_POSTWRITE);
2268 bus_dmamap_unload(sc->sc_dmat, data->map);
2269 m_freem(data->m);
2270 data->m = NULL;
2271 ieee80211_free_node(data->ni);
2272 data->ni = NULL;
2273
2274 sc->sc_tx_timer = 0;
2275 if (--ring->queued < IWN_TX_RING_LOMARK) {
2276 sc->qfullmsk &= ~(1 << ring->qid);
2277 if (sc->qfullmsk == 0 && (ifp->if_flags & IFF_OACTIVE)) {
2278 ifp->if_flags &= ~IFF_OACTIVE;
2279 (*ifp->if_start)(ifp);
2280 }
2281 }
2282 }
2283
2284 /*
2285 * Process a "command done" firmware notification. This is where we wake
2286 * processes waiting for a synchronous command completion.
2287 */
2288 static void
2289 iwn_cmd_done(struct iwn_softc *sc, struct iwn_rx_desc *desc)
2290 {
2291 struct iwn_tx_ring *ring = &sc->txq[4];
2292 struct iwn_tx_data *data;
2293
2294 if ((desc->qid & 0xf) != 4)
2295 return; /* Not a command ack. */
2296
2297 data = &ring->data[desc->idx];
2298
2299 /* If the command was mapped in an mbuf, free it. */
2300 if (data->m != NULL) {
2301 bus_dmamap_sync(sc->sc_dmat, data->map, 0,
2302 data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2303 bus_dmamap_unload(sc->sc_dmat, data->map);
2304 m_freem(data->m);
2305 data->m = NULL;
2306 }
2307 wakeup(&ring->desc[desc->idx]);
2308 }
2309
2310 /*
2311 * Process an INT_FH_RX or INT_SW_RX interrupt.
2312 */
2313 static void
2314 iwn_notif_intr(struct iwn_softc *sc)
2315 {
2316 struct iwn_ops *ops = &sc->ops;
2317 struct ieee80211com *ic = &sc->sc_ic;
2318 struct ifnet *ifp = ic->ic_ifp;
2319 uint16_t hw;
2320
2321 bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
2322 0, sc->rxq.stat_dma.size, BUS_DMASYNC_POSTREAD);
2323
2324 hw = le16toh(sc->rxq.stat->closed_count) & 0xfff;
2325 while (sc->rxq.cur != hw) {
2326 struct iwn_rx_data *data = &sc->rxq.data[sc->rxq.cur];
2327 struct iwn_rx_desc *desc;
2328
2329 bus_dmamap_sync(sc->sc_dmat, data->map, 0, sizeof (*desc),
2330 BUS_DMASYNC_POSTREAD);
2331 desc = mtod(data->m, struct iwn_rx_desc *);
2332
2333 DPRINTFN(4, ("notification qid=%d idx=%d flags=%x type=%d\n",
2334 desc->qid & 0xf, desc->idx, desc->flags, desc->type));
2335
2336 if (!(desc->qid & 0x80)) /* Reply to a command. */
2337 iwn_cmd_done(sc, desc);
2338
2339 switch (desc->type) {
2340 case IWN_RX_PHY:
2341 iwn_rx_phy(sc, desc, data);
2342 break;
2343
2344 case IWN_RX_DONE: /* 4965AGN only. */
2345 case IWN_MPDU_RX_DONE:
2346 /* An 802.11 frame has been received. */
2347 iwn_rx_done(sc, desc, data);
2348 break;
2349 #ifndef IEEE80211_NO_HT
2350 case IWN_RX_COMPRESSED_BA:
2351 /* A Compressed BlockAck has been received. */
2352 iwn_rx_compressed_ba(sc, desc, data);
2353 break;
2354 #endif
2355 case IWN_TX_DONE:
2356 /* An 802.11 frame has been transmitted. */
2357 ops->tx_done(sc, desc, data);
2358 break;
2359
2360 case IWN_RX_STATISTICS:
2361 case IWN_BEACON_STATISTICS:
2362 iwn_rx_statistics(sc, desc, data);
2363 break;
2364
2365 case IWN_BEACON_MISSED:
2366 {
2367 struct iwn_beacon_missed *miss =
2368 (struct iwn_beacon_missed *)(desc + 1);
2369
2370 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc),
2371 sizeof (*miss), BUS_DMASYNC_POSTREAD);
2372 /*
2373 * If more than 5 consecutive beacons are missed,
2374 * reinitialize the sensitivity state machine.
2375 */
2376 DPRINTF(("beacons missed %d/%d\n",
2377 le32toh(miss->consecutive), le32toh(miss->total)));
2378 if (ic->ic_state == IEEE80211_S_RUN &&
2379 le32toh(miss->consecutive) > 5)
2380 (void)iwn_init_sensitivity(sc);
2381 break;
2382 }
2383 case IWN_UC_READY:
2384 {
2385 struct iwn_ucode_info *uc =
2386 (struct iwn_ucode_info *)(desc + 1);
2387
2388 /* The microcontroller is ready. */
2389 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc),
2390 sizeof (*uc), BUS_DMASYNC_POSTREAD);
2391 DPRINTF(("microcode alive notification version=%d.%d "
2392 "subtype=%x alive=%x\n", uc->major, uc->minor,
2393 uc->subtype, le32toh(uc->valid)));
2394
2395 if (le32toh(uc->valid) != 1) {
2396 aprint_error_dev(sc->sc_dev,
2397 "microcontroller initialization "
2398 "failed\n");
2399 break;
2400 }
2401 if (uc->subtype == IWN_UCODE_INIT) {
2402 /* Save microcontroller report. */
2403 memcpy(&sc->ucode_info, uc, sizeof (*uc));
2404 }
2405 /* Save the address of the error log in SRAM. */
2406 sc->errptr = le32toh(uc->errptr);
2407 break;
2408 }
2409 case IWN_STATE_CHANGED:
2410 {
2411 uint32_t *status = (uint32_t *)(desc + 1);
2412
2413 /* Enabled/disabled notification. */
2414 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc),
2415 sizeof (*status), BUS_DMASYNC_POSTREAD);
2416 DPRINTF(("state changed to %x\n", le32toh(*status)));
2417
2418 if (le32toh(*status) & 1) {
2419 /* The hardware RF kill switch is engaged; the radio is off. */
2420 aprint_error_dev(sc->sc_dev,
2421 "Radio transmitter is off\n");
2422 /* Turn the interface down. */
2423 ifp->if_flags &= ~IFF_UP;
2424 iwn_stop(ifp, 1);
2425 return; /* No further processing. */
2426 }
2427 break;
2428 }
2429 case IWN_START_SCAN:
2430 {
2431 struct iwn_start_scan *scan =
2432 (struct iwn_start_scan *)(desc + 1);
2433
2434 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc),
2435 sizeof (*scan), BUS_DMASYNC_POSTREAD);
2436 DPRINTFN(2, ("scanning channel %d status %x\n",
2437 scan->chan, le32toh(scan->status)));
2438
2439 /* Fix current channel. */
2440 ic->ic_bss->ni_chan = &ic->ic_channels[scan->chan];
2441 break;
2442 }
2443 case IWN_STOP_SCAN:
2444 {
2445 struct iwn_stop_scan *scan =
2446 (struct iwn_stop_scan *)(desc + 1);
2447
2448 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc),
2449 sizeof (*scan), BUS_DMASYNC_POSTREAD);
2450 DPRINTF(("scan finished nchan=%d status=%d chan=%d\n",
2451 scan->nchan, scan->status, scan->chan));
2452
2453 if (scan->status == 1 && scan->chan <= 14 &&
2454 (sc->sc_flags & IWN_FLAG_HAS_5GHZ)) {
2455 /*
2456 * We just finished scanning 2GHz channels,
2457 * start scanning 5GHz ones.
2458 */
2459 if (iwn_scan(sc, IEEE80211_CHAN_5GHZ) == 0)
2460 break;
2461 }
2462 sc->sc_flags &= ~IWN_FLAG_SCANNING;
2463 ieee80211_end_scan(ic);
2464 break;
2465 }
2466 case IWN5000_CALIBRATION_RESULT:
2467 iwn5000_rx_calib_results(sc, desc, data);
2468 break;
2469
2470 case IWN5000_CALIBRATION_DONE:
2471 sc->sc_flags |= IWN_FLAG_CALIB_DONE;
2472 wakeup(sc);
2473 break;
2474 }
2475
2476 sc->rxq.cur = (sc->rxq.cur + 1) % IWN_RX_RING_COUNT;
2477 }
2478
2479 /* Tell the firmware what we have processed. */
2480 hw = (hw == 0) ? IWN_RX_RING_COUNT - 1 : hw - 1;
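/*
 * The write pointer is apparently required to stay 8-aligned, hence
 * the "& ~7" (see also iwn_wakeup_intr()).
 */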
2481 IWN_WRITE(sc, IWN_FH_RX_WPTR, hw & ~7);
2482 }
2483
2484 /*
2485 * Process an INT_WAKEUP interrupt raised when the microcontroller wakes up
2486 * from power-down sleep mode.
2487 */
2488 static void
2489 iwn_wakeup_intr(struct iwn_softc *sc)
2490 {
2491 int qid;
2492
2493 DPRINTF(("ucode wakeup from power-down sleep\n"));
2494
2495 /* Wakeup RX and TX rings. */
2496 IWN_WRITE(sc, IWN_FH_RX_WPTR, sc->rxq.cur & ~7);
2497 for (qid = 0; qid < sc->ntxqs; qid++) {
2498 struct iwn_tx_ring *ring = &sc->txq[qid];
2499 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | ring->cur);
2500 }
2501 }
2502
2503 /*
2504 * Dump the firmware error log when a firmware panic occurs. Although
2505 * we can't debug the firmware because it is neither open source nor free,
2506 * the log can help us identify certain classes of problems.
2507 */
2508 static void
2509 iwn_fatal_intr(struct iwn_softc *sc)
2510 {
2511 struct iwn_fw_dump dump;
2512 int i;
2513
2514 /* Force a complete recalibration on next init. */
2515 sc->sc_flags &= ~IWN_FLAG_CALIB_DONE;
2516
2517 /* Check that the error log address is valid. */
2518 if (sc->errptr < IWN_FW_DATA_BASE ||
2519 sc->errptr + sizeof (dump) >
2520 IWN_FW_DATA_BASE + sc->fw_data_maxsz) {
2521 aprint_error_dev(sc->sc_dev,
2522 "bad firmware error log address 0x%08x\n", sc->errptr);
2523 return;
2524 }
2525 if (iwn_nic_lock(sc) != 0) {
2526 aprint_error_dev(sc->sc_dev,
2527 "could not read firmware error log\n");
2528 return;
2529 }
2530 /* Read firmware error log from SRAM. */
2531 iwn_mem_read_region_4(sc, sc->errptr, (uint32_t *)&dump,
2532 sizeof (dump) / sizeof (uint32_t));
2533 iwn_nic_unlock(sc);
2534
2535 if (dump.valid == 0) {
2536 aprint_error_dev(sc->sc_dev,
2537 "firmware error log is empty\n");
2538 return;
2539 }
2540 aprint_error("firmware error log:\n");
2541 aprint_error(" error type = \"%s\" (0x%08X)\n",
2542 (dump.id < __arraycount(iwn_fw_errmsg)) ?
2543 iwn_fw_errmsg[dump.id] : "UNKNOWN",
2544 dump.id);
2545 aprint_error(" program counter = 0x%08X\n", dump.pc);
2546 aprint_error(" source line = 0x%08X\n", dump.src_line);
2547 aprint_error(" error data = 0x%08X%08X\n",
2548 dump.error_data[0], dump.error_data[1]);
2549 aprint_error(" branch link = 0x%08X%08X\n",
2550 dump.branch_link[0], dump.branch_link[1]);
2551 aprint_error(" interrupt link = 0x%08X%08X\n",
2552 dump.interrupt_link[0], dump.interrupt_link[1]);
2553 aprint_error(" time = %u\n", dump.time[0]);
2554
2555 /* Dump driver status (TX and RX rings) while we're here. */
2556 aprint_error("driver status:\n");
2557 for (i = 0; i < sc->ntxqs; i++) {
2558 struct iwn_tx_ring *ring = &sc->txq[i];
2559 aprint_error(" tx ring %2d: qid=%-2d cur=%-3d queued=%-3d\n",
2560 i, ring->qid, ring->cur, ring->queued);
2561 }
2562 aprint_error(" rx ring: cur=%d\n", sc->rxq.cur);
2563 aprint_error(" 802.11 state %d\n", sc->sc_ic.ic_state);
2564 }
2565
2566 static int
2567 iwn_intr(void *arg)
2568 {
2569 struct iwn_softc *sc = arg;
2570 struct ifnet *ifp = sc->sc_ic.ic_ifp;
2571 uint32_t r1, r2, tmp;
2572
2573 /* Disable interrupts. */
2574 IWN_WRITE(sc, IWN_INT_MASK, 0);
2575
2576 /* Read interrupts from ICT (fast) or from registers (slow). */
2577 if (sc->sc_flags & IWN_FLAG_USE_ICT) {
2578 tmp = 0;
2579 while (sc->ict[sc->ict_cur] != 0) {
2580 tmp |= sc->ict[sc->ict_cur];
2581 sc->ict[sc->ict_cur] = 0; /* Acknowledge. */
2582 sc->ict_cur = (sc->ict_cur + 1) % IWN_ICT_COUNT;
2583 }
2584 tmp = le32toh(tmp);
2585 if (tmp == 0xffffffff) /* Shouldn't happen. */
2586 tmp = 0;
2587 else if (tmp & 0xc0000) /* Workaround a HW bug. */
2588 tmp |= 0x8000;
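/*
 * The ICT entry appears to be a compressed copy of the interrupt
 * register: its low byte maps to bits 0-7 of IWN_INT and its second
 * byte to bits 24-31, which is what the expression below rebuilds.
 */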
2589 r1 = (tmp & 0xff00) << 16 | (tmp & 0xff);
2590 r2 = 0; /* Unused. */
2591 } else {
2592 r1 = IWN_READ(sc, IWN_INT);
2593 if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
2594 return 0; /* Hardware gone! */
2595 r2 = IWN_READ(sc, IWN_FH_INT);
2596 }
2597 if (r1 == 0 && r2 == 0) {
2598 if (ifp->if_flags & IFF_UP)
2599 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
2600 return 0; /* Interrupt not for us. */
2601 }
2602
2603 /* Acknowledge interrupts. */
2604 IWN_WRITE(sc, IWN_INT, r1);
2605 if (!(sc->sc_flags & IWN_FLAG_USE_ICT))
2606 IWN_WRITE(sc, IWN_FH_INT, r2);
2607
2608 if (r1 & IWN_INT_RF_TOGGLED) {
2609 tmp = IWN_READ(sc, IWN_GP_CNTRL);
2610 aprint_error_dev(sc->sc_dev,
2611 "RF switch: radio %s\n",
2612 (tmp & IWN_GP_CNTRL_RFKILL) ? "enabled" : "disabled");
2613 }
2614 if (r1 & IWN_INT_CT_REACHED) {
2615 aprint_error_dev(sc->sc_dev,
2616 "critical temperature reached!\n");
2617 }
2618 if (r1 & (IWN_INT_SW_ERR | IWN_INT_HW_ERR)) {
2619 aprint_error_dev(sc->sc_dev,
2620 "fatal firmware error\n");
2621 /* Dump firmware error log and stop. */
2622 iwn_fatal_intr(sc);
2623 ifp->if_flags &= ~IFF_UP;
2624 iwn_stop(ifp, 1);
2625 return 1;
2626 }
2627 if ((r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX | IWN_INT_RX_PERIODIC)) ||
2628 (r2 & IWN_FH_INT_RX)) {
2629 if (sc->sc_flags & IWN_FLAG_USE_ICT) {
2630 if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX))
2631 IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_RX);
2632 IWN_WRITE_1(sc, IWN_INT_PERIODIC,
2633 IWN_INT_PERIODIC_DIS);
2634 iwn_notif_intr(sc);
2635 if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX)) {
2636 IWN_WRITE_1(sc, IWN_INT_PERIODIC,
2637 IWN_INT_PERIODIC_ENA);
2638 }
2639 } else
2640 iwn_notif_intr(sc);
2641 }
2642
2643 if ((r1 & IWN_INT_FH_TX) || (r2 & IWN_FH_INT_TX)) {
2644 if (sc->sc_flags & IWN_FLAG_USE_ICT)
2645 IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_TX);
2646 wakeup(sc); /* FH DMA transfer completed. */
2647 }
2648
2649 if (r1 & IWN_INT_ALIVE)
2650 wakeup(sc); /* Firmware is alive. */
2651
2652 if (r1 & IWN_INT_WAKEUP)
2653 iwn_wakeup_intr(sc);
2654
2655 /* Re-enable interrupts. */
2656 if (ifp->if_flags & IFF_UP)
2657 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
2658
2659 return 1;
2660 }
2661
2662 /*
2663 * Update TX scheduler ring when transmitting an 802.11 frame (4965AGN and
2664 * 5000 adapters use a slightly different format).
2665 */
2666 static void
2667 iwn4965_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id,
2668 uint16_t len)
2669 {
2670 uint16_t *w = &sc->sched[qid * IWN4965_SCHED_COUNT + idx];
2671
2672 *w = htole16(len + 8);
2673 bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
2674 (char *)(void *)w - (char *)(void *)sc->sched_dma.vaddr,
2675 sizeof (uint16_t),
2676 BUS_DMASYNC_PREWRITE);
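/*
 * The first IWN_SCHED_WINSZ entries are mirrored past the end of the
 * ring, presumably so the scheduler can read a whole window without
 * wrapping; keep the mirror copy in sync.
 */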
2677 if (idx < IWN_SCHED_WINSZ) {
2678 *(w + IWN_TX_RING_COUNT) = *w;
2679 bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
2680 (char *)(void *)(w + IWN_TX_RING_COUNT) -
2681 (char *)(void *)sc->sched_dma.vaddr,
2682 sizeof (uint16_t), BUS_DMASYNC_PREWRITE);
2683 }
2684 }
2685
2686 static void
2687 iwn5000_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id,
2688 uint16_t len)
2689 {
2690 uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx];
2691
2692 *w = htole16(id << 12 | (len + 8));
2693 bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
2694 (char *)(void *)w - (char *)(void *)sc->sched_dma.vaddr,
2695 sizeof (uint16_t), BUS_DMASYNC_PREWRITE);
2696 if (idx < IWN_SCHED_WINSZ) {
2697 *(w + IWN_TX_RING_COUNT) = *w;
2698 bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
2699 (char *)(void *)(w + IWN_TX_RING_COUNT) -
2700 (char *)(void *)sc->sched_dma.vaddr,
2701 sizeof (uint16_t), BUS_DMASYNC_PREWRITE);
2702 }
2703 }
2704
2705 #ifdef notyet
2706 static void
2707 iwn5000_reset_sched(struct iwn_softc *sc, int qid, int idx)
2708 {
2709 uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx];
2710
2711 *w = (*w & htole16(0xf000)) | htole16(1);
2712 bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
2713 (char *)(void *)w - (char *)(void *)sc->sched_dma.vaddr,
2714 sizeof (uint16_t), BUS_DMASYNC_PREWRITE);
2715 if (idx < IWN_SCHED_WINSZ) {
2716 *(w + IWN_TX_RING_COUNT) = *w;
2717 bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
2718 (char *)(void *)(w + IWN_TX_RING_COUNT) -
2719 (char *)(void *)sc->sched_dma.vaddr,
2720 sizeof (uint16_t), BUS_DMASYNC_PREWRITE);
2721 }
2722 }
2723 #endif
2724
2725 static int
2726 iwn_tx(struct iwn_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
2727 {
2728 struct ieee80211com *ic = &sc->sc_ic;
2729 struct iwn_node *wn = (void *)ni;
2730 struct iwn_tx_ring *ring;
2731 struct iwn_tx_desc *desc;
2732 struct iwn_tx_data *data;
2733 struct iwn_tx_cmd *cmd;
2734 struct iwn_cmd_data *tx;
2735 const struct iwn_rate *rinfo;
2736 struct ieee80211_frame *wh;
2737 struct ieee80211_key *k = NULL;
2738 struct mbuf *m1;
2739 uint32_t flags;
2740 u_int hdrlen;
2741 bus_dma_segment_t *seg;
2742 uint8_t tid, ridx, txant, type;
2743 int i, totlen, error, pad;
2744
2745 const struct chanAccParams *cap;
2746 int noack;
2747 int hdrlen2;
2748
2749 wh = mtod(m, struct ieee80211_frame *);
2750 hdrlen = ieee80211_anyhdrsize(wh);
2751 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
2752
2753 hdrlen2 = (ieee80211_has_qos(wh)) ?
2754 sizeof (struct ieee80211_qosframe) :
2755 sizeof (struct ieee80211_frame);
2756
2757 if (hdrlen != hdrlen2)
2758 aprint_error_dev(sc->sc_dev, "hdrlen error (%d != %d)\n",
2759 hdrlen, hdrlen2);
2760
2761 /* XXX OpenBSD sets a different tid when using QoS */
2762 tid = 0;
2763 if (ieee80211_has_qos(wh)) {
2764 cap = &ic->ic_wme.wme_chanParams;
2765 noack = cap->cap_wmeParams[ac].wmep_noackPolicy;
2766 }
2767 else
2768 noack = 0;
2769
2770 ring = &sc->txq[ac];
2771 desc = &ring->desc[ring->cur];
2772 data = &ring->data[ring->cur];
2773
2774 /* Choose a TX rate index. */
2775 if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
2776 type != IEEE80211_FC0_TYPE_DATA) {
2777 ridx = (ic->ic_curmode == IEEE80211_MODE_11A) ?
2778 IWN_RIDX_OFDM6 : IWN_RIDX_CCK1;
2779 } else if (ic->ic_fixed_rate != -1) {
2780 ridx = sc->fixed_ridx;
2781 } else
2782 ridx = wn->ridx[ni->ni_txrate];
2783 rinfo = &iwn_rates[ridx];
2784
2785 /* Encrypt the frame if need be. */
2786 /*
2787 * XXX For now, NetBSD swaps the encryption and bpf sections
2788 * in order to match old code and other drivers. Tests with
2789 * tcpdump indicate that the order is irrelevant, however,
2790 * as bpf produces unencrypted data for both ordering choices.
2791 */
2792 if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
2793 k = ieee80211_crypto_encap(ic, ni, m);
2794 if (k == NULL) {
2795 m_freem(m);
2796 return ENOBUFS;
2797 }
2798 /* Packet header may have moved, reset our local pointer. */
2799 wh = mtod(m, struct ieee80211_frame *);
2800 }
2801 totlen = m->m_pkthdr.len;
2802
2803 if (sc->sc_drvbpf != NULL) {
2804 struct iwn_tx_radiotap_header *tap = &sc->sc_txtap;
2805
2806 tap->wt_flags = 0;
2807 tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
2808 tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
2809 tap->wt_rate = rinfo->rate;
2810 tap->wt_hwqueue = ac;
2811 if (wh->i_fc[1] & IEEE80211_FC1_WEP)
2812 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
2813
2814 bpf_mtap2(sc->sc_drvbpf, tap, sc->sc_txtap_len, m);
2815 }
2816
2817 /* Prepare TX firmware command. */
2818 cmd = &ring->cmd[ring->cur];
2819 cmd->code = IWN_CMD_TX_DATA;
2820 cmd->flags = 0;
2821 cmd->qid = ring->qid;
2822 cmd->idx = ring->cur;
2823
2824 tx = (struct iwn_cmd_data *)cmd->data;
2825 /* NB: No need to clear tx, all fields are reinitialized here. */
2826 tx->scratch = 0; /* clear "scratch" area */
2827
2828 flags = 0;
2829 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
2830 /* Unicast frame, check if an ACK is expected. */
2831 if (!noack)
2832 flags |= IWN_TX_NEED_ACK;
2833 }
2834
2835 #ifdef notyet
2836 /* XXX NetBSD does not define IEEE80211_FC0_SUBTYPE_BAR */
2837 if ((wh->i_fc[0] &
2838 (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) ==
2839 (IEEE80211_FC0_TYPE_CTL | IEEE80211_FC0_SUBTYPE_BAR))
2840 flags |= IWN_TX_IMM_BA; /* Cannot happen yet. */
2841 #endif
2842
2843 if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG)
2844 flags |= IWN_TX_MORE_FRAG; /* Cannot happen yet. */
2845
2846 /* Check if frame must be protected using RTS/CTS or CTS-to-self. */
2847 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
2848 /* NB: Group frames are sent using CCK in 802.11b/g. */
2849 if (totlen + IEEE80211_CRC_LEN > ic->ic_rtsthreshold) {
2850 flags |= IWN_TX_NEED_RTS;
2851 } else if ((ic->ic_flags & IEEE80211_F_USEPROT) &&
2852 ridx >= IWN_RIDX_OFDM6) {
2853 if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
2854 flags |= IWN_TX_NEED_CTS;
2855 else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS)
2856 flags |= IWN_TX_NEED_RTS;
2857 }
2858 if (flags & (IWN_TX_NEED_RTS | IWN_TX_NEED_CTS)) {
2859 if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
2860 /* 5000 autoselects RTS/CTS or CTS-to-self. */
2861 flags &= ~(IWN_TX_NEED_RTS | IWN_TX_NEED_CTS);
2862 flags |= IWN_TX_NEED_PROTECTION;
2863 } else
2864 flags |= IWN_TX_FULL_TXOP;
2865 }
2866 }
2867
2868 if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
2869 type != IEEE80211_FC0_TYPE_DATA)
2870 tx->id = sc->broadcast_id;
2871 else
2872 tx->id = wn->id;
2873
2874 if (type == IEEE80211_FC0_TYPE_MGT) {
2875 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
2876
2877 #ifndef IEEE80211_STA_ONLY
2878 /* Tell HW to set timestamp in probe responses. */
2879 /* XXX NetBSD rev 1.11 added probe requests here but */
2880 /* probe requests do not take timestamps (from Bergamini). */
2881 if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
2882 flags |= IWN_TX_INSERT_TSTAMP;
2883 #endif
2884 /* XXX NetBSD rev 1.11 and 1.20 added AUTH/DAUTH and RTS/CTS */
2885 /* changes here. These are not needed (from Bergamini). */
2886 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
2887 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
2888 tx->timeout = htole16(3);
2889 else
2890 tx->timeout = htole16(2);
2891 } else
2892 tx->timeout = htole16(0);
2893
2894 if (hdrlen & 3) {
2895 /* First segment length must be a multiple of 4. */
2896 flags |= IWN_TX_NEED_PADDING;
2897 pad = 4 - (hdrlen & 3);
2898 } else
2899 pad = 0;
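/*
 * Example: a QoS data frame has a 26-byte header, so hdrlen & 3 == 2
 * and pad == 2, making hdrlen + pad a multiple of 4 as required for
 * the first DMA segment (see the TX descriptor setup below).
 */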
2900
2901 tx->len = htole16(totlen);
2902 tx->tid = tid;
2903 tx->rts_ntries = 60;
2904 tx->data_ntries = 15;
2905 tx->lifetime = htole32(IWN_LIFETIME_INFINITE);
2906 tx->plcp = rinfo->plcp;
2907 tx->rflags = rinfo->flags;
2908 if (tx->id == sc->broadcast_id) {
2909 /* Group or management frame. */
2910 tx->linkq = 0;
2911 /* XXX Alternate between antenna A and B? */
2912 txant = IWN_LSB(sc->txchainmask);
2913 tx->rflags |= IWN_RFLAG_ANT(txant);
2914 } else {
2915 tx->linkq = ni->ni_rates.rs_nrates - ni->ni_txrate - 1;
2916 flags |= IWN_TX_LINKQ; /* enable MRR */
2917 }
2918 /* Set physical address of "scratch area". */
2919 tx->loaddr = htole32(IWN_LOADDR(data->scratch_paddr));
2920 tx->hiaddr = IWN_HIADDR(data->scratch_paddr);
2921
2922 /* Copy 802.11 header in TX command. */
2923 /* XXX NetBSD changed this in rev 1.20 */
2924 memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);
2925
2926 /* Trim 802.11 header. */
2927 m_adj(m, hdrlen);
2928 tx->security = 0;
2929 tx->flags = htole32(flags);
2930
2931 error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
2932 BUS_DMA_NOWAIT | BUS_DMA_WRITE);
2933 if (error != 0) {
2934 if (error != EFBIG) {
2935 aprint_error_dev(sc->sc_dev,
2936 "can't map mbuf (error %d)\n", error);
2937 m_freem(m);
2938 return error;
2939 }
2940 /* Too many DMA segments, linearize mbuf. */
2941 MGETHDR(m1, M_DONTWAIT, MT_DATA);
2942 if (m1 == NULL) {
2943 m_freem(m);
2944 return ENOBUFS;
2945 }
2946 if (m->m_pkthdr.len > MHLEN) {
2947 MCLGET(m1, M_DONTWAIT);
2948 if (!(m1->m_flags & M_EXT)) {
2949 m_freem(m);
2950 m_freem(m1);
2951 return ENOBUFS;
2952 }
2953 }
2954 m_copydata(m, 0, m->m_pkthdr.len, mtod(m1, void *));
2955 m1->m_pkthdr.len = m1->m_len = m->m_pkthdr.len;
2956 m_freem(m);
2957 m = m1;
2958
2959 error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
2960 BUS_DMA_NOWAIT | BUS_DMA_WRITE);
2961 if (error != 0) {
2962 aprint_error_dev(sc->sc_dev,
2963 "can't map mbuf (error %d)\n", error);
2964 m_freem(m);
2965 return error;
2966 }
2967 }
2968
2969 data->m = m;
2970 data->ni = ni;
2971
2972 DPRINTFN(4, ("sending data: qid=%d idx=%d len=%d nsegs=%d\n",
2973 ring->qid, ring->cur, m->m_pkthdr.len, data->map->dm_nsegs));
2974
2975 /* Fill TX descriptor. */
2976 desc->nsegs = 1 + data->map->dm_nsegs;
2977 /* First DMA segment is used by the TX command. */
2978 desc->segs[0].addr = htole32(IWN_LOADDR(data->cmd_paddr));
2979 desc->segs[0].len = htole16(IWN_HIADDR(data->cmd_paddr) |
2980 (4 + sizeof (*tx) + hdrlen + pad) << 4);
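/*
 * Each descriptor segment is a 32-bit low address plus a 16-bit word
 * that apparently packs the upper 4 address bits (IWN_HIADDR) in its
 * low nibble and the segment length in the remaining 12 bits, hence
 * the "<< 4".
 */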
2981 /* Other DMA segments are for data payload. */
2982 seg = data->map->dm_segs;
2983 for (i = 1; i <= data->map->dm_nsegs; i++) {
2984 desc->segs[i].addr = htole32(IWN_LOADDR(seg->ds_addr));
2985 desc->segs[i].len = htole16(IWN_HIADDR(seg->ds_addr) |
2986 seg->ds_len << 4);
2987 seg++;
2988 }
2989
2990 bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize,
2991 BUS_DMASYNC_PREWRITE);
2992 bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
2993 (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
2994 sizeof (*cmd), BUS_DMASYNC_PREWRITE);
2995 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
2996 (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
2997 sizeof (*desc), BUS_DMASYNC_PREWRITE);
2998
2999 #ifdef notyet
3000 /* Update TX scheduler. */
3001 ops->update_sched(sc, ring->qid, ring->cur, tx->id, totlen);
3002 #endif
3003
3004 /* Kick TX ring. */
3005 ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT;
3006 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3007
3008 /* Mark TX ring as full if we reach a certain threshold. */
3009 if (++ring->queued > IWN_TX_RING_HIMARK)
3010 sc->qfullmsk |= 1 << ring->qid;
3011
3012 return 0;
3013 }
3014
3015 static void
3016 iwn_start(struct ifnet *ifp)
3017 {
3018 struct iwn_softc *sc = ifp->if_softc;
3019 struct ieee80211com *ic = &sc->sc_ic;
3020 struct ieee80211_node *ni;
3021 struct ether_header *eh;
3022 struct mbuf *m;
3023 int ac;
3024
3025 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
3026 return;
3027
3028 for (;;) {
3029 if (sc->qfullmsk != 0) {
3030 ifp->if_flags |= IFF_OACTIVE;
3031 break;
3032 }
3033 /* Send pending management frames first. */
3034 IF_DEQUEUE(&ic->ic_mgtq, m);
3035 if (m != NULL) {
3036 ni = (void *)m->m_pkthdr.rcvif;
3037 ac = 0;
3038 goto sendit;
3039 }
3040 if (ic->ic_state != IEEE80211_S_RUN)
3041 break;
3042
3043 /* Encapsulate and send data frames. */
3044 IFQ_DEQUEUE(&ifp->if_snd, m);
3045 if (m == NULL)
3046 break;
3047 if (m->m_len < sizeof (*eh) &&
3048 (m = m_pullup(m, sizeof (*eh))) == NULL) {
3049 ifp->if_oerrors++;
3050 continue;
3051 }
3052 eh = mtod(m, struct ether_header *);
3053 ni = ieee80211_find_txnode(ic, eh->ether_dhost);
3054 if (ni == NULL) {
3055 m_freem(m);
3056 ifp->if_oerrors++;
3057 continue;
3058 }
3059 /* classify mbuf so we can find which tx ring to use */
3060 if (ieee80211_classify(ic, m, ni) != 0) {
3061 m_freem(m);
3062 ieee80211_free_node(ni);
3063 ifp->if_oerrors++;
3064 continue;
3065 }
3066
3067 /* No QoS encapsulation for EAPOL frames. */
3068 ac = (eh->ether_type != htons(ETHERTYPE_PAE)) ?
3069 M_WME_GETAC(m) : WME_AC_BE;
3070
3071 bpf_mtap(ifp, m);
3072
3073 if ((m = ieee80211_encap(ic, m, ni)) == NULL) {
3074 ieee80211_free_node(ni);
3075 ifp->if_oerrors++;
3076 continue;
3077 }
3078 sendit:
3079 bpf_mtap3(ic->ic_rawbpf, m);
3080
3081 if (iwn_tx(sc, m, ni, ac) != 0) {
3082 ieee80211_free_node(ni);
3083 ifp->if_oerrors++;
3084 continue;
3085 }
3086
3087 sc->sc_tx_timer = 5;
3088 ifp->if_timer = 1;
3089 }
3090 }
3091
3092 static void
3093 iwn_watchdog(struct ifnet *ifp)
3094 {
3095 struct iwn_softc *sc = ifp->if_softc;
3096
3097 ifp->if_timer = 0;
3098
3099 if (sc->sc_tx_timer > 0) {
3100 if (--sc->sc_tx_timer == 0) {
3101 aprint_error_dev(sc->sc_dev,
3102 "device timeout\n");
3103 ifp->if_flags &= ~IFF_UP;
3104 iwn_stop(ifp, 1);
3105 ifp->if_oerrors++;
3106 return;
3107 }
3108 ifp->if_timer = 1;
3109 }
3110
3111 ieee80211_watchdog(&sc->sc_ic);
3112 }
3113
3114 static int
3115 iwn_ioctl(struct ifnet *ifp, u_long cmd, void *data)
3116 {
3117 struct iwn_softc *sc = ifp->if_softc;
3118 struct ieee80211com *ic = &sc->sc_ic;
3119 const struct sockaddr *sa;
3120 int s, error = 0;
3121
3122 s = splnet();
3123
3124 switch (cmd) {
3125 case SIOCSIFADDR:
3126 ifp->if_flags |= IFF_UP;
3127 #ifdef INET
3128 struct ifaddr *ifa = (struct ifaddr *)data;
3129 if (ifa->ifa_addr->sa_family == AF_INET)
3130 arp_ifinit(&ic->ic_ac, ifa);
3131 #endif
3132 /* FALLTHROUGH */
3133 case SIOCSIFFLAGS:
3134 /* XXX Added as it is in every NetBSD driver */
3135 if ((error = ifioctl_common(ifp, cmd, data)) != 0)
3136 break;
3137 if (ifp->if_flags & IFF_UP) {
3138 if (!(ifp->if_flags & IFF_RUNNING))
3139 error = iwn_init(ifp);
3140 } else {
3141 if (ifp->if_flags & IFF_RUNNING)
3142 iwn_stop(ifp, 1);
3143 }
3144 break;
3145
3146 case SIOCADDMULTI:
3147 case SIOCDELMULTI:
3148 sa = ifreq_getaddr(SIOCADDMULTI, (struct ifreq *)data);
3149 error = (cmd == SIOCADDMULTI) ?
3150 ether_addmulti(sa, &sc->sc_ec) :
3151 ether_delmulti(sa, &sc->sc_ec);
3152
3153 if (error == ENETRESET)
3154 error = 0;
3155 break;
3156
3157 default:
3158 error = ieee80211_ioctl(ic, cmd, data);
3159 }
3160
3161 if (error == ENETRESET) {
3162 error = 0;
3163 if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
3164 (IFF_UP | IFF_RUNNING)) {
3165 iwn_stop(ifp, 0);
3166 error = iwn_init(ifp);
3167 }
3168 }
3169
3170 splx(s);
3171 return error;
3172 }
3173
3174 /*
3175 * Send a command to the firmware.
3176 */
3177 static int
3178 iwn_cmd(struct iwn_softc *sc, int code, const void *buf, int size, int async)
3179 {
3180 struct iwn_tx_ring *ring = &sc->txq[4];
3181 struct iwn_tx_desc *desc;
3182 struct iwn_tx_data *data;
3183 struct iwn_tx_cmd *cmd;
3184 struct mbuf *m;
3185 bus_addr_t paddr;
3186 int totlen, error;
3187
3188 desc = &ring->desc[ring->cur];
3189 data = &ring->data[ring->cur];
3190 totlen = 4 + size;
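/*
 * The extra 4 bytes account for the command header (code, flags,
 * qid, idx) that precedes the payload copied into cmd->data below.
 */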
3191
3192 if (size > sizeof cmd->data) {
3193 /* Command is too large to fit in a descriptor. */
3194 if (totlen > MCLBYTES)
3195 return EINVAL;
3196 MGETHDR(m, M_DONTWAIT, MT_DATA);
3197 if (m == NULL)
3198 return ENOMEM;
3199 if (totlen > MHLEN) {
3200 MCLGET(m, M_DONTWAIT);
3201 if (!(m->m_flags & M_EXT)) {
3202 m_freem(m);
3203 return ENOMEM;
3204 }
3205 }
3206 cmd = mtod(m, struct iwn_tx_cmd *);
3207 error = bus_dmamap_load(sc->sc_dmat, data->map, cmd, totlen,
3208 NULL, BUS_DMA_NOWAIT | BUS_DMA_WRITE);
3209 if (error != 0) {
3210 m_freem(m);
3211 return error;
3212 }
3213 data->m = m;
3214 paddr = data->map->dm_segs[0].ds_addr;
3215 } else {
3216 cmd = &ring->cmd[ring->cur];
3217 paddr = data->cmd_paddr;
3218 }
3219
3220 cmd->code = code;
3221 cmd->flags = 0;
3222 cmd->qid = ring->qid;
3223 cmd->idx = ring->cur;
3224 memcpy(cmd->data, buf, size);
3225
3226 desc->nsegs = 1;
3227 desc->segs[0].addr = htole32(IWN_LOADDR(paddr));
3228 desc->segs[0].len = htole16(IWN_HIADDR(paddr) | totlen << 4);
3229
3230 if (size > sizeof cmd->data) {
3231 bus_dmamap_sync(sc->sc_dmat, data->map, 0, totlen,
3232 BUS_DMASYNC_PREWRITE);
3233 } else {
3234 bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
3235 (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
3236 totlen, BUS_DMASYNC_PREWRITE);
3237 }
3238 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
3239 (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
3240 sizeof (*desc), BUS_DMASYNC_PREWRITE);
3241
3242 #ifdef notyet
3243 /* Update TX scheduler. */
3244 ops->update_sched(sc, ring->qid, ring->cur, 0, 0);
3245 #endif
3246 DPRINTFN(4, ("iwn_cmd %d size=%d%s\n", code, size, async ? " (async)" : ""));
3247
3248 /* Kick command ring. */
3249 ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT;
3250 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3251
3252 return async ? 0 : tsleep(desc, PCATCH, "iwncmd", hz);
3253 }
3254
3255 static int
3256 iwn4965_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async)
3257 {
3258 struct iwn4965_node_info hnode;
3259 char *src, *dst;
3260
3261 /*
3262 * We use the node structure for 5000 Series internally (it is
3263 * a superset of the one for 4965AGN). We thus copy the common
3264 * fields before sending the command.
3265 */
3266 src = (char *)node;
3267 dst = (char *)&hnode;
3268 memcpy(dst, src, 48);
3269 /* Skip TSC, RX MIC and TX MIC fields from ``src''. */
3270 memcpy(dst + 48, src + 72, 20);
3271 return iwn_cmd(sc, IWN_CMD_ADD_NODE, &hnode, sizeof hnode, async);
3272 }
3273
3274 static int
3275 iwn5000_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async)
3276 {
3277 /* Direct mapping. */
3278 return iwn_cmd(sc, IWN_CMD_ADD_NODE, node, sizeof (*node), async);
3279 }
3280
3281 static int
3282 iwn_set_link_quality(struct iwn_softc *sc, struct ieee80211_node *ni)
3283 {
3284 struct iwn_node *wn = (void *)ni;
3285 struct ieee80211_rateset *rs = &ni->ni_rates;
3286 struct iwn_cmd_link_quality linkq;
3287 const struct iwn_rate *rinfo;
3288 uint8_t txant;
3289 int i, txrate;
3290
3291 /* Use the first valid TX antenna. */
3292 txant = IWN_LSB(sc->txchainmask);
3293
3294 memset(&linkq, 0, sizeof linkq);
3295 linkq.id = wn->id;
3296 linkq.antmsk_1stream = txant;
3297 linkq.antmsk_2stream = IWN_ANT_AB;
3298 linkq.ampdu_max = 31;
3299 linkq.ampdu_threshold = 3;
3300 linkq.ampdu_limit = htole16(4000); /* 4ms */
3301
3302 /* Start at highest available bit-rate. */
3303 txrate = rs->rs_nrates - 1;
3304 for (i = 0; i < IWN_MAX_TX_RETRIES; i++) {
3305 rinfo = &iwn_rates[wn->ridx[txrate]];
3306 linkq.retry[i].plcp = rinfo->plcp;
3307 linkq.retry[i].rflags = rinfo->flags;
3308 linkq.retry[i].rflags |= IWN_RFLAG_ANT(txant);
3309 /* Next retry at the immediately lower bit-rate. */
3310 if (txrate > 0)
3311 txrate--;
3312 }
3313 return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, 1);
3314 }
3315
3316 /*
3317 * Broadcast node is used to send group-addressed and management frames.
3318 */
3319 static int
3320 iwn_add_broadcast_node(struct iwn_softc *sc, int async)
3321 {
3322 struct iwn_ops *ops = &sc->ops;
3323 struct iwn_node_info node;
3324 struct iwn_cmd_link_quality linkq;
3325 const struct iwn_rate *rinfo;
3326 uint8_t txant;
3327 int i, error;
3328
3329 memset(&node, 0, sizeof node);
3330 IEEE80211_ADDR_COPY(node.macaddr, etherbroadcastaddr);
3331 node.id = sc->broadcast_id;
3332 DPRINTF(("adding broadcast node\n"));
3333 if ((error = ops->add_node(sc, &node, async)) != 0)
3334 return error;
3335
3336 /* Use the first valid TX antenna. */
3337 txant = IWN_LSB(sc->txchainmask);
3338
3339 memset(&linkq, 0, sizeof linkq);
3340 linkq.id = sc->broadcast_id;
3341 linkq.antmsk_1stream = txant;
3342 linkq.antmsk_2stream = IWN_ANT_AB;
3343 linkq.ampdu_max = 64;
3344 linkq.ampdu_threshold = 3;
3345 linkq.ampdu_limit = htole16(4000); /* 4ms */
3346
3347 /* Use lowest mandatory bit-rate. */
3348 rinfo = (sc->sc_ic.ic_curmode != IEEE80211_MODE_11A) ?
3349 &iwn_rates[IWN_RIDX_CCK1] : &iwn_rates[IWN_RIDX_OFDM6];
3350 linkq.retry[0].plcp = rinfo->plcp;
3351 linkq.retry[0].rflags = rinfo->flags;
3352 linkq.retry[0].rflags |= IWN_RFLAG_ANT(txant);
3353 /* Use same bit-rate for all TX retries. */
3354 for (i = 1; i < IWN_MAX_TX_RETRIES; i++) {
3355 linkq.retry[i].plcp = linkq.retry[0].plcp;
3356 linkq.retry[i].rflags = linkq.retry[0].rflags;
3357 }
3358 return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, async);
3359 }
3360
3361 static void
3362 iwn_set_led(struct iwn_softc *sc, uint8_t which, uint8_t off, uint8_t on)
3363 {
3364 struct iwn_cmd_led led;
3365
3366 /* Clear microcode LED ownership. */
3367 IWN_CLRBITS(sc, IWN_LED, IWN_LED_BSM_CTRL);
3368
3369 led.which = which;
3370 led.unit = htole32(10000); /* on/off in unit of 100ms */
3371 led.off = off;
3372 led.on = on;
3373 (void)iwn_cmd(sc, IWN_CMD_SET_LED, &led, sizeof led, 1);
3374 }
3375
3376 /*
3377 * Set the critical temperature at which the firmware will stop the radio
3378 * and notify us.
3379 */
3380 static int
3381 iwn_set_critical_temp(struct iwn_softc *sc)
3382 {
3383 struct iwn_critical_temp crit;
3384 int32_t temp;
3385
3386 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CTEMP_STOP_RF);
3387
3388 if (sc->hw_type == IWN_HW_REV_TYPE_5150)
3389 temp = (IWN_CTOK(110) - sc->temp_off) * -5;
3390 else if (sc->hw_type == IWN_HW_REV_TYPE_4965)
3391 temp = IWN_CTOK(110);
3392 else
3393 temp = 110;
3394 memset(&crit, 0, sizeof crit);
3395 crit.tempR = htole32(temp);
3396 DPRINTF(("setting critical temperature to %d\n", temp));
3397 return iwn_cmd(sc, IWN_CMD_SET_CRITICAL_TEMP, &crit, sizeof crit, 0);
3398 }
3399
3400 static int
3401 iwn_set_timing(struct iwn_softc *sc, struct ieee80211_node *ni)
3402 {
3403 struct iwn_cmd_timing cmd;
3404 uint64_t val, mod;
3405
3406 memset(&cmd, 0, sizeof cmd);
3407 memcpy(&cmd.tstamp, ni->ni_tstamp.data, sizeof (uint64_t));
3408 cmd.bintval = htole16(ni->ni_intval);
3409 cmd.lintval = htole16(10);
3410
3411 /* Compute remaining time until next beacon. */
3412 val = (uint64_t)ni->ni_intval * 1024; /* TU -> usecs */
3413 mod = le64toh(cmd.tstamp) % val;
3414 cmd.binitval = htole32((uint32_t)(val - mod));
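/*
 * Example with made-up numbers: ni_intval = 100 gives val = 102400
 * usecs; if the last beacon timestamp is 250000 usecs, then
 * mod = 45200 and binitval = 102400 - 45200 = 57200 usecs until the
 * next beacon.
 */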
3415
3416 DPRINTF(("timing bintval=%u, tstamp=%" PRIu64 ", init=%" PRIu32 "\n",
3417 ni->ni_intval, le64toh(cmd.tstamp), (uint32_t)(val - mod)));
3418
3419 return iwn_cmd(sc, IWN_CMD_TIMING, &cmd, sizeof cmd, 1);
3420 }
3421
3422 static void
3423 iwn4965_power_calibration(struct iwn_softc *sc, int temp)
3424 {
3425 /* Adjust TX power if need be (delta >= 3 degC). */
3426 DPRINTF(("temperature %d->%d\n", sc->temp, temp));
3427 if (abs(temp - sc->temp) >= 3) {
3428 /* Record temperature of last calibration. */
3429 sc->temp = temp;
3430 (void)iwn4965_set_txpower(sc, 1);
3431 }
3432 }
3433
3434 /*
3435 * Set TX power for current channel (each rate has its own power settings).
3436 * This function takes into account the regulatory information from EEPROM,
3437 * the current temperature and the current voltage.
3438 */
3439 static int
3440 iwn4965_set_txpower(struct iwn_softc *sc, int async)
3441 {
3442 /* Fixed-point arithmetic division using an n-bit fractional part. */
3443 #define fdivround(a, b, n) \
3444 ((((1 << n) * (a)) / (b) + (1 << n) / 2) / (1 << n))
3445 /* Linear interpolation. */
3446 #define interpolate(x, x1, y1, x2, y2, n) \
3447 ((y1) + fdivround(((int)(x) - (x1)) * ((y2) - (y1)), (x2) - (x1), n))
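/*
 * fdivround() divides and rounds to nearest using n fractional bits,
 * e.g. fdivround(80, 8, 1) == ((160 / 8) + 1) / 2 == 10.
 * interpolate() is then plain linear interpolation, e.g.
 * interpolate(40, 36, 100, 44, 120, 1) == 100 + fdivround(80, 8, 1)
 * == 110, i.e. halfway between the two sample points.
 */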
3448
3449 static const int tdiv[IWN_NATTEN_GROUPS] = { 9, 8, 8, 8, 6 };
3450 struct ieee80211com *ic = &sc->sc_ic;
3451 struct iwn_ucode_info *uc = &sc->ucode_info;
3452 struct ieee80211_channel *ch;
3453 struct iwn4965_cmd_txpower cmd;
3454 struct iwn4965_eeprom_chan_samples *chans;
3455 const uint8_t *rf_gain, *dsp_gain;
3456 int32_t vdiff, tdiff;
3457 int i, c, grp, maxpwr;
3458 uint8_t chan;
3459
3460 /* Retrieve current channel from last RXON. */
3461 chan = sc->rxon.chan;
3462 DPRINTF(("setting TX power for channel %d\n", chan));
3463 ch = &ic->ic_channels[chan];
3464
3465 memset(&cmd, 0, sizeof cmd);
3466 cmd.band = IEEE80211_IS_CHAN_5GHZ(ch) ? 0 : 1;
3467 cmd.chan = chan;
3468
3469 if (IEEE80211_IS_CHAN_5GHZ(ch)) {
3470 maxpwr = sc->maxpwr5GHz;
3471 rf_gain = iwn4965_rf_gain_5ghz;
3472 dsp_gain = iwn4965_dsp_gain_5ghz;
3473 } else {
3474 maxpwr = sc->maxpwr2GHz;
3475 rf_gain = iwn4965_rf_gain_2ghz;
3476 dsp_gain = iwn4965_dsp_gain_2ghz;
3477 }
3478
3479 /* Compute voltage compensation. */
3480 vdiff = ((int32_t)le32toh(uc->volt) - sc->eeprom_voltage) / 7;
3481 if (vdiff > 0)
3482 vdiff *= 2;
3483 if (abs(vdiff) > 2)
3484 vdiff = 0;
3485 DPRINTF(("voltage compensation=%d (UCODE=%d, EEPROM=%d)\n",
3486 vdiff, le32toh(uc->volt), sc->eeprom_voltage));
3487
3488 /* Get channel attenuation group. */
3489 if (chan <= 20) /* 1-20 */
3490 grp = 4;
3491 else if (chan <= 43) /* 34-43 */
3492 grp = 0;
3493 else if (chan <= 70) /* 44-70 */
3494 grp = 1;
3495 else if (chan <= 124) /* 71-124 */
3496 grp = 2;
3497 else /* 125-200 */
3498 grp = 3;
3499 DPRINTF(("chan %d, attenuation group=%d\n", chan, grp));
3500
3501 /* Get channel sub-band. */
3502 for (i = 0; i < IWN_NBANDS; i++)
3503 if (sc->bands[i].lo != 0 &&
3504 sc->bands[i].lo <= chan && chan <= sc->bands[i].hi)
3505 break;
3506 if (i == IWN_NBANDS) /* Can't happen in real life. */
3507 return EINVAL;
3508 chans = sc->bands[i].chans;
3509 DPRINTF(("chan %d sub-band=%d\n", chan, i));
3510
3511 for (c = 0; c < 2; c++) {
3512 uint8_t power, gain, temp;
3513 int maxchpwr, pwr, ridx, idx;
3514
3515 power = interpolate(chan,
3516 chans[0].num, chans[0].samples[c][1].power,
3517 chans[1].num, chans[1].samples[c][1].power, 1);
3518 gain = interpolate(chan,
3519 chans[0].num, chans[0].samples[c][1].gain,
3520 chans[1].num, chans[1].samples[c][1].gain, 1);
3521 temp = interpolate(chan,
3522 chans[0].num, chans[0].samples[c][1].temp,
3523 chans[1].num, chans[1].samples[c][1].temp, 1);
3524 DPRINTF(("TX chain %d: power=%d gain=%d temp=%d\n",
3525 c, power, gain, temp));
3526
3527 /* Compute temperature compensation. */
3528 tdiff = ((sc->temp - temp) * 2) / tdiv[grp];
3529 DPRINTF(("temperature compensation=%d (current=%d, "
3530 "EEPROM=%d)\n", tdiff, sc->temp, temp));
3531
3532 for (ridx = 0; ridx <= IWN_RIDX_MAX; ridx++) {
3533 /* Convert dBm to half-dBm. */
3534 maxchpwr = sc->maxpwr[chan] * 2;
3535 if ((ridx / 8) & 1)
3536 maxchpwr -= 6; /* MIMO 2T: -3dB */
3537
3538 pwr = maxpwr;
3539
3540 /* Adjust TX power based on rate. */
3541 if ((ridx % 8) == 5)
3542 pwr -= 15; /* OFDM48: -7.5dB */
3543 else if ((ridx % 8) == 6)
3544 pwr -= 17; /* OFDM54: -8.5dB */
3545 else if ((ridx % 8) == 7)
3546 pwr -= 20; /* OFDM60: -10dB */
3547 else
3548 pwr -= 10; /* Others: -5dB */
3549
3550 /* Do not exceed channel max TX power. */
3551 if (pwr > maxchpwr)
3552 pwr = maxchpwr;
3553
3554 idx = gain - (pwr - power) - tdiff - vdiff;
3555 if ((ridx / 8) & 1) /* MIMO */
3556 idx += (int32_t)le32toh(uc->atten[grp][c]);
3557
3558 if (cmd.band == 0)
3559 idx += 9; /* 5GHz */
3560 if (ridx == IWN_RIDX_MAX)
3561 idx += 5; /* CCK */
3562
3563 /* Make sure idx stays in a valid range. */
3564 if (idx < 0)
3565 idx = 0;
3566 else if (idx > IWN4965_MAX_PWR_INDEX)
3567 idx = IWN4965_MAX_PWR_INDEX;
3568
3569 DPRINTF(("TX chain %d, rate idx %d: power=%d\n",
3570 c, ridx, idx));
3571 cmd.power[ridx].rf_gain[c] = rf_gain[idx];
3572 cmd.power[ridx].dsp_gain[c] = dsp_gain[idx];
3573 }
3574 }
3575
3576 DPRINTF(("setting TX power for chan %d\n", chan));
3577 return iwn_cmd(sc, IWN_CMD_TXPOWER, &cmd, sizeof cmd, async);
3578
3579 #undef interpolate
3580 #undef fdivround
3581 }
3582
3583 static int
3584 iwn5000_set_txpower(struct iwn_softc *sc, int async)
3585 {
3586 struct iwn5000_cmd_txpower cmd;
3587
3588 /*
3589 * TX power calibration is handled automatically by the firmware
3590 * for 5000 Series.
3591 */
3592 memset(&cmd, 0, sizeof cmd);
3593 cmd.global_limit = 2 * IWN5000_TXPOWER_MAX_DBM; /* 16 dBm */
3594 cmd.flags = IWN5000_TXPOWER_NO_CLOSED;
3595 cmd.srv_limit = IWN5000_TXPOWER_AUTO;
3596 DPRINTF(("setting TX power\n"));
3597 return iwn_cmd(sc, IWN_CMD_TXPOWER_DBM, &cmd, sizeof cmd, async);
3598 }
3599
3600 /*
3601 * Retrieve the maximum RSSI (in dBm) among receivers.
3602 */
3603 static int
3604 iwn4965_get_rssi(const struct iwn_rx_stat *stat)
3605 {
3606 const struct iwn4965_rx_phystat *phy = (const void *)stat->phybuf;
3607 uint8_t mask, agc;
3608 int rssi;
3609
3610 mask = (le16toh(phy->antenna) >> 4) & IWN_ANT_ABC;
3611 agc = (le16toh(phy->agc) >> 7) & 0x7f;
3612
3613 rssi = 0;
3614 if (mask & IWN_ANT_A)
3615 rssi = MAX(rssi, phy->rssi[0]);
3616 if (mask & IWN_ANT_B)
3617 rssi = MAX(rssi, phy->rssi[2]);
3618 if (mask & IWN_ANT_C)
3619 rssi = MAX(rssi, phy->rssi[4]);
3620
3621 return rssi - agc - IWN_RSSI_TO_DBM;
3622 }
3623
3624 static int
3625 iwn5000_get_rssi(const struct iwn_rx_stat *stat)
3626 {
3627 const struct iwn5000_rx_phystat *phy = (const void *)stat->phybuf;
3628 uint8_t agc;
3629 int rssi;
3630
3631 agc = (le32toh(phy->agc) >> 9) & 0x7f;
3632
3633 rssi = MAX(le16toh(phy->rssi[0]) & 0xff,
3634 le16toh(phy->rssi[1]) & 0xff);
3635 rssi = MAX(le16toh(phy->rssi[2]) & 0xff, rssi);
3636
3637 return rssi - agc - IWN_RSSI_TO_DBM;
3638 }
3639
3640 /*
3641 * Retrieve the average noise (in dBm) among receivers.
3642 */
3643 static int
3644 iwn_get_noise(const struct iwn_rx_general_stats *stats)
3645 {
3646 int i, total, nbant, noise;
3647
3648 total = nbant = 0;
3649 for (i = 0; i < 3; i++) {
3650 if ((noise = le32toh(stats->noise[i]) & 0xff) == 0)
3651 continue;
3652 total += noise;
3653 nbant++;
3654 }
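	/*
	 * The noise figure is reported as a positive offset; subtracting
	 * 107 appears to convert it to dBm, and -127 serves as a floor
	 * value when no antenna reported anything.
	 */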
3655 /* There should be at least one antenna but check anyway. */
3656 return (nbant == 0) ? -127 : (total / nbant) - 107;
3657 }
3658
3659 /*
3660 * Compute temperature (in degC) from last received statistics.
3661 */
3662 static int
3663 iwn4965_get_temperature(struct iwn_softc *sc)
3664 {
3665 struct iwn_ucode_info *uc = &sc->ucode_info;
3666 int32_t r1, r2, r3, r4, temp;
3667
3668 r1 = le32toh(uc->temp[0].chan20MHz);
3669 r2 = le32toh(uc->temp[1].chan20MHz);
3670 r3 = le32toh(uc->temp[2].chan20MHz);
3671 r4 = le32toh(sc->rawtemp);
3672
3673 if (r1 == r3) /* Prevents division by 0 (should not happen). */
3674 return 0;
3675
3676 	/* Sign-extend 24-bit R4 value to 32-bit. */
3677 r4 = ((r4 & 0xffffff) ^ 0x800000) - 0x800000;
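	/*
	 * The raw reading r4 is scaled against the spread of the uCode
	 * calibration references (r3 - r1) and offset by r2, with 259
	 * apparently being the Kelvin scale factor; the final * 97 / 100
	 * and + 8 look like an empirical correction.
	 */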
3678 /* Compute temperature in Kelvin. */
3679 temp = (259 * (r4 - r2)) / (r3 - r1);
3680 temp = (temp * 97) / 100 + 8;
3681
3682 DPRINTF(("temperature %dK/%dC\n", temp, IWN_KTOC(temp)));
3683 return IWN_KTOC(temp);
3684 }
3685
3686 static int
3687 iwn5000_get_temperature(struct iwn_softc *sc)
3688 {
3689 int32_t temp;
3690
3691 /*
3692 * Temperature is not used by the driver for 5000 Series because
3693 * TX power calibration is handled by firmware. We export it to
3694 * users through the sensor framework though.
3695 */
3696 temp = le32toh(sc->rawtemp);
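	/*
	 * On the 5150 the raw value must be scaled by -1/5 and offset by
	 * the EEPROM-derived temp_off to obtain Kelvin before converting
	 * to Celsius; other chips appear to report a directly usable value.
	 */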
3697 if (sc->hw_type == IWN_HW_REV_TYPE_5150) {
3698 temp = (temp / -5) + sc->temp_off;
3699 temp = IWN_KTOC(temp);
3700 }
3701 return temp;
3702 }
3703
3704 /*
3705 * Initialize sensitivity calibration state machine.
3706 */
3707 static int
3708 iwn_init_sensitivity(struct iwn_softc *sc)
3709 {
3710 struct iwn_ops *ops = &sc->ops;
3711 struct iwn_calib_state *calib = &sc->calib;
3712 uint32_t flags;
3713 int error;
3714
3715 /* Reset calibration state machine. */
3716 memset(calib, 0, sizeof (*calib));
3717 calib->state = IWN_CALIB_STATE_INIT;
3718 calib->cck_state = IWN_CCK_STATE_HIFA;
3719 /* Set initial correlation values. */
3720 calib->ofdm_x1 = sc->limits->min_ofdm_x1;
3721 calib->ofdm_mrc_x1 = sc->limits->min_ofdm_mrc_x1;
3722 calib->ofdm_x4 = sc->limits->min_ofdm_x4;
3723 calib->ofdm_mrc_x4 = sc->limits->min_ofdm_mrc_x4;
3724 calib->cck_x4 = 125;
3725 calib->cck_mrc_x4 = sc->limits->min_cck_mrc_x4;
3726 calib->energy_cck = sc->limits->energy_cck;
3727
3728 /* Write initial sensitivity. */
3729 if ((error = iwn_send_sensitivity(sc)) != 0)
3730 return error;
3731
3732 /* Write initial gains. */
3733 if ((error = ops->init_gains(sc)) != 0)
3734 return error;
3735
3736 /* Request statistics at each beacon interval. */
3737 flags = 0;
3738 DPRINTF(("sending request for statistics\n"));
3739 return iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags, sizeof flags, 1);
3740 }
3741
3742 /*
3743 * Collect noise and RSSI statistics for the first 20 beacons received
3744 * after association and use them to determine connected antennas and
3745 * to set differential gains.
3746 */
3747 static void
3748 iwn_collect_noise(struct iwn_softc *sc,
3749 const struct iwn_rx_general_stats *stats)
3750 {
3751 struct iwn_ops *ops = &sc->ops;
3752 struct iwn_calib_state *calib = &sc->calib;
3753 uint32_t val;
3754 int i;
3755
3756 /* Accumulate RSSI and noise for all 3 antennas. */
3757 for (i = 0; i < 3; i++) {
3758 calib->rssi[i] += le32toh(stats->rssi[i]) & 0xff;
3759 calib->noise[i] += le32toh(stats->noise[i]) & 0xff;
3760 }
3761 /* NB: We update differential gains only once after 20 beacons. */
3762 if (++calib->nbeacons < 20)
3763 return;
3764
3765 /* Determine highest average RSSI. */
3766 val = MAX(calib->rssi[0], calib->rssi[1]);
3767 val = MAX(calib->rssi[2], val);
3768
3769 /* Determine which antennas are connected. */
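	/*
	 * RSSI was accumulated over 20 beacons, so the 15 * 20 threshold
	 * below corresponds to an average of 15 units (presumably dB)
	 * below the strongest antenna.
	 */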
3770 sc->chainmask = sc->rxchainmask;
3771 for (i = 0; i < 3; i++)
3772 if (val - calib->rssi[i] > 15 * 20)
3773 sc->chainmask &= ~(1 << i);
3774 DPRINTF(("RX chains mask: theoretical=0x%x, actual=0x%x\n",
3775 sc->rxchainmask, sc->chainmask));
3776
3777 /* If none of the TX antennas are connected, keep at least one. */
3778 if ((sc->chainmask & sc->txchainmask) == 0)
3779 sc->chainmask |= IWN_LSB(sc->txchainmask);
3780
3781 (void)ops->set_gains(sc);
3782 calib->state = IWN_CALIB_STATE_RUN;
3783
3784 #ifdef notyet
3785 /* XXX Disable RX chains with no antennas connected. */
3786 sc->rxon.rxchain = htole16(IWN_RXCHAIN_SEL(sc->chainmask));
3787 (void)iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 1);
3788 #endif
3789
3790 /* Enable power-saving mode if requested by user. */
3791 if (sc->sc_ic.ic_flags & IEEE80211_F_PMGTON)
3792 (void)iwn_set_pslevel(sc, 0, 3, 1);
3793 }
3794
3795 static int
3796 iwn4965_init_gains(struct iwn_softc *sc)
3797 {
3798 struct iwn_phy_calib_gain cmd;
3799
3800 memset(&cmd, 0, sizeof cmd);
3801 cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN;
3802 /* Differential gains initially set to 0 for all 3 antennas. */
3803 DPRINTF(("setting initial differential gains\n"));
3804 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
3805 }
3806
3807 static int
3808 iwn5000_init_gains(struct iwn_softc *sc)
3809 {
3810 struct iwn_phy_calib cmd;
3811
3812 memset(&cmd, 0, sizeof cmd);
3813 cmd.code = IWN5000_PHY_CALIB_RESET_NOISE_GAIN;
3814 cmd.ngroups = 1;
3815 cmd.isvalid = 1;
3816 DPRINTF(("setting initial differential gains\n"));
3817 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
3818 }
3819
3820 static int
3821 iwn4965_set_gains(struct iwn_softc *sc)
3822 {
3823 struct iwn_calib_state *calib = &sc->calib;
3824 struct iwn_phy_calib_gain cmd;
3825 int i, delta, noise;
3826
3827 /* Get minimal noise among connected antennas. */
3828 noise = INT_MAX; /* NB: There's at least one antenna. */
3829 for (i = 0; i < 3; i++)
3830 if (sc->chainmask & (1 << i))
3831 noise = MIN(calib->noise[i], noise);
3832
3833 memset(&cmd, 0, sizeof cmd);
3834 cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN;
3835 /* Set differential gains for connected antennas. */
3836 for (i = 0; i < 3; i++) {
3837 if (sc->chainmask & (1 << i)) {
3838 /* Compute attenuation (in unit of 1.5dB). */
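			/*
			 * Noise was accumulated over 20 beacons, so dividing
			 * by 30 yields the average difference in 1.5 dB
			 * steps (20 * 1.5 = 30).
			 */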
3839 delta = (noise - (int32_t)calib->noise[i]) / 30;
3840 /* NB: delta <= 0 */
3841 /* Limit to [-4.5dB,0]. */
3842 cmd.gain[i] = MIN(abs(delta), 3);
3843 if (delta < 0)
3844 cmd.gain[i] |= 1 << 2; /* sign bit */
3845 }
3846 }
3847 DPRINTF(("setting differential gains Ant A/B/C: %x/%x/%x (%x)\n",
3848 cmd.gain[0], cmd.gain[1], cmd.gain[2], sc->chainmask));
3849 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
3850 }
3851
3852 static int
3853 iwn5000_set_gains(struct iwn_softc *sc)
3854 {
3855 struct iwn_calib_state *calib = &sc->calib;
3856 struct iwn_phy_calib_gain cmd;
3857 int i, ant, div, delta;
3858
3859 	/* Noise was accumulated over 20 beacons; non-6050 chips also need a 1.5 factor (20 * 1.5 = 30). */
3860 div = (sc->hw_type == IWN_HW_REV_TYPE_6050) ? 20 : 30;
3861
3862 memset(&cmd, 0, sizeof cmd);
3863 cmd.code = IWN5000_PHY_CALIB_NOISE_GAIN;
3864 cmd.ngroups = 1;
3865 cmd.isvalid = 1;
3866 	/* Use the first available RX antenna as the reference. */
3867 ant = IWN_LSB(sc->rxchainmask);
3868 /* Set differential gains for other antennas. */
3869 for (i = ant + 1; i < 3; i++) {
3870 if (sc->chainmask & (1 << i)) {
3871 /* The delta is relative to antenna "ant". */
3872 delta = ((int32_t)calib->noise[ant] -
3873 (int32_t)calib->noise[i]) / div;
3874 /* Limit to [-4.5dB,+4.5dB]. */
3875 cmd.gain[i - 1] = MIN(abs(delta), 3);
3876 if (delta < 0)
3877 cmd.gain[i - 1] |= 1 << 2; /* sign bit */
3878 }
3879 }
3880 DPRINTF(("setting differential gains: %x/%x (%x)\n",
3881 cmd.gain[0], cmd.gain[1], sc->chainmask));
3882 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
3883 }
3884
3885 /*
3886 * Tune RF RX sensitivity based on the number of false alarms detected
3887 * during the last beacon period.
3888 */
3889 static void
3890 iwn_tune_sensitivity(struct iwn_softc *sc, const struct iwn_rx_stats *stats)
3891 {
3892 #define inc(val, inc, max) \
3893 if ((val) < (max)) { \
3894 if ((val) < (max) - (inc)) \
3895 (val) += (inc); \
3896 else \
3897 (val) = (max); \
3898 needs_update = 1; \
3899 }
3900 #define dec(val, dec, min) \
3901 if ((val) > (min)) { \
3902 if ((val) > (min) + (dec)) \
3903 (val) -= (dec); \
3904 else \
3905 (val) = (min); \
3906 needs_update = 1; \
3907 }
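/*
 * inc()/dec() nudge a calibration value towards the given bound, saturating
 * at max/min, and flag needs_update only when the value actually changed.
 */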
3908
3909 const struct iwn_sensitivity_limits *limits = sc->limits;
3910 struct iwn_calib_state *calib = &sc->calib;
3911 uint32_t val, rxena, fa;
3912 uint32_t energy[3], energy_min;
3913 uint8_t noise[3], noise_ref;
3914 int i, needs_update = 0;
3915
3916 /* Check that we've been enabled long enough. */
3917 if ((rxena = le32toh(stats->general.load)) == 0)
3918 return;
3919
3920 /* Compute number of false alarms since last call for OFDM. */
3921 fa = le32toh(stats->ofdm.bad_plcp) - calib->bad_plcp_ofdm;
3922 fa += le32toh(stats->ofdm.fa) - calib->fa_ofdm;
3923 fa *= 200 * 1024; /* 200TU */
3924
3925 /* Save counters values for next call. */
3926 calib->bad_plcp_ofdm = le32toh(stats->ofdm.bad_plcp);
3927 calib->fa_ofdm = le32toh(stats->ofdm.fa);
3928
3929 if (fa > 50 * rxena) {
3930 /* High false alarm count, decrease sensitivity. */
3931 DPRINTFN(2, ("OFDM high false alarm count: %u\n", fa));
3932 inc(calib->ofdm_x1, 1, limits->max_ofdm_x1);
3933 inc(calib->ofdm_mrc_x1, 1, limits->max_ofdm_mrc_x1);
3934 inc(calib->ofdm_x4, 1, limits->max_ofdm_x4);
3935 inc(calib->ofdm_mrc_x4, 1, limits->max_ofdm_mrc_x4);
3936
3937 } else if (fa < 5 * rxena) {
3938 /* Low false alarm count, increase sensitivity. */
3939 DPRINTFN(2, ("OFDM low false alarm count: %u\n", fa));
3940 dec(calib->ofdm_x1, 1, limits->min_ofdm_x1);
3941 dec(calib->ofdm_mrc_x1, 1, limits->min_ofdm_mrc_x1);
3942 dec(calib->ofdm_x4, 1, limits->min_ofdm_x4);
3943 dec(calib->ofdm_mrc_x4, 1, limits->min_ofdm_mrc_x4);
3944 }
3945
3946 /* Compute maximum noise among 3 receivers. */
3947 for (i = 0; i < 3; i++)
3948 noise[i] = (le32toh(stats->general.noise[i]) >> 8) & 0xff;
3949 val = MAX(noise[0], noise[1]);
3950 val = MAX(noise[2], val);
3951 /* Insert it into our samples table. */
3952 calib->noise_samples[calib->cur_noise_sample] = val;
3953 calib->cur_noise_sample = (calib->cur_noise_sample + 1) % 20;
3954
3955 /* Compute maximum noise among last 20 samples. */
3956 noise_ref = calib->noise_samples[0];
3957 for (i = 1; i < 20; i++)
3958 noise_ref = MAX(noise_ref, calib->noise_samples[i]);
3959
3960 /* Compute maximum energy among 3 receivers. */
3961 for (i = 0; i < 3; i++)
3962 energy[i] = le32toh(stats->general.energy[i]);
3963 val = MIN(energy[0], energy[1]);
3964 val = MIN(energy[2], val);
3965 /* Insert it into our samples table. */
3966 calib->energy_samples[calib->cur_energy_sample] = val;
3967 calib->cur_energy_sample = (calib->cur_energy_sample + 1) % 10;
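	/*
	 * NB: the hardware energy figures appear to be inverted (a larger
	 * value means less energy), which is why the "maximum energy"
	 * computation above uses MIN and the "minimum" one below uses MAX.
	 */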
3968
3969 /* Compute minimum energy among last 10 samples. */
3970 energy_min = calib->energy_samples[0];
3971 for (i = 1; i < 10; i++)
3972 energy_min = MAX(energy_min, calib->energy_samples[i]);
3973 energy_min += 6;
3974
3975 /* Compute number of false alarms since last call for CCK. */
3976 fa = le32toh(stats->cck.bad_plcp) - calib->bad_plcp_cck;
3977 fa += le32toh(stats->cck.fa) - calib->fa_cck;
3978 fa *= 200 * 1024; /* 200TU */
3979
3980 /* Save counters values for next call. */
3981 calib->bad_plcp_cck = le32toh(stats->cck.bad_plcp);
3982 calib->fa_cck = le32toh(stats->cck.fa);
3983
3984 if (fa > 50 * rxena) {
3985 /* High false alarm count, decrease sensitivity. */
3986 DPRINTFN(2, ("CCK high false alarm count: %u\n", fa));
3987 calib->cck_state = IWN_CCK_STATE_HIFA;
3988 calib->low_fa = 0;
3989
3990 if (calib->cck_x4 > 160) {
3991 calib->noise_ref = noise_ref;
3992 if (calib->energy_cck > 2)
3993 dec(calib->energy_cck, 2, energy_min);
3994 }
3995 if (calib->cck_x4 < 160) {
3996 calib->cck_x4 = 161;
3997 needs_update = 1;
3998 } else
3999 inc(calib->cck_x4, 3, limits->max_cck_x4);
4000
4001 inc(calib->cck_mrc_x4, 3, limits->max_cck_mrc_x4);
4002
4003 } else if (fa < 5 * rxena) {
4004 /* Low false alarm count, increase sensitivity. */
4005 DPRINTFN(2, ("CCK low false alarm count: %u\n", fa));
4006 calib->cck_state = IWN_CCK_STATE_LOFA;
4007 calib->low_fa++;
4008
4009 if (calib->cck_state != IWN_CCK_STATE_INIT &&
4010 (((int32_t)calib->noise_ref - (int32_t)noise_ref) > 2 ||
4011 calib->low_fa > 100)) {
4012 inc(calib->energy_cck, 2, limits->min_energy_cck);
4013 dec(calib->cck_x4, 3, limits->min_cck_x4);
4014 dec(calib->cck_mrc_x4, 3, limits->min_cck_mrc_x4);
4015 }
4016 } else {
4017 		/* Not worth increasing or decreasing sensitivity. */
4018 DPRINTFN(2, ("CCK normal false alarm count: %u\n", fa));
4019 calib->low_fa = 0;
4020 calib->noise_ref = noise_ref;
4021
4022 if (calib->cck_state == IWN_CCK_STATE_HIFA) {
4023 /* Previous interval had many false alarms. */
4024 dec(calib->energy_cck, 8, energy_min);
4025 }
4026 calib->cck_state = IWN_CCK_STATE_INIT;
4027 }
4028
4029 if (needs_update)
4030 (void)iwn_send_sensitivity(sc);
4031 #undef dec
4032 #undef inc
4033 }
4034
4035 static int
4036 iwn_send_sensitivity(struct iwn_softc *sc)
4037 {
4038 struct iwn_calib_state *calib = &sc->calib;
4039 struct iwn_sensitivity_cmd cmd;
4040
4041 memset(&cmd, 0, sizeof cmd);
4042 cmd.which = IWN_SENSITIVITY_WORKTBL;
4043 /* OFDM modulation. */
4044 cmd.corr_ofdm_x1 = htole16(calib->ofdm_x1);
4045 cmd.corr_ofdm_mrc_x1 = htole16(calib->ofdm_mrc_x1);
4046 cmd.corr_ofdm_x4 = htole16(calib->ofdm_x4);
4047 cmd.corr_ofdm_mrc_x4 = htole16(calib->ofdm_mrc_x4);
4048 cmd.energy_ofdm = htole16(sc->limits->energy_ofdm);
4049 cmd.energy_ofdm_th = htole16(62);
4050 /* CCK modulation. */
4051 cmd.corr_cck_x4 = htole16(calib->cck_x4);
4052 cmd.corr_cck_mrc_x4 = htole16(calib->cck_mrc_x4);
4053 cmd.energy_cck = htole16(calib->energy_cck);
4054 /* Barker modulation: use default values. */
4055 cmd.corr_barker = htole16(190);
4056 cmd.corr_barker_mrc = htole16(390);
4057
4058 DPRINTFN(2, ("setting sensitivity %d/%d/%d/%d/%d/%d/%d\n",
4059 calib->ofdm_x1, calib->ofdm_mrc_x1, calib->ofdm_x4,
4060 calib->ofdm_mrc_x4, calib->cck_x4, calib->cck_mrc_x4,
4061 calib->energy_cck));
4062 return iwn_cmd(sc, IWN_CMD_SET_SENSITIVITY, &cmd, sizeof cmd, 1);
4063 }
4064
4065 /*
4066 * Set STA mode power saving level (between 0 and 5).
4067 * Level 0 is CAM (Continuously Aware Mode), 5 is for maximum power saving.
4068 */
4069 static int
4070 iwn_set_pslevel(struct iwn_softc *sc, int dtim, int level, int async)
4071 {
4072 struct iwn_pmgt_cmd cmd;
4073 const struct iwn_pmgt *pmgt;
4074 uint32_t maxp, skip_dtim;
4075 pcireg_t reg;
4076 int i;
4077
4078 /* Select which PS parameters to use. */
4079 if (dtim <= 2)
4080 pmgt = &iwn_pmgt[0][level];
4081 else if (dtim <= 10)
4082 pmgt = &iwn_pmgt[1][level];
4083 else
4084 pmgt = &iwn_pmgt[2][level];
4085
4086 memset(&cmd, 0, sizeof cmd);
4087 if (level != 0) /* not CAM */
4088 cmd.flags |= htole16(IWN_PS_ALLOW_SLEEP);
4089 if (level == 5)
4090 cmd.flags |= htole16(IWN_PS_FAST_PD);
4091 /* Retrieve PCIe Active State Power Management (ASPM). */
4092 reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
4093 sc->sc_cap_off + PCIE_LCSR);
4094 if (!(reg & PCIE_LCSR_ASPM_L0S)) /* L0s Entry disabled. */
4095 cmd.flags |= htole16(IWN_PS_PCI_PMGT);
4096 cmd.rxtimeout = htole32(pmgt->rxtimeout * 1024);
4097 cmd.txtimeout = htole32(pmgt->txtimeout * 1024);
4098
4099 if (dtim == 0) {
4100 dtim = 1;
4101 skip_dtim = 0;
4102 } else
4103 skip_dtim = pmgt->skip_dtim;
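	/*
	 * When sleeping over DTIM periods is allowed, the largest wake
	 * interval is either dtim * (skip_dtim + 1) or table entry 4
	 * rounded down to a multiple of the DTIM period.
	 */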
4104 if (skip_dtim != 0) {
4105 cmd.flags |= htole16(IWN_PS_SLEEP_OVER_DTIM);
4106 maxp = pmgt->intval[4];
4107 if (maxp == (uint32_t)-1)
4108 maxp = dtim * (skip_dtim + 1);
4109 else if (maxp > dtim)
4110 maxp = (maxp / dtim) * dtim;
4111 } else
4112 maxp = dtim;
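	/* Clamp each sleep interval to the maximum computed above. */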
4113 for (i = 0; i < 5; i++)
4114 cmd.intval[i] = htole32(MIN(maxp, pmgt->intval[i]));
4115
4116 DPRINTF(("setting power saving level to %d\n", level));
4117 return iwn_cmd(sc, IWN_CMD_SET_POWER_MODE, &cmd, sizeof cmd, async);
4118 }
4119
4120 int
4121 iwn5000_runtime_calib(struct iwn_softc *sc)
4122 {
4123 struct iwn5000_calib_config cmd;
4124
4125 memset(&cmd, 0, sizeof cmd);
4126 cmd.ucode.once.enable = 0xffffffff;
4127 cmd.ucode.once.start = IWN5000_CALIB_DC;
4128 DPRINTF(("configuring runtime calibration\n"));
4129 return iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof(cmd), 0);
4130 }
4131
4132 static int
4133 iwn_config_bt_coex_bluetooth(struct iwn_softc *sc)
4134 {
4135 struct iwn_bluetooth bluetooth;
4136
4137 memset(&bluetooth, 0, sizeof bluetooth);
4138 bluetooth.flags = IWN_BT_COEX_ENABLE;
4139 bluetooth.lead_time = IWN_BT_LEAD_TIME_DEF;
4140 bluetooth.max_kill = IWN_BT_MAX_KILL_DEF;
4141
4142 DPRINTF(("configuring bluetooth coexistence\n"));
4143 return iwn_cmd(sc, IWN_CMD_BT_COEX, &bluetooth, sizeof bluetooth, 0);
4144 }
4145
4146 static int
4147 iwn_config_bt_coex_prio_table(struct iwn_softc *sc)
4148 {
4149 uint8_t prio_table[16];
4150
4151 memset(&prio_table, 0, sizeof prio_table);
4152 prio_table[ 0] = 6; /* init calibration 1 */
4153 prio_table[ 1] = 7; /* init calibration 2 */
4154 prio_table[ 2] = 2; /* periodic calib low 1 */
4155 prio_table[ 3] = 3; /* periodic calib low 2 */
4156 prio_table[ 4] = 4; /* periodic calib high 1 */
4157 prio_table[ 5] = 5; /* periodic calib high 2 */
4158 prio_table[ 6] = 6; /* dtim */
4159 prio_table[ 7] = 8; /* scan52 */
4160 prio_table[ 8] = 10; /* scan24 */
4161
4162 DPRINTF(("sending priority lookup table\n"));
4163 return iwn_cmd(sc, IWN_CMD_BT_COEX_PRIO_TABLE,
4164 &prio_table, sizeof prio_table, 0);
4165 }
4166
4167 static int
4168 iwn_config_bt_coex_adv1(struct iwn_softc *sc)
4169 {
4170 int error;
4171 struct iwn_bt_adv1 d;
4172
4173 memset(&d, 0, sizeof d);
4174 d.basic.bt.flags = IWN_BT_COEX_ENABLE;
4175 d.basic.bt.lead_time = IWN_BT_LEAD_TIME_DEF;
4176 d.basic.bt.max_kill = IWN_BT_MAX_KILL_DEF;
4177 d.basic.bt.bt3_timer_t7_value = IWN_BT_BT3_T7_DEF;
4178 d.basic.bt.kill_ack_mask = IWN_BT_KILL_ACK_MASK_DEF;
4179 d.basic.bt.kill_cts_mask = IWN_BT_KILL_CTS_MASK_DEF;
4180 d.basic.bt3_prio_sample_time = IWN_BT_BT3_PRIO_SAMPLE_DEF;
4181 d.basic.bt3_timer_t2_value = IWN_BT_BT3_T2_DEF;
4182 d.basic.bt3_lookup_table[ 0] = htole32(0xaaaaaaaa); /* Normal */
4183 d.basic.bt3_lookup_table[ 1] = htole32(0xaaaaaaaa);
4184 d.basic.bt3_lookup_table[ 2] = htole32(0xaeaaaaaa);
4185 d.basic.bt3_lookup_table[ 3] = htole32(0xaaaaaaaa);
4186 d.basic.bt3_lookup_table[ 4] = htole32(0xcc00ff28);
4187 d.basic.bt3_lookup_table[ 5] = htole32(0x0000aaaa);
4188 d.basic.bt3_lookup_table[ 6] = htole32(0xcc00aaaa);
4189 d.basic.bt3_lookup_table[ 7] = htole32(0x0000aaaa);
4190 d.basic.bt3_lookup_table[ 8] = htole32(0xc0004000);
4191 d.basic.bt3_lookup_table[ 9] = htole32(0x00004000);
4192 d.basic.bt3_lookup_table[10] = htole32(0xf0005000);
4193 d.basic.bt3_lookup_table[11] = htole32(0xf0005000);
4194 	d.basic.reduce_txpower = 0;	/* not implemented */
4195 d.basic.valid = IWN_BT_ALL_VALID_MASK;
4196 d.prio_boost = IWN_BT_PRIO_BOOST_DEF;
4197 d.tx_prio_boost = 0;
4198 d.rx_prio_boost = 0;
4199
4200 DPRINTF(("configuring advanced bluetooth coexistence v1\n"));
4201 error = iwn_cmd(sc, IWN_CMD_BT_COEX, &d, sizeof d, 0);
4202 if (error != 0) {
4203 aprint_error_dev(sc->sc_dev,
4204 "could not configure advanced bluetooth coexistence\n");
4205 return error;
4206 }
4207
4208 error = iwn_config_bt_coex_prio_table(sc);
4209 if (error != 0) {
4210 aprint_error_dev(sc->sc_dev,
4211 	    "could not send BT priority table\n");
4212 return error;
4213 }
4214
4215 return error;
4216 }
4217
4218 static int
4219 iwn_config(struct iwn_softc *sc)
4220 {
4221 struct iwn_ops *ops = &sc->ops;
4222 struct ieee80211com *ic = &sc->sc_ic;
4223 struct ifnet *ifp = ic->ic_ifp;
4224 uint32_t txmask;
4225 uint16_t rxchain;
4226 int error;
4227
4228 error = ops->config_bt_coex(sc);
4229 if (error != 0) {
4230 aprint_error_dev(sc->sc_dev,
4231 "could not configure bluetooth coexistence\n");
4232 return error;
4233 }
4234
4235 if (sc->hw_type == IWN_HW_REV_TYPE_6050 ||
4236 sc->hw_type == IWN_HW_REV_TYPE_6005) {
4237 /* Configure runtime DC calibration. */
4238 error = iwn5000_runtime_calib(sc);
4239 if (error != 0) {
4240 aprint_error_dev(sc->sc_dev,
4241 "could not configure runtime calibration\n");
4242 return error;
4243 }
4244 }
4245
4246 /* Configure valid TX chains for 5000 Series. */
4247 if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
4248 txmask = htole32(sc->txchainmask);
4249 DPRINTF(("configuring valid TX chains 0x%x\n", txmask));
4250 error = iwn_cmd(sc, IWN5000_CMD_TX_ANT_CONFIG, &txmask,
4251 sizeof txmask, 0);
4252 if (error != 0) {
4253 aprint_error_dev(sc->sc_dev,
4254 "could not configure valid TX chains\n");
4255 return error;
4256 }
4257 }
4258
4259 /* Set mode, channel, RX filter and enable RX. */
4260 memset(&sc->rxon, 0, sizeof (struct iwn_rxon));
4261 IEEE80211_ADDR_COPY(ic->ic_myaddr, CLLADDR(ifp->if_sadl));
4262 IEEE80211_ADDR_COPY(sc->rxon.myaddr, ic->ic_myaddr);
4263 IEEE80211_ADDR_COPY(sc->rxon.wlap, ic->ic_myaddr);
4264 sc->rxon.chan = ieee80211_chan2ieee(ic, ic->ic_ibss_chan);
4265 sc->rxon.flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF);
4266 if (IEEE80211_IS_CHAN_2GHZ(ic->ic_ibss_chan))
4267 sc->rxon.flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);
4268 switch (ic->ic_opmode) {
4269 case IEEE80211_M_STA:
4270 sc->rxon.mode = IWN_MODE_STA;
4271 sc->rxon.filter = htole32(IWN_FILTER_MULTICAST);
4272 break;
4273 case IEEE80211_M_MONITOR:
4274 sc->rxon.mode = IWN_MODE_MONITOR;
4275 sc->rxon.filter = htole32(IWN_FILTER_MULTICAST |
4276 IWN_FILTER_CTL | IWN_FILTER_PROMISC);
4277 break;
4278 default:
4279 		/* Should not get here. */
4280 break;
4281 }
4282 sc->rxon.cck_mask = 0x0f; /* not yet negotiated */
4283 sc->rxon.ofdm_mask = 0xff; /* not yet negotiated */
4284 sc->rxon.ht_single_mask = 0xff;
4285 sc->rxon.ht_dual_mask = 0xff;
4286 sc->rxon.ht_triple_mask = 0xff;
4287 rxchain =
4288 IWN_RXCHAIN_VALID(sc->rxchainmask) |
4289 IWN_RXCHAIN_MIMO_COUNT(2) |
4290 IWN_RXCHAIN_IDLE_COUNT(2);
4291 sc->rxon.rxchain = htole16(rxchain);
4292 DPRINTF(("setting configuration\n"));
4293 error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 0);
4294 if (error != 0) {
4295 aprint_error_dev(sc->sc_dev,
4296 "RXON command failed\n");
4297 return error;
4298 }
4299
4300 if ((error = iwn_add_broadcast_node(sc, 0)) != 0) {
4301 aprint_error_dev(sc->sc_dev,
4302 "could not add broadcast node\n");
4303 return error;
4304 }
4305
4306 /* Configuration has changed, set TX power accordingly. */
4307 if ((error = ops->set_txpower(sc, 0)) != 0) {
4308 aprint_error_dev(sc->sc_dev,
4309 "could not set TX power\n");
4310 return error;
4311 }
4312
4313 if ((error = iwn_set_critical_temp(sc)) != 0) {
4314 aprint_error_dev(sc->sc_dev,
4315 "could not set critical temperature\n");
4316 return error;
4317 }
4318
4319 /* Set power saving level to CAM during initialization. */
4320 if ((error = iwn_set_pslevel(sc, 0, 0, 0)) != 0) {
4321 aprint_error_dev(sc->sc_dev,
4322 "could not set power saving level\n");
4323 return error;
4324 }
4325 return 0;
4326 }
4327
4328 static int
4329 iwn_scan(struct iwn_softc *sc, uint16_t flags)
4330 {
4331 struct ieee80211com *ic = &sc->sc_ic;
4332 struct iwn_scan_hdr *hdr;
4333 struct iwn_cmd_data *tx;
4334 struct iwn_scan_essid *essid;
4335 struct iwn_scan_chan *chan;
4336 struct ieee80211_frame *wh;
4337 struct ieee80211_rateset *rs;
4338 struct ieee80211_channel *c;
4339 uint8_t *buf, *frm;
4340 uint16_t rxchain;
4341 uint8_t txant;
4342 int buflen, error;
4343
4344 buf = malloc(IWN_SCAN_MAXSZ, M_DEVBUF, M_NOWAIT | M_ZERO);
4345 if (buf == NULL) {
4346 aprint_error_dev(sc->sc_dev,
4347 "could not allocate buffer for scan command\n");
4348 return ENOMEM;
4349 }
4350 hdr = (struct iwn_scan_hdr *)buf;
4351 /*
4352 * Move to the next channel if no frames are received within 10ms
4353 * after sending the probe request.
4354 */
4355 hdr->quiet_time = htole16(10); /* timeout in milliseconds */
4356 hdr->quiet_threshold = htole16(1); /* min # of packets */
4357
4358 /* Select antennas for scanning. */
4359 rxchain =
4360 IWN_RXCHAIN_VALID(sc->rxchainmask) |
4361 IWN_RXCHAIN_FORCE_MIMO_SEL(sc->rxchainmask) |
4362 IWN_RXCHAIN_DRIVER_FORCE;
4363 if ((flags & IEEE80211_CHAN_5GHZ) &&
4364 sc->hw_type == IWN_HW_REV_TYPE_4965) {
4365 /* Ant A must be avoided in 5GHz because of an HW bug. */
4366 rxchain |= IWN_RXCHAIN_FORCE_SEL(IWN_ANT_BC);
4367 } else /* Use all available RX antennas. */
4368 rxchain |= IWN_RXCHAIN_FORCE_SEL(sc->rxchainmask);
4369 hdr->rxchain = htole16(rxchain);
4370 hdr->filter = htole32(IWN_FILTER_MULTICAST | IWN_FILTER_BEACON);
4371
4372 tx = (struct iwn_cmd_data *)(hdr + 1);
4373 tx->flags = htole32(IWN_TX_AUTO_SEQ);
4374 tx->id = sc->broadcast_id;
4375 tx->lifetime = htole32(IWN_LIFETIME_INFINITE);
4376
4377 if (flags & IEEE80211_CHAN_5GHZ) {
4378 hdr->crc_threshold = 0xffff;
4379 /* Send probe requests at 6Mbps. */
4380 tx->plcp = iwn_rates[IWN_RIDX_OFDM6].plcp;
4381 rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
4382 } else {
4383 hdr->flags = htole32(IWN_RXON_24GHZ | IWN_RXON_AUTO);
4384 /* Send probe requests at 1Mbps. */
4385 tx->plcp = iwn_rates[IWN_RIDX_CCK1].plcp;
4386 tx->rflags = IWN_RFLAG_CCK;
4387 rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
4388 }
4389 /* Use the first valid TX antenna. */
4390 txant = IWN_LSB(sc->txchainmask);
4391 tx->rflags |= IWN_RFLAG_ANT(txant);
4392
4393 essid = (struct iwn_scan_essid *)(tx + 1);
4394 if (ic->ic_des_esslen != 0) {
4395 essid[0].id = IEEE80211_ELEMID_SSID;
4396 essid[0].len = ic->ic_des_esslen;
4397 memcpy(essid[0].data, ic->ic_des_essid, ic->ic_des_esslen);
4398 }
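	/*
	 * The scan command reserves room for 20 ESSID entries; the probe
	 * request template is appended right after them.
	 */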
4399 /*
4400 * Build a probe request frame. Most of the following code is a
4401 * copy & paste of what is done in net80211.
4402 */
4403 wh = (struct ieee80211_frame *)(essid + 20);
4404 wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
4405 IEEE80211_FC0_SUBTYPE_PROBE_REQ;
4406 wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
4407 IEEE80211_ADDR_COPY(wh->i_addr1, etherbroadcastaddr);
4408 IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_myaddr);
4409 IEEE80211_ADDR_COPY(wh->i_addr3, etherbroadcastaddr);
4410 *(uint16_t *)&wh->i_dur[0] = 0; /* filled by HW */
4411 *(uint16_t *)&wh->i_seq[0] = 0; /* filled by HW */
4412
4413 frm = (uint8_t *)(wh + 1);
4414 frm = ieee80211_add_ssid(frm, NULL, 0);
4415 frm = ieee80211_add_rates(frm, rs);
4416 #ifndef IEEE80211_NO_HT
4417 if (ic->ic_flags & IEEE80211_F_HTON)
4418 frm = ieee80211_add_htcaps(frm, ic);
4419 #endif
4420 if (rs->rs_nrates > IEEE80211_RATE_SIZE)
4421 frm = ieee80211_add_xrates(frm, rs);
4422
4423 /* Set length of probe request. */
4424 tx->len = htole16(frm - (uint8_t *)wh);
4425
4426 chan = (struct iwn_scan_chan *)frm;
4427 for (c = &ic->ic_channels[1];
4428 c <= &ic->ic_channels[IEEE80211_CHAN_MAX]; c++) {
4429 if ((c->ic_flags & flags) != flags)
4430 continue;
4431
4432 chan->chan = htole16(ieee80211_chan2ieee(ic, c));
4433 DPRINTFN(2, ("adding channel %d\n", chan->chan));
4434 chan->flags = 0;
4435 if (!(c->ic_flags & IEEE80211_CHAN_PASSIVE))
4436 chan->flags |= htole32(IWN_CHAN_ACTIVE);
4437 if (ic->ic_des_esslen != 0)
4438 chan->flags |= htole32(IWN_CHAN_NPBREQS(1));
4439 chan->dsp_gain = 0x6e;
4440 if (IEEE80211_IS_CHAN_5GHZ(c)) {
4441 chan->rf_gain = 0x3b;
4442 chan->active = htole16(24);
4443 chan->passive = htole16(110);
4444 } else {
4445 chan->rf_gain = 0x28;
4446 chan->active = htole16(36);
4447 chan->passive = htole16(120);
4448 }
4449 hdr->nchan++;
4450 chan++;
4451 }
4452
4453 buflen = (uint8_t *)chan - buf;
4454 hdr->len = htole16(buflen);
4455
4456 DPRINTF(("sending scan command nchan=%d\n", hdr->nchan));
4457 error = iwn_cmd(sc, IWN_CMD_SCAN, buf, buflen, 1);
4458 free(buf, M_DEVBUF);
4459 return error;
4460 }
4461
4462 static int
4463 iwn_auth(struct iwn_softc *sc)
4464 {
4465 struct iwn_ops *ops = &sc->ops;
4466 struct ieee80211com *ic = &sc->sc_ic;
4467 struct ieee80211_node *ni = ic->ic_bss;
4468 int error;
4469
4470 /* Update adapter configuration. */
4471 IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid);
4472 sc->rxon.chan = ieee80211_chan2ieee(ic, ni->ni_chan);
4473 sc->rxon.flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF);
4474 if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan))
4475 sc->rxon.flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);
4476 if (ic->ic_flags & IEEE80211_F_SHSLOT)
4477 sc->rxon.flags |= htole32(IWN_RXON_SHSLOT);
4478 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
4479 sc->rxon.flags |= htole32(IWN_RXON_SHPREAMBLE);
4480 switch (ic->ic_curmode) {
4481 case IEEE80211_MODE_11A:
4482 sc->rxon.cck_mask = 0;
4483 sc->rxon.ofdm_mask = 0x15;
4484 break;
4485 case IEEE80211_MODE_11B:
4486 sc->rxon.cck_mask = 0x03;
4487 sc->rxon.ofdm_mask = 0;
4488 break;
4489 default: /* Assume 802.11b/g. */
4490 sc->rxon.cck_mask = 0x0f;
4491 sc->rxon.ofdm_mask = 0x15;
4492 }
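	/*
	 * The masks index the driver rate table: 0x03/0x0f cover the CCK
	 * rates and 0x15 presumably selects the basic OFDM rates
	 * (6/12/24 Mbps).
	 */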
4493 DPRINTF(("rxon chan %d flags %x cck %x ofdm %x\n", sc->rxon.chan,
4494 sc->rxon.flags, sc->rxon.cck_mask, sc->rxon.ofdm_mask));
4495 error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 1);
4496 if (error != 0) {
4497 aprint_error_dev(sc->sc_dev,
4498 "RXON command failed\n");
4499 return error;
4500 }
4501
4502 /* Configuration has changed, set TX power accordingly. */
4503 if ((error = ops->set_txpower(sc, 1)) != 0) {
4504 aprint_error_dev(sc->sc_dev,
4505 "could not set TX power\n");
4506 return error;
4507 }
4508 /*
4509 * Reconfiguring RXON clears the firmware nodes table so we must
4510 * add the broadcast node again.
4511 */
4512 if ((error = iwn_add_broadcast_node(sc, 1)) != 0) {
4513 aprint_error_dev(sc->sc_dev,
4514 "could not add broadcast node\n");
4515 return error;
4516 }
4517 return 0;
4518 }
4519
4520 static int
4521 iwn_run(struct iwn_softc *sc)
4522 {
4523 struct iwn_ops *ops = &sc->ops;
4524 struct ieee80211com *ic = &sc->sc_ic;
4525 struct ieee80211_node *ni = ic->ic_bss;
4526 struct iwn_node_info node;
4527 int error;
4528
4529 if (ic->ic_opmode == IEEE80211_M_MONITOR) {
4530 /* Link LED blinks while monitoring. */
4531 iwn_set_led(sc, IWN_LED_LINK, 5, 5);
4532 return 0;
4533 }
4534 if ((error = iwn_set_timing(sc, ni)) != 0) {
4535 aprint_error_dev(sc->sc_dev,
4536 "could not set timing\n");
4537 return error;
4538 }
4539
4540 /* Update adapter configuration. */
4541 sc->rxon.associd = htole16(IEEE80211_AID(ni->ni_associd));
4542 /* Short preamble and slot time are negotiated when associating. */
4543 sc->rxon.flags &= ~htole32(IWN_RXON_SHPREAMBLE | IWN_RXON_SHSLOT);
4544 if (ic->ic_flags & IEEE80211_F_SHSLOT)
4545 sc->rxon.flags |= htole32(IWN_RXON_SHSLOT);
4546 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
4547 sc->rxon.flags |= htole32(IWN_RXON_SHPREAMBLE);
4548 sc->rxon.filter |= htole32(IWN_FILTER_BSS);
4549 DPRINTF(("rxon chan %d flags %x\n", sc->rxon.chan, sc->rxon.flags));
4550 error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 1);
4551 if (error != 0) {
4552 aprint_error_dev(sc->sc_dev,
4553 "could not update configuration\n");
4554 return error;
4555 }
4556
4557 /* Configuration has changed, set TX power accordingly. */
4558 if ((error = ops->set_txpower(sc, 1)) != 0) {
4559 aprint_error_dev(sc->sc_dev,
4560 "could not set TX power\n");
4561 return error;
4562 }
4563
4564 /* Fake a join to initialize the TX rate. */
4565 ((struct iwn_node *)ni)->id = IWN_ID_BSS;
4566 iwn_newassoc(ni, 1);
4567
4568 /* Add BSS node. */
4569 memset(&node, 0, sizeof node);
4570 IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr);
4571 node.id = IWN_ID_BSS;
4572 #ifdef notyet
4573 node.htflags = htole32(IWN_AMDPU_SIZE_FACTOR(3) |
4574 IWN_AMDPU_DENSITY(5)); /* 2us */
4575 #endif
4576 DPRINTF(("adding BSS node\n"));
4577 error = ops->add_node(sc, &node, 1);
4578 if (error != 0) {
4579 aprint_error_dev(sc->sc_dev,
4580 "could not add BSS node\n");
4581 return error;
4582 }
4583 DPRINTF(("setting link quality for node %d\n", node.id));
4584 if ((error = iwn_set_link_quality(sc, ni)) != 0) {
4585 aprint_error_dev(sc->sc_dev,
4586 "could not setup link quality for node %d\n", node.id);
4587 return error;
4588 }
4589
4590 if ((error = iwn_init_sensitivity(sc)) != 0) {
4591 aprint_error_dev(sc->sc_dev,
4592 "could not set sensitivity\n");
4593 return error;
4594 }
4595 /* Start periodic calibration timer. */
4596 sc->calib.state = IWN_CALIB_STATE_ASSOC;
4597 sc->calib_cnt = 0;
4598 callout_schedule(&sc->calib_to, hz/2);
4599
4600 /* Link LED always on while associated. */
4601 iwn_set_led(sc, IWN_LED_LINK, 0, 1);
4602 return 0;
4603 }
4604
4605 #ifdef IWN_HWCRYPTO
4606 /*
4607 * We support CCMP hardware encryption/decryption of unicast frames only.
4608 * HW support for TKIP really sucks. We should let TKIP die anyway.
4609 */
4610 static int
4611 iwn_set_key(struct ieee80211com *ic, struct ieee80211_node *ni,
4612 struct ieee80211_key *k)
4613 {
4614 struct iwn_softc *sc = ic->ic_softc;
4615 struct iwn_ops *ops = &sc->ops;
4616 struct iwn_node *wn = (void *)ni;
4617 struct iwn_node_info node;
4618 uint16_t kflags;
4619
4620 if ((k->k_flags & IEEE80211_KEY_GROUP) ||
4621 k->k_cipher != IEEE80211_CIPHER_CCMP)
4622 return ieee80211_set_key(ic, ni, k);
4623
4624 kflags = IWN_KFLAG_CCMP | IWN_KFLAG_MAP | IWN_KFLAG_KID(k->k_id);
4625 if (k->k_flags & IEEE80211_KEY_GROUP)
4626 kflags |= IWN_KFLAG_GROUP;
4627
4628 memset(&node, 0, sizeof node);
4629 node.id = (k->k_flags & IEEE80211_KEY_GROUP) ?
4630 sc->broadcast_id : wn->id;
4631 node.control = IWN_NODE_UPDATE;
4632 node.flags = IWN_FLAG_SET_KEY;
4633 node.kflags = htole16(kflags);
4634 node.kid = k->k_id;
4635 memcpy(node.key, k->k_key, k->k_len);
4636 DPRINTF(("set key id=%d for node %d\n", k->k_id, node.id));
4637 return ops->add_node(sc, &node, 1);
4638 }
4639
4640 static void
4641 iwn_delete_key(struct ieee80211com *ic, struct ieee80211_node *ni,
4642 struct ieee80211_key *k)
4643 {
4644 struct iwn_softc *sc = ic->ic_softc;
4645 struct iwn_ops *ops = &sc->ops;
4646 struct iwn_node *wn = (void *)ni;
4647 struct iwn_node_info node;
4648
4649 if ((k->k_flags & IEEE80211_KEY_GROUP) ||
4650 k->k_cipher != IEEE80211_CIPHER_CCMP) {
4651 /* See comment about other ciphers above. */
4652 ieee80211_delete_key(ic, ni, k);
4653 return;
4654 }
4655 if (ic->ic_state != IEEE80211_S_RUN)
4656 return; /* Nothing to do. */
4657 memset(&node, 0, sizeof node);
4658 node.id = (k->k_flags & IEEE80211_KEY_GROUP) ?
4659 sc->broadcast_id : wn->id;
4660 node.control = IWN_NODE_UPDATE;
4661 node.flags = IWN_FLAG_SET_KEY;
4662 node.kflags = htole16(IWN_KFLAG_INVALID);
4663 node.kid = 0xff;
4664 DPRINTF(("delete keys for node %d\n", node.id));
4665 (void)ops->add_node(sc, &node, 1);
4666 }
4667 #endif
4668
4669 /* XXX Added for NetBSD (copied from rev 1.39). */
4670
4671 static int
4672 iwn_wme_update(struct ieee80211com *ic)
4673 {
4674 #define IWN_EXP2(v) htole16((1 << (v)) - 1)
4675 #define IWN_USEC(v) htole16(IEEE80211_TXOP_TO_US(v))
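/*
 * IWN_EXP2() expands a log2 contention window into the (2^v - 1) value the
 * firmware expects; IWN_USEC() converts a TXOP limit from 32-microsecond
 * units to microseconds.
 */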
4676 struct iwn_softc *sc = ic->ic_ifp->if_softc;
4677 const struct wmeParams *wmep;
4678 struct iwn_edca_params cmd;
4679 int ac;
4680
4681 /* don't override default WME values if WME is not actually enabled */
4682 if (!(ic->ic_flags & IEEE80211_F_WME))
4683 return 0;
4684 cmd.flags = 0;
4685 for (ac = 0; ac < WME_NUM_AC; ac++) {
4686 wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac];
4687 cmd.ac[ac].aifsn = wmep->wmep_aifsn;
4688 cmd.ac[ac].cwmin = IWN_EXP2(wmep->wmep_logcwmin);
4689 cmd.ac[ac].cwmax = IWN_EXP2(wmep->wmep_logcwmax);
4690 cmd.ac[ac].txoplimit = IWN_USEC(wmep->wmep_txopLimit);
4691
4692 DPRINTF(("setting WME for queue %d aifsn=%d cwmin=%d cwmax=%d "
4693 "txop=%d\n", ac, cmd.ac[ac].aifsn,
4694 cmd.ac[ac].cwmin,
4695 cmd.ac[ac].cwmax, cmd.ac[ac].txoplimit));
4696 }
4697 return iwn_cmd(sc, IWN_CMD_EDCA_PARAMS, &cmd, sizeof cmd, 1);
4698 #undef IWN_USEC
4699 #undef IWN_EXP2
4700 }
4701
4702 #ifndef IEEE80211_NO_HT
4703 /*
4704 * This function is called by upper layer when an ADDBA request is received
4705 * from another STA and before the ADDBA response is sent.
4706 */
4707 static int
4708 iwn_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
4709 uint8_t tid)
4710 {
4711 struct ieee80211_rx_ba *ba = &ni->ni_rx_ba[tid];
4712 struct iwn_softc *sc = ic->ic_softc;
4713 struct iwn_ops *ops = &sc->ops;
4714 struct iwn_node *wn = (void *)ni;
4715 struct iwn_node_info node;
4716
4717 memset(&node, 0, sizeof node);
4718 node.id = wn->id;
4719 node.control = IWN_NODE_UPDATE;
4720 node.flags = IWN_FLAG_SET_ADDBA;
4721 node.addba_tid = tid;
4722 node.addba_ssn = htole16(ba->ba_winstart);
4723 DPRINTFN(2, ("ADDBA RA=%d TID=%d SSN=%d\n", wn->id, tid,
4724 ba->ba_winstart));
4725 return ops->add_node(sc, &node, 1);
4726 }
4727
4728 /*
4729 * This function is called by upper layer on teardown of an HT-immediate
4730  * Block Ack agreement (e.g. upon receipt of a DELBA frame).
4731 */
4732 static void
4733 iwn_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
4734 uint8_t tid)
4735 {
4736 struct iwn_softc *sc = ic->ic_softc;
4737 struct iwn_ops *ops = &sc->ops;
4738 struct iwn_node *wn = (void *)ni;
4739 struct iwn_node_info node;
4740
4741 memset(&node, 0, sizeof node);
4742 node.id = wn->id;
4743 node.control = IWN_NODE_UPDATE;
4744 node.flags = IWN_FLAG_SET_DELBA;
4745 node.delba_tid = tid;
4746 DPRINTFN(2, ("DELBA RA=%d TID=%d\n", wn->id, tid));
4747 (void)ops->add_node(sc, &node, 1);
4748 }
4749
4750 /*
4751 * This function is called by upper layer when an ADDBA response is received
4752 * from another STA.
4753 */
4754 static int
4755 iwn_ampdu_tx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
4756 uint8_t tid)
4757 {
4758 struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid];
4759 struct iwn_softc *sc = ic->ic_softc;
4760 struct iwn_ops *ops = &sc->ops;
4761 struct iwn_node *wn = (void *)ni;
4762 struct iwn_node_info node;
4763 int error;
4764
4765 /* Enable TX for the specified RA/TID. */
4766 wn->disable_tid &= ~(1 << tid);
4767 memset(&node, 0, sizeof node);
4768 node.id = wn->id;
4769 node.control = IWN_NODE_UPDATE;
4770 node.flags = IWN_FLAG_SET_DISABLE_TID;
4771 node.disable_tid = htole16(wn->disable_tid);
4772 error = ops->add_node(sc, &node, 1);
4773 if (error != 0)
4774 return error;
4775
4776 if ((error = iwn_nic_lock(sc)) != 0)
4777 return error;
4778 ops->ampdu_tx_start(sc, ni, tid, ba->ba_winstart);
4779 iwn_nic_unlock(sc);
4780 return 0;
4781 }
4782
4783 static void
4784 iwn_ampdu_tx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
4785 uint8_t tid)
4786 {
4787 struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid];
4788 struct iwn_softc *sc = ic->ic_softc;
4789 struct iwn_ops *ops = &sc->ops;
4790
4791 if (iwn_nic_lock(sc) != 0)
4792 return;
4793 ops->ampdu_tx_stop(sc, tid, ba->ba_winstart);
4794 iwn_nic_unlock(sc);
4795 }
4796
4797 static void
4798 iwn4965_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni,
4799 uint8_t tid, uint16_t ssn)
4800 {
4801 struct iwn_node *wn = (void *)ni;
4802 int qid = 7 + tid;
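	/*
	 * Aggregation queues sit right after the 7 standard rings
	 * (4 EDCA + cmd + 2 HCCA), so TID n maps to queue 7 + n.
	 */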
4803
4804 /* Stop TX scheduler while we're changing its configuration. */
4805 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
4806 IWN4965_TXQ_STATUS_CHGACT);
4807
4808 /* Assign RA/TID translation to the queue. */
4809 iwn_mem_write_2(sc, sc->sched_base + IWN4965_SCHED_TRANS_TBL(qid),
4810 wn->id << 4 | tid);
4811
4812 /* Enable chain-building mode for the queue. */
4813 iwn_prph_setbits(sc, IWN4965_SCHED_QCHAIN_SEL, 1 << qid);
4814
4815 /* Set starting sequence number from the ADDBA request. */
4816 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
4817 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn);
4818
4819 /* Set scheduler window size. */
4820 iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid),
4821 IWN_SCHED_WINSZ);
4822 /* Set scheduler frame limit. */
4823 iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid) + 4,
4824 IWN_SCHED_LIMIT << 16);
4825
4826 /* Enable interrupts for the queue. */
4827 iwn_prph_setbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid);
4828
4829 /* Mark the queue as active. */
4830 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
4831 IWN4965_TXQ_STATUS_ACTIVE | IWN4965_TXQ_STATUS_AGGR_ENA |
4832 iwn_tid2fifo[tid] << 1);
4833 }
4834
4835 static void
4836 iwn4965_ampdu_tx_stop(struct iwn_softc *sc, uint8_t tid, uint16_t ssn)
4837 {
4838 int qid = 7 + tid;
4839
4840 /* Stop TX scheduler while we're changing its configuration. */
4841 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
4842 IWN4965_TXQ_STATUS_CHGACT);
4843
4844 /* Set starting sequence number from the ADDBA request. */
4845 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
4846 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn);
4847
4848 /* Disable interrupts for the queue. */
4849 iwn_prph_clrbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid);
4850
4851 /* Mark the queue as inactive. */
4852 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
4853 IWN4965_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid] << 1);
4854 }
4855
4856 static void
4857 iwn5000_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni,
4858 uint8_t tid, uint16_t ssn)
4859 {
4860 struct iwn_node *wn = (void *)ni;
4861 int qid = 10 + tid;
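	/*
	 * On the 5000 Series the aggregation queues apparently start at
	 * queue 10, so TID n maps to queue 10 + n.
	 */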
4862
4863 /* Stop TX scheduler while we're changing its configuration. */
4864 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
4865 IWN5000_TXQ_STATUS_CHGACT);
4866
4867 /* Assign RA/TID translation to the queue. */
4868 iwn_mem_write_2(sc, sc->sched_base + IWN5000_SCHED_TRANS_TBL(qid),
4869 wn->id << 4 | tid);
4870
4871 /* Enable chain-building mode for the queue. */
4872 iwn_prph_setbits(sc, IWN5000_SCHED_QCHAIN_SEL, 1 << qid);
4873
4874 /* Enable aggregation for the queue. */
4875 iwn_prph_setbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid);
4876
4877 /* Set starting sequence number from the ADDBA request. */
4878 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
4879 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn);
4880
4881 /* Set scheduler window size and frame limit. */
4882 iwn_mem_write(sc, sc->sched_base + IWN5000_SCHED_QUEUE_OFFSET(qid) + 4,
4883 IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ);
4884
4885 /* Enable interrupts for the queue. */
4886 iwn_prph_setbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid);
4887
4888 /* Mark the queue as active. */
4889 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
4890 IWN5000_TXQ_STATUS_ACTIVE | iwn_tid2fifo[tid]);
4891 }
4892
4893 static void
4894 iwn5000_ampdu_tx_stop(struct iwn_softc *sc, uint8_t tid, uint16_t ssn)
4895 {
4896 int qid = 10 + tid;
4897
4898 /* Stop TX scheduler while we're changing its configuration. */
4899 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
4900 IWN5000_TXQ_STATUS_CHGACT);
4901
4902 /* Disable aggregation for the queue. */
4903 iwn_prph_clrbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid);
4904
4905 /* Set starting sequence number from the ADDBA request. */
4906 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
4907 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn);
4908
4909 /* Disable interrupts for the queue. */
4910 iwn_prph_clrbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid);
4911
4912 /* Mark the queue as inactive. */
4913 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
4914 IWN5000_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid]);
4915 }
4916 #endif /* !IEEE80211_NO_HT */
4917
4918 /*
4919 * Query calibration tables from the initialization firmware. We do this
4920 * only once at first boot. Called from a process context.
4921 */
4922 static int
4923 iwn5000_query_calibration(struct iwn_softc *sc)
4924 {
4925 struct iwn5000_calib_config cmd;
4926 int error;
4927
4928 memset(&cmd, 0, sizeof cmd);
4929 cmd.ucode.once.enable = 0xffffffff;
4930 cmd.ucode.once.start = 0xffffffff;
4931 cmd.ucode.once.send = 0xffffffff;
4932 cmd.ucode.flags = 0xffffffff;
4933 DPRINTF(("sending calibration query\n"));
4934 error = iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof cmd, 0);
4935 if (error != 0)
4936 return error;
4937
4938 /* Wait at most two seconds for calibration to complete. */
4939 if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE))
4940 error = tsleep(sc, PCATCH, "iwncal", 2 * hz);
4941 return error;
4942 }
4943
4944 /*
4945 * Send calibration results to the runtime firmware. These results were
4946 * obtained on first boot from the initialization firmware.
4947 */
4948 static int
4949 iwn5000_send_calibration(struct iwn_softc *sc)
4950 {
4951 int idx, error;
4952
4953 for (idx = 0; idx < 5; idx++) {
4954 if (sc->calibcmd[idx].buf == NULL)
4955 continue; /* No results available. */
4956 DPRINTF(("send calibration result idx=%d len=%d\n",
4957 idx, sc->calibcmd[idx].len));
4958 error = iwn_cmd(sc, IWN_CMD_PHY_CALIB, sc->calibcmd[idx].buf,
4959 sc->calibcmd[idx].len, 0);
4960 if (error != 0) {
4961 aprint_error_dev(sc->sc_dev,
4962 "could not send calibration result\n");
4963 return error;
4964 }
4965 }
4966 return 0;
4967 }
4968
4969 static int
4970 iwn5000_send_wimax_coex(struct iwn_softc *sc)
4971 {
4972 struct iwn5000_wimax_coex wimax;
4973
4974 #ifdef notyet
4975 if (sc->hw_type == IWN_HW_REV_TYPE_6050) {
4976 /* Enable WiMAX coexistence for combo adapters. */
4977 wimax.flags =
4978 IWN_WIMAX_COEX_ASSOC_WA_UNMASK |
4979 IWN_WIMAX_COEX_UNASSOC_WA_UNMASK |
4980 IWN_WIMAX_COEX_STA_TABLE_VALID |
4981 IWN_WIMAX_COEX_ENABLE;
4982 memcpy(wimax.events, iwn6050_wimax_events,
4983 sizeof iwn6050_wimax_events);
4984 } else
4985 #endif
4986 {
4987 /* Disable WiMAX coexistence. */
4988 wimax.flags = 0;
4989 memset(wimax.events, 0, sizeof wimax.events);
4990 }
4991 DPRINTF(("Configuring WiMAX coexistence\n"));
4992 return iwn_cmd(sc, IWN5000_CMD_WIMAX_COEX, &wimax, sizeof wimax, 0);
4993 }
4994
4995 /*
4996 * This function is called after the runtime firmware notifies us of its
4997 * readiness (called in a process context).
4998 */
4999 static int
5000 iwn4965_post_alive(struct iwn_softc *sc)
5001 {
5002 int error, qid;
5003
5004 if ((error = iwn_nic_lock(sc)) != 0)
5005 return error;
5006
5007 /* Clear TX scheduler state in SRAM. */
5008 sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR);
5009 iwn_mem_set_region_4(sc, sc->sched_base + IWN4965_SCHED_CTX_OFF, 0,
5010 IWN4965_SCHED_CTX_LEN / sizeof (uint32_t));
5011
5012 /* Set physical address of TX scheduler rings (1KB aligned). */
5013 iwn_prph_write(sc, IWN4965_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10);
5014
5015 IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY);
5016
5017 /* Disable chain mode for all our 16 queues. */
5018 iwn_prph_write(sc, IWN4965_SCHED_QCHAIN_SEL, 0);
5019
5020 for (qid = 0; qid < IWN4965_NTXQUEUES; qid++) {
5021 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), 0);
5022 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0);
5023
5024 /* Set scheduler window size. */
5025 iwn_mem_write(sc, sc->sched_base +
5026 IWN4965_SCHED_QUEUE_OFFSET(qid), IWN_SCHED_WINSZ);
5027 /* Set scheduler frame limit. */
5028 iwn_mem_write(sc, sc->sched_base +
5029 IWN4965_SCHED_QUEUE_OFFSET(qid) + 4,
5030 IWN_SCHED_LIMIT << 16);
5031 }
5032
5033 /* Enable interrupts for all our 16 queues. */
5034 iwn_prph_write(sc, IWN4965_SCHED_INTR_MASK, 0xffff);
5035 /* Identify TX FIFO rings (0-7). */
5036 iwn_prph_write(sc, IWN4965_SCHED_TXFACT, 0xff);
5037
5038 /* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */
5039 for (qid = 0; qid < 7; qid++) {
5040 static uint8_t qid2fifo[] = { 3, 2, 1, 0, 4, 5, 6 };
5041 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
5042 IWN4965_TXQ_STATUS_ACTIVE | qid2fifo[qid] << 1);
5043 }
5044 iwn_nic_unlock(sc);
5045 return 0;
5046 }
5047
5048 /*
5049 * This function is called after the initialization or runtime firmware
5050 * notifies us of its readiness (called in a process context).
5051 */
5052 static int
5053 iwn5000_post_alive(struct iwn_softc *sc)
5054 {
5055 int error, qid;
5056
5057 /* Switch to using ICT interrupt mode. */
5058 iwn5000_ict_reset(sc);
5059
5060 if ((error = iwn_nic_lock(sc)) != 0)
5061 return error;
5062
5063 /* Clear TX scheduler state in SRAM. */
5064 sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR);
5065 iwn_mem_set_region_4(sc, sc->sched_base + IWN5000_SCHED_CTX_OFF, 0,
5066 IWN5000_SCHED_CTX_LEN / sizeof (uint32_t));
5067
5068 /* Set physical address of TX scheduler rings (1KB aligned). */
5069 iwn_prph_write(sc, IWN5000_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10);
5070
5071 IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY);
5072
5073 /* Enable chain mode for all queues, except command queue. */
5074 iwn_prph_write(sc, IWN5000_SCHED_QCHAIN_SEL, 0xfffef);
5075 iwn_prph_write(sc, IWN5000_SCHED_AGGR_SEL, 0);
5076
5077 for (qid = 0; qid < IWN5000_NTXQUEUES; qid++) {
5078 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), 0);
5079 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0);
5080
5081 iwn_mem_write(sc, sc->sched_base +
5082 IWN5000_SCHED_QUEUE_OFFSET(qid), 0);
5083 /* Set scheduler window size and frame limit. */
5084 iwn_mem_write(sc, sc->sched_base +
5085 IWN5000_SCHED_QUEUE_OFFSET(qid) + 4,
5086 IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ);
5087 }
5088
5089 /* Enable interrupts for all our 20 queues. */
5090 iwn_prph_write(sc, IWN5000_SCHED_INTR_MASK, 0xfffff);
5091 /* Identify TX FIFO rings (0-7). */
5092 iwn_prph_write(sc, IWN5000_SCHED_TXFACT, 0xff);
5093
5094 /* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */
5095 for (qid = 0; qid < 7; qid++) {
5096 static uint8_t qid2fifo[] = { 3, 2, 1, 0, 7, 5, 6 };
5097 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
5098 IWN5000_TXQ_STATUS_ACTIVE | qid2fifo[qid]);
5099 }
5100 iwn_nic_unlock(sc);
5101
5102 /* Configure WiMAX coexistence for combo adapters. */
5103 error = iwn5000_send_wimax_coex(sc);
5104 if (error != 0) {
5105 aprint_error_dev(sc->sc_dev,
5106 "could not configure WiMAX coexistence\n");
5107 return error;
5108 }
5109 if (sc->hw_type != IWN_HW_REV_TYPE_5150) {
5110 struct iwn5000_phy_calib_crystal cmd;
5111
5112 /* Perform crystal calibration. */
5113 memset(&cmd, 0, sizeof cmd);
5114 cmd.code = IWN5000_PHY_CALIB_CRYSTAL;
5115 cmd.ngroups = 1;
5116 cmd.isvalid = 1;
5117 cmd.cap_pin[0] = le32toh(sc->eeprom_crystal) & 0xff;
5118 cmd.cap_pin[1] = (le32toh(sc->eeprom_crystal) >> 16) & 0xff;
5119 DPRINTF(("sending crystal calibration %d, %d\n",
5120 cmd.cap_pin[0], cmd.cap_pin[1]));
5121 error = iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0);
5122 if (error != 0) {
5123 aprint_error_dev(sc->sc_dev,
5124 "crystal calibration failed\n");
5125 return error;
5126 }
5127 }
5128 if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE)) {
5129 /* Query calibration from the initialization firmware. */
5130 if ((error = iwn5000_query_calibration(sc)) != 0) {
5131 aprint_error_dev(sc->sc_dev,
5132 "could not query calibration\n");
5133 return error;
5134 }
5135 /*
5136 * We have the calibration results now, reboot with the
5137 * runtime firmware (call ourselves recursively!)
5138 */
5139 iwn_hw_stop(sc);
5140 error = iwn_hw_init(sc);
5141 } else {
5142 /* Send calibration results to runtime firmware. */
5143 error = iwn5000_send_calibration(sc);
5144 }
5145 return error;
5146 }
5147
5148 /*
5149 * The firmware boot code is small and is intended to be copied directly into
5150 * the NIC internal memory (no DMA transfer).
5151 */
5152 static int
5153 iwn4965_load_bootcode(struct iwn_softc *sc, const uint8_t *ucode, int size)
5154 {
5155 int error, ntries;
5156
5157 size /= sizeof (uint32_t);
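	/* The BSM word-count register below expects the size in 32-bit words. */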
5158
5159 if ((error = iwn_nic_lock(sc)) != 0)
5160 return error;
5161
5162 /* Copy microcode image into NIC memory. */
5163 iwn_prph_write_region_4(sc, IWN_BSM_SRAM_BASE,
5164 (const uint32_t *)ucode, size);
5165
5166 iwn_prph_write(sc, IWN_BSM_WR_MEM_SRC, 0);
5167 iwn_prph_write(sc, IWN_BSM_WR_MEM_DST, IWN_FW_TEXT_BASE);
5168 iwn_prph_write(sc, IWN_BSM_WR_DWCOUNT, size);
5169
5170 /* Start boot load now. */
5171 iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START);
5172
5173 /* Wait for transfer to complete. */
5174 for (ntries = 0; ntries < 1000; ntries++) {
5175 if (!(iwn_prph_read(sc, IWN_BSM_WR_CTRL) &
5176 IWN_BSM_WR_CTRL_START))
5177 break;
5178 DELAY(10);
5179 }
5180 if (ntries == 1000) {
5181 aprint_error_dev(sc->sc_dev,
5182 "could not load boot firmware\n");
5183 iwn_nic_unlock(sc);
5184 return ETIMEDOUT;
5185 }
5186
5187 /* Enable boot after power up. */
5188 iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START_EN);
5189
5190 iwn_nic_unlock(sc);
5191 return 0;
5192 }
5193
5194 static int
5195 iwn4965_load_firmware(struct iwn_softc *sc)
5196 {
5197 struct iwn_fw_info *fw = &sc->fw;
5198 struct iwn_dma_info *dma = &sc->fw_dma;
5199 int error;
5200
5201 /* Copy initialization sections into pre-allocated DMA-safe memory. */
5202 memcpy(dma->vaddr, fw->init.data, fw->init.datasz);
5203 bus_dmamap_sync(sc->sc_dmat, dma->map, 0, fw->init.datasz,
5204 BUS_DMASYNC_PREWRITE);
5205 memcpy((char *)dma->vaddr + IWN4965_FW_DATA_MAXSZ,
5206 fw->init.text, fw->init.textsz);
5207 bus_dmamap_sync(sc->sc_dmat, dma->map, IWN4965_FW_DATA_MAXSZ,
5208 fw->init.textsz, BUS_DMASYNC_PREWRITE);
5209
5210 /* Tell adapter where to find initialization sections. */
5211 if ((error = iwn_nic_lock(sc)) != 0)
5212 return error;
5213 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4);
5214 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->init.datasz);
5215 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR,
5216 (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4);
5217 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE, fw->init.textsz);
5218 iwn_nic_unlock(sc);
5219
5220 /* Load firmware boot code. */
5221 error = iwn4965_load_bootcode(sc, fw->boot.text, fw->boot.textsz);
5222 if (error != 0) {
5223 aprint_error_dev(sc->sc_dev,
5224 "could not load boot firmware\n");
5225 return error;
5226 }
5227 /* Now press "execute". */
5228 IWN_WRITE(sc, IWN_RESET, 0);
5229
5230 /* Wait at most one second for first alive notification. */
5231 if ((error = tsleep(sc, PCATCH, "iwninit", hz)) != 0) {
5232 aprint_error_dev(sc->sc_dev,
5233 "timeout waiting for adapter to initialize\n");
5234 return error;
5235 }
5236
5237 /* Retrieve current temperature for initial TX power calibration. */
5238 sc->rawtemp = sc->ucode_info.temp[3].chan20MHz;
5239 sc->temp = iwn4965_get_temperature(sc);
5240
5241 /* Copy runtime sections into pre-allocated DMA-safe memory. */
5242 memcpy(dma->vaddr, fw->main.data, fw->main.datasz);
5243 bus_dmamap_sync(sc->sc_dmat, dma->map, 0, fw->main.datasz,
5244 BUS_DMASYNC_PREWRITE);
5245 memcpy((char *)dma->vaddr + IWN4965_FW_DATA_MAXSZ,
5246 fw->main.text, fw->main.textsz);
5247 bus_dmamap_sync(sc->sc_dmat, dma->map, IWN4965_FW_DATA_MAXSZ,
5248 fw->main.textsz, BUS_DMASYNC_PREWRITE);
5249
5250 /* Tell adapter where to find runtime sections. */
5251 if ((error = iwn_nic_lock(sc)) != 0)
5252 return error;
5253 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4);
5254 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->main.datasz);
5255 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR,
5256 (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4);
5257 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE,
5258 IWN_FW_UPDATED | fw->main.textsz);
5259 iwn_nic_unlock(sc);
5260
5261 return 0;
5262 }
5263
5264 static int
5265 iwn5000_load_firmware_section(struct iwn_softc *sc, uint32_t dst,
5266 const uint8_t *section, int size)
5267 {
5268 struct iwn_dma_info *dma = &sc->fw_dma;
5269 int error;
5270
5271 /* Copy firmware section into pre-allocated DMA-safe memory. */
5272 memcpy(dma->vaddr, section, size);
5273 bus_dmamap_sync(sc->sc_dmat, dma->map, 0, size, BUS_DMASYNC_PREWRITE);
5274
5275 if ((error = iwn_nic_lock(sc)) != 0)
5276 return error;
5277
5278 IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL),
5279 IWN_FH_TX_CONFIG_DMA_PAUSE);
5280
5281 IWN_WRITE(sc, IWN_FH_SRAM_ADDR(IWN_SRVC_DMACHNL), dst);
5282 IWN_WRITE(sc, IWN_FH_TFBD_CTRL0(IWN_SRVC_DMACHNL),
5283 IWN_LOADDR(dma->paddr));
5284 IWN_WRITE(sc, IWN_FH_TFBD_CTRL1(IWN_SRVC_DMACHNL),
5285 IWN_HIADDR(dma->paddr) << 28 | size);
5286 IWN_WRITE(sc, IWN_FH_TXBUF_STATUS(IWN_SRVC_DMACHNL),
5287 IWN_FH_TXBUF_STATUS_TBNUM(1) |
5288 IWN_FH_TXBUF_STATUS_TBIDX(1) |
5289 IWN_FH_TXBUF_STATUS_TFBD_VALID);
5290
5291 /* Kick Flow Handler to start DMA transfer. */
5292 IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL),
5293 IWN_FH_TX_CONFIG_DMA_ENA | IWN_FH_TX_CONFIG_CIRQ_HOST_ENDTFD);
5294
5295 iwn_nic_unlock(sc);
5296
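/*
 * The wakeup presumably comes from the interrupt handler on the FH
 * end-of-transfer interrupt requested via CIRQ_HOST_ENDTFD above.
 */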
5297 /* Wait at most five seconds for FH DMA transfer to complete. */
5298 return tsleep(sc, PCATCH, "iwninit", 5 * hz);
5299 }
5300
5301 static int
5302 iwn5000_load_firmware(struct iwn_softc *sc)
5303 {
5304 struct iwn_fw_part *fw;
5305 int error;
5306
5307 /* Load the initialization firmware on first boot only. */
5308 fw = (sc->sc_flags & IWN_FLAG_CALIB_DONE) ?
5309 &sc->fw.main : &sc->fw.init;
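/*
 * IWN_FLAG_CALIB_DONE is presumably set once the init ucode has
 * reported its calibration results, after which only the runtime
 * image needs to be (re)loaded.
 */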
5310
5311 error = iwn5000_load_firmware_section(sc, IWN_FW_TEXT_BASE,
5312 fw->text, fw->textsz);
5313 if (error != 0) {
5314 aprint_error_dev(sc->sc_dev,
5315 "could not load firmware %s section\n", ".text");
5316 return error;
5317 }
5318 error = iwn5000_load_firmware_section(sc, IWN_FW_DATA_BASE,
5319 fw->data, fw->datasz);
5320 if (error != 0) {
5321 aprint_error_dev(sc->sc_dev,
5322 "could not load firmware %s section\n", ".data");
5323 return error;
5324 }
5325
5326 /* Now press "execute". */
5327 IWN_WRITE(sc, IWN_RESET, 0);
5328 return 0;
5329 }
5330
5331 /*
5332 * Extract text and data sections from a legacy firmware image.
5333 */
5334 static int
5335 iwn_read_firmware_leg(struct iwn_softc *sc, struct iwn_fw_info *fw)
5336 {
5337 const uint32_t *ptr;
5338 size_t hdrlen = 24;
5339 uint32_t rev;
5340
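/*
 * Legacy image layout, as parsed below:
 *
 *   uint32_t rev;                 (API version in low bits)
 *   [uint32_t build;]             (API version >= 3 only)
 *   uint32_t main_textsz, main_datasz;
 *   uint32_t init_textsz, init_datasz;
 *   uint32_t boot_textsz;
 *   main.text | main.data | init.text | init.data | boot.text
 */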
5341 ptr = (const uint32_t *)fw->data;
5342 rev = le32toh(*ptr++);
5343
5344 /* Check firmware API version. */
5345 if (IWN_FW_API(rev) <= 1) {
5346 aprint_error_dev(sc->sc_dev,
5347 "bad firmware, need API version >=2\n");
5348 return EINVAL;
5349 }
5350 if (IWN_FW_API(rev) >= 3) {
5351 /* Skip build number (version 2 header). */
5352 hdrlen += 4;
5353 ptr++;
5354 }
5355 if (fw->size < hdrlen) {
5356 aprint_error_dev(sc->sc_dev,
5357 "firmware too short: %zd bytes\n", fw->size);
5358 return EINVAL;
5359 }
5360 fw->main.textsz = le32toh(*ptr++);
5361 fw->main.datasz = le32toh(*ptr++);
5362 fw->init.textsz = le32toh(*ptr++);
5363 fw->init.datasz = le32toh(*ptr++);
5364 fw->boot.textsz = le32toh(*ptr++);
5365
5366 /* Check that all firmware sections fit. */
5367 if (fw->size < hdrlen + fw->main.textsz + fw->main.datasz +
5368 fw->init.textsz + fw->init.datasz + fw->boot.textsz) {
5369 aprint_error_dev(sc->sc_dev,
5370 "firmware too short: %zd bytes\n", fw->size);
5371 return EINVAL;
5372 }
5373
5374 /* Get pointers to firmware sections. */
5375 fw->main.text = (const uint8_t *)ptr;
5376 fw->main.data = fw->main.text + fw->main.textsz;
5377 fw->init.text = fw->main.data + fw->main.datasz;
5378 fw->init.data = fw->init.text + fw->init.textsz;
5379 fw->boot.text = fw->init.data + fw->init.datasz;
5380 return 0;
5381 }
5382
5383 /*
5384 * Extract text and data sections from a TLV firmware image.
5385 */
5386 static int
5387 iwn_read_firmware_tlv(struct iwn_softc *sc, struct iwn_fw_info *fw,
5388 uint16_t alt)
5389 {
5390 const struct iwn_fw_tlv_hdr *hdr;
5391 const struct iwn_fw_tlv *tlv;
5392 const uint8_t *ptr, *end;
5393 uint64_t altmask;
5394 uint32_t len;
5395
5396 if (fw->size < sizeof (*hdr)) {
5397 aprint_error_dev(sc->sc_dev,
5398 "firmware too short: %zd bytes\n", fw->size);
5399 return EINVAL;
5400 }
5401 hdr = (const struct iwn_fw_tlv_hdr *)fw->data;
5402 if (hdr->signature != htole32(IWN_FW_SIGNATURE)) {
5403 aprint_error_dev(sc->sc_dev,
5404 "bad firmware signature 0x%08x\n", le32toh(hdr->signature));
5405 return EINVAL;
5406 }
5407 DPRINTF(("FW: \"%.64s\", build 0x%x\n", hdr->descr,
5408 le32toh(hdr->build)));
5409
5410 /*
5411 * Select the closest supported alternative that is less than
5412 * or equal to the specified one.
5413 */
5414 altmask = le64toh(hdr->altmask);
5415 while (alt > 0 && !(altmask & (1ULL << alt)))
5416 alt--; /* Downgrade. */
5417 DPRINTF(("using alternative %d\n", alt));
5418
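/*
 * Each record below is a struct iwn_fw_tlv header (type, alternative,
 * length) immediately followed by `len' payload bytes, with records
 * padded to a 4-byte boundary.
 */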
5419 ptr = (const uint8_t *)(hdr + 1);
5420 end = (const uint8_t *)(fw->data + fw->size);
5421
5422 /* Parse type-length-value fields. */
5423 while (ptr + sizeof (*tlv) <= end) {
5424 tlv = (const struct iwn_fw_tlv *)ptr;
5425 len = le32toh(tlv->len);
5426
5427 ptr += sizeof (*tlv);
5428 if (ptr + len > end) {
5429 aprint_error_dev(sc->sc_dev,
5430 "firmware too short: %zd bytes\n", fw->size);
5431 return EINVAL;
5432 }
5433 /* Skip other alternatives. */
5434 if (tlv->alt != 0 && tlv->alt != htole16(alt))
5435 goto next;
5436
5437 switch (le16toh(tlv->type)) {
5438 case IWN_FW_TLV_MAIN_TEXT:
5439 fw->main.text = ptr;
5440 fw->main.textsz = len;
5441 break;
5442 case IWN_FW_TLV_MAIN_DATA:
5443 fw->main.data = ptr;
5444 fw->main.datasz = len;
5445 break;
5446 case IWN_FW_TLV_INIT_TEXT:
5447 fw->init.text = ptr;
5448 fw->init.textsz = len;
5449 break;
5450 case IWN_FW_TLV_INIT_DATA:
5451 fw->init.data = ptr;
5452 fw->init.datasz = len;
5453 break;
5454 case IWN_FW_TLV_BOOT_TEXT:
5455 fw->boot.text = ptr;
5456 fw->boot.textsz = len;
5457 break;
5458 default:
5459 DPRINTF(("TLV type %d not handled\n",
5460 le16toh(tlv->type)));
5461 break;
5462 }
5463 next: /* TLV fields are 32-bit aligned. */
5464 ptr += (len + 3) & ~3;
5465 }
5466 return 0;
5467 }
5468
5469 static int
5470 iwn_read_firmware(struct iwn_softc *sc)
5471 {
5472 struct iwn_fw_info *fw = &sc->fw;
5473 firmware_handle_t fwh;
5474 int error;
5475
5476 /* Initialize for error returns */
5477 fw->data = NULL;
5478 fw->size = 0;
5479
5480 /* Open firmware image. */
5481 if ((error = firmware_open("if_iwn", sc->fwname, &fwh)) != 0) {
5482 aprint_error_dev(sc->sc_dev,
5483 "could not get firmware handle %s\n", sc->fwname);
5484 return error;
5485 }
5486 fw->size = firmware_get_size(fwh);
5487 if (fw->size < sizeof (uint32_t)) {
5488 aprint_error_dev(sc->sc_dev,
5489 "firmware too short: %zd bytes\n", fw->size);
5490 firmware_close(fwh);
5491 return EINVAL;
5492 }
5493
5494 /* Read the firmware. */
5495 fw->data = firmware_malloc(fw->size);
5496 if (fw->data == NULL) {
5497 aprint_error_dev(sc->sc_dev,
5498 "not enough memory to stock firmware %s\n", sc->fwname);
5499 firmware_close(fwh);
5500 return ENOMEM;
5501 }
5502 error = firmware_read(fwh, 0, fw->data, fw->size);
5503 firmware_close(fwh);
5504 if (error != 0) {
5505 aprint_error_dev(sc->sc_dev,
5506 "could not read firmware %s\n", sc->fwname);
5507 goto out;
5508 }
5509
5510 /* Retrieve text and data sections. */
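/*
 * TLV images apparently start with an all-zero 32-bit word ahead of
 * the signature, whereas legacy images start with the non-zero ucode
 * revision word; that is what the check below relies on.
 */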
5511 if (*(const uint32_t *)fw->data != 0) /* Legacy image. */
5512 error = iwn_read_firmware_leg(sc, fw);
5513 else
5514 error = iwn_read_firmware_tlv(sc, fw, 1);
5515 if (error != 0) {
5516 aprint_error_dev(sc->sc_dev,
5517 "could not read firmware sections\n");
5518 goto out;
5519 }
5520
5521 /* Make sure text and data sections fit in hardware memory. */
5522 if (fw->main.textsz > sc->fw_text_maxsz ||
5523 fw->main.datasz > sc->fw_data_maxsz ||
5524 fw->init.textsz > sc->fw_text_maxsz ||
5525 fw->init.datasz > sc->fw_data_maxsz ||
5526 fw->boot.textsz > IWN_FW_BOOT_TEXT_MAXSZ ||
5527 (fw->boot.textsz & 3) != 0) {
5528 aprint_error_dev(sc->sc_dev,
5529 "firmware sections too large\n");
5530 goto out;
5531 }
5532
5533 /* We can proceed with loading the firmware. */
5534 return 0;
5535 out:
5536 firmware_free(fw->data, fw->size);
5537 fw->data = NULL;
5538 fw->size = 0;
5539 return error ? error : EINVAL;
5540 }
5541
5542 static int
5543 iwn_clock_wait(struct iwn_softc *sc)
5544 {
5545 int ntries;
5546
5547 /* Set "initialization complete" bit. */
5548 IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE);
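/*
 * Setting INIT_DONE apparently requests that the device bring its MAC
 * clock up; MAC_CLOCK_READY is then polled for up to ~25 ms
 * (2500 * 10 us) below.
 */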
5549
5550 /* Wait for clock stabilization. */
5551 for (ntries = 0; ntries < 2500; ntries++) {
5552 if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_MAC_CLOCK_READY)
5553 return 0;
5554 DELAY(10);
5555 }
5556 aprint_error_dev(sc->sc_dev,
5557 "timeout waiting for clock stabilization\n");
5558 return ETIMEDOUT;
5559 }
5560
5561 static int
5562 iwn_apm_init(struct iwn_softc *sc)
5563 {
5564 pcireg_t reg;
5565 int error;
5566
5567 /* Disable L0s exit timer (NMI bug workaround). */
5568 IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_DIS_L0S_TIMER);
5569 /* Don't wait for ICH L0s (ICH bug workaround). */
5570 IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_L1A_NO_L0S_RX);
5571
5572 /* Set FH wait threshold to max (HW bug under stress workaround). */
5573 IWN_SETBITS(sc, IWN_DBG_HPET_MEM, 0xffff0000);
5574
5575 /* Enable HAP INTA to move adapter from L1a to L0s. */
5576 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_HAP_WAKE_L1A);
5577
5578 /* Retrieve PCIe Active State Power Management (ASPM). */
5579 reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
5580 sc->sc_cap_off + PCIE_LCSR);
5581 /* Workaround for HW instability in PCIe L0->L0s->L1 transition. */
5582 if (reg & PCIE_LCSR_ASPM_L1) /* L1 Entry enabled. */
5583 IWN_SETBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA);
5584 else
5585 IWN_CLRBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA);
5586
5587 if (sc->hw_type != IWN_HW_REV_TYPE_4965 &&
5588 sc->hw_type <= IWN_HW_REV_TYPE_1000)
5589 IWN_SETBITS(sc, IWN_ANA_PLL, IWN_ANA_PLL_INIT);
5590
5591 /* Wait for clock stabilization before accessing prph. */
5592 if ((error = iwn_clock_wait(sc)) != 0)
5593 return error;
5594
5595 if ((error = iwn_nic_lock(sc)) != 0)
5596 return error;
5597 if (sc->hw_type == IWN_HW_REV_TYPE_4965) {
5598 /* Enable DMA and BSM (Bootstrap State Machine). */
5599 iwn_prph_write(sc, IWN_APMG_CLK_EN,
5600 IWN_APMG_CLK_CTRL_DMA_CLK_RQT |
5601 IWN_APMG_CLK_CTRL_BSM_CLK_RQT);
5602 } else {
5603 /* Enable DMA. */
5604 iwn_prph_write(sc, IWN_APMG_CLK_EN,
5605 IWN_APMG_CLK_CTRL_DMA_CLK_RQT);
5606 }
5607 DELAY(20);
5608 /* Disable L1-Active. */
5609 iwn_prph_setbits(sc, IWN_APMG_PCI_STT, IWN_APMG_PCI_STT_L1A_DIS);
5610 iwn_nic_unlock(sc);
5611
5612 return 0;
5613 }
5614
5615 static void
5616 iwn_apm_stop_master(struct iwn_softc *sc)
5617 {
5618 int ntries;
5619
5620 /* Stop busmaster DMA activity. */
5621 IWN_SETBITS(sc, IWN_RESET, IWN_RESET_STOP_MASTER);
5622 for (ntries = 0; ntries < 100; ntries++) {
5623 if (IWN_READ(sc, IWN_RESET) & IWN_RESET_MASTER_DISABLED)
5624 return;
5625 DELAY(10);
5626 }
5627 aprint_error_dev(sc->sc_dev,
5628 "timeout waiting for master\n");
5629 }
5630
5631 static void
5632 iwn_apm_stop(struct iwn_softc *sc)
5633 {
5634 iwn_apm_stop_master(sc);
5635
5636 /* Reset the entire device. */
5637 IWN_SETBITS(sc, IWN_RESET, IWN_RESET_SW);
5638 DELAY(10);
5639 /* Clear "initialization complete" bit. */
5640 IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE);
5641 }
5642
5643 static int
5644 iwn4965_nic_config(struct iwn_softc *sc)
5645 {
5646 if (IWN_RFCFG_TYPE(sc->rfcfg) == 1) {
5647 /*
5648 * I don't believe this to be correct but this is what the
5649 * vendor driver is doing. Probably the bits should not be
5650 * shifted in IWN_RFCFG_*.
5651 */
5652 IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
5653 IWN_RFCFG_TYPE(sc->rfcfg) |
5654 IWN_RFCFG_STEP(sc->rfcfg) |
5655 IWN_RFCFG_DASH(sc->rfcfg));
5656 }
5657 IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
5658 IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI);
5659 return 0;
5660 }
5661
5662 static int
5663 iwn5000_nic_config(struct iwn_softc *sc)
5664 {
5665 uint32_t tmp;
5666 int error;
5667
5668 if (IWN_RFCFG_TYPE(sc->rfcfg) < 3) {
5669 IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
5670 IWN_RFCFG_TYPE(sc->rfcfg) |
5671 IWN_RFCFG_STEP(sc->rfcfg) |
5672 IWN_RFCFG_DASH(sc->rfcfg));
5673 }
5674 IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
5675 IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI);
5676
5677 if ((error = iwn_nic_lock(sc)) != 0)
5678 return error;
5679 iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_EARLY_PWROFF_DIS);
5680
5681 if (sc->hw_type == IWN_HW_REV_TYPE_1000) {
5682 /*
5683 * Select first Switching Voltage Regulator (1.32V) to
5684 * solve a stability issue related to noisy DC2DC line
5685 * in the silicon of 1000 Series.
5686 */
5687 tmp = iwn_prph_read(sc, IWN_APMG_DIGITAL_SVR);
5688 tmp &= ~IWN_APMG_DIGITAL_SVR_VOLTAGE_MASK;
5689 tmp |= IWN_APMG_DIGITAL_SVR_VOLTAGE_1_32;
5690 iwn_prph_write(sc, IWN_APMG_DIGITAL_SVR, tmp);
5691 }
5692 iwn_nic_unlock(sc);
5693
5694 if (sc->sc_flags & IWN_FLAG_INTERNAL_PA) {
5695 /* Use internal power amplifier only. */
5696 IWN_WRITE(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_RADIO_2X2_IPA);
5697 }
5698 if ((sc->hw_type == IWN_HW_REV_TYPE_6050 ||
5699 sc->hw_type == IWN_HW_REV_TYPE_6005) && sc->calib_ver >= 6) {
5700 /* Indicate that ROM calibration version is >=6. */
5701 IWN_SETBITS(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_CALIB_VER6);
5702 }
5703 if (sc->hw_type == IWN_HW_REV_TYPE_6005)
5704 IWN_SETBITS(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_6050_1X2);
5705 return 0;
5706 }
5707
5708 /*
5709 * Take NIC ownership over Intel Active Management Technology (AMT).
5710 */
5711 static int
5712 iwn_hw_prepare(struct iwn_softc *sc)
5713 {
5714 int ntries;
5715
5716 /* Check if hardware is ready. */
5717 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY);
5718 for (ntries = 0; ntries < 5; ntries++) {
5719 if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
5720 IWN_HW_IF_CONFIG_NIC_READY)
5721 return 0;
5722 DELAY(10);
5723 }
5724
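/*
 * Setting PREPARE presumably asks the ME/AMT firmware to release the
 * NIC; the loop below waits for PREPARE_DONE to clear before
 * requesting NIC_READY again.
 */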
5725 /* Hardware not ready, force into ready state. */
5726 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_PREPARE);
5727 for (ntries = 0; ntries < 15000; ntries++) {
5728 if (!(IWN_READ(sc, IWN_HW_IF_CONFIG) &
5729 IWN_HW_IF_CONFIG_PREPARE_DONE))
5730 break;
5731 DELAY(10);
5732 }
5733 if (ntries == 15000)
5734 return ETIMEDOUT;
5735
5736 /* Hardware should be ready now. */
5737 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY);
5738 for (ntries = 0; ntries < 5; ntries++) {
5739 if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
5740 IWN_HW_IF_CONFIG_NIC_READY)
5741 return 0;
5742 DELAY(10);
5743 }
5744 return ETIMEDOUT;
5745 }
5746
5747 static int
5748 iwn_hw_init(struct iwn_softc *sc)
5749 {
5750 struct iwn_ops *ops = &sc->ops;
5751 int error, chnl, qid;
5752
5753 /* Clear pending interrupts. */
5754 IWN_WRITE(sc, IWN_INT, 0xffffffff);
5755
5756 if ((error = iwn_apm_init(sc)) != 0) {
5757 aprint_error_dev(sc->sc_dev,
5758 "could not power ON adapter\n");
5759 return error;
5760 }
5761
5762 /* Select VMAIN power source. */
5763 if ((error = iwn_nic_lock(sc)) != 0)
5764 return error;
5765 iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_PWR_SRC_MASK);
5766 iwn_nic_unlock(sc);
5767
5768 /* Perform adapter-specific initialization. */
5769 if ((error = ops->nic_config(sc)) != 0)
5770 return error;
5771
5772 /* Initialize RX ring. */
5773 if ((error = iwn_nic_lock(sc)) != 0)
5774 return error;
5775 IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0);
5776 IWN_WRITE(sc, IWN_FH_RX_WPTR, 0);
5777 /* Set physical address of RX ring (256-byte aligned). */
5778 IWN_WRITE(sc, IWN_FH_RX_BASE, sc->rxq.desc_dma.paddr >> 8);
5779 /* Set physical address of RX status (16-byte aligned). */
5780 IWN_WRITE(sc, IWN_FH_STATUS_WPTR, sc->rxq.stat_dma.paddr >> 4);
5781 /* Enable RX. */
5782 IWN_WRITE(sc, IWN_FH_RX_CONFIG,
5783 IWN_FH_RX_CONFIG_ENA |
5784 IWN_FH_RX_CONFIG_IGN_RXF_EMPTY | /* HW bug workaround */
5785 IWN_FH_RX_CONFIG_IRQ_DST_HOST |
5786 IWN_FH_RX_CONFIG_SINGLE_FRAME |
5787 IWN_FH_RX_CONFIG_RB_TIMEOUT(0) |
5788 IWN_FH_RX_CONFIG_NRBD(IWN_RX_RING_COUNT_LOG));
5789 iwn_nic_unlock(sc);
5790 IWN_WRITE(sc, IWN_FH_RX_WPTR, (IWN_RX_RING_COUNT - 1) & ~7);
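/*
 * The FH apparently requires the RX write pointer to be a multiple of
 * 8, hence the "& ~7" rounding above.
 */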
5791
5792 if ((error = iwn_nic_lock(sc)) != 0)
5793 return error;
5794
5795 /* Initialize TX scheduler. */
5796 iwn_prph_write(sc, sc->sched_txfact_addr, 0);
5797
5798 /* Set physical address of "keep warm" page (16-byte aligned). */
5799 IWN_WRITE(sc, IWN_FH_KW_ADDR, sc->kw_dma.paddr >> 4);
5800
5801 /* Initialize TX rings. */
5802 for (qid = 0; qid < sc->ntxqs; qid++) {
5803 struct iwn_tx_ring *txq = &sc->txq[qid];
5804
5805 /* Set physical address of TX ring (256-byte aligned). */
5806 IWN_WRITE(sc, IWN_FH_CBBC_QUEUE(qid),
5807 txq->desc_dma.paddr >> 8);
5808 }
5809 iwn_nic_unlock(sc);
5810
5811 /* Enable DMA channels. */
5812 for (chnl = 0; chnl < sc->ndmachnls; chnl++) {
5813 IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl),
5814 IWN_FH_TX_CONFIG_DMA_ENA |
5815 IWN_FH_TX_CONFIG_DMA_CREDIT_ENA);
5816 }
5817
5818 /* Clear "radio off" and "commands blocked" bits. */
5819 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
5820 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CMD_BLOCKED);
5821
5822 /* Clear pending interrupts. */
5823 IWN_WRITE(sc, IWN_INT, 0xffffffff);
5824 /* Enable interrupt coalescing. */
5825 IWN_WRITE(sc, IWN_INT_COALESCING, 512 / 8);
5826 /* Enable interrupts. */
5827 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
5828
5829 /* _Really_ make sure "radio off" bit is cleared! */
5830 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
5831 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
5832
5833 /* Enable shadow registers. */
5834 if (sc->hw_type >= IWN_HW_REV_TYPE_6000)
5835 IWN_SETBITS(sc, IWN_SHADOW_REG_CTRL, 0x800fffff);
5836
5837 if ((error = ops->load_firmware(sc)) != 0) {
5838 aprint_error_dev(sc->sc_dev,
5839 "could not load firmware\n");
5840 return error;
5841 }
5842 /* Wait at most one second for firmware alive notification. */
5843 if ((error = tsleep(sc, PCATCH, "iwninit", hz)) != 0) {
5844 aprint_error_dev(sc->sc_dev,
5845 "timeout waiting for adapter to initialize\n");
5846 return error;
5847 }
5848 /* Do post-firmware initialization. */
5849 return ops->post_alive(sc);
5850 }
5851
5852 static void
5853 iwn_hw_stop(struct iwn_softc *sc)
5854 {
5855 int chnl, qid, ntries;
5856
5857 IWN_WRITE(sc, IWN_RESET, IWN_RESET_NEVO);
5858
5859 /* Disable interrupts. */
5860 IWN_WRITE(sc, IWN_INT_MASK, 0);
5861 IWN_WRITE(sc, IWN_INT, 0xffffffff);
5862 IWN_WRITE(sc, IWN_FH_INT, 0xffffffff);
5863 sc->sc_flags &= ~IWN_FLAG_USE_ICT;
5864
5865 /* Make sure we no longer hold the NIC lock. */
5866 iwn_nic_unlock(sc);
5867
5868 /* Stop TX scheduler. */
5869 iwn_prph_write(sc, sc->sched_txfact_addr, 0);
5870
5871 /* Stop all DMA channels. */
5872 if (iwn_nic_lock(sc) == 0) {
5873 for (chnl = 0; chnl < sc->ndmachnls; chnl++) {
5874 IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl), 0);
5875 for (ntries = 0; ntries < 200; ntries++) {
5876 if (IWN_READ(sc, IWN_FH_TX_STATUS) &
5877 IWN_FH_TX_STATUS_IDLE(chnl))
5878 break;
5879 DELAY(10);
5880 }
5881 }
5882 iwn_nic_unlock(sc);
5883 }
5884
5885 /* Stop RX ring. */
5886 iwn_reset_rx_ring(sc, &sc->rxq);
5887
5888 /* Reset all TX rings. */
5889 for (qid = 0; qid < sc->ntxqs; qid++)
5890 iwn_reset_tx_ring(sc, &sc->txq[qid]);
5891
5892 if (iwn_nic_lock(sc) == 0) {
5893 iwn_prph_write(sc, IWN_APMG_CLK_DIS,
5894 IWN_APMG_CLK_CTRL_DMA_CLK_RQT);
5895 iwn_nic_unlock(sc);
5896 }
5897 DELAY(5);
5898 /* Power OFF adapter. */
5899 iwn_apm_stop(sc);
5900 }
5901
5902 static int
5903 iwn_init(struct ifnet *ifp)
5904 {
5905 struct iwn_softc *sc = ifp->if_softc;
5906 struct ieee80211com *ic = &sc->sc_ic;
5907 int error;
5908
5909 mutex_enter(&sc->sc_mtx);
5910 if (sc->sc_flags & IWN_FLAG_HW_INITED)
5911 goto out;
5912 if ((error = iwn_hw_prepare(sc)) != 0) {
5913 aprint_error_dev(sc->sc_dev,
5914 "hardware not ready\n");
5915 goto fail;
5916 }
5917
5918 /* Check that the radio is not disabled by hardware switch. */
5919 if (!(IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL)) {
5920 aprint_error_dev(sc->sc_dev,
5921 "radio is disabled by hardware switch\n");
5922 error = EPERM; /* no better errno for "rfkill engaged" */
5923 goto fail;
5924 }
5925
5926 /* Read firmware images from the filesystem. */
5927 if ((error = iwn_read_firmware(sc)) != 0) {
5928 aprint_error_dev(sc->sc_dev,
5929 "could not read firmware\n");
5930 goto fail;
5931 }
5932
5933 /* Initialize interrupt mask to default value. */
5934 sc->int_mask = IWN_INT_MASK_DEF;
5935 sc->sc_flags &= ~IWN_FLAG_USE_ICT;
5936
5937 /* Initialize hardware and upload firmware. */
5938 KASSERT(sc->fw.data != NULL && sc->fw.size > 0);
5939 error = iwn_hw_init(sc);
5940 firmware_free(sc->fw.data, sc->fw.size);
5941 sc->fw.data = NULL;
5942 sc->fw.size = 0;
5943 if (error != 0) {
5944 aprint_error_dev(sc->sc_dev,
5945 "could not initialize hardware\n");
5946 goto fail;
5947 }
5948
5949 /* Configure adapter now that it is ready. */
5950 if ((error = iwn_config(sc)) != 0) {
5951 aprint_error_dev(sc->sc_dev,
5952 "could not configure device\n");
5953 goto fail;
5954 }
5955
5956 ifp->if_flags &= ~IFF_OACTIVE;
5957 ifp->if_flags |= IFF_RUNNING;
5958
5959 if (ic->ic_opmode != IEEE80211_M_MONITOR)
5960 ieee80211_begin_scan(ic, 0);
5961 else
5962 ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
5963
5964 sc->sc_flags |= IWN_FLAG_HW_INITED;
5965 out:
5966 mutex_exit(&sc->sc_mtx);
5967 return 0;
5968
5969 fail: mutex_exit(&sc->sc_mtx);
5970 iwn_stop(ifp, 1);
5971 return error;
5972 }
5973
5974 static void
5975 iwn_stop(struct ifnet *ifp, int disable)
5976 {
5977 struct iwn_softc *sc = ifp->if_softc;
5978 struct ieee80211com *ic = &sc->sc_ic;
5979
5980 if (!disable)
5981 mutex_enter(&sc->sc_mtx);
5982 sc->sc_flags &= ~IWN_FLAG_HW_INITED;
5983 ifp->if_timer = sc->sc_tx_timer = 0;
5984 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
5985
5986 ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
5987
5988 /* Power OFF hardware. */
5989 iwn_hw_stop(sc);
5990
5991 if (!disable)
5992 mutex_exit(&sc->sc_mtx);
5993 }
5994
5995 /*
5996 * XXX MCLGETI alternative
5997 *
5998 * With IWN_USE_RBUF defined, receive buffers come from the local rbuf
5999 * cache while free entries are available, with MEXTMALLOC as a fallback.
6000 * Without IWN_USE_RBUF defined, MEXTMALLOC is used exclusively.
6001 * The MCLGET4K code is used for testing an alternative mbuf cache.
6002 */
6003
6004 static struct mbuf *
6005 MCLGETIalt(struct iwn_softc *sc, int how,
6006 struct ifnet *ifp __unused, u_int size)
6007 {
6008 struct mbuf *m;
6009 #ifdef IWN_USE_RBUF
6010 struct iwn_rbuf *rbuf;
6011 #endif
6012
6013 MGETHDR(m, how, MT_DATA);
6014 if (m == NULL)
6015 return NULL;
6016
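/*
 * MGETHDR only provides the mbuf header; external storage of `size'
 * bytes is attached below, taken from the rbuf pool, a 4 KB cluster or
 * malloc'ed memory depending on the build options.
 */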
6017 #ifdef IWN_USE_RBUF
6018 if (sc->rxq.nb_free_entries > 0 &&
6019 (rbuf = iwn_alloc_rbuf(sc)) != NULL) {
6020 /* Attach buffer to mbuf header. */
6021 MEXTADD(m, rbuf->vaddr, size, 0, iwn_free_rbuf, rbuf);
6022 m->m_flags |= M_EXT_RW;
6023 } else {
6025 MEXTMALLOC(m, size, how);
6026 if ((m->m_flags & M_EXT) == 0) {
6027 m_freem(m);
6028 return NULL;
6029 }
6030 }
6031
6032 #else
6033 #ifdef MCLGET4K
6034 if (size == 4096)
6035 MCLGET4K(m, how);
6036 else
6037 panic("size must be 4k");
6038 #else
6039 MEXTMALLOC(m, size, how);
6040 #endif
6041 if ((m->m_flags & M_EXT) == 0) {
6042 m_freem(m);
6043 return NULL;
6044 }
6045 #endif
6046
6047 return m;
6048 }
6049
6050 #ifdef IWN_USE_RBUF
6051 static struct iwn_rbuf *
6052 iwn_alloc_rbuf(struct iwn_softc *sc)
6053 {
6054 struct iwn_rbuf *rbuf;
6055 mutex_enter(&sc->rxq.freelist_mtx);
6056
6057 rbuf = SLIST_FIRST(&sc->rxq.freelist);
6058 if (rbuf != NULL) {
6059 SLIST_REMOVE_HEAD(&sc->rxq.freelist, next);
6060 sc->rxq.nb_free_entries--;
6061 }
6062 mutex_exit(&sc->rxq.freelist_mtx);
6063 return rbuf;
6064 }
6065
6066 /*
6067 * This is called automatically by the network stack when the mbuf to which
6068 * our RX buffer is attached is freed.
6069 */
6070 static void
6071 iwn_free_rbuf(struct mbuf* m, void *buf, size_t size, void *arg)
6072 {
6073 struct iwn_rbuf *rbuf = arg;
6074 struct iwn_softc *sc = rbuf->sc;
6075
6076 /* Put the RX buffer back in the free list. */
6077 mutex_enter(&sc->rxq.freelist_mtx);
6078 SLIST_INSERT_HEAD(&sc->rxq.freelist, rbuf, next);
6079 /* Keep the free count in sync with the list under the same lock. */
6080 sc->rxq.nb_free_entries++;
6081 mutex_exit(&sc->rxq.freelist_mtx);

6082 if (__predict_true(m != NULL))
6083 pool_cache_put(mb_cache, m);
6084 }
6085
6086 static int
6087 iwn_alloc_rpool(struct iwn_softc *sc)
6088 {
6089 struct iwn_rx_ring *ring = &sc->rxq;
6090 struct iwn_rbuf *rbuf;
6091 int i, error;
6092
6093 mutex_init(&ring->freelist_mtx, MUTEX_DEFAULT, IPL_NET);
6094
6095 /* Allocate a big chunk of DMA'able memory... */
6096 error = iwn_dma_contig_alloc(sc->sc_dmat, &ring->buf_dma, NULL,
6097 IWN_RBUF_COUNT * IWN_RBUF_SIZE, PAGE_SIZE);
6098 if (error != 0) {
6099 aprint_error_dev(sc->sc_dev,
6100 "could not allocate RX buffers DMA memory\n");
6101 return error;
6102 }
6103 /* ...and split it into chunks of IWN_RBUF_SIZE bytes. */
6104 SLIST_INIT(&ring->freelist);
6105 for (i = 0; i < IWN_RBUF_COUNT; i++) {
6106 rbuf = &ring->rbuf[i];
6107
6108 rbuf->sc = sc; /* Backpointer for callbacks. */
6109 rbuf->vaddr = (void *)((vaddr_t)ring->buf_dma.vaddr + i * IWN_RBUF_SIZE);
6110 rbuf->paddr = ring->buf_dma.paddr + i * IWN_RBUF_SIZE;
6111
6112 SLIST_INSERT_HEAD(&ring->freelist, rbuf, next);
6113 }
6114 ring->nb_free_entries = IWN_RBUF_COUNT;
6115 return 0;
6116 }
6117
6118 static void
6119 iwn_free_rpool(struct iwn_softc *sc)
6120 {
6121 iwn_dma_contig_free(&sc->rxq.buf_dma);
6122 }
6123 #endif
6124
6125 /*
6126 * XXX code from OpenBSD src/sys/net80211/ieee80211_output.c
6127 * Copyright (c) 2001 Atsushi Onoe
6128 * Copyright (c) 2002, 2003 Sam Leffler, Errno Consulting
6129 * Copyright (c) 2007-2009 Damien Bergamini
6130 * All rights reserved.
6131 */
6132
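/*
 * Each information element is encoded as a one-byte element ID, a
 * one-byte length and the payload, which is why every helper below
 * emits the ID and length before copying the body.
 */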
6133 /*
6134 * Add an SSID element to a frame (see 7.3.2.1).
6135 */
6136 static u_int8_t *
6137 ieee80211_add_ssid(u_int8_t *frm, const u_int8_t *ssid, u_int len)
6138 {
6139 *frm++ = IEEE80211_ELEMID_SSID;
6140 *frm++ = len;
6141 memcpy(frm, ssid, len);
6142 return frm + len;
6143 }
6144
6145 /*
6146 * Add a supported rates element to a frame (see 7.3.2.2).
6147 */
6148 static u_int8_t *
6149 ieee80211_add_rates(u_int8_t *frm, const struct ieee80211_rateset *rs)
6150 {
6151 int nrates;
6152
6153 *frm++ = IEEE80211_ELEMID_RATES;
6154 nrates = min(rs->rs_nrates, IEEE80211_RATE_SIZE);
6155 *frm++ = nrates;
6156 memcpy(frm, rs->rs_rates, nrates);
6157 return frm + nrates;
6158 }
6159
6160 /*
6161 * Add an extended supported rates element to a frame (see 7.3.2.14).
6162 */
6163 static u_int8_t *
6164 ieee80211_add_xrates(u_int8_t *frm, const struct ieee80211_rateset *rs)
6165 {
6166 int nrates;
6167
6168 KASSERT(rs->rs_nrates > IEEE80211_RATE_SIZE);
6169
6170 *frm++ = IEEE80211_ELEMID_XRATES;
6171 nrates = rs->rs_nrates - IEEE80211_RATE_SIZE;
6172 *frm++ = nrates;
6173 memcpy(frm, rs->rs_rates + IEEE80211_RATE_SIZE, nrates);
6174 return frm + nrates;
6175 }
6176
6177 /*
6178 * XXX: Hack to set the current channel to the value advertised in beacons or
6179 * probe responses. Only used during AP detection.
6180 * XXX: Duplicated from if_iwi.c
6181 */
6182 static void
6183 iwn_fix_channel(struct ieee80211com *ic, struct mbuf *m)
6184 {
6185 struct ieee80211_frame *wh;
6186 uint8_t subtype;
6187 uint8_t *frm, *efrm;
6188
6189 wh = mtod(m, struct ieee80211_frame *);
6190
6191 if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_MGT)
6192 return;
6193
6194 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
6195
6196 if (subtype != IEEE80211_FC0_SUBTYPE_BEACON &&
6197 subtype != IEEE80211_FC0_SUBTYPE_PROBE_RESP)
6198 return;
6199
6200 frm = (uint8_t *)(wh + 1);
6201 efrm = mtod(m, uint8_t *) + m->m_len;
6202
6203 frm += 12; /* skip tstamp, bintval and capinfo fields */
6204 while (frm < efrm) {
6205 if (*frm == IEEE80211_ELEMID_DSPARMS)
6206 #if IEEE80211_CHAN_MAX < 255
6207 if (frm[2] <= IEEE80211_CHAN_MAX)
6208 #endif
6209 ic->ic_curchan = &ic->ic_channels[frm[2]];
6210
6211 frm += frm[1] + 2;
6212 }
6213 }
6214
6215 #ifdef notyetMODULE
6216
6217 MODULE(MODULE_CLASS_DRIVER, if_iwn, "pci");
6218
6219 #ifdef _MODULE
6220 #include "ioconf.c"
6221 #endif
6222
6223 static int
6224 if_iwn_modcmd(modcmd_t cmd, void *data)
6225 {
6226 int error = 0;
6227
6228 switch (cmd) {
6229 case MODULE_CMD_INIT:
6230 #ifdef _MODULE
6231 error = config_init_component(cfdriver_ioconf_if_iwn,
6232 cfattach_ioconf_if_iwn, cfdata_ioconf_if_iwn);
6233 #endif
6234 return error;
6235 case MODULE_CMD_FINI:
6236 #ifdef _MODULE
6237 error = config_fini_component(cfdriver_ioconf_if_iwn,
6238 cfattach_ioconf_if_iwn, cfdata_ioconf_if_iwn);
6239 #endif
6240 return error;
6241 case MODULE_CMD_AUTOUNLOAD:
6242 #ifdef _MODULE
6243 /* XXX This is not optional! */
6244 #endif
6245 return error;
6246 default:
6247 return ENOTTY;
6248 }
6249 }
6250 #endif
6251