1 /* $NetBSD: if_iwn.c,v 1.69 2013/09/14 13:11:31 joerg Exp $ */
2 /* $OpenBSD: if_iwn.c,v 1.119 2013/05/29 23:16:52 yuo Exp $ */
3
4 /*-
5 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini (at) free.fr>
6 *
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 */
19
20 /*
21 * Driver for Intel WiFi Link 4965 and 1000/5000/6000 Series 802.11 network
22 * adapters.
23 */
24 #include <sys/cdefs.h>
25 __KERNEL_RCSID(0, "$NetBSD: if_iwn.c,v 1.69 2013/09/14 13:11:31 joerg Exp $");
26
27 #define IWN_USE_RBUF /* Use local storage for RX */
28 #undef IWN_HWCRYPTO /* XXX does not even compile yet */
29
30 #include <sys/param.h>
31 #include <sys/sockio.h>
32 #include <sys/proc.h>
33 #include <sys/mbuf.h>
34 #include <sys/kernel.h>
35 #include <sys/socket.h>
36 #include <sys/systm.h>
37 #include <sys/malloc.h>
38 #ifdef notyetMODULE
39 #include <sys/module.h>
40 #endif
41 #include <sys/mutex.h>
42 #include <sys/conf.h>
43 #include <sys/kauth.h>
44 #include <sys/callout.h>
45
46 #include <dev/sysmon/sysmonvar.h>
47
48 #include <sys/bus.h>
49 #include <machine/endian.h>
50 #include <machine/intr.h>
51
52 #include <dev/pci/pcireg.h>
53 #include <dev/pci/pcivar.h>
54 #include <dev/pci/pcidevs.h>
55
56 #include <net/bpf.h>
57 #include <net/if.h>
58 #include <net/if_arp.h>
59 #include <net/if_dl.h>
60 #include <net/if_media.h>
61 #include <net/if_types.h>
62
63 #include <netinet/in.h>
64 #include <netinet/in_systm.h>
65 #include <netinet/in_var.h>
66 #include <net/if_ether.h>
67 #include <netinet/ip.h>
68
69 #include <net80211/ieee80211_var.h>
70 #include <net80211/ieee80211_amrr.h>
71 #include <net80211/ieee80211_radiotap.h>
72
73 #include <dev/firmload.h>
74
75 #include <dev/pci/if_iwnreg.h>
76 #include <dev/pci/if_iwnvar.h>
77
78 static const pci_product_id_t iwn_devices[] = {
79 PCI_PRODUCT_INTEL_WIFI_LINK_1030_1,
80 PCI_PRODUCT_INTEL_WIFI_LINK_1030_2,
81 PCI_PRODUCT_INTEL_WIFI_LINK_4965_1,
82 PCI_PRODUCT_INTEL_WIFI_LINK_4965_2,
83 PCI_PRODUCT_INTEL_WIFI_LINK_4965_3,
84 PCI_PRODUCT_INTEL_WIFI_LINK_4965_4,
85 PCI_PRODUCT_INTEL_WIFI_LINK_5100_1,
86 PCI_PRODUCT_INTEL_WIFI_LINK_5100_2,
87 PCI_PRODUCT_INTEL_WIFI_LINK_5150_1,
88 PCI_PRODUCT_INTEL_WIFI_LINK_5150_2,
89 PCI_PRODUCT_INTEL_WIFI_LINK_5300_1,
90 PCI_PRODUCT_INTEL_WIFI_LINK_5300_2,
91 PCI_PRODUCT_INTEL_WIFI_LINK_5350_1,
92 PCI_PRODUCT_INTEL_WIFI_LINK_5350_2,
93 PCI_PRODUCT_INTEL_WIFI_LINK_1000_1,
94 PCI_PRODUCT_INTEL_WIFI_LINK_1000_2,
95 PCI_PRODUCT_INTEL_WIFI_LINK_6000_3X3_1,
96 PCI_PRODUCT_INTEL_WIFI_LINK_6000_3X3_2,
97 PCI_PRODUCT_INTEL_WIFI_LINK_6000_IPA_1,
98 PCI_PRODUCT_INTEL_WIFI_LINK_6000_IPA_2,
99 PCI_PRODUCT_INTEL_WIFI_LINK_6050_2X2_1,
100 PCI_PRODUCT_INTEL_WIFI_LINK_6050_2X2_2,
101 PCI_PRODUCT_INTEL_WIFI_LINK_6005_2X2_1,
102 PCI_PRODUCT_INTEL_WIFI_LINK_6005_2X2_2,
103 PCI_PRODUCT_INTEL_WIFI_LINK_6230_1,
104 PCI_PRODUCT_INTEL_WIFI_LINK_6230_2,
105 PCI_PRODUCT_INTEL_WIFI_LINK_6235,
106 };
107
108 /*
109  * Supported rates for 802.11a/b/g modes (in 500 kb/s units).
110 */
111 static const struct ieee80211_rateset iwn_rateset_11a =
112 { 8, { 12, 18, 24, 36, 48, 72, 96, 108 } };
113
114 static const struct ieee80211_rateset iwn_rateset_11b =
115 { 4, { 2, 4, 11, 22 } };
116
117 static const struct ieee80211_rateset iwn_rateset_11g =
118 { 12, { 2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108 } };
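/*
 * (The rate values above are in 500 kb/s units, e.g. 2 = 1 Mbit/s,
 * 22 = 11 Mbit/s, 12 = 6 Mbit/s and 108 = 54 Mbit/s.)
 */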
119
120 static int iwn_match(device_t , struct cfdata *, void *);
121 static void iwn_attach(device_t , device_t , void *);
122 static int iwn4965_attach(struct iwn_softc *, pci_product_id_t);
123 static int iwn5000_attach(struct iwn_softc *, pci_product_id_t);
124 static void iwn_radiotap_attach(struct iwn_softc *);
125 static int iwn_detach(device_t , int);
126 #if 0
127 static void iwn_power(int, void *);
128 #endif
129 static bool iwn_resume(device_t, const pmf_qual_t *);
130 static int iwn_nic_lock(struct iwn_softc *);
131 static int iwn_eeprom_lock(struct iwn_softc *);
132 static int iwn_init_otprom(struct iwn_softc *);
133 static int iwn_read_prom_data(struct iwn_softc *, uint32_t, void *, int);
134 static int iwn_dma_contig_alloc(bus_dma_tag_t, struct iwn_dma_info *,
135 void **, bus_size_t, bus_size_t);
136 static void iwn_dma_contig_free(struct iwn_dma_info *);
137 static int iwn_alloc_sched(struct iwn_softc *);
138 static void iwn_free_sched(struct iwn_softc *);
139 static int iwn_alloc_kw(struct iwn_softc *);
140 static void iwn_free_kw(struct iwn_softc *);
141 static int iwn_alloc_ict(struct iwn_softc *);
142 static void iwn_free_ict(struct iwn_softc *);
143 static int iwn_alloc_fwmem(struct iwn_softc *);
144 static void iwn_free_fwmem(struct iwn_softc *);
145 static int iwn_alloc_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
146 static void iwn_reset_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
147 static void iwn_free_rx_ring(struct iwn_softc *, struct iwn_rx_ring *);
148 static int iwn_alloc_tx_ring(struct iwn_softc *, struct iwn_tx_ring *,
149 int);
150 static void iwn_reset_tx_ring(struct iwn_softc *, struct iwn_tx_ring *);
151 static void iwn_free_tx_ring(struct iwn_softc *, struct iwn_tx_ring *);
152 static void iwn5000_ict_reset(struct iwn_softc *);
153 static int iwn_read_eeprom(struct iwn_softc *);
154 static void iwn4965_read_eeprom(struct iwn_softc *);
155
156 #ifdef IWN_DEBUG
157 static void iwn4965_print_power_group(struct iwn_softc *, int);
158 #endif
159 static void iwn5000_read_eeprom(struct iwn_softc *);
160 static void iwn_read_eeprom_channels(struct iwn_softc *, int, uint32_t);
161 static void iwn_read_eeprom_enhinfo(struct iwn_softc *);
162 static struct ieee80211_node *iwn_node_alloc(struct ieee80211_node_table *);
163 static void iwn_newassoc(struct ieee80211_node *, int);
164 static int iwn_media_change(struct ifnet *);
165 static int iwn_newstate(struct ieee80211com *, enum ieee80211_state, int);
166 static void iwn_iter_func(void *, struct ieee80211_node *);
167 static void iwn_calib_timeout(void *);
168 static void iwn_rx_phy(struct iwn_softc *, struct iwn_rx_desc *,
169 struct iwn_rx_data *);
170 static void iwn_rx_done(struct iwn_softc *, struct iwn_rx_desc *,
171 struct iwn_rx_data *);
172 #ifndef IEEE80211_NO_HT
173 static void iwn_rx_compressed_ba(struct iwn_softc *, struct iwn_rx_desc *,
174 struct iwn_rx_data *);
175 #endif
176 static void iwn5000_rx_calib_results(struct iwn_softc *,
177 struct iwn_rx_desc *, struct iwn_rx_data *);
178 static void iwn_rx_statistics(struct iwn_softc *, struct iwn_rx_desc *,
179 struct iwn_rx_data *);
180 static void iwn4965_tx_done(struct iwn_softc *, struct iwn_rx_desc *,
181 struct iwn_rx_data *);
182 static void iwn5000_tx_done(struct iwn_softc *, struct iwn_rx_desc *,
183 struct iwn_rx_data *);
184 static void iwn_tx_done(struct iwn_softc *, struct iwn_rx_desc *, int,
185 uint8_t);
186 static void iwn_cmd_done(struct iwn_softc *, struct iwn_rx_desc *);
187 static void iwn_notif_intr(struct iwn_softc *);
188 static void iwn_wakeup_intr(struct iwn_softc *);
189 static void iwn_fatal_intr(struct iwn_softc *);
190 static int iwn_intr(void *);
191 static void iwn4965_update_sched(struct iwn_softc *, int, int, uint8_t,
192 uint16_t);
193 static void iwn5000_update_sched(struct iwn_softc *, int, int, uint8_t,
194 uint16_t);
195 #ifdef notyet
196 static void iwn5000_reset_sched(struct iwn_softc *, int, int);
197 #endif
198 static int iwn_tx(struct iwn_softc *, struct mbuf *,
199 struct ieee80211_node *, int);
200 static void iwn_start(struct ifnet *);
201 static void iwn_watchdog(struct ifnet *);
202 static int iwn_ioctl(struct ifnet *, u_long, void *);
203 static int iwn_cmd(struct iwn_softc *, int, const void *, int, int);
204 static int iwn4965_add_node(struct iwn_softc *, struct iwn_node_info *,
205 int);
206 static int iwn5000_add_node(struct iwn_softc *, struct iwn_node_info *,
207 int);
208 static int iwn_set_link_quality(struct iwn_softc *,
209 struct ieee80211_node *);
210 static int iwn_add_broadcast_node(struct iwn_softc *, int);
211 static void iwn_set_led(struct iwn_softc *, uint8_t, uint8_t, uint8_t);
212 static int iwn_set_critical_temp(struct iwn_softc *);
213 static int iwn_set_timing(struct iwn_softc *, struct ieee80211_node *);
214 static void iwn4965_power_calibration(struct iwn_softc *, int);
215 static int iwn4965_set_txpower(struct iwn_softc *, int);
216 static int iwn5000_set_txpower(struct iwn_softc *, int);
217 static int iwn4965_get_rssi(const struct iwn_rx_stat *);
218 static int iwn5000_get_rssi(const struct iwn_rx_stat *);
219 static int iwn_get_noise(const struct iwn_rx_general_stats *);
220 static int iwn4965_get_temperature(struct iwn_softc *);
221 static int iwn5000_get_temperature(struct iwn_softc *);
222 static int iwn_init_sensitivity(struct iwn_softc *);
223 static void iwn_collect_noise(struct iwn_softc *,
224 const struct iwn_rx_general_stats *);
225 static int iwn4965_init_gains(struct iwn_softc *);
226 static int iwn5000_init_gains(struct iwn_softc *);
227 static int iwn4965_set_gains(struct iwn_softc *);
228 static int iwn5000_set_gains(struct iwn_softc *);
229 static void iwn_tune_sensitivity(struct iwn_softc *,
230 const struct iwn_rx_stats *);
231 static int iwn_send_sensitivity(struct iwn_softc *);
232 static int iwn_set_pslevel(struct iwn_softc *, int, int, int);
233 static int iwn5000_runtime_calib(struct iwn_softc *);
234
235 static int iwn_config_bt_coex_bluetooth(struct iwn_softc *);
236 static int iwn_config_bt_coex_prio_table(struct iwn_softc *);
237 static int iwn_config_bt_coex_adv1(struct iwn_softc *);
238
239 static int iwn_config(struct iwn_softc *);
240 static int iwn_scan(struct iwn_softc *, uint16_t);
241 static int iwn_auth(struct iwn_softc *);
242 static int iwn_run(struct iwn_softc *);
243 #ifdef IWN_HWCRYPTO
244 static int iwn_set_key(struct ieee80211com *, struct ieee80211_node *,
245 struct ieee80211_key *);
246 static void iwn_delete_key(struct ieee80211com *, struct ieee80211_node *,
247 struct ieee80211_key *);
248 #endif
249 static int iwn_wme_update(struct ieee80211com *);
250 #ifndef IEEE80211_NO_HT
251 static int iwn_ampdu_rx_start(struct ieee80211com *,
252 struct ieee80211_node *, uint8_t);
253 static void iwn_ampdu_rx_stop(struct ieee80211com *,
254 struct ieee80211_node *, uint8_t);
255 static int iwn_ampdu_tx_start(struct ieee80211com *,
256 struct ieee80211_node *, uint8_t);
257 static void iwn_ampdu_tx_stop(struct ieee80211com *,
258 struct ieee80211_node *, uint8_t);
259 static void iwn4965_ampdu_tx_start(struct iwn_softc *,
260 struct ieee80211_node *, uint8_t, uint16_t);
261 static void iwn4965_ampdu_tx_stop(struct iwn_softc *,
262 uint8_t, uint16_t);
263 static void iwn5000_ampdu_tx_start(struct iwn_softc *,
264 struct ieee80211_node *, uint8_t, uint16_t);
265 static void iwn5000_ampdu_tx_stop(struct iwn_softc *,
266 uint8_t, uint16_t);
267 #endif
268 static int iwn5000_query_calibration(struct iwn_softc *);
269 static int iwn5000_send_calibration(struct iwn_softc *);
270 static int iwn5000_send_wimax_coex(struct iwn_softc *);
271 static int iwn4965_post_alive(struct iwn_softc *);
272 static int iwn5000_post_alive(struct iwn_softc *);
273 static int iwn4965_load_bootcode(struct iwn_softc *, const uint8_t *,
274 int);
275 static int iwn4965_load_firmware(struct iwn_softc *);
276 static int iwn5000_load_firmware_section(struct iwn_softc *, uint32_t,
277 const uint8_t *, int);
278 static int iwn5000_load_firmware(struct iwn_softc *);
279 static int iwn_read_firmware_leg(struct iwn_softc *,
280 struct iwn_fw_info *);
281 static int iwn_read_firmware_tlv(struct iwn_softc *,
282 struct iwn_fw_info *, uint16_t);
283 static int iwn_read_firmware(struct iwn_softc *);
284 static int iwn_clock_wait(struct iwn_softc *);
285 static int iwn_apm_init(struct iwn_softc *);
286 static void iwn_apm_stop_master(struct iwn_softc *);
287 static void iwn_apm_stop(struct iwn_softc *);
288 static int iwn4965_nic_config(struct iwn_softc *);
289 static int iwn5000_nic_config(struct iwn_softc *);
290 static int iwn_hw_prepare(struct iwn_softc *);
291 static int iwn_hw_init(struct iwn_softc *);
292 static void iwn_hw_stop(struct iwn_softc *);
293 static int iwn_init(struct ifnet *);
294 static void iwn_stop(struct ifnet *, int);
295
296 /* XXX MCLGETI alternative */
297 static struct mbuf *MCLGETIalt(struct iwn_softc *, int,
298 struct ifnet *, u_int);
299 #ifdef IWN_USE_RBUF
300 static struct iwn_rbuf *iwn_alloc_rbuf(struct iwn_softc *);
301 static void iwn_free_rbuf(struct mbuf *, void *, size_t, void *);
302 static int iwn_alloc_rpool(struct iwn_softc *);
303 static void iwn_free_rpool(struct iwn_softc *);
304 #endif
305
306 /* XXX needed by iwn_scan */
307 static u_int8_t *ieee80211_add_ssid(u_int8_t *, const u_int8_t *, u_int);
308 static u_int8_t *ieee80211_add_rates(u_int8_t *,
309 const struct ieee80211_rateset *);
310 static u_int8_t *ieee80211_add_xrates(u_int8_t *,
311 const struct ieee80211_rateset *);
312
313 static void iwn_fix_channel(struct ieee80211com *, struct mbuf *);
314
315 #ifdef IWN_DEBUG
316 #define DPRINTF(x) do { if (iwn_debug > 0) printf x; } while (0)
317 #define DPRINTFN(n, x) do { if (iwn_debug >= (n)) printf x; } while (0)
318 int iwn_debug = 0;
319 #else
320 #define DPRINTF(x)
321 #define DPRINTFN(n, x)
322 #endif
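/*
 * iwn_debug defaults to 0; raising it at run time (for instance by
 * patching the variable from ddb or a kernel debugger) enables the
 * DPRINTF/DPRINTFN output above.
 */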
323
324 CFATTACH_DECL_NEW(iwn, sizeof(struct iwn_softc), iwn_match, iwn_attach,
325 iwn_detach, NULL);
326
327 static int
328 iwn_match(device_t parent, cfdata_t match __unused, void *aux)
329 {
330 struct pci_attach_args *pa = aux;
331 size_t i;
332
333 if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_INTEL)
334 return 0;
335
336 for (i = 0; i < __arraycount(iwn_devices); i++)
337 if (PCI_PRODUCT(pa->pa_id) == iwn_devices[i])
338 return 1;
339
340 return 0;
341 }
342
343 static void
344 iwn_attach(device_t parent __unused, device_t self, void *aux)
345 {
346 struct iwn_softc *sc = device_private(self);
347 struct ieee80211com *ic = &sc->sc_ic;
348 struct ifnet *ifp = &sc->sc_ec.ec_if;
349 struct pci_attach_args *pa = aux;
350 const char *intrstr;
351 pci_intr_handle_t ih;
352 pcireg_t memtype, reg;
353 int i, error;
354
355 sc->sc_dev = self;
356 sc->sc_pct = pa->pa_pc;
357 sc->sc_pcitag = pa->pa_tag;
358 sc->sc_dmat = pa->pa_dmat;
359 mutex_init(&sc->sc_mtx, MUTEX_DEFAULT, IPL_NONE);
360
361 callout_init(&sc->calib_to, 0);
362 callout_setfunc(&sc->calib_to, iwn_calib_timeout, sc);
363
364 pci_aprint_devinfo(pa, NULL);
365
366 /*
367 * Get the offset of the PCI Express Capability Structure in PCI
368 * Configuration Space.
369 */
370 error = pci_get_capability(sc->sc_pct, sc->sc_pcitag,
371 PCI_CAP_PCIEXPRESS, &sc->sc_cap_off, NULL);
372 if (error == 0) {
373 aprint_error(": PCIe capability structure not found!\n");
374 return;
375 }
376
377 /* Clear device-specific "PCI retry timeout" register (41h). */
378 reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
379 if (reg & 0xff00)
380 pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
381
382 /* Enable bus-mastering and hardware bug workaround. */
383 /* XXX verify the bus-mastering is really needed (not in OpenBSD) */
384 reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG);
385 reg |= PCI_COMMAND_MASTER_ENABLE;
386 if (reg & PCI_COMMAND_INTERRUPT_DISABLE) {
387 DPRINTF(("PCIe INTx Disable set\n"));
388 reg &= ~PCI_COMMAND_INTERRUPT_DISABLE;
389 }
390 pci_conf_write(sc->sc_pct, sc->sc_pcitag, PCI_COMMAND_STATUS_REG, reg);
391
392 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, IWN_PCI_BAR0);
393 error = pci_mapreg_map(pa, IWN_PCI_BAR0, memtype, 0, &sc->sc_st,
394 &sc->sc_sh, NULL, &sc->sc_sz);
395 if (error != 0) {
396 aprint_error(": can't map mem space\n");
397 return;
398 }
399
400 /* Install interrupt handler. */
401 if (pci_intr_map(pa, &ih) != 0) {
402 aprint_error(": can't map interrupt\n");
403 return;
404 }
405 intrstr = pci_intr_string(sc->sc_pct, ih);
406 sc->sc_ih = pci_intr_establish(sc->sc_pct, ih, IPL_NET, iwn_intr, sc);
407 if (sc->sc_ih == NULL) {
408 aprint_error(": can't establish interrupt");
409 if (intrstr != NULL)
410 aprint_error(" at %s", intrstr);
411 aprint_error("\n");
412 return;
413 }
414 aprint_normal_dev(self, "interrupting at %s\n", intrstr);
415
416 /* Read hardware revision and attach. */
417 sc->hw_type = (IWN_READ(sc, IWN_HW_REV) >> 4) & 0xf;
418 if (sc->hw_type == IWN_HW_REV_TYPE_4965)
419 error = iwn4965_attach(sc, PCI_PRODUCT(pa->pa_id));
420 else
421 error = iwn5000_attach(sc, PCI_PRODUCT(pa->pa_id));
422 if (error != 0) {
423 aprint_error(": could not attach device\n");
424 return;
425 }
426
427 if ((error = iwn_hw_prepare(sc)) != 0) {
428 aprint_error(": hardware not ready\n");
429 return;
430 }
431
432 /* Read MAC address, channels, etc from EEPROM. */
433 if ((error = iwn_read_eeprom(sc)) != 0) {
434 aprint_error(": could not read EEPROM\n");
435 return;
436 }
437
438 /* Allocate DMA memory for firmware transfers. */
439 if ((error = iwn_alloc_fwmem(sc)) != 0) {
440 aprint_error(": could not allocate memory for firmware\n");
441 return;
442 }
443
444 /* Allocate "Keep Warm" page. */
445 if ((error = iwn_alloc_kw(sc)) != 0) {
446 aprint_error(": could not allocate keep warm page\n");
447 goto fail1;
448 }
449
450 /* Allocate ICT table for 5000 Series. */
451 if (sc->hw_type != IWN_HW_REV_TYPE_4965 &&
452 (error = iwn_alloc_ict(sc)) != 0) {
453 aprint_error(": could not allocate ICT table\n");
454 goto fail2;
455 }
456
457 /* Allocate TX scheduler "rings". */
458 if ((error = iwn_alloc_sched(sc)) != 0) {
459 aprint_error(": could not allocate TX scheduler rings\n");
460 goto fail3;
461 }
462
463 #ifdef IWN_USE_RBUF
464 /* Allocate RX buffers. */
465 if ((error = iwn_alloc_rpool(sc)) != 0) {
466 aprint_error_dev(self, "could not allocate RX buffers\n");
467 goto fail3;
468 }
469 #endif
470
471 /* Allocate TX rings (16 on 4965AGN, 20 on >=5000). */
472 for (i = 0; i < sc->ntxqs; i++) {
473 if ((error = iwn_alloc_tx_ring(sc, &sc->txq[i], i)) != 0) {
474 aprint_error(": could not allocate TX ring %d\n", i);
475 goto fail4;
476 }
477 }
478
479 /* Allocate RX ring. */
480 if ((error = iwn_alloc_rx_ring(sc, &sc->rxq)) != 0) {
481 aprint_error(": could not allocate RX ring\n");
482 goto fail4;
483 }
484
485 /* Clear pending interrupts. */
486 IWN_WRITE(sc, IWN_INT, 0xffffffff);
487
488 /* Count the number of available chains. */
489 sc->ntxchains =
490 ((sc->txchainmask >> 2) & 1) +
491 ((sc->txchainmask >> 1) & 1) +
492 ((sc->txchainmask >> 0) & 1);
493 sc->nrxchains =
494 ((sc->rxchainmask >> 2) & 1) +
495 ((sc->rxchainmask >> 1) & 1) +
496 ((sc->rxchainmask >> 0) & 1);
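	/*
	 * Each *chainmask is a 3-bit antenna bitmask (bit 0 = antenna A,
	 * bit 1 = B, bit 2 = C, cf. the IWN_ANT_* masks), so the sums above
	 * are simply population counts: e.g. a txchainmask of IWN_ANT_AB
	 * yields ntxchains = 2.
	 */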
497 aprint_normal_dev(self, "MIMO %dT%dR, %.4s, address %s\n",
498 sc->ntxchains, sc->nrxchains, sc->eeprom_domain,
499 ether_sprintf(ic->ic_myaddr));
500
501 ic->ic_ifp = ifp;
502 ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */
503 ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */
504 ic->ic_state = IEEE80211_S_INIT;
505
506 /* Set device capabilities. */
507 /* XXX OpenBSD has IEEE80211_C_WEP, IEEE80211_C_RSN,
508 * and IEEE80211_C_PMGT too. */
509 ic->ic_caps =
510 IEEE80211_C_IBSS | /* IBSS mode support */
511 IEEE80211_C_WPA | /* 802.11i */
512 IEEE80211_C_MONITOR | /* monitor mode supported */
513 IEEE80211_C_TXPMGT | /* tx power management */
514 IEEE80211_C_SHSLOT | /* short slot time supported */
515 IEEE80211_C_SHPREAMBLE | /* short preamble supported */
516 IEEE80211_C_WME; /* 802.11e */
517
518 #ifndef IEEE80211_NO_HT
519 if (sc->sc_flags & IWN_FLAG_HAS_11N) {
520 /* Set HT capabilities. */
521 ic->ic_htcaps =
522 #if IWN_RBUF_SIZE == 8192
523 IEEE80211_HTCAP_AMSDU7935 |
524 #endif
525 IEEE80211_HTCAP_CBW20_40 |
526 IEEE80211_HTCAP_SGI20 |
527 IEEE80211_HTCAP_SGI40;
528 if (sc->hw_type != IWN_HW_REV_TYPE_4965)
529 ic->ic_htcaps |= IEEE80211_HTCAP_GF;
530 if (sc->hw_type == IWN_HW_REV_TYPE_6050)
531 ic->ic_htcaps |= IEEE80211_HTCAP_SMPS_DYN;
532 else
533 ic->ic_htcaps |= IEEE80211_HTCAP_SMPS_DIS;
534 }
535 #endif /* !IEEE80211_NO_HT */
536
537 /* Set supported legacy rates. */
538 ic->ic_sup_rates[IEEE80211_MODE_11B] = iwn_rateset_11b;
539 ic->ic_sup_rates[IEEE80211_MODE_11G] = iwn_rateset_11g;
540 if (sc->sc_flags & IWN_FLAG_HAS_5GHZ) {
541 ic->ic_sup_rates[IEEE80211_MODE_11A] = iwn_rateset_11a;
542 }
543 #ifndef IEEE80211_NO_HT
544 if (sc->sc_flags & IWN_FLAG_HAS_11N) {
545 /* Set supported HT rates. */
546 ic->ic_sup_mcs[0] = 0xff; /* MCS 0-7 */
547 if (sc->nrxchains > 1)
548 			ic->ic_sup_mcs[1] = 0xff;	/* MCS 8-15 */
549 if (sc->nrxchains > 2)
550 ic->ic_sup_mcs[2] = 0xff; /* MCS 16-23 */
551 }
552 #endif
553
554 /* IBSS channel undefined for now. */
555 ic->ic_ibss_chan = &ic->ic_channels[0];
556
557 ifp->if_softc = sc;
558 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
559 ifp->if_init = iwn_init;
560 ifp->if_ioctl = iwn_ioctl;
561 ifp->if_start = iwn_start;
562 ifp->if_stop = iwn_stop;
563 ifp->if_watchdog = iwn_watchdog;
564 IFQ_SET_READY(&ifp->if_snd);
565 memcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
566
567 if_attach(ifp);
568 ieee80211_ifattach(ic);
569 ic->ic_node_alloc = iwn_node_alloc;
570 ic->ic_newassoc = iwn_newassoc;
571 #ifdef IWN_HWCRYPTO
572 ic->ic_crypto.cs_key_set = iwn_set_key;
573 ic->ic_crypto.cs_key_delete = iwn_delete_key;
574 #endif
575 ic->ic_wme.wme_update = iwn_wme_update;
576 #ifndef IEEE80211_NO_HT
577 ic->ic_ampdu_rx_start = iwn_ampdu_rx_start;
578 ic->ic_ampdu_rx_stop = iwn_ampdu_rx_stop;
579 ic->ic_ampdu_tx_start = iwn_ampdu_tx_start;
580 ic->ic_ampdu_tx_stop = iwn_ampdu_tx_stop;
581 #endif
582
583 /* Override 802.11 state transition machine. */
584 sc->sc_newstate = ic->ic_newstate;
585 ic->ic_newstate = iwn_newstate;
586 ieee80211_media_init(ic, iwn_media_change, ieee80211_media_status);
587
588 sc->amrr.amrr_min_success_threshold = 1;
589 sc->amrr.amrr_max_success_threshold = 15;
590
591 iwn_radiotap_attach(sc);
592
593 /*
594 * XXX for NetBSD, OpenBSD timeout_set replaced by
595 * callout_init and callout_setfunc, above.
596 */
597
598 if (pmf_device_register(self, NULL, iwn_resume))
599 pmf_class_network_register(self, ifp);
600 else
601 aprint_error_dev(self, "couldn't establish power handler\n");
602
603 /* XXX NetBSD add call to ieee80211_announce for dmesg. */
604 ieee80211_announce(ic);
605
606 return;
607
608 /* Free allocated memory if something failed during attachment. */
609 fail4: while (--i >= 0)
610 iwn_free_tx_ring(sc, &sc->txq[i]);
611 #ifdef IWN_USE_RBUF
612 iwn_free_rpool(sc);
613 #endif
614 iwn_free_sched(sc);
615 fail3: if (sc->ict != NULL)
616 iwn_free_ict(sc);
617 fail2: iwn_free_kw(sc);
618 fail1: iwn_free_fwmem(sc);
619 }
620
621 int
622 iwn4965_attach(struct iwn_softc *sc, pci_product_id_t pid)
623 {
624 struct iwn_ops *ops = &sc->ops;
625
626 ops->load_firmware = iwn4965_load_firmware;
627 ops->read_eeprom = iwn4965_read_eeprom;
628 ops->post_alive = iwn4965_post_alive;
629 ops->nic_config = iwn4965_nic_config;
630 ops->config_bt_coex = iwn_config_bt_coex_bluetooth;
631 ops->update_sched = iwn4965_update_sched;
632 ops->get_temperature = iwn4965_get_temperature;
633 ops->get_rssi = iwn4965_get_rssi;
634 ops->set_txpower = iwn4965_set_txpower;
635 ops->init_gains = iwn4965_init_gains;
636 ops->set_gains = iwn4965_set_gains;
637 ops->add_node = iwn4965_add_node;
638 ops->tx_done = iwn4965_tx_done;
639 #ifndef IEEE80211_NO_HT
640 ops->ampdu_tx_start = iwn4965_ampdu_tx_start;
641 ops->ampdu_tx_stop = iwn4965_ampdu_tx_stop;
642 #endif
643 sc->ntxqs = IWN4965_NTXQUEUES;
644 sc->ndmachnls = IWN4965_NDMACHNLS;
645 sc->broadcast_id = IWN4965_ID_BROADCAST;
646 sc->rxonsz = IWN4965_RXONSZ;
647 sc->schedsz = IWN4965_SCHEDSZ;
648 sc->fw_text_maxsz = IWN4965_FW_TEXT_MAXSZ;
649 sc->fw_data_maxsz = IWN4965_FW_DATA_MAXSZ;
650 sc->fwsz = IWN4965_FWSZ;
651 sc->sched_txfact_addr = IWN4965_SCHED_TXFACT;
652 sc->limits = &iwn4965_sensitivity_limits;
653 sc->fwname = "iwlwifi-4965-2.ucode";
654 /* Override chains masks, ROM is known to be broken. */
655 sc->txchainmask = IWN_ANT_AB;
656 sc->rxchainmask = IWN_ANT_ABC;
657
658 return 0;
659 }
660
661 int
662 iwn5000_attach(struct iwn_softc *sc, pci_product_id_t pid)
663 {
664 struct iwn_ops *ops = &sc->ops;
665
666 ops->load_firmware = iwn5000_load_firmware;
667 ops->read_eeprom = iwn5000_read_eeprom;
668 ops->post_alive = iwn5000_post_alive;
669 ops->nic_config = iwn5000_nic_config;
670 ops->config_bt_coex = iwn_config_bt_coex_bluetooth;
671 ops->update_sched = iwn5000_update_sched;
672 ops->get_temperature = iwn5000_get_temperature;
673 ops->get_rssi = iwn5000_get_rssi;
674 ops->set_txpower = iwn5000_set_txpower;
675 ops->init_gains = iwn5000_init_gains;
676 ops->set_gains = iwn5000_set_gains;
677 ops->add_node = iwn5000_add_node;
678 ops->tx_done = iwn5000_tx_done;
679 #ifndef IEEE80211_NO_HT
680 ops->ampdu_tx_start = iwn5000_ampdu_tx_start;
681 ops->ampdu_tx_stop = iwn5000_ampdu_tx_stop;
682 #endif
683 sc->ntxqs = IWN5000_NTXQUEUES;
684 sc->ndmachnls = IWN5000_NDMACHNLS;
685 sc->broadcast_id = IWN5000_ID_BROADCAST;
686 sc->rxonsz = IWN5000_RXONSZ;
687 sc->schedsz = IWN5000_SCHEDSZ;
688 sc->fw_text_maxsz = IWN5000_FW_TEXT_MAXSZ;
689 sc->fw_data_maxsz = IWN5000_FW_DATA_MAXSZ;
690 sc->fwsz = IWN5000_FWSZ;
691 sc->sched_txfact_addr = IWN5000_SCHED_TXFACT;
692
693 switch (sc->hw_type) {
694 case IWN_HW_REV_TYPE_5100:
695 sc->limits = &iwn5000_sensitivity_limits;
696 sc->fwname = "iwlwifi-5000-2.ucode";
697 /* Override chains masks, ROM is known to be broken. */
698 sc->txchainmask = IWN_ANT_B;
699 sc->rxchainmask = IWN_ANT_AB;
700 break;
701 case IWN_HW_REV_TYPE_5150:
702 sc->limits = &iwn5150_sensitivity_limits;
703 sc->fwname = "iwlwifi-5150-2.ucode";
704 break;
705 case IWN_HW_REV_TYPE_5300:
706 case IWN_HW_REV_TYPE_5350:
707 sc->limits = &iwn5000_sensitivity_limits;
708 sc->fwname = "iwlwifi-5000-2.ucode";
709 break;
710 case IWN_HW_REV_TYPE_1000:
711 sc->limits = &iwn1000_sensitivity_limits;
712 sc->fwname = "iwlwifi-1000-3.ucode";
713 break;
714 case IWN_HW_REV_TYPE_6000:
715 sc->limits = &iwn6000_sensitivity_limits;
716 sc->fwname = "iwlwifi-6000-4.ucode";
717 if (pid == PCI_PRODUCT_INTEL_WIFI_LINK_6000_IPA_1 ||
718 pid == PCI_PRODUCT_INTEL_WIFI_LINK_6000_IPA_2) {
719 sc->sc_flags |= IWN_FLAG_INTERNAL_PA;
720 /* Override chains masks, ROM is known to be broken. */
721 sc->txchainmask = IWN_ANT_BC;
722 sc->rxchainmask = IWN_ANT_BC;
723 }
724 break;
725 case IWN_HW_REV_TYPE_6050:
726 sc->limits = &iwn6000_sensitivity_limits;
727 sc->fwname = "iwlwifi-6050-5.ucode";
728 break;
729 case IWN_HW_REV_TYPE_6005:
730 sc->limits = &iwn6000_sensitivity_limits;
731 /* Type 6030 cards return IWN_HW_REV_TYPE_6005 */
732 if (pid == PCI_PRODUCT_INTEL_WIFI_LINK_1030_1 ||
733 pid == PCI_PRODUCT_INTEL_WIFI_LINK_1030_2 ||
734 pid == PCI_PRODUCT_INTEL_WIFI_LINK_6230_1 ||
735 pid == PCI_PRODUCT_INTEL_WIFI_LINK_6230_2 ||
736 pid == PCI_PRODUCT_INTEL_WIFI_LINK_6235) {
737 sc->fwname = "iwlwifi-6000g2b-6.ucode";
738 ops->config_bt_coex = iwn_config_bt_coex_adv1;
739 }
740 else
741 sc->fwname = "iwlwifi-6000g2a-5.ucode";
742 break;
743 default:
744 aprint_normal(": adapter type %d not supported\n", sc->hw_type);
745 return ENOTSUP;
746 }
747 return 0;
748 }
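/*
 * Note on the firmware names selected above: the image is fetched at
 * interface-up time through firmload(9), so the file is expected to be
 * installed where firmware_open() looks for it (typically
 * /libdata/firmware/if_iwn/<fwname> on NetBSD).
 */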
749
750 /*
751 * Attach the interface to 802.11 radiotap.
752 */
753 static void
754 iwn_radiotap_attach(struct iwn_softc *sc)
755 {
756 struct ifnet *ifp = sc->sc_ic.ic_ifp;
757
758 bpf_attach2(ifp, DLT_IEEE802_11_RADIO,
759 sizeof (struct ieee80211_frame) + IEEE80211_RADIOTAP_HDRLEN,
760 &sc->sc_drvbpf);
761
762 sc->sc_rxtap_len = sizeof sc->sc_rxtapu;
763 sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len);
764 sc->sc_rxtap.wr_ihdr.it_present = htole32(IWN_RX_RADIOTAP_PRESENT);
765
766 sc->sc_txtap_len = sizeof sc->sc_txtapu;
767 sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len);
768 sc->sc_txtap.wt_ihdr.it_present = htole32(IWN_TX_RADIOTAP_PRESENT);
769 }
770
771 static int
772 iwn_detach(device_t self, int flags __unused)
773 {
774 struct iwn_softc *sc = device_private(self);
775 struct ifnet *ifp = sc->sc_ic.ic_ifp;
776 int qid;
777
778 callout_stop(&sc->calib_to);
779
780 /* Uninstall interrupt handler. */
781 if (sc->sc_ih != NULL)
782 pci_intr_disestablish(sc->sc_pct, sc->sc_ih);
783
784 /* Free DMA resources. */
785 iwn_free_rx_ring(sc, &sc->rxq);
786 for (qid = 0; qid < sc->ntxqs; qid++)
787 iwn_free_tx_ring(sc, &sc->txq[qid]);
788 #ifdef IWN_USE_RBUF
789 iwn_free_rpool(sc);
790 #endif
791 iwn_free_sched(sc);
792 iwn_free_kw(sc);
793 if (sc->ict != NULL)
794 iwn_free_ict(sc);
795 iwn_free_fwmem(sc);
796
797 bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_sz);
798
799 ieee80211_ifdetach(&sc->sc_ic);
800 if_detach(ifp);
801
802 return 0;
803 }
804
805 #if 0
806 /*
807 * XXX Investigate if clearing the PCI retry timeout could eliminate
808 * the repeated scan calls. Also the calls to if_init and if_start
809 * are similar to the effect of adding the call to ifioctl_common .
810 */
811 static void
812 iwn_power(int why, void *arg)
813 {
814 struct iwn_softc *sc = arg;
815 struct ifnet *ifp;
816 pcireg_t reg;
817 int s;
818
819 if (why != PWR_RESUME)
820 return;
821
822 /* Clear device-specific "PCI retry timeout" register (41h). */
823 reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag, 0x40);
824 if (reg & 0xff00)
825 pci_conf_write(sc->sc_pct, sc->sc_pcitag, 0x40, reg & ~0xff00);
826
827 s = splnet();
828 ifp = &sc->sc_ic.ic_if;
829 if (ifp->if_flags & IFF_UP) {
830 ifp->if_init(ifp);
831 if (ifp->if_flags & IFF_RUNNING)
832 ifp->if_start(ifp);
833 }
834 splx(s);
835 }
836 #endif
837
838 static bool
839 iwn_resume(device_t dv, const pmf_qual_t *qual)
840 {
841 return true;
842 }
843
844 static int
845 iwn_nic_lock(struct iwn_softc *sc)
846 {
847 int ntries;
848
849 /* Request exclusive access to NIC. */
850 IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ);
851
852 /* Spin until we actually get the lock. */
853 for (ntries = 0; ntries < 1000; ntries++) {
854 if ((IWN_READ(sc, IWN_GP_CNTRL) &
855 (IWN_GP_CNTRL_MAC_ACCESS_ENA | IWN_GP_CNTRL_SLEEP)) ==
856 IWN_GP_CNTRL_MAC_ACCESS_ENA)
857 return 0;
858 DELAY(10);
859 }
860 return ETIMEDOUT;
861 }
862
863 static __inline void
864 iwn_nic_unlock(struct iwn_softc *sc)
865 {
866 IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ);
867 }
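/*
 * The helpers below implement indirect access to the device's internal
 * ("peripheral"/PRPH) registers and to its SRAM: an address is first
 * written to the *ADDR register and the data is then read from or
 * written to the matching *DATA register.  Callers are expected to hold
 * the NIC lock (iwn_nic_lock()) around these accesses.
 */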
868
869 static __inline uint32_t
870 iwn_prph_read(struct iwn_softc *sc, uint32_t addr)
871 {
872 IWN_WRITE(sc, IWN_PRPH_RADDR, IWN_PRPH_DWORD | addr);
873 IWN_BARRIER_READ_WRITE(sc);
874 return IWN_READ(sc, IWN_PRPH_RDATA);
875 }
876
877 static __inline void
878 iwn_prph_write(struct iwn_softc *sc, uint32_t addr, uint32_t data)
879 {
880 IWN_WRITE(sc, IWN_PRPH_WADDR, IWN_PRPH_DWORD | addr);
881 IWN_BARRIER_WRITE(sc);
882 IWN_WRITE(sc, IWN_PRPH_WDATA, data);
883 }
884
885 static __inline void
886 iwn_prph_setbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask)
887 {
888 iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) | mask);
889 }
890
891 static __inline void
892 iwn_prph_clrbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask)
893 {
894 iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) & ~mask);
895 }
896
897 static __inline void
898 iwn_prph_write_region_4(struct iwn_softc *sc, uint32_t addr,
899 const uint32_t *data, int count)
900 {
901 for (; count > 0; count--, data++, addr += 4)
902 iwn_prph_write(sc, addr, *data);
903 }
904
905 static __inline uint32_t
906 iwn_mem_read(struct iwn_softc *sc, uint32_t addr)
907 {
908 IWN_WRITE(sc, IWN_MEM_RADDR, addr);
909 IWN_BARRIER_READ_WRITE(sc);
910 return IWN_READ(sc, IWN_MEM_RDATA);
911 }
912
913 static __inline void
914 iwn_mem_write(struct iwn_softc *sc, uint32_t addr, uint32_t data)
915 {
916 IWN_WRITE(sc, IWN_MEM_WADDR, addr);
917 IWN_BARRIER_WRITE(sc);
918 IWN_WRITE(sc, IWN_MEM_WDATA, data);
919 }
920
921 #ifndef IEEE80211_NO_HT
922 static __inline void
923 iwn_mem_write_2(struct iwn_softc *sc, uint32_t addr, uint16_t data)
924 {
925 uint32_t tmp;
926
927 tmp = iwn_mem_read(sc, addr & ~3);
928 if (addr & 3)
929 tmp = (tmp & 0x0000ffff) | data << 16;
930 else
931 tmp = (tmp & 0xffff0000) | data;
932 iwn_mem_write(sc, addr & ~3, tmp);
933 }
934 #endif
935
936 static __inline void
937 iwn_mem_read_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t *data,
938 int count)
939 {
940 for (; count > 0; count--, addr += 4)
941 *data++ = iwn_mem_read(sc, addr);
942 }
943
944 static __inline void
945 iwn_mem_set_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t val,
946 int count)
947 {
948 for (; count > 0; count--, addr += 4)
949 iwn_mem_write(sc, addr, val);
950 }
951
952 static int
953 iwn_eeprom_lock(struct iwn_softc *sc)
954 {
955 int i, ntries;
956
957 for (i = 0; i < 100; i++) {
958 /* Request exclusive access to EEPROM. */
959 IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
960 IWN_HW_IF_CONFIG_EEPROM_LOCKED);
961
962 /* Spin until we actually get the lock. */
963 for (ntries = 0; ntries < 100; ntries++) {
964 if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
965 IWN_HW_IF_CONFIG_EEPROM_LOCKED)
966 return 0;
967 DELAY(10);
968 }
969 }
970 return ETIMEDOUT;
971 }
972
973 static __inline void
974 iwn_eeprom_unlock(struct iwn_softc *sc)
975 {
976 IWN_CLRBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_EEPROM_LOCKED);
977 }
978
979 /*
980 * Initialize access by host to One Time Programmable ROM.
981 * NB: This kind of ROM can be found on 1000 or 6000 Series only.
982 */
983 static int
984 iwn_init_otprom(struct iwn_softc *sc)
985 {
986 uint16_t prev = 0, base, next;
987 int count, error;
988
989 /* Wait for clock stabilization before accessing prph. */
990 if ((error = iwn_clock_wait(sc)) != 0)
991 return error;
992
993 if ((error = iwn_nic_lock(sc)) != 0)
994 return error;
995 iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ);
996 DELAY(5);
997 iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ);
998 iwn_nic_unlock(sc);
999
1000 /* Set auto clock gate disable bit for HW with OTP shadow RAM. */
1001 if (sc->hw_type != IWN_HW_REV_TYPE_1000) {
1002 IWN_SETBITS(sc, IWN_DBG_LINK_PWR_MGMT,
1003 IWN_RESET_LINK_PWR_MGMT_DIS);
1004 }
1005 IWN_CLRBITS(sc, IWN_EEPROM_GP, IWN_EEPROM_GP_IF_OWNER);
1006 /* Clear ECC status. */
1007 IWN_SETBITS(sc, IWN_OTP_GP,
1008 IWN_OTP_GP_ECC_CORR_STTS | IWN_OTP_GP_ECC_UNCORR_STTS);
1009
1010 /*
1011 * Find the block before last block (contains the EEPROM image)
1012 * for HW without OTP shadow RAM.
1013 */
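	/*
	 * The OTP is laid out as a linked list of blocks: the first 16-bit
	 * little-endian word of each block points to the next block, and a
	 * zero link marks the last one.  The loop below follows the links,
	 * keeping in "prev" the offset of the link word of the block before
	 * last; the EEPROM image starts right after that word, hence
	 * prom_base = prev + 1 (ROM addresses are in 16-bit words).
	 */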
1014 if (sc->hw_type == IWN_HW_REV_TYPE_1000) {
1015 /* Switch to absolute addressing mode. */
1016 IWN_CLRBITS(sc, IWN_OTP_GP, IWN_OTP_GP_RELATIVE_ACCESS);
1017 base = 0;
1018 for (count = 0; count < IWN1000_OTP_NBLOCKS; count++) {
1019 error = iwn_read_prom_data(sc, base, &next, 2);
1020 if (error != 0)
1021 return error;
1022 if (next == 0) /* End of linked-list. */
1023 break;
1024 prev = base;
1025 base = le16toh(next);
1026 }
1027 if (count == 0 || count == IWN1000_OTP_NBLOCKS)
1028 return EIO;
1029 /* Skip "next" word. */
1030 sc->prom_base = prev + 1;
1031 }
1032 return 0;
1033 }
1034
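/*
 * Read "count" bytes of ROM starting at 16-bit-word offset "addr"
 * (relative to sc->prom_base, which is 0 except for OTPROM parts
 * without shadow RAM, see iwn_init_otprom()).  Each access returns the
 * selected word in the upper half of the IWN_EEPROM register, which is
 * why the bytes are extracted with ">> 16" and ">> 24" below.
 */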
1035 static int
1036 iwn_read_prom_data(struct iwn_softc *sc, uint32_t addr, void *data, int count)
1037 {
1038 uint8_t *out = data;
1039 uint32_t val, tmp;
1040 int ntries;
1041
1042 addr += sc->prom_base;
1043 for (; count > 0; count -= 2, addr++) {
1044 IWN_WRITE(sc, IWN_EEPROM, addr << 2);
1045 for (ntries = 0; ntries < 10; ntries++) {
1046 val = IWN_READ(sc, IWN_EEPROM);
1047 if (val & IWN_EEPROM_READ_VALID)
1048 break;
1049 DELAY(5);
1050 }
1051 if (ntries == 10) {
1052 aprint_error_dev(sc->sc_dev,
1053 "timeout reading ROM at 0x%x\n", addr);
1054 return ETIMEDOUT;
1055 }
1056 if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) {
1057 /* OTPROM, check for ECC errors. */
1058 tmp = IWN_READ(sc, IWN_OTP_GP);
1059 if (tmp & IWN_OTP_GP_ECC_UNCORR_STTS) {
1060 aprint_error_dev(sc->sc_dev,
1061 "OTPROM ECC error at 0x%x\n", addr);
1062 return EIO;
1063 }
1064 if (tmp & IWN_OTP_GP_ECC_CORR_STTS) {
1065 /* Correctable ECC error, clear bit. */
1066 IWN_SETBITS(sc, IWN_OTP_GP,
1067 IWN_OTP_GP_ECC_CORR_STTS);
1068 }
1069 }
1070 *out++ = val >> 16;
1071 if (count > 1)
1072 *out++ = val >> 24;
1073 }
1074 return 0;
1075 }
1076
1077 static int
1078 iwn_dma_contig_alloc(bus_dma_tag_t tag, struct iwn_dma_info *dma, void **kvap,
1079 bus_size_t size, bus_size_t alignment)
1080 {
1081 int nsegs, error;
1082
1083 dma->tag = tag;
1084 dma->size = size;
1085
1086 error = bus_dmamap_create(tag, size, 1, size, 0, BUS_DMA_NOWAIT,
1087 &dma->map);
1088 if (error != 0)
1089 goto fail;
1090
1091 error = bus_dmamem_alloc(tag, size, alignment, 0, &dma->seg, 1, &nsegs,
1092 BUS_DMA_NOWAIT); /* XXX OpenBSD adds BUS_DMA_ZERO */
1093 if (error != 0)
1094 goto fail;
1095
1096 error = bus_dmamem_map(tag, &dma->seg, 1, size, &dma->vaddr,
1097 BUS_DMA_NOWAIT); /* XXX OpenBSD adds BUS_DMA_COHERENT */
1098 if (error != 0)
1099 goto fail;
1100
1101 error = bus_dmamap_load(tag, dma->map, dma->vaddr, size, NULL,
1102 BUS_DMA_NOWAIT);
1103 if (error != 0)
1104 goto fail;
1105
1106 /* XXX Presumably needed because of missing BUS_DMA_ZERO, above. */
1107 memset(dma->vaddr, 0, size);
1108 bus_dmamap_sync(tag, dma->map, 0, size, BUS_DMASYNC_PREWRITE);
1109
1110 dma->paddr = dma->map->dm_segs[0].ds_addr;
1111 if (kvap != NULL)
1112 *kvap = dma->vaddr;
1113
1114 return 0;
1115
1116 fail: iwn_dma_contig_free(dma);
1117 return error;
1118 }
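/*
 * Typical use of iwn_dma_contig_alloc() (see e.g. iwn_alloc_sched()
 * below): allocate one physically contiguous buffer with the required
 * alignment, get its kernel virtual address through "kvap" (optional)
 * and program the device with the bus address left in dma->paddr, e.g.
 *
 *	error = iwn_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
 *	    (void **)&sc->sched, sc->schedsz, 1024);
 */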
1119
1120 static void
1121 iwn_dma_contig_free(struct iwn_dma_info *dma)
1122 {
1123 if (dma->map != NULL) {
1124 if (dma->vaddr != NULL) {
1125 bus_dmamap_sync(dma->tag, dma->map, 0, dma->size,
1126 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1127 bus_dmamap_unload(dma->tag, dma->map);
1128 bus_dmamem_unmap(dma->tag, dma->vaddr, dma->size);
1129 bus_dmamem_free(dma->tag, &dma->seg, 1);
1130 dma->vaddr = NULL;
1131 }
1132 bus_dmamap_destroy(dma->tag, dma->map);
1133 dma->map = NULL;
1134 }
1135 }
1136
1137 static int
1138 iwn_alloc_sched(struct iwn_softc *sc)
1139 {
1140 /* TX scheduler rings must be aligned on a 1KB boundary. */
1141 return iwn_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
1142 (void **)&sc->sched, sc->schedsz, 1024);
1143 }
1144
1145 static void
1146 iwn_free_sched(struct iwn_softc *sc)
1147 {
1148 iwn_dma_contig_free(&sc->sched_dma);
1149 }
1150
1151 static int
1152 iwn_alloc_kw(struct iwn_softc *sc)
1153 {
1154 /* "Keep Warm" page must be aligned on a 4KB boundary. */
1155 return iwn_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, NULL, 4096,
1156 4096);
1157 }
1158
1159 static void
1160 iwn_free_kw(struct iwn_softc *sc)
1161 {
1162 iwn_dma_contig_free(&sc->kw_dma);
1163 }
1164
1165 static int
1166 iwn_alloc_ict(struct iwn_softc *sc)
1167 {
1168 /* ICT table must be aligned on a 4KB boundary. */
1169 return iwn_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
1170 (void **)&sc->ict, IWN_ICT_SIZE, 4096);
1171 }
1172
1173 static void
1174 iwn_free_ict(struct iwn_softc *sc)
1175 {
1176 iwn_dma_contig_free(&sc->ict_dma);
1177 }
1178
1179 static int
1180 iwn_alloc_fwmem(struct iwn_softc *sc)
1181 {
1182 /* Must be aligned on a 16-byte boundary. */
1183 return iwn_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma, NULL,
1184 sc->fwsz, 16);
1185 }
1186
1187 static void
1188 iwn_free_fwmem(struct iwn_softc *sc)
1189 {
1190 iwn_dma_contig_free(&sc->fw_dma);
1191 }
1192
1193 static int
1194 iwn_alloc_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
1195 {
1196 bus_size_t size;
1197 int i, error;
1198
1199 ring->cur = 0;
1200
1201 /* Allocate RX descriptors (256-byte aligned). */
1202 size = IWN_RX_RING_COUNT * sizeof (uint32_t);
1203 error = iwn_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma,
1204 (void **)&ring->desc, size, 256);
1205 if (error != 0) {
1206 aprint_error_dev(sc->sc_dev,
1207 "could not allocate RX ring DMA memory\n");
1208 goto fail;
1209 }
1210
1211 /* Allocate RX status area (16-byte aligned). */
1212 error = iwn_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
1213 (void **)&ring->stat, sizeof (struct iwn_rx_status), 16);
1214 if (error != 0) {
1215 aprint_error_dev(sc->sc_dev,
1216 "could not allocate RX status DMA memory\n");
1217 goto fail;
1218 }
1219
1220 /*
1221 * Allocate and map RX buffers.
1222 */
1223 for (i = 0; i < IWN_RX_RING_COUNT; i++) {
1224 struct iwn_rx_data *data = &ring->data[i];
1225
1226 error = bus_dmamap_create(sc->sc_dmat, IWN_RBUF_SIZE, 1,
1227 IWN_RBUF_SIZE, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1228 &data->map);
1229 if (error != 0) {
1230 aprint_error_dev(sc->sc_dev,
1231 "could not create RX buf DMA map\n");
1232 goto fail;
1233 }
1234
1235 data->m = MCLGETIalt(sc, M_DONTWAIT, NULL, IWN_RBUF_SIZE);
1236 if (data->m == NULL) {
1237 aprint_error_dev(sc->sc_dev,
1238 "could not allocate RX mbuf\n");
1239 error = ENOBUFS;
1240 goto fail;
1241 }
1242
1243 error = bus_dmamap_load(sc->sc_dmat, data->map,
1244 mtod(data->m, void *), IWN_RBUF_SIZE, NULL,
1245 BUS_DMA_NOWAIT | BUS_DMA_READ);
1246 if (error != 0) {
1247 aprint_error_dev(sc->sc_dev,
1248 			    "can't map mbuf (error %d)\n", error);
1249 goto fail;
1250 }
1251
1252 /* Set physical address of RX buffer (256-byte aligned). */
1253 ring->desc[i] = htole32(data->map->dm_segs[0].ds_addr >> 8);
1254 }
1255
1256 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0, size,
1257 BUS_DMASYNC_PREWRITE);
1258
1259 return 0;
1260
1261 fail: iwn_free_rx_ring(sc, ring);
1262 return error;
1263 }
1264
1265 static void
1266 iwn_reset_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
1267 {
1268 int ntries;
1269
1270 if (iwn_nic_lock(sc) == 0) {
1271 IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0);
1272 for (ntries = 0; ntries < 1000; ntries++) {
1273 if (IWN_READ(sc, IWN_FH_RX_STATUS) &
1274 IWN_FH_RX_STATUS_IDLE)
1275 break;
1276 DELAY(10);
1277 }
1278 iwn_nic_unlock(sc);
1279 }
1280 ring->cur = 0;
1281 sc->last_rx_valid = 0;
1282 }
1283
1284 static void
1285 iwn_free_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring)
1286 {
1287 int i;
1288
1289 iwn_dma_contig_free(&ring->desc_dma);
1290 iwn_dma_contig_free(&ring->stat_dma);
1291
1292 for (i = 0; i < IWN_RX_RING_COUNT; i++) {
1293 struct iwn_rx_data *data = &ring->data[i];
1294
1295 if (data->m != NULL) {
1296 bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1297 data->map->dm_mapsize, BUS_DMASYNC_POSTREAD);
1298 bus_dmamap_unload(sc->sc_dmat, data->map);
1299 m_freem(data->m);
1300 }
1301 if (data->map != NULL)
1302 bus_dmamap_destroy(sc->sc_dmat, data->map);
1303 }
1304 }
1305
1306 static int
1307 iwn_alloc_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring, int qid)
1308 {
1309 bus_addr_t paddr;
1310 bus_size_t size;
1311 int i, error;
1312
1313 ring->qid = qid;
1314 ring->queued = 0;
1315 ring->cur = 0;
1316
1317 /* Allocate TX descriptors (256-byte aligned). */
1318 size = IWN_TX_RING_COUNT * sizeof (struct iwn_tx_desc);
1319 error = iwn_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma,
1320 (void **)&ring->desc, size, 256);
1321 if (error != 0) {
1322 aprint_error_dev(sc->sc_dev,
1323 "could not allocate TX ring DMA memory\n");
1324 goto fail;
1325 }
1326 /*
1327 * We only use rings 0 through 4 (4 EDCA + cmd) so there is no need
1328 * to allocate commands space for other rings.
1329 * XXX Do we really need to allocate descriptors for other rings?
1330 */
1331 if (qid > 4)
1332 return 0;
1333
1334 size = IWN_TX_RING_COUNT * sizeof (struct iwn_tx_cmd);
1335 error = iwn_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma,
1336 (void **)&ring->cmd, size, 4);
1337 if (error != 0) {
1338 aprint_error_dev(sc->sc_dev,
1339 "could not allocate TX cmd DMA memory\n");
1340 goto fail;
1341 }
1342
1343 paddr = ring->cmd_dma.paddr;
1344 for (i = 0; i < IWN_TX_RING_COUNT; i++) {
1345 struct iwn_tx_data *data = &ring->data[i];
1346
1347 data->cmd_paddr = paddr;
1348 data->scratch_paddr = paddr + 12;
1349 paddr += sizeof (struct iwn_tx_cmd);
1350
1351 error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
1352 IWN_MAX_SCATTER - 1, MCLBYTES, 0, BUS_DMA_NOWAIT,
1353 &data->map);
1354 if (error != 0) {
1355 aprint_error_dev(sc->sc_dev,
1356 "could not create TX buf DMA map\n");
1357 goto fail;
1358 }
1359 }
1360 return 0;
1361
1362 fail: iwn_free_tx_ring(sc, ring);
1363 return error;
1364 }
1365
1366 static void
1367 iwn_reset_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring)
1368 {
1369 int i;
1370
1371 for (i = 0; i < IWN_TX_RING_COUNT; i++) {
1372 struct iwn_tx_data *data = &ring->data[i];
1373
1374 if (data->m != NULL) {
1375 bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1376 data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1377 bus_dmamap_unload(sc->sc_dmat, data->map);
1378 m_freem(data->m);
1379 data->m = NULL;
1380 }
1381 }
1382 /* Clear TX descriptors. */
1383 memset(ring->desc, 0, ring->desc_dma.size);
1384 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map, 0,
1385 ring->desc_dma.size, BUS_DMASYNC_PREWRITE);
1386 sc->qfullmsk &= ~(1 << ring->qid);
1387 ring->queued = 0;
1388 ring->cur = 0;
1389 }
1390
1391 static void
1392 iwn_free_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring)
1393 {
1394 int i;
1395
1396 iwn_dma_contig_free(&ring->desc_dma);
1397 iwn_dma_contig_free(&ring->cmd_dma);
1398
1399 for (i = 0; i < IWN_TX_RING_COUNT; i++) {
1400 struct iwn_tx_data *data = &ring->data[i];
1401
1402 if (data->m != NULL) {
1403 bus_dmamap_sync(sc->sc_dmat, data->map, 0,
1404 data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
1405 bus_dmamap_unload(sc->sc_dmat, data->map);
1406 m_freem(data->m);
1407 }
1408 if (data->map != NULL)
1409 bus_dmamap_destroy(sc->sc_dmat, data->map);
1410 }
1411 }
1412
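/*
 * In ICT mode the 5000 series writes pending interrupt causes into the
 * host-resident table allocated in iwn_alloc_ict() (IWN_ICT_SIZE bytes,
 * 4KB-aligned), and iwn_intr() reads the causes from that table rather
 * than from the IWN_INT register (see the IWN_FLAG_USE_ICT flag set
 * below).
 */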
1413 static void
1414 iwn5000_ict_reset(struct iwn_softc *sc)
1415 {
1416 /* Disable interrupts. */
1417 IWN_WRITE(sc, IWN_INT_MASK, 0);
1418
1419 /* Reset ICT table. */
1420 memset(sc->ict, 0, IWN_ICT_SIZE);
1421 sc->ict_cur = 0;
1422
1423 /* Set physical address of ICT table (4KB aligned). */
1424 DPRINTF(("enabling ICT\n"));
1425 IWN_WRITE(sc, IWN_DRAM_INT_TBL, IWN_DRAM_INT_TBL_ENABLE |
1426 IWN_DRAM_INT_TBL_WRAP_CHECK | sc->ict_dma.paddr >> 12);
1427
1428 /* Enable periodic RX interrupt. */
1429 sc->int_mask |= IWN_INT_RX_PERIODIC;
1430 /* Switch to ICT interrupt mode in driver. */
1431 sc->sc_flags |= IWN_FLAG_USE_ICT;
1432
1433 /* Re-enable interrupts. */
1434 IWN_WRITE(sc, IWN_INT, 0xffffffff);
1435 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
1436 }
1437
1438 static int
1439 iwn_read_eeprom(struct iwn_softc *sc)
1440 {
1441 struct iwn_ops *ops = &sc->ops;
1442 struct ieee80211com *ic = &sc->sc_ic;
1443 uint16_t val;
1444 int error;
1445
1446 /* Check whether adapter has an EEPROM or an OTPROM. */
1447 if (sc->hw_type >= IWN_HW_REV_TYPE_1000 &&
1448 (IWN_READ(sc, IWN_OTP_GP) & IWN_OTP_GP_DEV_SEL_OTP))
1449 sc->sc_flags |= IWN_FLAG_HAS_OTPROM;
1450 DPRINTF(("%s found\n", (sc->sc_flags & IWN_FLAG_HAS_OTPROM) ?
1451 "OTPROM" : "EEPROM"));
1452
1453 /* Adapter has to be powered on for EEPROM access to work. */
1454 if ((error = iwn_apm_init(sc)) != 0) {
1455 aprint_error_dev(sc->sc_dev,
1456 "could not power ON adapter\n");
1457 return error;
1458 }
1459
1460 if ((IWN_READ(sc, IWN_EEPROM_GP) & 0x7) == 0) {
1461 aprint_error_dev(sc->sc_dev,
1462 "bad ROM signature\n");
1463 return EIO;
1464 }
1465 if ((error = iwn_eeprom_lock(sc)) != 0) {
1466 aprint_error_dev(sc->sc_dev,
1467 "could not lock ROM (error=%d)\n", error);
1468 return error;
1469 }
1470 if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) {
1471 if ((error = iwn_init_otprom(sc)) != 0) {
1472 aprint_error_dev(sc->sc_dev,
1473 "could not initialize OTPROM\n");
1474 return error;
1475 }
1476 }
1477
1478 iwn_read_prom_data(sc, IWN_EEPROM_SKU_CAP, &val, 2);
1479 DPRINTF(("SKU capabilities=0x%04x\n", le16toh(val)));
1480 /* Check if HT support is bonded out. */
1481 if (val & htole16(IWN_EEPROM_SKU_CAP_11N))
1482 sc->sc_flags |= IWN_FLAG_HAS_11N;
1483
1484 iwn_read_prom_data(sc, IWN_EEPROM_RFCFG, &val, 2);
1485 sc->rfcfg = le16toh(val);
1486 DPRINTF(("radio config=0x%04x\n", sc->rfcfg));
1487 /* Read Tx/Rx chains from ROM unless it's known to be broken. */
1488 if (sc->txchainmask == 0)
1489 sc->txchainmask = IWN_RFCFG_TXANTMSK(sc->rfcfg);
1490 if (sc->rxchainmask == 0)
1491 sc->rxchainmask = IWN_RFCFG_RXANTMSK(sc->rfcfg);
1492
1493 /* Read MAC address. */
1494 iwn_read_prom_data(sc, IWN_EEPROM_MAC, ic->ic_myaddr, 6);
1495
1496 /* Read adapter-specific information from EEPROM. */
1497 ops->read_eeprom(sc);
1498
1499 iwn_apm_stop(sc); /* Power OFF adapter. */
1500
1501 iwn_eeprom_unlock(sc);
1502 return 0;
1503 }
1504
1505 static void
1506 iwn4965_read_eeprom(struct iwn_softc *sc)
1507 {
1508 uint32_t addr;
1509 uint16_t val;
1510 int i;
1511
1512 /* Read regulatory domain (4 ASCII characters). */
1513 iwn_read_prom_data(sc, IWN4965_EEPROM_DOMAIN, sc->eeprom_domain, 4);
1514
1515 /* Read the list of authorized channels (20MHz ones only). */
1516 for (i = 0; i < 5; i++) {
1517 addr = iwn4965_regulatory_bands[i];
1518 iwn_read_eeprom_channels(sc, i, addr);
1519 }
1520
1521 /* Read maximum allowed TX power for 2GHz and 5GHz bands. */
1522 iwn_read_prom_data(sc, IWN4965_EEPROM_MAXPOW, &val, 2);
1523 sc->maxpwr2GHz = val & 0xff;
1524 sc->maxpwr5GHz = val >> 8;
1525 /* Check that EEPROM values are within valid range. */
1526 if (sc->maxpwr5GHz < 20 || sc->maxpwr5GHz > 50)
1527 sc->maxpwr5GHz = 38;
1528 if (sc->maxpwr2GHz < 20 || sc->maxpwr2GHz > 50)
1529 sc->maxpwr2GHz = 38;
1530 DPRINTF(("maxpwr 2GHz=%d 5GHz=%d\n", sc->maxpwr2GHz, sc->maxpwr5GHz));
1531
1532 /* Read samples for each TX power group. */
1533 iwn_read_prom_data(sc, IWN4965_EEPROM_BANDS, sc->bands,
1534 sizeof sc->bands);
1535
1536 /* Read voltage at which samples were taken. */
1537 iwn_read_prom_data(sc, IWN4965_EEPROM_VOLTAGE, &val, 2);
1538 sc->eeprom_voltage = (int16_t)le16toh(val);
1539 DPRINTF(("voltage=%d (in 0.3V)\n", sc->eeprom_voltage));
1540
1541 #ifdef IWN_DEBUG
1542 /* Print samples. */
1543 if (iwn_debug > 0) {
1544 for (i = 0; i < IWN_NBANDS; i++)
1545 iwn4965_print_power_group(sc, i);
1546 }
1547 #endif
1548 }
1549
1550 #ifdef IWN_DEBUG
1551 static void
1552 iwn4965_print_power_group(struct iwn_softc *sc, int i)
1553 {
1554 struct iwn4965_eeprom_band *band = &sc->bands[i];
1555 struct iwn4965_eeprom_chan_samples *chans = band->chans;
1556 int j, c;
1557
1558 aprint_normal("===band %d===\n", i);
1559 aprint_normal("chan lo=%d, chan hi=%d\n", band->lo, band->hi);
1560 aprint_normal("chan1 num=%d\n", chans[0].num);
1561 for (c = 0; c < 2; c++) {
1562 for (j = 0; j < IWN_NSAMPLES; j++) {
1563 aprint_normal("chain %d, sample %d: temp=%d gain=%d "
1564 "power=%d pa_det=%d\n", c, j,
1565 chans[0].samples[c][j].temp,
1566 chans[0].samples[c][j].gain,
1567 chans[0].samples[c][j].power,
1568 chans[0].samples[c][j].pa_det);
1569 }
1570 }
1571 aprint_normal("chan2 num=%d\n", chans[1].num);
1572 for (c = 0; c < 2; c++) {
1573 for (j = 0; j < IWN_NSAMPLES; j++) {
1574 aprint_normal("chain %d, sample %d: temp=%d gain=%d "
1575 "power=%d pa_det=%d\n", c, j,
1576 chans[1].samples[c][j].temp,
1577 chans[1].samples[c][j].gain,
1578 chans[1].samples[c][j].power,
1579 chans[1].samples[c][j].pa_det);
1580 }
1581 }
1582 }
1583 #endif
1584
1585 static void
1586 iwn5000_read_eeprom(struct iwn_softc *sc)
1587 {
1588 struct iwn5000_eeprom_calib_hdr hdr;
1589 int32_t volt;
1590 uint32_t base, addr;
1591 uint16_t val;
1592 int i;
1593
1594 /* Read regulatory domain (4 ASCII characters). */
1595 iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2);
1596 base = le16toh(val);
1597 iwn_read_prom_data(sc, base + IWN5000_EEPROM_DOMAIN,
1598 sc->eeprom_domain, 4);
1599
1600 /* Read the list of authorized channels (20MHz ones only). */
1601 for (i = 0; i < 5; i++) {
1602 addr = base + iwn5000_regulatory_bands[i];
1603 iwn_read_eeprom_channels(sc, i, addr);
1604 }
1605
1606 /* Read enhanced TX power information for 6000 Series. */
1607 if (sc->hw_type >= IWN_HW_REV_TYPE_6000)
1608 iwn_read_eeprom_enhinfo(sc);
1609
1610 iwn_read_prom_data(sc, IWN5000_EEPROM_CAL, &val, 2);
1611 base = le16toh(val);
1612 iwn_read_prom_data(sc, base, &hdr, sizeof hdr);
1613 DPRINTF(("calib version=%u pa type=%u voltage=%u\n",
1614 hdr.version, hdr.pa_type, le16toh(hdr.volt)));
1615 sc->calib_ver = hdr.version;
1616
1617 if (sc->hw_type == IWN_HW_REV_TYPE_5150) {
1618 /* Compute temperature offset. */
1619 iwn_read_prom_data(sc, base + IWN5000_EEPROM_TEMP, &val, 2);
1620 sc->eeprom_temp = le16toh(val);
1621 iwn_read_prom_data(sc, base + IWN5000_EEPROM_VOLT, &val, 2);
1622 volt = le16toh(val);
1623 sc->temp_off = sc->eeprom_temp - (volt / -5);
1624 DPRINTF(("temp=%d volt=%d offset=%dK\n",
1625 sc->eeprom_temp, volt, sc->temp_off));
1626 } else {
1627 /* Read crystal calibration. */
1628 iwn_read_prom_data(sc, base + IWN5000_EEPROM_CRYSTAL,
1629 &sc->eeprom_crystal, sizeof (uint32_t));
1630 DPRINTF(("crystal calibration 0x%08x\n",
1631 le32toh(sc->eeprom_crystal)));
1632 }
1633 }
1634
1635 static void
1636 iwn_read_eeprom_channels(struct iwn_softc *sc, int n, uint32_t addr)
1637 {
1638 struct ieee80211com *ic = &sc->sc_ic;
1639 const struct iwn_chan_band *band = &iwn_bands[n];
1640 struct iwn_eeprom_chan channels[IWN_MAX_CHAN_PER_BAND];
1641 uint8_t chan;
1642 int i;
1643
1644 iwn_read_prom_data(sc, addr, channels,
1645 band->nchan * sizeof (struct iwn_eeprom_chan));
1646
1647 for (i = 0; i < band->nchan; i++) {
1648 if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID))
1649 continue;
1650
1651 chan = band->chan[i];
1652
1653 if (n == 0) { /* 2GHz band */
1654 ic->ic_channels[chan].ic_freq =
1655 ieee80211_ieee2mhz(chan, IEEE80211_CHAN_2GHZ);
1656 ic->ic_channels[chan].ic_flags =
1657 IEEE80211_CHAN_CCK | IEEE80211_CHAN_OFDM |
1658 IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ;
1659
1660 } else { /* 5GHz band */
1661 /*
1662 * Some adapters support channels 7, 8, 11 and 12
1663 * both in the 2GHz and 4.9GHz bands.
1664 * Because of limitations in our net80211 layer,
1665 * we don't support them in the 4.9GHz band.
1666 */
1667 if (chan <= 14)
1668 continue;
1669
1670 ic->ic_channels[chan].ic_freq =
1671 ieee80211_ieee2mhz(chan, IEEE80211_CHAN_5GHZ);
1672 ic->ic_channels[chan].ic_flags = IEEE80211_CHAN_A;
1673 /* We have at least one valid 5GHz channel. */
1674 sc->sc_flags |= IWN_FLAG_HAS_5GHZ;
1675 }
1676
1677 /* Is active scan allowed on this channel? */
1678 if (!(channels[i].flags & IWN_EEPROM_CHAN_ACTIVE)) {
1679 ic->ic_channels[chan].ic_flags |=
1680 IEEE80211_CHAN_PASSIVE;
1681 }
1682
1683 /* Save maximum allowed TX power for this channel. */
1684 sc->maxpwr[chan] = channels[i].maxpwr;
1685
1686 DPRINTF(("adding chan %d flags=0x%x maxpwr=%d\n",
1687 chan, channels[i].flags, sc->maxpwr[chan]));
1688 }
1689 }
1690
1691 static void
1692 iwn_read_eeprom_enhinfo(struct iwn_softc *sc)
1693 {
1694 struct iwn_eeprom_enhinfo enhinfo[35];
1695 uint16_t val, base;
1696 int8_t maxpwr;
1697 int i;
1698
1699 iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2);
1700 base = le16toh(val);
1701 iwn_read_prom_data(sc, base + IWN6000_EEPROM_ENHINFO,
1702 enhinfo, sizeof enhinfo);
1703
1704 memset(sc->enh_maxpwr, 0, sizeof sc->enh_maxpwr);
1705 for (i = 0; i < __arraycount(enhinfo); i++) {
1706 if (enhinfo[i].chan == 0 || enhinfo[i].reserved != 0)
1707 continue; /* Skip invalid entries. */
1708
1709 maxpwr = 0;
1710 if (sc->txchainmask & IWN_ANT_A)
1711 maxpwr = MAX(maxpwr, enhinfo[i].chain[0]);
1712 if (sc->txchainmask & IWN_ANT_B)
1713 maxpwr = MAX(maxpwr, enhinfo[i].chain[1]);
1714 if (sc->txchainmask & IWN_ANT_C)
1715 maxpwr = MAX(maxpwr, enhinfo[i].chain[2]);
1716 if (sc->ntxchains == 2)
1717 maxpwr = MAX(maxpwr, enhinfo[i].mimo2);
1718 else if (sc->ntxchains == 3)
1719 maxpwr = MAX(maxpwr, enhinfo[i].mimo3);
1720 maxpwr /= 2; /* Convert half-dBm to dBm. */
1721
1722 DPRINTF(("enhinfo %d, maxpwr=%d\n", i, maxpwr));
1723 sc->enh_maxpwr[i] = maxpwr;
1724 }
1725 }
1726
1727 static struct ieee80211_node *
1728 iwn_node_alloc(struct ieee80211_node_table *ic __unused)
1729 {
1730 return malloc(sizeof (struct iwn_node), M_80211_NODE, M_NOWAIT | M_ZERO);
1731 }
1732
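/*
 * For every legacy rate negotiated with the node, cache the index of
 * the matching entry in the iwn_rates[] table (rates are compared in
 * 500 kb/s units), so that the transmit path can map ni_txrate to a
 * hardware rate index without searching the table again.
 */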
1733 static void
1734 iwn_newassoc(struct ieee80211_node *ni, int isnew)
1735 {
1736 struct iwn_softc *sc = ni->ni_ic->ic_ifp->if_softc;
1737 struct iwn_node *wn = (void *)ni;
1738 uint8_t rate;
1739 int ridx, i;
1740
1741 ieee80211_amrr_node_init(&sc->amrr, &wn->amn);
1742 /* Start at lowest available bit-rate, AMRR will raise. */
1743 ni->ni_txrate = 0;
1744
1745 for (i = 0; i < ni->ni_rates.rs_nrates; i++) {
1746 rate = ni->ni_rates.rs_rates[i] & IEEE80211_RATE_VAL;
1747 /* Map 802.11 rate to HW rate index. */
1748 for (ridx = 0; ridx <= IWN_RIDX_MAX; ridx++)
1749 if (iwn_rates[ridx].rate == rate)
1750 break;
1751 wn->ridx[i] = ridx;
1752 }
1753 }
1754
1755 static int
1756 iwn_media_change(struct ifnet *ifp)
1757 {
1758 struct iwn_softc *sc = ifp->if_softc;
1759 struct ieee80211com *ic = &sc->sc_ic;
1760 uint8_t rate, ridx;
1761 int error;
1762
1763 error = ieee80211_media_change(ifp);
1764 if (error != ENETRESET)
1765 return error;
1766
1767 if (ic->ic_fixed_rate != -1) {
1768 rate = ic->ic_sup_rates[ic->ic_curmode].
1769 rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL;
1770 /* Map 802.11 rate to HW rate index. */
1771 for (ridx = 0; ridx <= IWN_RIDX_MAX; ridx++)
1772 if (iwn_rates[ridx].rate == rate)
1773 break;
1774 sc->fixed_ridx = ridx;
1775 }
1776
1777 if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
1778 (IFF_UP | IFF_RUNNING)) {
1779 iwn_stop(ifp, 0);
1780 error = iwn_init(ifp);
1781 }
1782 return error;
1783 }
1784
1785 static int
1786 iwn_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
1787 {
1788 struct ifnet *ifp = ic->ic_ifp;
1789 struct iwn_softc *sc = ifp->if_softc;
1790 int error;
1791
1792 callout_stop(&sc->calib_to);
1793
1794 switch (nstate) {
1795 case IEEE80211_S_SCAN:
1796 /* XXX Do not abort a running scan. */
1797 if (sc->sc_flags & IWN_FLAG_SCANNING) {
1798 if (ic->ic_state != nstate)
1799 aprint_error_dev(sc->sc_dev, "scan request(%d) "
1800 "while scanning(%d) ignored\n", nstate,
1801 ic->ic_state);
1802 break;
1803 }
1804
1805 /* XXX Not sure if call and flags are needed. */
1806 ieee80211_node_table_reset(&ic->ic_scan);
1807 ic->ic_flags |= IEEE80211_F_SCAN | IEEE80211_F_ASCAN;
1808 sc->sc_flags |= IWN_FLAG_SCANNING;
1809
1810 /* Make the link LED blink while we're scanning. */
1811 iwn_set_led(sc, IWN_LED_LINK, 10, 10);
1812
1813 if ((error = iwn_scan(sc, IEEE80211_CHAN_2GHZ)) != 0) {
1814 aprint_error_dev(sc->sc_dev,
1815 "could not initiate scan\n");
1816 return error;
1817 }
1818 ic->ic_state = nstate;
1819 return 0;
1820
1821 case IEEE80211_S_ASSOC:
1822 if (ic->ic_state != IEEE80211_S_RUN)
1823 break;
1824 /* FALLTHROUGH */
1825 case IEEE80211_S_AUTH:
1826 /* Reset state to handle reassociations correctly. */
1827 sc->rxon.associd = 0;
1828 sc->rxon.filter &= ~htole32(IWN_FILTER_BSS);
1829 sc->calib.state = IWN_CALIB_STATE_INIT;
1830
1831 if ((error = iwn_auth(sc)) != 0) {
1832 aprint_error_dev(sc->sc_dev,
1833 "could not move to auth state\n");
1834 return error;
1835 }
1836 break;
1837
1838 case IEEE80211_S_RUN:
1839 if ((error = iwn_run(sc)) != 0) {
1840 aprint_error_dev(sc->sc_dev,
1841 "could not move to run state\n");
1842 return error;
1843 }
1844 break;
1845
1846 case IEEE80211_S_INIT:
1847 sc->sc_flags &= ~IWN_FLAG_SCANNING;
1848 sc->calib.state = IWN_CALIB_STATE_INIT;
1849 break;
1850 }
1851
1852 return sc->sc_newstate(ic, nstate, arg);
1853 }
1854
1855 static void
1856 iwn_iter_func(void *arg, struct ieee80211_node *ni)
1857 {
1858 struct iwn_softc *sc = arg;
1859 struct iwn_node *wn = (struct iwn_node *)ni;
1860
1861 ieee80211_amrr_choose(&sc->amrr, ni, &wn->amn);
1862 }
1863
1864 static void
1865 iwn_calib_timeout(void *arg)
1866 {
1867 struct iwn_softc *sc = arg;
1868 struct ieee80211com *ic = &sc->sc_ic;
1869 int s;
1870
1871 s = splnet();
1872 if (ic->ic_fixed_rate == -1) {
1873 if (ic->ic_opmode == IEEE80211_M_STA)
1874 iwn_iter_func(sc, ic->ic_bss);
1875 else
1876 ieee80211_iterate_nodes(&ic->ic_sta, iwn_iter_func, sc);
1877 }
1878 /* Force automatic TX power calibration every 60 secs. */
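/* (The calibration callout fires every 500ms, so 120 ticks is 60 seconds.) */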
1879 if (++sc->calib_cnt >= 120) {
1880 uint32_t flags = 0;
1881
1882 DPRINTF(("sending request for statistics\n"));
1883 (void)iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags,
1884 sizeof flags, 1);
1885 sc->calib_cnt = 0;
1886 }
1887 splx(s);
1888
1889 /* Automatic rate control triggered every 500ms. */
1890 callout_schedule(&sc->calib_to, hz/2);
1891 }
1892
1893 /*
1894 * Process an RX_PHY firmware notification. This is usually immediately
1895 * followed by an MPDU_RX_DONE notification.
1896 */
1897 static void
1898 iwn_rx_phy(struct iwn_softc *sc, struct iwn_rx_desc *desc,
1899 struct iwn_rx_data *data)
1900 {
1901 struct iwn_rx_stat *stat = (struct iwn_rx_stat *)(desc + 1);
1902
1903 DPRINTFN(2, ("received PHY stats\n"));
1904 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc),
1905 sizeof (*stat), BUS_DMASYNC_POSTREAD);
1906
1907 /* Save RX statistics, they will be used on MPDU_RX_DONE. */
1908 memcpy(&sc->last_rx_stat, stat, sizeof (*stat));
1909 sc->last_rx_valid = 1;
1910 }
1911
1912 /*
1913 * Process an RX_DONE (4965AGN only) or MPDU_RX_DONE firmware notification.
1914 * Each MPDU_RX_DONE notification must be preceded by an RX_PHY one.
1915 */
1916 static void
1917 iwn_rx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
1918 struct iwn_rx_data *data)
1919 {
1920 struct iwn_ops *ops = &sc->ops;
1921 struct ieee80211com *ic = &sc->sc_ic;
1922 struct ifnet *ifp = ic->ic_ifp;
1923 struct iwn_rx_ring *ring = &sc->rxq;
1924 struct ieee80211_frame *wh;
1925 struct ieee80211_node *ni;
1926 struct mbuf *m, *m1;
1927 struct iwn_rx_stat *stat;
1928 char *head;
1929 uint32_t flags;
1930 int error, len, rssi;
1931
1932 if (desc->type == IWN_MPDU_RX_DONE) {
1933 /* Check for prior RX_PHY notification. */
1934 if (!sc->last_rx_valid) {
1935 DPRINTF(("missing RX_PHY\n"));
1936 return;
1937 }
1938 sc->last_rx_valid = 0;
1939 stat = &sc->last_rx_stat;
1940 } else
1941 stat = (struct iwn_rx_stat *)(desc + 1);
1942
1943 bus_dmamap_sync(sc->sc_dmat, data->map, 0, IWN_RBUF_SIZE,
1944 BUS_DMASYNC_POSTREAD);
1945
1946 if (stat->cfg_phy_len > IWN_STAT_MAXLEN) {
1947 aprint_error_dev(sc->sc_dev,
1948 "invalid RX statistic header\n");
1949 return;
1950 }
1951 if (desc->type == IWN_MPDU_RX_DONE) {
1952 struct iwn_rx_mpdu *mpdu = (struct iwn_rx_mpdu *)(desc + 1);
1953 head = (char *)(mpdu + 1);
1954 len = le16toh(mpdu->len);
1955 } else {
1956 head = (char *)(stat + 1) + stat->cfg_phy_len;
1957 len = le16toh(stat->len);
1958 }
1959
1960 flags = le32toh(*(uint32_t *)(head + len));
1961
1962 /* Discard frames with a bad FCS early. */
1963 if ((flags & IWN_RX_NOERROR) != IWN_RX_NOERROR) {
1964 DPRINTFN(2, ("RX flags error %x\n", flags));
1965 ifp->if_ierrors++;
1966 return;
1967 }
1968 /* Discard frames that are too short. */
1969 if (len < sizeof (*wh)) {
1970 DPRINTF(("frame too short: %d\n", len));
1971 ic->ic_stats.is_rx_tooshort++;
1972 ifp->if_ierrors++;
1973 return;
1974 }
1975
1976 m1 = MCLGETIalt(sc, M_DONTWAIT, NULL, IWN_RBUF_SIZE);
1977 if (m1 == NULL) {
1978 ic->ic_stats.is_rx_nobuf++;
1979 ifp->if_ierrors++;
1980 return;
1981 }
1982 bus_dmamap_unload(sc->sc_dmat, data->map);
1983
1984 error = bus_dmamap_load(sc->sc_dmat, data->map, mtod(m1, void *),
1985 IWN_RBUF_SIZE, NULL, BUS_DMA_NOWAIT | BUS_DMA_READ);
1986 if (error != 0) {
1987 m_freem(m1);
1988
1989 /* Try to reload the old mbuf. */
1990 error = bus_dmamap_load(sc->sc_dmat, data->map,
1991 mtod(data->m, void *), IWN_RBUF_SIZE, NULL,
1992 BUS_DMA_NOWAIT | BUS_DMA_READ);
1993 if (error != 0) {
1994 panic("%s: could not load old RX mbuf",
1995 device_xname(sc->sc_dev));
1996 }
1997 /* Physical address may have changed. */
1998 ring->desc[ring->cur] =
1999 htole32(data->map->dm_segs[0].ds_addr >> 8);
2000 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
2001 ring->cur * sizeof (uint32_t), sizeof (uint32_t),
2002 BUS_DMASYNC_PREWRITE);
2003 ifp->if_ierrors++;
2004 return;
2005 }
2006
2007 m = data->m;
2008 data->m = m1;
2009 /* Update RX descriptor. */
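/*
 * The descriptor takes the buffer DMA address shifted right by 8;
 * the low 8 bits are assumed to be zero (256-byte aligned buffers).
 */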
2010 ring->desc[ring->cur] = htole32(data->map->dm_segs[0].ds_addr >> 8);
2011 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
2012 ring->cur * sizeof (uint32_t), sizeof (uint32_t),
2013 BUS_DMASYNC_PREWRITE);
2014
2015 /* Finalize mbuf. */
2016 m->m_pkthdr.rcvif = ifp;
2017 m->m_data = head;
2018 m->m_pkthdr.len = m->m_len = len;
2019
2020 /* Grab a reference to the source node. */
2021 wh = mtod(m, struct ieee80211_frame *);
2022 ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
2023
2024 /* XXX OpenBSD adds decryption here (see also comments in iwn_tx). */
2025 /* NetBSD does decryption in ieee80211_input. */
2026
2027 rssi = ops->get_rssi(stat);
2028
2029 /* XXX Added for NetBSD: scans never stop without it */
2030 if (ic->ic_state == IEEE80211_S_SCAN)
2031 iwn_fix_channel(ic, m);
2032
2033 if (sc->sc_drvbpf != NULL) {
2034 struct iwn_rx_radiotap_header *tap = &sc->sc_rxtap;
2035
2036 tap->wr_flags = 0;
2037 if (stat->flags & htole16(IWN_STAT_FLAG_SHPREAMBLE))
2038 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
2039 tap->wr_chan_freq =
2040 htole16(ic->ic_channels[stat->chan].ic_freq);
2041 tap->wr_chan_flags =
2042 htole16(ic->ic_channels[stat->chan].ic_flags);
2043 tap->wr_dbm_antsignal = (int8_t)rssi;
2044 tap->wr_dbm_antnoise = (int8_t)sc->noise;
2045 tap->wr_tsft = stat->tstamp;
2046 switch (stat->rate) {
2047 /* CCK rates. */
2048 case 10: tap->wr_rate = 2; break;
2049 case 20: tap->wr_rate = 4; break;
2050 case 55: tap->wr_rate = 11; break;
2051 case 110: tap->wr_rate = 22; break;
2052 /* OFDM rates. */
2053 case 0xd: tap->wr_rate = 12; break;
2054 case 0xf: tap->wr_rate = 18; break;
2055 case 0x5: tap->wr_rate = 24; break;
2056 case 0x7: tap->wr_rate = 36; break;
2057 case 0x9: tap->wr_rate = 48; break;
2058 case 0xb: tap->wr_rate = 72; break;
2059 case 0x1: tap->wr_rate = 96; break;
2060 case 0x3: tap->wr_rate = 108; break;
2061 /* Unknown rate: should not happen. */
2062 default: tap->wr_rate = 0;
2063 }
2064
2065 bpf_mtap2(sc->sc_drvbpf, tap, sc->sc_rxtap_len, m);
2066 }
2067
2068 /* Send the frame to the 802.11 layer. */
2069 ieee80211_input(ic, m, ni, rssi, 0);
2070
2071 /* Node is no longer needed. */
2072 ieee80211_free_node(ni);
2073 }
2074
2075 #ifndef IEEE80211_NO_HT
2076 /* Process an incoming Compressed BlockAck. */
2077 static void
2078 iwn_rx_compressed_ba(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2079 struct iwn_rx_data *data)
2080 {
2081 struct iwn_compressed_ba *ba = (struct iwn_compressed_ba *)(desc + 1);
2082 struct iwn_tx_ring *txq;
2083
2084 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), sizeof (*ba),
2085 BUS_DMASYNC_POSTREAD);
2086
2087 txq = &sc->txq[le16toh(ba->qid)];
2088 /* XXX TBD */
2089 }
2090 #endif
2091
2092 /*
2093 * Process a CALIBRATION_RESULT notification sent by the initialization
2094 * firmware in response to a CMD_CALIB_CONFIG command (5000 only).
2095 */
2096 static void
2097 iwn5000_rx_calib_results(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2098 struct iwn_rx_data *data)
2099 {
2100 struct iwn_phy_calib *calib = (struct iwn_phy_calib *)(desc + 1);
2101 int len, idx = -1;
2102
2103 /* Runtime firmware should not send such a notification. */
2104 if (sc->sc_flags & IWN_FLAG_CALIB_DONE)
2105 return;
2106
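/*
 * The low 14 bits of desc->len give the notification length; the
 * trailing 4 bytes (presumably a status word) are not part of the
 * calibration payload.
 */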
2107 len = (le32toh(desc->len) & 0x3fff) - 4;
2108 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc), len,
2109 BUS_DMASYNC_POSTREAD);
2110
2111 switch (calib->code) {
2112 case IWN5000_PHY_CALIB_DC:
2113 if (sc->hw_type == IWN_HW_REV_TYPE_5150)
2114 idx = 0;
2115 break;
2116 case IWN5000_PHY_CALIB_LO:
2117 idx = 1;
2118 break;
2119 case IWN5000_PHY_CALIB_TX_IQ:
2120 idx = 2;
2121 break;
2122 case IWN5000_PHY_CALIB_TX_IQ_PERIODIC:
2123 if (sc->hw_type < IWN_HW_REV_TYPE_6000 &&
2124 sc->hw_type != IWN_HW_REV_TYPE_5150)
2125 idx = 3;
2126 break;
2127 case IWN5000_PHY_CALIB_BASE_BAND:
2128 idx = 4;
2129 break;
2130 }
2131 if (idx == -1) /* Ignore other results. */
2132 return;
2133
2134 /* Save calibration result. */
2135 if (sc->calibcmd[idx].buf != NULL)
2136 free(sc->calibcmd[idx].buf, M_DEVBUF);
2137 sc->calibcmd[idx].buf = malloc(len, M_DEVBUF, M_NOWAIT);
2138 if (sc->calibcmd[idx].buf == NULL) {
2139 DPRINTF(("not enough memory for calibration result %d\n",
2140 calib->code));
2141 return;
2142 }
2143 DPRINTF(("saving calibration result code=%d len=%d\n",
2144 calib->code, len));
2145 sc->calibcmd[idx].len = len;
2146 memcpy(sc->calibcmd[idx].buf, calib, len);
2147 }
2148
2149 /*
2150 * Process an RX_STATISTICS or BEACON_STATISTICS firmware notification.
2151 * The latter is sent by the firmware after each received beacon.
2152 */
2153 static void
2154 iwn_rx_statistics(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2155 struct iwn_rx_data *data)
2156 {
2157 struct iwn_ops *ops = &sc->ops;
2158 struct ieee80211com *ic = &sc->sc_ic;
2159 struct iwn_calib_state *calib = &sc->calib;
2160 struct iwn_stats *stats = (struct iwn_stats *)(desc + 1);
2161 int temp;
2162
2163 /* Ignore statistics received during a scan. */
2164 if (ic->ic_state != IEEE80211_S_RUN)
2165 return;
2166
2167 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc),
2168 sizeof (*stats), BUS_DMASYNC_POSTREAD);
2169
2170 DPRINTFN(3, ("received statistics (cmd=%d)\n", desc->type));
2171 sc->calib_cnt = 0; /* Reset TX power calibration timeout. */
2172
2173 /* Test if temperature has changed. */
2174 if (stats->general.temp != sc->rawtemp) {
2175 /* Convert "raw" temperature to degC. */
2176 sc->rawtemp = stats->general.temp;
2177 temp = ops->get_temperature(sc);
2178 DPRINTFN(2, ("temperature=%dC\n", temp));
2179
2180 /* Update TX power if need be (4965AGN only). */
2181 if (sc->hw_type == IWN_HW_REV_TYPE_4965)
2182 iwn4965_power_calibration(sc, temp);
2183 }
2184
2185 if (desc->type != IWN_BEACON_STATISTICS)
2186 return; /* Reply to a statistics request. */
2187
2188 sc->noise = iwn_get_noise(&stats->rx.general);
2189
2190 /* Test that RSSI and noise are present in stats report. */
2191 if (le32toh(stats->rx.general.flags) != 1) {
2192 DPRINTF(("received statistics without RSSI\n"));
2193 return;
2194 }
2195
2196 /*
2197 * XXX Differential gain calibration makes the 6005 firmware
2198 * crap out, so skip it for now. This effectively disables
2199 * sensitivity tuning as well.
2200 */
2201 if (sc->hw_type == IWN_HW_REV_TYPE_6005)
2202 return;
2203
2204 if (calib->state == IWN_CALIB_STATE_ASSOC)
2205 iwn_collect_noise(sc, &stats->rx.general);
2206 else if (calib->state == IWN_CALIB_STATE_RUN)
2207 iwn_tune_sensitivity(sc, &stats->rx);
2208 }
2209
2210 /*
2211 * Process a TX_DONE firmware notification. Unfortunately, the 4965AGN
2212 * and 5000 adapters use different, incompatible TX status formats.
2213 */
2214 static void
2215 iwn4965_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2216 struct iwn_rx_data *data)
2217 {
2218 struct iwn4965_tx_stat *stat = (struct iwn4965_tx_stat *)(desc + 1);
2219
2220 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc),
2221 sizeof (*stat), BUS_DMASYNC_POSTREAD);
2222 iwn_tx_done(sc, desc, stat->ackfailcnt, le32toh(stat->status) & 0xff);
2223 }
2224
2225 static void
2226 iwn5000_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc,
2227 struct iwn_rx_data *data)
2228 {
2229 struct iwn5000_tx_stat *stat = (struct iwn5000_tx_stat *)(desc + 1);
2230
2231 #ifdef notyet
2232 /* Reset TX scheduler slot. */
2233 iwn5000_reset_sched(sc, desc->qid & 0xf, desc->idx);
2234 #endif
2235
2236 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc),
2237 sizeof (*stat), BUS_DMASYNC_POSTREAD);
2238 iwn_tx_done(sc, desc, stat->ackfailcnt, le16toh(stat->status) & 0xff);
2239 }
2240
2241 /*
2242 * Adapter-independent backend for TX_DONE firmware notifications.
2243 */
2244 static void
2245 iwn_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, int ackfailcnt,
2246 uint8_t status)
2247 {
2248 struct ieee80211com *ic = &sc->sc_ic;
2249 struct ifnet *ifp = ic->ic_ifp;
2250 struct iwn_tx_ring *ring = &sc->txq[desc->qid & 0xf];
2251 struct iwn_tx_data *data = &ring->data[desc->idx];
2252 struct iwn_node *wn = (struct iwn_node *)data->ni;
2253
2254 /* Update rate control statistics. */
2255 wn->amn.amn_txcnt++;
2256 if (ackfailcnt > 0)
2257 wn->amn.amn_retrycnt++;
2258
2259 if (status != 1 && status != 2)
2260 ifp->if_oerrors++;
2261 else
2262 ifp->if_opackets++;
2263
2264 /* Unmap and free mbuf. */
2265 bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize,
2266 BUS_DMASYNC_POSTWRITE);
2267 bus_dmamap_unload(sc->sc_dmat, data->map);
2268 m_freem(data->m);
2269 data->m = NULL;
2270 ieee80211_free_node(data->ni);
2271 data->ni = NULL;
2272
2273 sc->sc_tx_timer = 0;
2274 if (--ring->queued < IWN_TX_RING_LOMARK) {
2275 sc->qfullmsk &= ~(1 << ring->qid);
2276 if (sc->qfullmsk == 0 && (ifp->if_flags & IFF_OACTIVE)) {
2277 ifp->if_flags &= ~IFF_OACTIVE;
2278 (*ifp->if_start)(ifp);
2279 }
2280 }
2281 }
2282
2283 /*
2284 * Process a "command done" firmware notification. This is where we wake up
2285 * processes waiting for a synchronous command completion.
2286 */
2287 static void
2288 iwn_cmd_done(struct iwn_softc *sc, struct iwn_rx_desc *desc)
2289 {
2290 struct iwn_tx_ring *ring = &sc->txq[4];
2291 struct iwn_tx_data *data;
2292
2293 if ((desc->qid & 0xf) != 4)
2294 return; /* Not a command ack. */
2295
2296 data = &ring->data[desc->idx];
2297
2298 /* If the command was mapped in an mbuf, free it. */
2299 if (data->m != NULL) {
2300 bus_dmamap_sync(sc->sc_dmat, data->map, 0,
2301 data->map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
2302 bus_dmamap_unload(sc->sc_dmat, data->map);
2303 m_freem(data->m);
2304 data->m = NULL;
2305 }
2306 wakeup(&ring->desc[desc->idx]);
2307 }
2308
2309 /*
2310 * Process an INT_FH_RX or INT_SW_RX interrupt.
2311 */
2312 static void
2313 iwn_notif_intr(struct iwn_softc *sc)
2314 {
2315 struct iwn_ops *ops = &sc->ops;
2316 struct ieee80211com *ic = &sc->sc_ic;
2317 struct ifnet *ifp = ic->ic_ifp;
2318 uint16_t hw;
2319
2320 bus_dmamap_sync(sc->sc_dmat, sc->rxq.stat_dma.map,
2321 0, sc->rxq.stat_dma.size, BUS_DMASYNC_POSTREAD);
2322
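/*
 * closed_count is the index of the last RX descriptor filled by the
 * firmware; process notifications until we catch up with it.
 */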
2323 hw = le16toh(sc->rxq.stat->closed_count) & 0xfff;
2324 while (sc->rxq.cur != hw) {
2325 struct iwn_rx_data *data = &sc->rxq.data[sc->rxq.cur];
2326 struct iwn_rx_desc *desc;
2327
2328 bus_dmamap_sync(sc->sc_dmat, data->map, 0, sizeof (*desc),
2329 BUS_DMASYNC_POSTREAD);
2330 desc = mtod(data->m, struct iwn_rx_desc *);
2331
2332 DPRINTFN(4, ("notification qid=%d idx=%d flags=%x type=%d\n",
2333 desc->qid & 0xf, desc->idx, desc->flags, desc->type));
2334
2335 if (!(desc->qid & 0x80)) /* Reply to a command. */
2336 iwn_cmd_done(sc, desc);
2337
2338 switch (desc->type) {
2339 case IWN_RX_PHY:
2340 iwn_rx_phy(sc, desc, data);
2341 break;
2342
2343 case IWN_RX_DONE: /* 4965AGN only. */
2344 case IWN_MPDU_RX_DONE:
2345 /* An 802.11 frame has been received. */
2346 iwn_rx_done(sc, desc, data);
2347 break;
2348 #ifndef IEEE80211_NO_HT
2349 case IWN_RX_COMPRESSED_BA:
2350 /* A Compressed BlockAck has been received. */
2351 iwn_rx_compressed_ba(sc, desc, data);
2352 break;
2353 #endif
2354 case IWN_TX_DONE:
2355 /* An 802.11 frame has been transmitted. */
2356 ops->tx_done(sc, desc, data);
2357 break;
2358
2359 case IWN_RX_STATISTICS:
2360 case IWN_BEACON_STATISTICS:
2361 iwn_rx_statistics(sc, desc, data);
2362 break;
2363
2364 case IWN_BEACON_MISSED:
2365 {
2366 struct iwn_beacon_missed *miss =
2367 (struct iwn_beacon_missed *)(desc + 1);
2368
2369 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc),
2370 sizeof (*miss), BUS_DMASYNC_POSTREAD);
2371 /*
2372 * If more than 5 consecutive beacons are missed,
2373 * reinitialize the sensitivity state machine.
2374 */
2375 DPRINTF(("beacons missed %d/%d\n",
2376 le32toh(miss->consecutive), le32toh(miss->total)));
2377 if (ic->ic_state == IEEE80211_S_RUN &&
2378 le32toh(miss->consecutive) > 5)
2379 (void)iwn_init_sensitivity(sc);
2380 break;
2381 }
2382 case IWN_UC_READY:
2383 {
2384 struct iwn_ucode_info *uc =
2385 (struct iwn_ucode_info *)(desc + 1);
2386
2387 /* The microcontroller is ready. */
2388 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc),
2389 sizeof (*uc), BUS_DMASYNC_POSTREAD);
2390 DPRINTF(("microcode alive notification version=%d.%d "
2391 "subtype=%x alive=%x\n", uc->major, uc->minor,
2392 uc->subtype, le32toh(uc->valid)));
2393
2394 if (le32toh(uc->valid) != 1) {
2395 aprint_error_dev(sc->sc_dev,
2396 "microcontroller initialization "
2397 "failed\n");
2398 break;
2399 }
2400 if (uc->subtype == IWN_UCODE_INIT) {
2401 /* Save microcontroller report. */
2402 memcpy(&sc->ucode_info, uc, sizeof (*uc));
2403 }
2404 /* Save the address of the error log in SRAM. */
2405 sc->errptr = le32toh(uc->errptr);
2406 break;
2407 }
2408 case IWN_STATE_CHANGED:
2409 {
2410 uint32_t *status = (uint32_t *)(desc + 1);
2411
2412 /* Enabled/disabled notification. */
2413 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc),
2414 sizeof (*status), BUS_DMASYNC_POSTREAD);
2415 DPRINTF(("state changed to %x\n", le32toh(*status)));
2416
2417 if (le32toh(*status) & 1) {
2418 /* The RF kill switch has disabled the radio. */
2419 aprint_error_dev(sc->sc_dev,
2420 "Radio transmitter is off\n");
2421 /* Turn the interface down. */
2422 ifp->if_flags &= ~IFF_UP;
2423 iwn_stop(ifp, 1);
2424 return; /* No further processing. */
2425 }
2426 break;
2427 }
2428 case IWN_START_SCAN:
2429 {
2430 struct iwn_start_scan *scan =
2431 (struct iwn_start_scan *)(desc + 1);
2432
2433 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc),
2434 sizeof (*scan), BUS_DMASYNC_POSTREAD);
2435 DPRINTFN(2, ("scanning channel %d status %x\n",
2436 scan->chan, le32toh(scan->status)));
2437
2438 /* Fix current channel. */
2439 ic->ic_bss->ni_chan = &ic->ic_channels[scan->chan];
2440 break;
2441 }
2442 case IWN_STOP_SCAN:
2443 {
2444 struct iwn_stop_scan *scan =
2445 (struct iwn_stop_scan *)(desc + 1);
2446
2447 bus_dmamap_sync(sc->sc_dmat, data->map, sizeof (*desc),
2448 sizeof (*scan), BUS_DMASYNC_POSTREAD);
2449 DPRINTF(("scan finished nchan=%d status=%d chan=%d\n",
2450 scan->nchan, scan->status, scan->chan));
2451
2452 if (scan->status == 1 && scan->chan <= 14 &&
2453 (sc->sc_flags & IWN_FLAG_HAS_5GHZ)) {
2454 /*
2455 * We just finished scanning 2GHz channels,
2456 * start scanning 5GHz ones.
2457 */
2458 if (iwn_scan(sc, IEEE80211_CHAN_5GHZ) == 0)
2459 break;
2460 }
2461 sc->sc_flags &= ~IWN_FLAG_SCANNING;
2462 ieee80211_end_scan(ic);
2463 break;
2464 }
2465 case IWN5000_CALIBRATION_RESULT:
2466 iwn5000_rx_calib_results(sc, desc, data);
2467 break;
2468
2469 case IWN5000_CALIBRATION_DONE:
2470 sc->sc_flags |= IWN_FLAG_CALIB_DONE;
2471 wakeup(sc);
2472 break;
2473 }
2474
2475 sc->rxq.cur = (sc->rxq.cur + 1) % IWN_RX_RING_COUNT;
2476 }
2477
2478 /* Tell the firmware what we have processed. */
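/* The FH RX write pointer is kept 8-aligned, hence the rounding below. */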
2479 hw = (hw == 0) ? IWN_RX_RING_COUNT - 1 : hw - 1;
2480 IWN_WRITE(sc, IWN_FH_RX_WPTR, hw & ~7);
2481 }
2482
2483 /*
2484 * Process an INT_WAKEUP interrupt raised when the microcontroller wakes up
2485 * from power-down sleep mode.
2486 */
2487 static void
2488 iwn_wakeup_intr(struct iwn_softc *sc)
2489 {
2490 int qid;
2491
2492 DPRINTF(("ucode wakeup from power-down sleep\n"));
2493
2494 /* Wakeup RX and TX rings. */
2495 IWN_WRITE(sc, IWN_FH_RX_WPTR, sc->rxq.cur & ~7);
2496 for (qid = 0; qid < sc->ntxqs; qid++) {
2497 struct iwn_tx_ring *ring = &sc->txq[qid];
2498 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | ring->cur);
2499 }
2500 }
2501
2502 /*
2503 * Dump the firmware error log when a firmware panic occurs. Although we
2504 * cannot debug the firmware (it is neither open source nor free), the log
2505 * can help us identify certain classes of problems.
2506 */
2507 static void
2508 iwn_fatal_intr(struct iwn_softc *sc)
2509 {
2510 struct iwn_fw_dump dump;
2511 int i;
2512
2513 /* Force a complete recalibration on next init. */
2514 sc->sc_flags &= ~IWN_FLAG_CALIB_DONE;
2515
2516 /* Check that the error log address is valid. */
2517 if (sc->errptr < IWN_FW_DATA_BASE ||
2518 sc->errptr + sizeof (dump) >
2519 IWN_FW_DATA_BASE + sc->fw_data_maxsz) {
2520 aprint_error_dev(sc->sc_dev,
2521 "bad firmware error log address 0x%08x\n", sc->errptr);
2522 return;
2523 }
2524 if (iwn_nic_lock(sc) != 0) {
2525 aprint_error_dev(sc->sc_dev,
2526 "could not read firmware error log\n");
2527 return;
2528 }
2529 /* Read firmware error log from SRAM. */
2530 iwn_mem_read_region_4(sc, sc->errptr, (uint32_t *)&dump,
2531 sizeof (dump) / sizeof (uint32_t));
2532 iwn_nic_unlock(sc);
2533
2534 if (dump.valid == 0) {
2535 aprint_error_dev(sc->sc_dev,
2536 "firmware error log is empty\n");
2537 return;
2538 }
2539 aprint_error("firmware error log:\n");
2540 aprint_error(" error type = \"%s\" (0x%08X)\n",
2541 (dump.id < __arraycount(iwn_fw_errmsg)) ?
2542 iwn_fw_errmsg[dump.id] : "UNKNOWN",
2543 dump.id);
2544 aprint_error(" program counter = 0x%08X\n", dump.pc);
2545 aprint_error(" source line = 0x%08X\n", dump.src_line);
2546 aprint_error(" error data = 0x%08X%08X\n",
2547 dump.error_data[0], dump.error_data[1]);
2548 aprint_error(" branch link = 0x%08X%08X\n",
2549 dump.branch_link[0], dump.branch_link[1]);
2550 aprint_error(" interrupt link = 0x%08X%08X\n",
2551 dump.interrupt_link[0], dump.interrupt_link[1]);
2552 aprint_error(" time = %u\n", dump.time[0]);
2553
2554 /* Dump driver status (TX and RX rings) while we're here. */
2555 aprint_error("driver status:\n");
2556 for (i = 0; i < sc->ntxqs; i++) {
2557 struct iwn_tx_ring *ring = &sc->txq[i];
2558 aprint_error(" tx ring %2d: qid=%-2d cur=%-3d queued=%-3d\n",
2559 i, ring->qid, ring->cur, ring->queued);
2560 }
2561 aprint_error(" rx ring: cur=%d\n", sc->rxq.cur);
2562 aprint_error(" 802.11 state %d\n", sc->sc_ic.ic_state);
2563 }
2564
2565 static int
2566 iwn_intr(void *arg)
2567 {
2568 struct iwn_softc *sc = arg;
2569 struct ifnet *ifp = sc->sc_ic.ic_ifp;
2570 uint32_t r1, r2, tmp;
2571
2572 /* Disable interrupts. */
2573 IWN_WRITE(sc, IWN_INT_MASK, 0);
2574
2575 /* Read interrupts from ICT (fast) or from registers (slow). */
2576 if (sc->sc_flags & IWN_FLAG_USE_ICT) {
2577 tmp = 0;
2578 while (sc->ict[sc->ict_cur] != 0) {
2579 tmp |= sc->ict[sc->ict_cur];
2580 sc->ict[sc->ict_cur] = 0; /* Acknowledge. */
2581 sc->ict_cur = (sc->ict_cur + 1) % IWN_ICT_COUNT;
2582 }
2583 tmp = le32toh(tmp);
2584 if (tmp == 0xffffffff) /* Shouldn't happen. */
2585 tmp = 0;
2586 else if (tmp & 0xc0000) /* Workaround a HW bug. */
2587 tmp |= 0x8000;
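/*
 * Rebuild the layout of the INT register from the compacted ICT
 * value: low byte -> bits 0-7, high byte -> bits 24-31.
 */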
2588 r1 = (tmp & 0xff00) << 16 | (tmp & 0xff);
2589 r2 = 0; /* Unused. */
2590 } else {
2591 r1 = IWN_READ(sc, IWN_INT);
2592 if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
2593 return 0; /* Hardware gone! */
2594 r2 = IWN_READ(sc, IWN_FH_INT);
2595 }
2596 if (r1 == 0 && r2 == 0) {
2597 if (ifp->if_flags & IFF_UP)
2598 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
2599 return 0; /* Interrupt not for us. */
2600 }
2601
2602 /* Acknowledge interrupts. */
2603 IWN_WRITE(sc, IWN_INT, r1);
2604 if (!(sc->sc_flags & IWN_FLAG_USE_ICT))
2605 IWN_WRITE(sc, IWN_FH_INT, r2);
2606
2607 if (r1 & IWN_INT_RF_TOGGLED) {
2608 tmp = IWN_READ(sc, IWN_GP_CNTRL);
2609 aprint_error_dev(sc->sc_dev,
2610 "RF switch: radio %s\n",
2611 (tmp & IWN_GP_CNTRL_RFKILL) ? "enabled" : "disabled");
2612 }
2613 if (r1 & IWN_INT_CT_REACHED) {
2614 aprint_error_dev(sc->sc_dev,
2615 "critical temperature reached!\n");
2616 }
2617 if (r1 & (IWN_INT_SW_ERR | IWN_INT_HW_ERR)) {
2618 aprint_error_dev(sc->sc_dev,
2619 "fatal firmware error\n");
2620 /* Dump firmware error log and stop. */
2621 iwn_fatal_intr(sc);
2622 ifp->if_flags &= ~IFF_UP;
2623 iwn_stop(ifp, 1);
2624 return 1;
2625 }
2626 if ((r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX | IWN_INT_RX_PERIODIC)) ||
2627 (r2 & IWN_FH_INT_RX)) {
2628 if (sc->sc_flags & IWN_FLAG_USE_ICT) {
2629 if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX))
2630 IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_RX);
2631 IWN_WRITE_1(sc, IWN_INT_PERIODIC,
2632 IWN_INT_PERIODIC_DIS);
2633 iwn_notif_intr(sc);
2634 if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX)) {
2635 IWN_WRITE_1(sc, IWN_INT_PERIODIC,
2636 IWN_INT_PERIODIC_ENA);
2637 }
2638 } else
2639 iwn_notif_intr(sc);
2640 }
2641
2642 if ((r1 & IWN_INT_FH_TX) || (r2 & IWN_FH_INT_TX)) {
2643 if (sc->sc_flags & IWN_FLAG_USE_ICT)
2644 IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_TX);
2645 wakeup(sc); /* FH DMA transfer completed. */
2646 }
2647
2648 if (r1 & IWN_INT_ALIVE)
2649 wakeup(sc); /* Firmware is alive. */
2650
2651 if (r1 & IWN_INT_WAKEUP)
2652 iwn_wakeup_intr(sc);
2653
2654 /* Re-enable interrupts. */
2655 if (ifp->if_flags & IFF_UP)
2656 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
2657
2658 return 1;
2659 }
2660
2661 /*
2662 * Update TX scheduler ring when transmitting an 802.11 frame (4965AGN and
2663 * 5000 adapters use a slightly different format).
2664 */
2665 static void
2666 iwn4965_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id,
2667 uint16_t len)
2668 {
2669 uint16_t *w = &sc->sched[qid * IWN4965_SCHED_COUNT + idx];
2670
2671 *w = htole16(len + 8);
2672 bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
2673 (char *)(void *)w - (char *)(void *)sc->sched_dma.vaddr,
2674 sizeof (uint16_t),
2675 BUS_DMASYNC_PREWRITE);
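/*
 * The first IWN_SCHED_WINSZ entries are mirrored at the end of the
 * array, presumably so the scheduler can read a full window without
 * wrapping.
 */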
2676 if (idx < IWN_SCHED_WINSZ) {
2677 *(w + IWN_TX_RING_COUNT) = *w;
2678 bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
2679 (char *)(void *)(w + IWN_TX_RING_COUNT) -
2680 (char *)(void *)sc->sched_dma.vaddr,
2681 sizeof (uint16_t), BUS_DMASYNC_PREWRITE);
2682 }
2683 }
2684
2685 static void
2686 iwn5000_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id,
2687 uint16_t len)
2688 {
2689 uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx];
2690
2691 *w = htole16(id << 12 | (len + 8));
2692 bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
2693 (char *)(void *)w - (char *)(void *)sc->sched_dma.vaddr,
2694 sizeof (uint16_t), BUS_DMASYNC_PREWRITE);
2695 if (idx < IWN_SCHED_WINSZ) {
2696 *(w + IWN_TX_RING_COUNT) = *w;
2697 bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
2698 (char *)(void *)(w + IWN_TX_RING_COUNT) -
2699 (char *)(void *)sc->sched_dma.vaddr,
2700 sizeof (uint16_t), BUS_DMASYNC_PREWRITE);
2701 }
2702 }
2703
2704 #ifdef notyet
2705 static void
2706 iwn5000_reset_sched(struct iwn_softc *sc, int qid, int idx)
2707 {
2708 uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx];
2709
2710 *w = (*w & htole16(0xf000)) | htole16(1);
2711 bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
2712 (char *)(void *)w - (char *)(void *)sc->sched_dma.vaddr,
2713 sizeof (uint16_t), BUS_DMASYNC_PREWRITE);
2714 if (idx < IWN_SCHED_WINSZ) {
2715 *(w + IWN_TX_RING_COUNT) = *w;
2716 bus_dmamap_sync(sc->sc_dmat, sc->sched_dma.map,
2717 (char *)(void *)(w + IWN_TX_RING_COUNT) -
2718 (char *)(void *)sc->sched_dma.vaddr,
2719 sizeof (uint16_t), BUS_DMASYNC_PREWRITE);
2720 }
2721 }
2722 #endif
2723
2724 static int
2725 iwn_tx(struct iwn_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
2726 {
2727 struct ieee80211com *ic = &sc->sc_ic;
2728 struct iwn_node *wn = (void *)ni;
2729 struct iwn_tx_ring *ring;
2730 struct iwn_tx_desc *desc;
2731 struct iwn_tx_data *data;
2732 struct iwn_tx_cmd *cmd;
2733 struct iwn_cmd_data *tx;
2734 const struct iwn_rate *rinfo;
2735 struct ieee80211_frame *wh;
2736 struct ieee80211_key *k = NULL;
2737 struct mbuf *m1;
2738 uint32_t flags;
2739 u_int hdrlen;
2740 bus_dma_segment_t *seg;
2741 uint8_t tid, ridx, txant, type;
2742 int i, totlen, error, pad;
2743
2744 const struct chanAccParams *cap;
2745 int noack;
2746 int hdrlen2;
2747
2748 wh = mtod(m, struct ieee80211_frame *);
2749 hdrlen = ieee80211_anyhdrsize(wh);
2750 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
2751
2752 hdrlen2 = (ieee80211_has_qos(wh)) ?
2753 sizeof (struct ieee80211_qosframe) :
2754 sizeof (struct ieee80211_frame);
2755
2756 if (hdrlen != hdrlen2)
2757 aprint_error_dev(sc->sc_dev, "hdrlen error (%d != %d)\n",
2758 hdrlen, hdrlen2);
2759
2760 /* XXX OpenBSD sets a different tid when using QoS */
2761 tid = 0;
2762 if (ieee80211_has_qos(wh)) {
2763 cap = &ic->ic_wme.wme_chanParams;
2764 noack = cap->cap_wmeParams[ac].wmep_noackPolicy;
2765 }
2766 else
2767 noack = 0;
2768
2769 ring = &sc->txq[ac];
2770 desc = &ring->desc[ring->cur];
2771 data = &ring->data[ring->cur];
2772
2773 /* Choose a TX rate index. */
2774 if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
2775 type != IEEE80211_FC0_TYPE_DATA) {
2776 ridx = (ic->ic_curmode == IEEE80211_MODE_11A) ?
2777 IWN_RIDX_OFDM6 : IWN_RIDX_CCK1;
2778 } else if (ic->ic_fixed_rate != -1) {
2779 ridx = sc->fixed_ridx;
2780 } else
2781 ridx = wn->ridx[ni->ni_txrate];
2782 rinfo = &iwn_rates[ridx];
2783
2784 /* Encrypt the frame if need be. */
2785 /*
2786 * XXX For now, NetBSD swaps the encryption and bpf sections
2787 * in order to match old code and other drivers. Tests with
2788 * tcpdump indicate that the order is irrelevant, however,
2789 * as bpf produces unencrypted data for both ordering choices.
2790 */
2791 if (wh->i_fc[1] & IEEE80211_FC1_WEP) {
2792 k = ieee80211_crypto_encap(ic, ni, m);
2793 if (k == NULL) {
2794 m_freem(m);
2795 return ENOBUFS;
2796 }
2797 /* Packet header may have moved, reset our local pointer. */
2798 wh = mtod(m, struct ieee80211_frame *);
2799 }
2800 totlen = m->m_pkthdr.len;
2801
2802 if (sc->sc_drvbpf != NULL) {
2803 struct iwn_tx_radiotap_header *tap = &sc->sc_txtap;
2804
2805 tap->wt_flags = 0;
2806 tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
2807 tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
2808 tap->wt_rate = rinfo->rate;
2809 tap->wt_hwqueue = ac;
2810 if (wh->i_fc[1] & IEEE80211_FC1_WEP)
2811 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
2812
2813 bpf_mtap2(sc->sc_drvbpf, tap, sc->sc_txtap_len, m);
2814 }
2815
2816 /* Prepare TX firmware command. */
2817 cmd = &ring->cmd[ring->cur];
2818 cmd->code = IWN_CMD_TX_DATA;
2819 cmd->flags = 0;
2820 cmd->qid = ring->qid;
2821 cmd->idx = ring->cur;
2822
2823 tx = (struct iwn_cmd_data *)cmd->data;
2824 /* NB: No need to clear tx, all fields are reinitialized here. */
2825 tx->scratch = 0; /* clear "scratch" area */
2826
2827 flags = 0;
2828 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
2829 /* Unicast frame, check if an ACK is expected. */
2830 if (!noack)
2831 flags |= IWN_TX_NEED_ACK;
2832 }
2833
2834 #ifdef notyet
2835 /* XXX NetBSD does not define IEEE80211_FC0_SUBTYPE_BAR */
2836 if ((wh->i_fc[0] &
2837 (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) ==
2838 (IEEE80211_FC0_TYPE_CTL | IEEE80211_FC0_SUBTYPE_BAR))
2839 flags |= IWN_TX_IMM_BA; /* Cannot happen yet. */
2840 #endif
2841
2842 if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG)
2843 flags |= IWN_TX_MORE_FRAG; /* Cannot happen yet. */
2844
2845 /* Check if frame must be protected using RTS/CTS or CTS-to-self. */
2846 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
2847 /* NB: Group frames are sent using CCK in 802.11b/g. */
2848 if (totlen + IEEE80211_CRC_LEN > ic->ic_rtsthreshold) {
2849 flags |= IWN_TX_NEED_RTS;
2850 } else if ((ic->ic_flags & IEEE80211_F_USEPROT) &&
2851 ridx >= IWN_RIDX_OFDM6) {
2852 if (ic->ic_protmode == IEEE80211_PROT_CTSONLY)
2853 flags |= IWN_TX_NEED_CTS;
2854 else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS)
2855 flags |= IWN_TX_NEED_RTS;
2856 }
2857 if (flags & (IWN_TX_NEED_RTS | IWN_TX_NEED_CTS)) {
2858 if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
2859 /* 5000 autoselects RTS/CTS or CTS-to-self. */
2860 flags &= ~(IWN_TX_NEED_RTS | IWN_TX_NEED_CTS);
2861 flags |= IWN_TX_NEED_PROTECTION;
2862 } else
2863 flags |= IWN_TX_FULL_TXOP;
2864 }
2865 }
2866
2867 if (IEEE80211_IS_MULTICAST(wh->i_addr1) ||
2868 type != IEEE80211_FC0_TYPE_DATA)
2869 tx->id = sc->broadcast_id;
2870 else
2871 tx->id = wn->id;
2872
2873 if (type == IEEE80211_FC0_TYPE_MGT) {
2874 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
2875
2876 #ifndef IEEE80211_STA_ONLY
2877 /* Tell HW to set timestamp in probe responses. */
2878 /* XXX NetBSD rev 1.11 added probe requests here but */
2879 /* probe requests do not take timestamps (from Bergamini). */
2880 if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
2881 flags |= IWN_TX_INSERT_TSTAMP;
2882 #endif
2883 /* XXX NetBSD rev 1.11 and 1.20 added AUTH/DAUTH and RTS/CTS */
2884 /* changes here. These are not needed (from Bergamini). */
2885 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
2886 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
2887 tx->timeout = htole16(3);
2888 else
2889 tx->timeout = htole16(2);
2890 } else
2891 tx->timeout = htole16(0);
2892
2893 if (hdrlen & 3) {
2894 /* First segment length must be a multiple of 4. */
2895 flags |= IWN_TX_NEED_PADDING;
2896 pad = 4 - (hdrlen & 3);
2897 } else
2898 pad = 0;
2899
2900 tx->len = htole16(totlen);
2901 tx->tid = tid;
2902 tx->rts_ntries = 60;
2903 tx->data_ntries = 15;
2904 tx->lifetime = htole32(IWN_LIFETIME_INFINITE);
2905 tx->plcp = rinfo->plcp;
2906 tx->rflags = rinfo->flags;
2907 if (tx->id == sc->broadcast_id) {
2908 /* Group or management frame. */
2909 tx->linkq = 0;
2910 /* XXX Alternate between antenna A and B? */
2911 txant = IWN_LSB(sc->txchainmask);
2912 tx->rflags |= IWN_RFLAG_ANT(txant);
2913 } else {
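/*
 * Index into the link quality (MRR) table built in
 * iwn_set_link_quality(); entry 0 there is the highest rate.
 */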
2914 tx->linkq = ni->ni_rates.rs_nrates - ni->ni_txrate - 1;
2915 flags |= IWN_TX_LINKQ; /* enable MRR */
2916 }
2917 /* Set physical address of "scratch area". */
2918 tx->loaddr = htole32(IWN_LOADDR(data->scratch_paddr));
2919 tx->hiaddr = IWN_HIADDR(data->scratch_paddr);
2920
2921 /* Copy 802.11 header in TX command. */
2922 /* XXX NetBSD changed this in rev 1.20 */
2923 memcpy(((uint8_t *)tx) + sizeof(*tx), wh, hdrlen);
2924
2925 /* Trim 802.11 header. */
2926 m_adj(m, hdrlen);
2927 tx->security = 0;
2928 tx->flags = htole32(flags);
2929
2930 error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
2931 BUS_DMA_NOWAIT | BUS_DMA_WRITE);
2932 if (error != 0) {
2933 if (error != EFBIG) {
2934 aprint_error_dev(sc->sc_dev,
2935 "can't map mbuf (error %d)\n", error);
2936 m_freem(m);
2937 return error;
2938 }
2939 /* Too many DMA segments, linearize mbuf. */
2940 MGETHDR(m1, M_DONTWAIT, MT_DATA);
2941 if (m1 == NULL) {
2942 m_freem(m);
2943 return ENOBUFS;
2944 }
2945 if (m->m_pkthdr.len > MHLEN) {
2946 MCLGET(m1, M_DONTWAIT);
2947 if (!(m1->m_flags & M_EXT)) {
2948 m_freem(m);
2949 m_freem(m1);
2950 return ENOBUFS;
2951 }
2952 }
2953 m_copydata(m, 0, m->m_pkthdr.len, mtod(m1, void *));
2954 m1->m_pkthdr.len = m1->m_len = m->m_pkthdr.len;
2955 m_freem(m);
2956 m = m1;
2957
2958 error = bus_dmamap_load_mbuf(sc->sc_dmat, data->map, m,
2959 BUS_DMA_NOWAIT | BUS_DMA_WRITE);
2960 if (error != 0) {
2961 aprint_error_dev(sc->sc_dev,
2962 "can't map mbuf (error %d)\n", error);
2963 m_freem(m);
2964 return error;
2965 }
2966 }
2967
2968 data->m = m;
2969 data->ni = ni;
2970
2971 DPRINTFN(4, ("sending data: qid=%d idx=%d len=%d nsegs=%d\n",
2972 ring->qid, ring->cur, m->m_pkthdr.len, data->map->dm_nsegs));
2973
2974 /* Fill TX descriptor. */
2975 desc->nsegs = 1 + data->map->dm_nsegs;
2976 /* First DMA segment is used by the TX command. */
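/*
 * Each segment's "len" field packs the byte count in its upper 12
 * bits; the low 4 bits carry the high part of the DMA address
 * (IWN_HIADDR).
 */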
2977 desc->segs[0].addr = htole32(IWN_LOADDR(data->cmd_paddr));
2978 desc->segs[0].len = htole16(IWN_HIADDR(data->cmd_paddr) |
2979 (4 + sizeof (*tx) + hdrlen + pad) << 4);
2980 /* Other DMA segments are for data payload. */
2981 seg = data->map->dm_segs;
2982 for (i = 1; i <= data->map->dm_nsegs; i++) {
2983 desc->segs[i].addr = htole32(IWN_LOADDR(seg->ds_addr));
2984 desc->segs[i].len = htole16(IWN_HIADDR(seg->ds_addr) |
2985 seg->ds_len << 4);
2986 seg++;
2987 }
2988
2989 bus_dmamap_sync(sc->sc_dmat, data->map, 0, data->map->dm_mapsize,
2990 BUS_DMASYNC_PREWRITE);
2991 bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
2992 (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
2993 sizeof (*cmd), BUS_DMASYNC_PREWRITE);
2994 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
2995 (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
2996 sizeof (*desc), BUS_DMASYNC_PREWRITE);
2997
2998 #ifdef notyet
2999 /* Update TX scheduler. */
3000 ops->update_sched(sc, ring->qid, ring->cur, tx->id, totlen);
3001 #endif
3002
3003 /* Kick TX ring. */
3004 ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT;
3005 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3006
3007 /* Mark TX ring as full if we reach a certain threshold. */
3008 if (++ring->queued > IWN_TX_RING_HIMARK)
3009 sc->qfullmsk |= 1 << ring->qid;
3010
3011 return 0;
3012 }
3013
3014 static void
3015 iwn_start(struct ifnet *ifp)
3016 {
3017 struct iwn_softc *sc = ifp->if_softc;
3018 struct ieee80211com *ic = &sc->sc_ic;
3019 struct ieee80211_node *ni;
3020 struct ether_header *eh;
3021 struct mbuf *m;
3022 int ac;
3023
3024 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
3025 return;
3026
3027 for (;;) {
3028 if (sc->qfullmsk != 0) {
3029 ifp->if_flags |= IFF_OACTIVE;
3030 break;
3031 }
3032 /* Send pending management frames first. */
3033 IF_DEQUEUE(&ic->ic_mgtq, m);
3034 if (m != NULL) {
3035 ni = (void *)m->m_pkthdr.rcvif;
3036 ac = 0;
3037 goto sendit;
3038 }
3039 if (ic->ic_state != IEEE80211_S_RUN)
3040 break;
3041
3042 /* Encapsulate and send data frames. */
3043 IFQ_DEQUEUE(&ifp->if_snd, m);
3044 if (m == NULL)
3045 break;
3046 if (m->m_len < sizeof (*eh) &&
3047 (m = m_pullup(m, sizeof (*eh))) == NULL) {
3048 ifp->if_oerrors++;
3049 continue;
3050 }
3051 eh = mtod(m, struct ether_header *);
3052 ni = ieee80211_find_txnode(ic, eh->ether_dhost);
3053 if (ni == NULL) {
3054 m_freem(m);
3055 ifp->if_oerrors++;
3056 continue;
3057 }
3058 /* classify mbuf so we can find which tx ring to use */
3059 if (ieee80211_classify(ic, m, ni) != 0) {
3060 m_freem(m);
3061 ieee80211_free_node(ni);
3062 ifp->if_oerrors++;
3063 continue;
3064 }
3065
3066 /* No QoS encapsulation for EAPOL frames. */
3067 ac = (eh->ether_type != htons(ETHERTYPE_PAE)) ?
3068 M_WME_GETAC(m) : WME_AC_BE;
3069
3070 bpf_mtap(ifp, m);
3071
3072 if ((m = ieee80211_encap(ic, m, ni)) == NULL) {
3073 ieee80211_free_node(ni);
3074 ifp->if_oerrors++;
3075 continue;
3076 }
3077 sendit:
3078 bpf_mtap3(ic->ic_rawbpf, m);
3079
3080 if (iwn_tx(sc, m, ni, ac) != 0) {
3081 ieee80211_free_node(ni);
3082 ifp->if_oerrors++;
3083 continue;
3084 }
3085
3086 sc->sc_tx_timer = 5;
3087 ifp->if_timer = 1;
3088 }
3089 }
3090
3091 static void
3092 iwn_watchdog(struct ifnet *ifp)
3093 {
3094 struct iwn_softc *sc = ifp->if_softc;
3095
3096 ifp->if_timer = 0;
3097
3098 if (sc->sc_tx_timer > 0) {
3099 if (--sc->sc_tx_timer == 0) {
3100 aprint_error_dev(sc->sc_dev,
3101 "device timeout\n");
3102 ifp->if_flags &= ~IFF_UP;
3103 iwn_stop(ifp, 1);
3104 ifp->if_oerrors++;
3105 return;
3106 }
3107 ifp->if_timer = 1;
3108 }
3109
3110 ieee80211_watchdog(&sc->sc_ic);
3111 }
3112
3113 static int
3114 iwn_ioctl(struct ifnet *ifp, u_long cmd, void *data)
3115 {
3116 struct iwn_softc *sc = ifp->if_softc;
3117 struct ieee80211com *ic = &sc->sc_ic;
3118 struct ifaddr *ifa;
3119 const struct sockaddr *sa;
3120 int s, error = 0;
3121
3122 s = splnet();
3123
3124 switch (cmd) {
3125 case SIOCSIFADDR:
3126 ifa = (struct ifaddr *)data;
3127 ifp->if_flags |= IFF_UP;
3128 #ifdef INET
3129 if (ifa->ifa_addr->sa_family == AF_INET)
3130 arp_ifinit(&ic->ic_ac, ifa);
3131 #endif
3132 /* FALLTHROUGH */
3133 case SIOCSIFFLAGS:
3134 /* XXX Added as it is in every NetBSD driver */
3135 if ((error = ifioctl_common(ifp, cmd, data)) != 0)
3136 break;
3137 if (ifp->if_flags & IFF_UP) {
3138 if (!(ifp->if_flags & IFF_RUNNING))
3139 error = iwn_init(ifp);
3140 } else {
3141 if (ifp->if_flags & IFF_RUNNING)
3142 iwn_stop(ifp, 1);
3143 }
3144 break;
3145
3146 case SIOCADDMULTI:
3147 case SIOCDELMULTI:
3148 sa = ifreq_getaddr(SIOCADDMULTI, (struct ifreq *)data);
3149 error = (cmd == SIOCADDMULTI) ?
3150 ether_addmulti(sa, &sc->sc_ec) :
3151 ether_delmulti(sa, &sc->sc_ec);
3152
3153 if (error == ENETRESET)
3154 error = 0;
3155 break;
3156
3157 default:
3158 error = ieee80211_ioctl(ic, cmd, data);
3159 }
3160
3161 if (error == ENETRESET) {
3162 error = 0;
3163 if ((ifp->if_flags & (IFF_UP | IFF_RUNNING)) ==
3164 (IFF_UP | IFF_RUNNING)) {
3165 iwn_stop(ifp, 0);
3166 error = iwn_init(ifp);
3167 }
3168 }
3169
3170 splx(s);
3171 return error;
3172 }
3173
3174 /*
3175 * Send a command to the firmware.
3176 */
3177 static int
3178 iwn_cmd(struct iwn_softc *sc, int code, const void *buf, int size, int async)
3179 {
3180 struct iwn_tx_ring *ring = &sc->txq[4];
3181 struct iwn_tx_desc *desc;
3182 struct iwn_tx_data *data;
3183 struct iwn_tx_cmd *cmd;
3184 struct mbuf *m;
3185 bus_addr_t paddr;
3186 int totlen, error;
3187
3188 desc = &ring->desc[ring->cur];
3189 data = &ring->data[ring->cur];
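/* Total length: 4-byte command header (code, flags, qid, idx) + payload. */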
3190 totlen = 4 + size;
3191
3192 if (size > sizeof cmd->data) {
3193 /* Command is too large to fit in a descriptor. */
3194 if (totlen > MCLBYTES)
3195 return EINVAL;
3196 MGETHDR(m, M_DONTWAIT, MT_DATA);
3197 if (m == NULL)
3198 return ENOMEM;
3199 if (totlen > MHLEN) {
3200 MCLGET(m, M_DONTWAIT);
3201 if (!(m->m_flags & M_EXT)) {
3202 m_freem(m);
3203 return ENOMEM;
3204 }
3205 }
3206 cmd = mtod(m, struct iwn_tx_cmd *);
3207 error = bus_dmamap_load(sc->sc_dmat, data->map, cmd, totlen,
3208 NULL, BUS_DMA_NOWAIT | BUS_DMA_WRITE);
3209 if (error != 0) {
3210 m_freem(m);
3211 return error;
3212 }
3213 data->m = m;
3214 paddr = data->map->dm_segs[0].ds_addr;
3215 } else {
3216 cmd = &ring->cmd[ring->cur];
3217 paddr = data->cmd_paddr;
3218 }
3219
3220 cmd->code = code;
3221 cmd->flags = 0;
3222 cmd->qid = ring->qid;
3223 cmd->idx = ring->cur;
3224 memcpy(cmd->data, buf, size);
3225
3226 desc->nsegs = 1;
3227 desc->segs[0].addr = htole32(IWN_LOADDR(paddr));
3228 desc->segs[0].len = htole16(IWN_HIADDR(paddr) | totlen << 4);
3229
3230 if (size > sizeof cmd->data) {
3231 bus_dmamap_sync(sc->sc_dmat, data->map, 0, totlen,
3232 BUS_DMASYNC_PREWRITE);
3233 } else {
3234 bus_dmamap_sync(sc->sc_dmat, ring->cmd_dma.map,
3235 (char *)(void *)cmd - (char *)(void *)ring->cmd_dma.vaddr,
3236 totlen, BUS_DMASYNC_PREWRITE);
3237 }
3238 bus_dmamap_sync(sc->sc_dmat, ring->desc_dma.map,
3239 (char *)(void *)desc - (char *)(void *)ring->desc_dma.vaddr,
3240 sizeof (*desc), BUS_DMASYNC_PREWRITE);
3241
3242 #ifdef notyet
3243 /* Update TX scheduler. */
3244 ops->update_sched(sc, ring->qid, ring->cur, 0, 0);
3245 #endif
3246 DPRINTFN(4, ("iwn_cmd %d size=%d%s\n", code, size, async ? " (async)" : ""));
3247
3248 /* Kick command ring. */
3249 ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT;
3250 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3251
3252 return async ? 0 : tsleep(desc, PCATCH, "iwncmd", hz);
3253 }
3254
3255 static int
3256 iwn4965_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async)
3257 {
3258 struct iwn4965_node_info hnode;
3259 char *src, *dst;
3260
3261 /*
3262 * We use the node structure for 5000 Series internally (it is
3263 * a superset of the one for 4965AGN). We thus copy the common
3264 * fields before sending the command.
3265 */
3266 src = (char *)node;
3267 dst = (char *)&hnode;
3268 memcpy(dst, src, 48);
3269 /* Skip TSC, RX MIC and TX MIC fields from ``src''. */
3270 memcpy(dst + 48, src + 72, 20);
3271 return iwn_cmd(sc, IWN_CMD_ADD_NODE, &hnode, sizeof hnode, async);
3272 }
3273
3274 static int
3275 iwn5000_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async)
3276 {
3277 /* Direct mapping. */
3278 return iwn_cmd(sc, IWN_CMD_ADD_NODE, node, sizeof (*node), async);
3279 }
3280
3281 static int
3282 iwn_set_link_quality(struct iwn_softc *sc, struct ieee80211_node *ni)
3283 {
3284 struct iwn_node *wn = (void *)ni;
3285 struct ieee80211_rateset *rs = &ni->ni_rates;
3286 struct iwn_cmd_link_quality linkq;
3287 const struct iwn_rate *rinfo;
3288 uint8_t txant;
3289 int i, txrate;
3290
3291 /* Use the first valid TX antenna. */
3292 txant = IWN_LSB(sc->txchainmask);
3293
3294 memset(&linkq, 0, sizeof linkq);
3295 linkq.id = wn->id;
3296 linkq.antmsk_1stream = txant;
3297 linkq.antmsk_2stream = IWN_ANT_AB;
3298 linkq.ampdu_max = 31;
3299 linkq.ampdu_threshold = 3;
3300 linkq.ampdu_limit = htole16(4000); /* 4ms */
3301
3302 /* Start at highest available bit-rate. */
3303 txrate = rs->rs_nrates - 1;
3304 for (i = 0; i < IWN_MAX_TX_RETRIES; i++) {
3305 rinfo = &iwn_rates[wn->ridx[txrate]];
3306 linkq.retry[i].plcp = rinfo->plcp;
3307 linkq.retry[i].rflags = rinfo->flags;
3308 linkq.retry[i].rflags |= IWN_RFLAG_ANT(txant);
3309 /* Next retry at immediate lower bit-rate. */
3310 if (txrate > 0)
3311 txrate--;
3312 }
3313 return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, 1);
3314 }
3315
3316 /*
3317 * The broadcast node is used to send group-addressed and management frames.
3318 */
3319 static int
3320 iwn_add_broadcast_node(struct iwn_softc *sc, int async)
3321 {
3322 struct iwn_ops *ops = &sc->ops;
3323 struct iwn_node_info node;
3324 struct iwn_cmd_link_quality linkq;
3325 const struct iwn_rate *rinfo;
3326 uint8_t txant;
3327 int i, error;
3328
3329 memset(&node, 0, sizeof node);
3330 IEEE80211_ADDR_COPY(node.macaddr, etherbroadcastaddr);
3331 node.id = sc->broadcast_id;
3332 DPRINTF(("adding broadcast node\n"));
3333 if ((error = ops->add_node(sc, &node, async)) != 0)
3334 return error;
3335
3336 /* Use the first valid TX antenna. */
3337 txant = IWN_LSB(sc->txchainmask);
3338
3339 memset(&linkq, 0, sizeof linkq);
3340 linkq.id = sc->broadcast_id;
3341 linkq.antmsk_1stream = txant;
3342 linkq.antmsk_2stream = IWN_ANT_AB;
3343 linkq.ampdu_max = 64;
3344 linkq.ampdu_threshold = 3;
3345 linkq.ampdu_limit = htole16(4000); /* 4ms */
3346
3347 /* Use lowest mandatory bit-rate. */
3348 rinfo = (sc->sc_ic.ic_curmode != IEEE80211_MODE_11A) ?
3349 &iwn_rates[IWN_RIDX_CCK1] : &iwn_rates[IWN_RIDX_OFDM6];
3350 linkq.retry[0].plcp = rinfo->plcp;
3351 linkq.retry[0].rflags = rinfo->flags;
3352 linkq.retry[0].rflags |= IWN_RFLAG_ANT(txant);
3353 /* Use same bit-rate for all TX retries. */
3354 for (i = 1; i < IWN_MAX_TX_RETRIES; i++) {
3355 linkq.retry[i].plcp = linkq.retry[0].plcp;
3356 linkq.retry[i].rflags = linkq.retry[0].rflags;
3357 }
3358 return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, async);
3359 }
3360
3361 static void
3362 iwn_set_led(struct iwn_softc *sc, uint8_t which, uint8_t off, uint8_t on)
3363 {
3364 struct iwn_cmd_led led;
3365
3366 /* Clear microcode LED ownership. */
3367 IWN_CLRBITS(sc, IWN_LED, IWN_LED_BSM_CTRL);
3368
3369 led.which = which;
3370 led.unit = htole32(10000); /* on/off in unit of 100ms */
3371 led.off = off;
3372 led.on = on;
3373 (void)iwn_cmd(sc, IWN_CMD_SET_LED, &led, sizeof led, 1);
3374 }
3375
3376 /*
3377 * Set the critical temperature at which the firmware will stop the radio
3378 * and notify us.
3379 */
3380 static int
3381 iwn_set_critical_temp(struct iwn_softc *sc)
3382 {
3383 struct iwn_critical_temp crit;
3384 int32_t temp;
3385
3386 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CTEMP_STOP_RF);
3387
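/*
 * The 5150 stores temperature in raw units; this is the inverse of
 * the conversion done in iwn5000_get_temperature().
 */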
3388 if (sc->hw_type == IWN_HW_REV_TYPE_5150)
3389 temp = (IWN_CTOK(110) - sc->temp_off) * -5;
3390 else if (sc->hw_type == IWN_HW_REV_TYPE_4965)
3391 temp = IWN_CTOK(110);
3392 else
3393 temp = 110;
3394 memset(&crit, 0, sizeof crit);
3395 crit.tempR = htole32(temp);
3396 DPRINTF(("setting critical temperature to %d\n", temp));
3397 return iwn_cmd(sc, IWN_CMD_SET_CRITICAL_TEMP, &crit, sizeof crit, 0);
3398 }
3399
3400 static int
3401 iwn_set_timing(struct iwn_softc *sc, struct ieee80211_node *ni)
3402 {
3403 struct iwn_cmd_timing cmd;
3404 uint64_t val, mod;
3405
3406 memset(&cmd, 0, sizeof cmd);
3407 memcpy(&cmd.tstamp, ni->ni_tstamp.data, sizeof (uint64_t));
3408 cmd.bintval = htole16(ni->ni_intval);
3409 cmd.lintval = htole16(10);
3410
3411 /* Compute remaining time until next beacon. */
3412 val = (uint64_t)ni->ni_intval * 1024; /* TU -> usecs (1 TU = 1024us) */
3413 mod = le64toh(cmd.tstamp) % val;
3414 cmd.binitval = htole32((uint32_t)(val - mod));
3415
3416 DPRINTF(("timing bintval=%u, tstamp=%" PRIu64 ", init=%" PRIu32 "\n",
3417 ni->ni_intval, le64toh(cmd.tstamp), (uint32_t)(val - mod)));
3418
3419 return iwn_cmd(sc, IWN_CMD_TIMING, &cmd, sizeof cmd, 1);
3420 }
3421
3422 static void
3423 iwn4965_power_calibration(struct iwn_softc *sc, int temp)
3424 {
3425 /* Adjust TX power if need be (delta >= 3 degC). */
3426 DPRINTF(("temperature %d->%d\n", sc->temp, temp));
3427 if (abs(temp - sc->temp) >= 3) {
3428 /* Record temperature of last calibration. */
3429 sc->temp = temp;
3430 (void)iwn4965_set_txpower(sc, 1);
3431 }
3432 }
3433
3434 /*
3435 * Set TX power for current channel (each rate has its own power settings).
3436 * This function takes into account the regulatory information from EEPROM,
3437 * the current temperature and the current voltage.
3438 */
3439 static int
3440 iwn4965_set_txpower(struct iwn_softc *sc, int async)
3441 {
3442 /* Fixed-point arithmetic division using an n-bit fractional part. */
3443 #define fdivround(a, b, n) \
3444 ((((1 << n) * (a)) / (b) + (1 << n) / 2) / (1 << n))
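/* e.g. fdivround(7, 2, 1) == 4: 7/2 = 3.5 rounded to the nearest integer. */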
3445 /* Linear interpolation. */
3446 #define interpolate(x, x1, y1, x2, y2, n) \
3447 ((y1) + fdivround(((int)(x) - (x1)) * ((y2) - (y1)), (x2) - (x1), n))
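/*
 * interpolate() evaluates the line through (x1,y1) and (x2,y2) at x,
 * using fdivround() so the result is rounded rather than truncated.
 */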
3448
3449 static const int tdiv[IWN_NATTEN_GROUPS] = { 9, 8, 8, 8, 6 };
3450 struct ieee80211com *ic = &sc->sc_ic;
3451 struct iwn_ucode_info *uc = &sc->ucode_info;
3452 struct ieee80211_channel *ch;
3453 struct iwn4965_cmd_txpower cmd;
3454 struct iwn4965_eeprom_chan_samples *chans;
3455 const uint8_t *rf_gain, *dsp_gain;
3456 int32_t vdiff, tdiff;
3457 int i, c, grp, maxpwr;
3458 uint8_t chan;
3459
3460 /* Retrieve current channel from last RXON. */
3461 chan = sc->rxon.chan;
3462 DPRINTF(("setting TX power for channel %d\n", chan));
3463 ch = &ic->ic_channels[chan];
3464
3465 memset(&cmd, 0, sizeof cmd);
3466 cmd.band = IEEE80211_IS_CHAN_5GHZ(ch) ? 0 : 1;
3467 cmd.chan = chan;
3468
3469 if (IEEE80211_IS_CHAN_5GHZ(ch)) {
3470 maxpwr = sc->maxpwr5GHz;
3471 rf_gain = iwn4965_rf_gain_5ghz;
3472 dsp_gain = iwn4965_dsp_gain_5ghz;
3473 } else {
3474 maxpwr = sc->maxpwr2GHz;
3475 rf_gain = iwn4965_rf_gain_2ghz;
3476 dsp_gain = iwn4965_dsp_gain_2ghz;
3477 }
3478
3479 /* Compute voltage compensation. */
3480 vdiff = ((int32_t)le32toh(uc->volt) - sc->eeprom_voltage) / 7;
3481 if (vdiff > 0)
3482 vdiff *= 2;
3483 if (abs(vdiff) > 2)
3484 vdiff = 0;
3485 DPRINTF(("voltage compensation=%d (UCODE=%d, EEPROM=%d)\n",
3486 vdiff, le32toh(uc->volt), sc->eeprom_voltage));
3487
3488 /* Get channel attenuation group. */
3489 if (chan <= 20) /* 1-20 */
3490 grp = 4;
3491 else if (chan <= 43) /* 34-43 */
3492 grp = 0;
3493 else if (chan <= 70) /* 44-70 */
3494 grp = 1;
3495 else if (chan <= 124) /* 71-124 */
3496 grp = 2;
3497 else /* 125-200 */
3498 grp = 3;
3499 DPRINTF(("chan %d, attenuation group=%d\n", chan, grp));
3500
3501 /* Get channel sub-band. */
3502 for (i = 0; i < IWN_NBANDS; i++)
3503 if (sc->bands[i].lo != 0 &&
3504 sc->bands[i].lo <= chan && chan <= sc->bands[i].hi)
3505 break;
3506 if (i == IWN_NBANDS) /* Can't happen in real life. */
3507 return EINVAL;
3508 chans = sc->bands[i].chans;
3509 DPRINTF(("chan %d sub-band=%d\n", chan, i));
3510
3511 for (c = 0; c < 2; c++) {
3512 uint8_t power, gain, temp;
3513 int maxchpwr, pwr, ridx, idx;
3514
3515 power = interpolate(chan,
3516 chans[0].num, chans[0].samples[c][1].power,
3517 chans[1].num, chans[1].samples[c][1].power, 1);
3518 gain = interpolate(chan,
3519 chans[0].num, chans[0].samples[c][1].gain,
3520 chans[1].num, chans[1].samples[c][1].gain, 1);
3521 temp = interpolate(chan,
3522 chans[0].num, chans[0].samples[c][1].temp,
3523 chans[1].num, chans[1].samples[c][1].temp, 1);
3524 DPRINTF(("TX chain %d: power=%d gain=%d temp=%d\n",
3525 c, power, gain, temp));
3526
3527 /* Compute temperature compensation. */
3528 tdiff = ((sc->temp - temp) * 2) / tdiv[grp];
3529 DPRINTF(("temperature compensation=%d (current=%d, "
3530 "EEPROM=%d)\n", tdiff, sc->temp, temp));
3531
3532 for (ridx = 0; ridx <= IWN_RIDX_MAX; ridx++) {
3533 /* Convert dBm to half-dBm. */
3534 maxchpwr = sc->maxpwr[chan] * 2;
3535 if ((ridx / 8) & 1)
3536 maxchpwr -= 6; /* MIMO 2T: -3dB */
3537
3538 pwr = maxpwr;
3539
3540 /* Adjust TX power based on rate. */
3541 if ((ridx % 8) == 5)
3542 pwr -= 15; /* OFDM48: -7.5dB */
3543 else if ((ridx % 8) == 6)
3544 pwr -= 17; /* OFDM54: -8.5dB */
3545 else if ((ridx % 8) == 7)
3546 pwr -= 20; /* OFDM60: -10dB */
3547 else
3548 pwr -= 10; /* Others: -5dB */
3549
3550 /* Do not exceed channel max TX power. */
3551 if (pwr > maxchpwr)
3552 pwr = maxchpwr;
3553
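/*
 * Power index: start from the factory-calibrated gain, adjust for the
 * difference between the requested and factory-measured power, then
 * apply the temperature and voltage compensation computed above.
 */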
3554 idx = gain - (pwr - power) - tdiff - vdiff;
3555 if ((ridx / 8) & 1) /* MIMO */
3556 idx += (int32_t)le32toh(uc->atten[grp][c]);
3557
3558 if (cmd.band == 0)
3559 idx += 9; /* 5GHz */
3560 if (ridx == IWN_RIDX_MAX)
3561 idx += 5; /* CCK */
3562
3563 /* Make sure idx stays in a valid range. */
3564 if (idx < 0)
3565 idx = 0;
3566 else if (idx > IWN4965_MAX_PWR_INDEX)
3567 idx = IWN4965_MAX_PWR_INDEX;
3568
3569 DPRINTF(("TX chain %d, rate idx %d: power=%d\n",
3570 c, ridx, idx));
3571 cmd.power[ridx].rf_gain[c] = rf_gain[idx];
3572 cmd.power[ridx].dsp_gain[c] = dsp_gain[idx];
3573 }
3574 }
3575
3576 DPRINTF(("setting TX power for chan %d\n", chan));
3577 return iwn_cmd(sc, IWN_CMD_TXPOWER, &cmd, sizeof cmd, async);
3578
3579 #undef interpolate
3580 #undef fdivround
3581 }
3582
3583 static int
3584 iwn5000_set_txpower(struct iwn_softc *sc, int async)
3585 {
3586 struct iwn5000_cmd_txpower cmd;
3587
3588 /*
3589 * TX power calibration is handled automatically by the firmware
3590 * for 5000 Series.
3591 */
3592 memset(&cmd, 0, sizeof cmd);
3593 cmd.global_limit = 2 * IWN5000_TXPOWER_MAX_DBM; /* 16 dBm */
3594 cmd.flags = IWN5000_TXPOWER_NO_CLOSED;
3595 cmd.srv_limit = IWN5000_TXPOWER_AUTO;
3596 DPRINTF(("setting TX power\n"));
3597 return iwn_cmd(sc, IWN_CMD_TXPOWER_DBM, &cmd, sizeof cmd, async);
3598 }
3599
3600 /*
3601 * Retrieve the maximum RSSI (in dBm) among receivers.
3602 */
3603 static int
3604 iwn4965_get_rssi(const struct iwn_rx_stat *stat)
3605 {
3606 const struct iwn4965_rx_phystat *phy = (const void *)stat->phybuf;
3607 uint8_t mask, agc;
3608 int rssi;
3609
3610 mask = (le16toh(phy->antenna) >> 4) & IWN_ANT_ABC;
3611 agc = (le16toh(phy->agc) >> 7) & 0x7f;
3612
3613 rssi = 0;
3614 if (mask & IWN_ANT_A)
3615 rssi = MAX(rssi, phy->rssi[0]);
3616 if (mask & IWN_ANT_B)
3617 rssi = MAX(rssi, phy->rssi[2]);
3618 if (mask & IWN_ANT_C)
3619 rssi = MAX(rssi, phy->rssi[4]);
3620
3621 return rssi - agc - IWN_RSSI_TO_DBM;
3622 }
3623
3624 static int
3625 iwn5000_get_rssi(const struct iwn_rx_stat *stat)
3626 {
3627 const struct iwn5000_rx_phystat *phy = (const void *)stat->phybuf;
3628 uint8_t agc;
3629 int rssi;
3630
3631 agc = (le32toh(phy->agc) >> 9) & 0x7f;
3632
3633 rssi = MAX(le16toh(phy->rssi[0]) & 0xff,
3634 le16toh(phy->rssi[1]) & 0xff);
3635 rssi = MAX(le16toh(phy->rssi[2]) & 0xff, rssi);
3636
3637 return rssi - agc - IWN_RSSI_TO_DBM;
3638 }
3639
3640 /*
3641 * Retrieve the average noise (in dBm) among receivers.
3642 */
3643 static int
3644 iwn_get_noise(const struct iwn_rx_general_stats *stats)
3645 {
3646 int i, total, nbant, noise;
3647
3648 total = nbant = 0;
3649 for (i = 0; i < 3; i++) {
3650 if ((noise = le32toh(stats->noise[i]) & 0xff) == 0)
3651 continue;
3652 total += noise;
3653 nbant++;
3654 }
3655 /* There should be at least one antenna but check anyway. */
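	/*
	 * e.g. with per-antenna readings of 30, 28 and 0 (third receiver
	 * idle), total = 58 and nbant = 2, so this returns
	 * 58 / 2 - 107 = -78 dBm.
	 */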
3656 return (nbant == 0) ? -127 : (total / nbant) - 107;
3657 }
3658
3659 /*
3660 * Compute temperature (in degC) from last received statistics.
3661 */
3662 static int
3663 iwn4965_get_temperature(struct iwn_softc *sc)
3664 {
3665 struct iwn_ucode_info *uc = &sc->ucode_info;
3666 int32_t r1, r2, r3, r4, temp;
3667
3668 r1 = le32toh(uc->temp[0].chan20MHz);
3669 r2 = le32toh(uc->temp[1].chan20MHz);
3670 r3 = le32toh(uc->temp[2].chan20MHz);
3671 r4 = le32toh(sc->rawtemp);
3672
3673 if (r1 == r3) /* Prevents division by 0 (should not happen). */
3674 return 0;
3675
3676 /* Sign-extend 23-bit R4 value to 32-bit. */
3677 r4 = ((r4 & 0xffffff) ^ 0x800000) - 0x800000;
3678 /* Compute temperature in Kelvin. */
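	/*
	 * The raw reading R4 is mapped linearly onto the factory calibration
	 * points (R1, R2, R3) reported by the ucode, then scaled by 97/100
	 * and offset by 8 to obtain a value in Kelvin.
	 */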
3679 temp = (259 * (r4 - r2)) / (r3 - r1);
3680 temp = (temp * 97) / 100 + 8;
3681
3682 DPRINTF(("temperature %dK/%dC\n", temp, IWN_KTOC(temp)));
3683 return IWN_KTOC(temp);
3684 }
3685
3686 static int
3687 iwn5000_get_temperature(struct iwn_softc *sc)
3688 {
3689 int32_t temp;
3690
3691 /*
3692 	 * Temperature is not used by the driver for the 5000 Series because
3693 	 * TX power calibration is handled by the firmware. We still export
3694 	 * it to users through the sensor framework.
3695 */
3696 temp = le32toh(sc->rawtemp);
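	/*
	 * On the 5150 the raw reading must be scaled and offset by the
	 * EEPROM temperature offset (sc->temp_off) before being converted
	 * from Kelvin to Celsius below.
	 */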
3697 if (sc->hw_type == IWN_HW_REV_TYPE_5150) {
3698 temp = (temp / -5) + sc->temp_off;
3699 temp = IWN_KTOC(temp);
3700 }
3701 return temp;
3702 }
3703
3704 /*
3705 * Initialize sensitivity calibration state machine.
3706 */
3707 static int
3708 iwn_init_sensitivity(struct iwn_softc *sc)
3709 {
3710 struct iwn_ops *ops = &sc->ops;
3711 struct iwn_calib_state *calib = &sc->calib;
3712 uint32_t flags;
3713 int error;
3714
3715 /* Reset calibration state machine. */
3716 memset(calib, 0, sizeof (*calib));
3717 calib->state = IWN_CALIB_STATE_INIT;
3718 calib->cck_state = IWN_CCK_STATE_HIFA;
3719 /* Set initial correlation values. */
3720 calib->ofdm_x1 = sc->limits->min_ofdm_x1;
3721 calib->ofdm_mrc_x1 = sc->limits->min_ofdm_mrc_x1;
3722 calib->ofdm_x4 = sc->limits->min_ofdm_x4;
3723 calib->ofdm_mrc_x4 = sc->limits->min_ofdm_mrc_x4;
3724 calib->cck_x4 = 125;
3725 calib->cck_mrc_x4 = sc->limits->min_cck_mrc_x4;
3726 calib->energy_cck = sc->limits->energy_cck;
3727
3728 /* Write initial sensitivity. */
3729 if ((error = iwn_send_sensitivity(sc)) != 0)
3730 return error;
3731
3732 /* Write initial gains. */
3733 if ((error = ops->init_gains(sc)) != 0)
3734 return error;
3735
3736 /* Request statistics at each beacon interval. */
3737 flags = 0;
3738 DPRINTF(("sending request for statistics\n"));
3739 return iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags, sizeof flags, 1);
3740 }
3741
3742 /*
3743 * Collect noise and RSSI statistics for the first 20 beacons received
3744 * after association and use them to determine connected antennas and
3745 * to set differential gains.
3746 */
3747 static void
3748 iwn_collect_noise(struct iwn_softc *sc,
3749 const struct iwn_rx_general_stats *stats)
3750 {
3751 struct iwn_ops *ops = &sc->ops;
3752 struct iwn_calib_state *calib = &sc->calib;
3753 uint32_t val;
3754 int i;
3755
3756 /* Accumulate RSSI and noise for all 3 antennas. */
3757 for (i = 0; i < 3; i++) {
3758 calib->rssi[i] += le32toh(stats->rssi[i]) & 0xff;
3759 calib->noise[i] += le32toh(stats->noise[i]) & 0xff;
3760 }
3761 /* NB: We update differential gains only once after 20 beacons. */
3762 if (++calib->nbeacons < 20)
3763 return;
3764
3765 /* Determine highest average RSSI. */
3766 val = MAX(calib->rssi[0], calib->rssi[1]);
3767 val = MAX(calib->rssi[2], val);
3768
3769 /* Determine which antennas are connected. */
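	/*
	 * calib->rssi[] holds sums over the 20 collected beacons, so the
	 * "15 * 20" test below flags an antenna as disconnected when its
	 * average RSSI is more than 15 units below the best antenna.
	 */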
3770 sc->chainmask = sc->rxchainmask;
3771 for (i = 0; i < 3; i++)
3772 if (val - calib->rssi[i] > 15 * 20)
3773 sc->chainmask &= ~(1 << i);
3774 DPRINTF(("RX chains mask: theoretical=0x%x, actual=0x%x\n",
3775 sc->rxchainmask, sc->chainmask));
3776
3777 /* If none of the TX antennas are connected, keep at least one. */
3778 if ((sc->chainmask & sc->txchainmask) == 0)
3779 sc->chainmask |= IWN_LSB(sc->txchainmask);
3780
3781 (void)ops->set_gains(sc);
3782 calib->state = IWN_CALIB_STATE_RUN;
3783
3784 #ifdef notyet
3785 /* XXX Disable RX chains with no antennas connected. */
3786 sc->rxon.rxchain = htole16(IWN_RXCHAIN_SEL(sc->chainmask));
3787 (void)iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 1);
3788 #endif
3789
3790 /* Enable power-saving mode if requested by user. */
3791 if (sc->sc_ic.ic_flags & IEEE80211_F_PMGTON)
3792 (void)iwn_set_pslevel(sc, 0, 3, 1);
3793 }
3794
3795 static int
3796 iwn4965_init_gains(struct iwn_softc *sc)
3797 {
3798 struct iwn_phy_calib_gain cmd;
3799
3800 memset(&cmd, 0, sizeof cmd);
3801 cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN;
3802 /* Differential gains initially set to 0 for all 3 antennas. */
3803 DPRINTF(("setting initial differential gains\n"));
3804 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
3805 }
3806
3807 static int
3808 iwn5000_init_gains(struct iwn_softc *sc)
3809 {
3810 struct iwn_phy_calib cmd;
3811
3812 memset(&cmd, 0, sizeof cmd);
3813 cmd.code = IWN5000_PHY_CALIB_RESET_NOISE_GAIN;
3814 cmd.ngroups = 1;
3815 cmd.isvalid = 1;
3816 DPRINTF(("setting initial differential gains\n"));
3817 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
3818 }
3819
3820 static int
3821 iwn4965_set_gains(struct iwn_softc *sc)
3822 {
3823 struct iwn_calib_state *calib = &sc->calib;
3824 struct iwn_phy_calib_gain cmd;
3825 int i, delta, noise;
3826
3827 /* Get minimal noise among connected antennas. */
3828 noise = INT_MAX; /* NB: There's at least one antenna. */
3829 for (i = 0; i < 3; i++)
3830 if (sc->chainmask & (1 << i))
3831 noise = MIN(calib->noise[i], noise);
3832
3833 memset(&cmd, 0, sizeof cmd);
3834 cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN;
3835 /* Set differential gains for connected antennas. */
3836 for (i = 0; i < 3; i++) {
3837 if (sc->chainmask & (1 << i)) {
3838 /* Compute attenuation (in unit of 1.5dB). */
3839 delta = (noise - (int32_t)calib->noise[i]) / 30;
3840 /* NB: delta <= 0 */
3841 /* Limit to [-4.5dB,0]. */
3842 cmd.gain[i] = MIN(abs(delta), 3);
3843 if (delta < 0)
3844 cmd.gain[i] |= 1 << 2; /* sign bit */
3845 }
3846 }
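	/*
	 * Illustrative example: an antenna whose accumulated noise is 60
	 * units above the minimum yields delta = -2, encoded as gain value
	 * 2 with the sign bit set (0x6), i.e. 3dB of attenuation in 1.5dB
	 * steps.
	 */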
3847 DPRINTF(("setting differential gains Ant A/B/C: %x/%x/%x (%x)\n",
3848 cmd.gain[0], cmd.gain[1], cmd.gain[2], sc->chainmask));
3849 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
3850 }
3851
3852 static int
3853 iwn5000_set_gains(struct iwn_softc *sc)
3854 {
3855 struct iwn_calib_state *calib = &sc->calib;
3856 struct iwn_phy_calib_gain cmd;
3857 int i, ant, div, delta;
3858
3859 	/* We collected 20 beacons; chips other than the 6050 need an extra 1.5 factor. */
3860 div = (sc->hw_type == IWN_HW_REV_TYPE_6050) ? 20 : 30;
3861
3862 memset(&cmd, 0, sizeof cmd);
3863 cmd.code = IWN5000_PHY_CALIB_NOISE_GAIN;
3864 cmd.ngroups = 1;
3865 cmd.isvalid = 1;
3866 	/* Use the first available RX antenna as the reference. */
3867 ant = IWN_LSB(sc->rxchainmask);
3868 /* Set differential gains for other antennas. */
3869 for (i = ant + 1; i < 3; i++) {
3870 if (sc->chainmask & (1 << i)) {
3871 /* The delta is relative to antenna "ant". */
3872 delta = ((int32_t)calib->noise[ant] -
3873 (int32_t)calib->noise[i]) / div;
3874 /* Limit to [-4.5dB,+4.5dB]. */
3875 cmd.gain[i - 1] = MIN(abs(delta), 3);
3876 if (delta < 0)
3877 cmd.gain[i - 1] |= 1 << 2; /* sign bit */
3878 }
3879 }
3880 DPRINTF(("setting differential gains: %x/%x (%x)\n",
3881 cmd.gain[0], cmd.gain[1], sc->chainmask));
3882 return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1);
3883 }
3884
3885 /*
3886 * Tune RF RX sensitivity based on the number of false alarms detected
3887 * during the last beacon period.
3888 */
3889 static void
3890 iwn_tune_sensitivity(struct iwn_softc *sc, const struct iwn_rx_stats *stats)
3891 {
3892 #define inc(val, inc, max) \
3893 if ((val) < (max)) { \
3894 if ((val) < (max) - (inc)) \
3895 (val) += (inc); \
3896 else \
3897 (val) = (max); \
3898 needs_update = 1; \
3899 }
3900 #define dec(val, dec, min) \
3901 if ((val) > (min)) { \
3902 if ((val) > (min) + (dec)) \
3903 (val) -= (dec); \
3904 else \
3905 (val) = (min); \
3906 needs_update = 1; \
3907 }
3908
3909 const struct iwn_sensitivity_limits *limits = sc->limits;
3910 struct iwn_calib_state *calib = &sc->calib;
3911 uint32_t val, rxena, fa;
3912 uint32_t energy[3], energy_min;
3913 uint8_t noise[3], noise_ref;
3914 int i, needs_update = 0;
3915
3916 /* Check that we've been enabled long enough. */
3917 if ((rxena = le32toh(stats->general.load)) == 0)
3918 return;
3919
3920 /* Compute number of false alarms since last call for OFDM. */
3921 fa = le32toh(stats->ofdm.bad_plcp) - calib->bad_plcp_ofdm;
3922 fa += le32toh(stats->ofdm.fa) - calib->fa_ofdm;
3923 fa *= 200 * 1024; /* 200TU */
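	/*
	 * fa is scaled to microseconds per 200 TU so that the comparisons
	 * against rxena (the time the receiver has been enabled) below
	 * effectively test for more than 50, or fewer than 5, false alarms
	 * per 200 TU of receive time.
	 */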
3924
3925 /* Save counters values for next call. */
3926 calib->bad_plcp_ofdm = le32toh(stats->ofdm.bad_plcp);
3927 calib->fa_ofdm = le32toh(stats->ofdm.fa);
3928
3929 if (fa > 50 * rxena) {
3930 /* High false alarm count, decrease sensitivity. */
3931 DPRINTFN(2, ("OFDM high false alarm count: %u\n", fa));
3932 inc(calib->ofdm_x1, 1, limits->max_ofdm_x1);
3933 inc(calib->ofdm_mrc_x1, 1, limits->max_ofdm_mrc_x1);
3934 inc(calib->ofdm_x4, 1, limits->max_ofdm_x4);
3935 inc(calib->ofdm_mrc_x4, 1, limits->max_ofdm_mrc_x4);
3936
3937 } else if (fa < 5 * rxena) {
3938 /* Low false alarm count, increase sensitivity. */
3939 DPRINTFN(2, ("OFDM low false alarm count: %u\n", fa));
3940 dec(calib->ofdm_x1, 1, limits->min_ofdm_x1);
3941 dec(calib->ofdm_mrc_x1, 1, limits->min_ofdm_mrc_x1);
3942 dec(calib->ofdm_x4, 1, limits->min_ofdm_x4);
3943 dec(calib->ofdm_mrc_x4, 1, limits->min_ofdm_mrc_x4);
3944 }
3945
3946 /* Compute maximum noise among 3 receivers. */
3947 for (i = 0; i < 3; i++)
3948 noise[i] = (le32toh(stats->general.noise[i]) >> 8) & 0xff;
3949 val = MAX(noise[0], noise[1]);
3950 val = MAX(noise[2], val);
3951 /* Insert it into our samples table. */
3952 calib->noise_samples[calib->cur_noise_sample] = val;
3953 calib->cur_noise_sample = (calib->cur_noise_sample + 1) % 20;
3954
3955 /* Compute maximum noise among last 20 samples. */
3956 noise_ref = calib->noise_samples[0];
3957 for (i = 1; i < 20; i++)
3958 noise_ref = MAX(noise_ref, calib->noise_samples[i]);
3959
3960 /* Compute maximum energy among 3 receivers. */
3961 for (i = 0; i < 3; i++)
3962 energy[i] = le32toh(stats->general.energy[i]);
3963 val = MIN(energy[0], energy[1]);
3964 val = MIN(energy[2], val);
3965 /* Insert it into our samples table. */
3966 calib->energy_samples[calib->cur_energy_sample] = val;
3967 calib->cur_energy_sample = (calib->cur_energy_sample + 1) % 10;
3968
3969 /* Compute minimum energy among last 10 samples. */
3970 energy_min = calib->energy_samples[0];
3971 for (i = 1; i < 10; i++)
3972 energy_min = MAX(energy_min, calib->energy_samples[i]);
3973 energy_min += 6;
3974
3975 /* Compute number of false alarms since last call for CCK. */
3976 fa = le32toh(stats->cck.bad_plcp) - calib->bad_plcp_cck;
3977 fa += le32toh(stats->cck.fa) - calib->fa_cck;
3978 fa *= 200 * 1024; /* 200TU */
3979
3980 /* Save counters values for next call. */
3981 calib->bad_plcp_cck = le32toh(stats->cck.bad_plcp);
3982 calib->fa_cck = le32toh(stats->cck.fa);
3983
3984 if (fa > 50 * rxena) {
3985 /* High false alarm count, decrease sensitivity. */
3986 DPRINTFN(2, ("CCK high false alarm count: %u\n", fa));
3987 calib->cck_state = IWN_CCK_STATE_HIFA;
3988 calib->low_fa = 0;
3989
3990 if (calib->cck_x4 > 160) {
3991 calib->noise_ref = noise_ref;
3992 if (calib->energy_cck > 2)
3993 dec(calib->energy_cck, 2, energy_min);
3994 }
3995 if (calib->cck_x4 < 160) {
3996 calib->cck_x4 = 161;
3997 needs_update = 1;
3998 } else
3999 inc(calib->cck_x4, 3, limits->max_cck_x4);
4000
4001 inc(calib->cck_mrc_x4, 3, limits->max_cck_mrc_x4);
4002
4003 } else if (fa < 5 * rxena) {
4004 /* Low false alarm count, increase sensitivity. */
4005 DPRINTFN(2, ("CCK low false alarm count: %u\n", fa));
4006 calib->cck_state = IWN_CCK_STATE_LOFA;
4007 calib->low_fa++;
4008
4009 if (calib->cck_state != IWN_CCK_STATE_INIT &&
4010 (((int32_t)calib->noise_ref - (int32_t)noise_ref) > 2 ||
4011 calib->low_fa > 100)) {
4012 inc(calib->energy_cck, 2, limits->min_energy_cck);
4013 dec(calib->cck_x4, 3, limits->min_cck_x4);
4014 dec(calib->cck_mrc_x4, 3, limits->min_cck_mrc_x4);
4015 }
4016 } else {
4017 		/* Not worth increasing or decreasing sensitivity. */
4018 DPRINTFN(2, ("CCK normal false alarm count: %u\n", fa));
4019 calib->low_fa = 0;
4020 calib->noise_ref = noise_ref;
4021
4022 if (calib->cck_state == IWN_CCK_STATE_HIFA) {
4023 /* Previous interval had many false alarms. */
4024 dec(calib->energy_cck, 8, energy_min);
4025 }
4026 calib->cck_state = IWN_CCK_STATE_INIT;
4027 }
4028
4029 if (needs_update)
4030 (void)iwn_send_sensitivity(sc);
4031 #undef dec
4032 #undef inc
4033 }
4034
4035 static int
4036 iwn_send_sensitivity(struct iwn_softc *sc)
4037 {
4038 struct iwn_calib_state *calib = &sc->calib;
4039 struct iwn_sensitivity_cmd cmd;
4040
4041 memset(&cmd, 0, sizeof cmd);
4042 cmd.which = IWN_SENSITIVITY_WORKTBL;
4043 /* OFDM modulation. */
4044 cmd.corr_ofdm_x1 = htole16(calib->ofdm_x1);
4045 cmd.corr_ofdm_mrc_x1 = htole16(calib->ofdm_mrc_x1);
4046 cmd.corr_ofdm_x4 = htole16(calib->ofdm_x4);
4047 cmd.corr_ofdm_mrc_x4 = htole16(calib->ofdm_mrc_x4);
4048 cmd.energy_ofdm = htole16(sc->limits->energy_ofdm);
4049 cmd.energy_ofdm_th = htole16(62);
4050 /* CCK modulation. */
4051 cmd.corr_cck_x4 = htole16(calib->cck_x4);
4052 cmd.corr_cck_mrc_x4 = htole16(calib->cck_mrc_x4);
4053 cmd.energy_cck = htole16(calib->energy_cck);
4054 /* Barker modulation: use default values. */
4055 cmd.corr_barker = htole16(190);
4056 cmd.corr_barker_mrc = htole16(390);
4057
4058 DPRINTFN(2, ("setting sensitivity %d/%d/%d/%d/%d/%d/%d\n",
4059 calib->ofdm_x1, calib->ofdm_mrc_x1, calib->ofdm_x4,
4060 calib->ofdm_mrc_x4, calib->cck_x4, calib->cck_mrc_x4,
4061 calib->energy_cck));
4062 return iwn_cmd(sc, IWN_CMD_SET_SENSITIVITY, &cmd, sizeof cmd, 1);
4063 }
4064
4065 /*
4066 * Set STA mode power saving level (between 0 and 5).
4067 * Level 0 is CAM (Continuously Aware Mode), 5 is for maximum power saving.
4068 */
4069 static int
4070 iwn_set_pslevel(struct iwn_softc *sc, int dtim, int level, int async)
4071 {
4072 struct iwn_pmgt_cmd cmd;
4073 const struct iwn_pmgt *pmgt;
4074 uint32_t maxp, skip_dtim;
4075 pcireg_t reg;
4076 int i;
4077
4078 /* Select which PS parameters to use. */
4079 if (dtim <= 2)
4080 pmgt = &iwn_pmgt[0][level];
4081 else if (dtim <= 10)
4082 pmgt = &iwn_pmgt[1][level];
4083 else
4084 pmgt = &iwn_pmgt[2][level];
4085
4086 memset(&cmd, 0, sizeof cmd);
4087 if (level != 0) /* not CAM */
4088 cmd.flags |= htole16(IWN_PS_ALLOW_SLEEP);
4089 if (level == 5)
4090 cmd.flags |= htole16(IWN_PS_FAST_PD);
4091 /* Retrieve PCIe Active State Power Management (ASPM). */
4092 reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
4093 sc->sc_cap_off + PCIE_LCSR);
4094 if (!(reg & PCIE_LCSR_ASPM_L0S)) /* L0s Entry disabled. */
4095 cmd.flags |= htole16(IWN_PS_PCI_PMGT);
4096 cmd.rxtimeout = htole32(pmgt->rxtimeout * 1024);
4097 cmd.txtimeout = htole32(pmgt->txtimeout * 1024);
4098
4099 if (dtim == 0) {
4100 dtim = 1;
4101 skip_dtim = 0;
4102 } else
4103 skip_dtim = pmgt->skip_dtim;
4104 if (skip_dtim != 0) {
4105 cmd.flags |= htole16(IWN_PS_SLEEP_OVER_DTIM);
4106 maxp = pmgt->intval[4];
4107 if (maxp == (uint32_t)-1)
4108 maxp = dtim * (skip_dtim + 1);
4109 else if (maxp > dtim)
4110 maxp = (maxp / dtim) * dtim;
4111 } else
4112 maxp = dtim;
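	/*
	 * Illustrative example (hypothetical values): with dtim = 3,
	 * skip_dtim = 2 and no cap in the table (intval[4] == -1), maxp
	 * becomes 3 * (2 + 1) = 9; a finite cap would instead be rounded
	 * down to a multiple of the DTIM period.
	 */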
4113 for (i = 0; i < 5; i++)
4114 cmd.intval[i] = htole32(MIN(maxp, pmgt->intval[i]));
4115
4116 DPRINTF(("setting power saving level to %d\n", level));
4117 return iwn_cmd(sc, IWN_CMD_SET_POWER_MODE, &cmd, sizeof cmd, async);
4118 }
4119
4120 int
4121 iwn5000_runtime_calib(struct iwn_softc *sc)
4122 {
4123 struct iwn5000_calib_config cmd;
4124
4125 memset(&cmd, 0, sizeof cmd);
4126 cmd.ucode.once.enable = 0xffffffff;
4127 cmd.ucode.once.start = IWN5000_CALIB_DC;
4128 DPRINTF(("configuring runtime calibration\n"));
4129 return iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof(cmd), 0);
4130 }
4131
4132 static int
4133 iwn_config_bt_coex_bluetooth(struct iwn_softc *sc)
4134 {
4135 struct iwn_bluetooth bluetooth;
4136
4137 memset(&bluetooth, 0, sizeof bluetooth);
4138 bluetooth.flags = IWN_BT_COEX_ENABLE;
4139 bluetooth.lead_time = IWN_BT_LEAD_TIME_DEF;
4140 bluetooth.max_kill = IWN_BT_MAX_KILL_DEF;
4141
4142 DPRINTF(("configuring bluetooth coexistence\n"));
4143 return iwn_cmd(sc, IWN_CMD_BT_COEX, &bluetooth, sizeof bluetooth, 0);
4144 }
4145
4146 static int
4147 iwn_config_bt_coex_prio_table(struct iwn_softc *sc)
4148 {
4149 uint8_t prio_table[16];
4150
4151 memset(&prio_table, 0, sizeof prio_table);
4152 prio_table[ 0] = 6; /* init calibration 1 */
4153 prio_table[ 1] = 7; /* init calibration 2 */
4154 prio_table[ 2] = 2; /* periodic calib low 1 */
4155 prio_table[ 3] = 3; /* periodic calib low 2 */
4156 prio_table[ 4] = 4; /* periodic calib high 1 */
4157 prio_table[ 5] = 5; /* periodic calib high 2 */
4158 prio_table[ 6] = 6; /* dtim */
4159 prio_table[ 7] = 8; /* scan52 */
4160 prio_table[ 8] = 10; /* scan24 */
4161
4162 DPRINTF(("sending priority lookup table\n"));
4163 return iwn_cmd(sc, IWN_CMD_BT_COEX_PRIO_TABLE,
4164 &prio_table, sizeof prio_table, 0);
4165 }
4166
4167 static int
4168 iwn_config_bt_coex_adv1(struct iwn_softc *sc)
4169 {
4170 int error;
4171 struct iwn_bt_adv1 d;
4172
4173 memset(&d, 0, sizeof d);
4174 d.basic.bt.flags = IWN_BT_COEX_ENABLE;
4175 d.basic.bt.lead_time = IWN_BT_LEAD_TIME_DEF;
4176 d.basic.bt.max_kill = IWN_BT_MAX_KILL_DEF;
4177 d.basic.bt.bt3_timer_t7_value = IWN_BT_BT3_T7_DEF;
4178 d.basic.bt.kill_ack_mask = IWN_BT_KILL_ACK_MASK_DEF;
4179 d.basic.bt.kill_cts_mask = IWN_BT_KILL_CTS_MASK_DEF;
4180 d.basic.bt3_prio_sample_time = IWN_BT_BT3_PRIO_SAMPLE_DEF;
4181 d.basic.bt3_timer_t2_value = IWN_BT_BT3_T2_DEF;
4182 d.basic.bt3_lookup_table[ 0] = htole32(0xaaaaaaaa); /* Normal */
4183 d.basic.bt3_lookup_table[ 1] = htole32(0xaaaaaaaa);
4184 d.basic.bt3_lookup_table[ 2] = htole32(0xaeaaaaaa);
4185 d.basic.bt3_lookup_table[ 3] = htole32(0xaaaaaaaa);
4186 d.basic.bt3_lookup_table[ 4] = htole32(0xcc00ff28);
4187 d.basic.bt3_lookup_table[ 5] = htole32(0x0000aaaa);
4188 d.basic.bt3_lookup_table[ 6] = htole32(0xcc00aaaa);
4189 d.basic.bt3_lookup_table[ 7] = htole32(0x0000aaaa);
4190 d.basic.bt3_lookup_table[ 8] = htole32(0xc0004000);
4191 d.basic.bt3_lookup_table[ 9] = htole32(0x00004000);
4192 d.basic.bt3_lookup_table[10] = htole32(0xf0005000);
4193 d.basic.bt3_lookup_table[11] = htole32(0xf0005000);
4194 	d.basic.reduce_txpower = 0; /* not implemented */
4195 d.basic.valid = IWN_BT_ALL_VALID_MASK;
4196 d.prio_boost = IWN_BT_PRIO_BOOST_DEF;
4197 d.tx_prio_boost = 0;
4198 d.rx_prio_boost = 0;
4199
4200 DPRINTF(("configuring advanced bluetooth coexistence v1\n"));
4201 error = iwn_cmd(sc, IWN_CMD_BT_COEX, &d, sizeof d, 0);
4202 if (error != 0) {
4203 aprint_error_dev(sc->sc_dev,
4204 "could not configure advanced bluetooth coexistence\n");
4205 return error;
4206 }
4207
4208 error = iwn_config_bt_coex_prio_table(sc);
4209 if (error != 0) {
4210 aprint_error_dev(sc->sc_dev,
4211 		    "could not send BT priority table\n");
4212 return error;
4213 }
4214
4215 return error;
4216 }
4217
4218 static int
4219 iwn_config(struct iwn_softc *sc)
4220 {
4221 struct iwn_ops *ops = &sc->ops;
4222 struct ieee80211com *ic = &sc->sc_ic;
4223 struct ifnet *ifp = ic->ic_ifp;
4224 uint32_t txmask;
4225 uint16_t rxchain;
4226 int error;
4227
4228 error = ops->config_bt_coex(sc);
4229 if (error != 0) {
4230 aprint_error_dev(sc->sc_dev,
4231 "could not configure bluetooth coexistence\n");
4232 return error;
4233 }
4234
4235 if (sc->hw_type == IWN_HW_REV_TYPE_6050 ||
4236 sc->hw_type == IWN_HW_REV_TYPE_6005) {
4237 /* Configure runtime DC calibration. */
4238 error = iwn5000_runtime_calib(sc);
4239 if (error != 0) {
4240 aprint_error_dev(sc->sc_dev,
4241 "could not configure runtime calibration\n");
4242 return error;
4243 }
4244 }
4245
4246 /* Configure valid TX chains for 5000 Series. */
4247 if (sc->hw_type != IWN_HW_REV_TYPE_4965) {
4248 txmask = htole32(sc->txchainmask);
4249 DPRINTF(("configuring valid TX chains 0x%x\n", txmask));
4250 error = iwn_cmd(sc, IWN5000_CMD_TX_ANT_CONFIG, &txmask,
4251 sizeof txmask, 0);
4252 if (error != 0) {
4253 aprint_error_dev(sc->sc_dev,
4254 "could not configure valid TX chains\n");
4255 return error;
4256 }
4257 }
4258
4259 /* Set mode, channel, RX filter and enable RX. */
4260 memset(&sc->rxon, 0, sizeof (struct iwn_rxon));
4261 IEEE80211_ADDR_COPY(ic->ic_myaddr, CLLADDR(ifp->if_sadl));
4262 IEEE80211_ADDR_COPY(sc->rxon.myaddr, ic->ic_myaddr);
4263 IEEE80211_ADDR_COPY(sc->rxon.wlap, ic->ic_myaddr);
4264 sc->rxon.chan = ieee80211_chan2ieee(ic, ic->ic_ibss_chan);
4265 sc->rxon.flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF);
4266 if (IEEE80211_IS_CHAN_2GHZ(ic->ic_ibss_chan))
4267 sc->rxon.flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);
4268 switch (ic->ic_opmode) {
4269 case IEEE80211_M_STA:
4270 sc->rxon.mode = IWN_MODE_STA;
4271 sc->rxon.filter = htole32(IWN_FILTER_MULTICAST);
4272 break;
4273 case IEEE80211_M_MONITOR:
4274 sc->rxon.mode = IWN_MODE_MONITOR;
4275 sc->rxon.filter = htole32(IWN_FILTER_MULTICAST |
4276 IWN_FILTER_CTL | IWN_FILTER_PROMISC);
4277 break;
4278 default:
4279 		/* Should not get here. */
4280 break;
4281 }
4282 sc->rxon.cck_mask = 0x0f; /* not yet negotiated */
4283 sc->rxon.ofdm_mask = 0xff; /* not yet negotiated */
4284 sc->rxon.ht_single_mask = 0xff;
4285 sc->rxon.ht_dual_mask = 0xff;
4286 sc->rxon.ht_triple_mask = 0xff;
4287 rxchain =
4288 IWN_RXCHAIN_VALID(sc->rxchainmask) |
4289 IWN_RXCHAIN_MIMO_COUNT(2) |
4290 IWN_RXCHAIN_IDLE_COUNT(2);
4291 sc->rxon.rxchain = htole16(rxchain);
4292 DPRINTF(("setting configuration\n"));
4293 error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 0);
4294 if (error != 0) {
4295 aprint_error_dev(sc->sc_dev,
4296 "RXON command failed\n");
4297 return error;
4298 }
4299
4300 if ((error = iwn_add_broadcast_node(sc, 0)) != 0) {
4301 aprint_error_dev(sc->sc_dev,
4302 "could not add broadcast node\n");
4303 return error;
4304 }
4305
4306 /* Configuration has changed, set TX power accordingly. */
4307 if ((error = ops->set_txpower(sc, 0)) != 0) {
4308 aprint_error_dev(sc->sc_dev,
4309 "could not set TX power\n");
4310 return error;
4311 }
4312
4313 if ((error = iwn_set_critical_temp(sc)) != 0) {
4314 aprint_error_dev(sc->sc_dev,
4315 "could not set critical temperature\n");
4316 return error;
4317 }
4318
4319 /* Set power saving level to CAM during initialization. */
4320 if ((error = iwn_set_pslevel(sc, 0, 0, 0)) != 0) {
4321 aprint_error_dev(sc->sc_dev,
4322 "could not set power saving level\n");
4323 return error;
4324 }
4325 return 0;
4326 }
4327
4328 static int
4329 iwn_scan(struct iwn_softc *sc, uint16_t flags)
4330 {
4331 struct ieee80211com *ic = &sc->sc_ic;
4332 struct iwn_scan_hdr *hdr;
4333 struct iwn_cmd_data *tx;
4334 struct iwn_scan_essid *essid;
4335 struct iwn_scan_chan *chan;
4336 struct ieee80211_frame *wh;
4337 struct ieee80211_rateset *rs;
4338 struct ieee80211_channel *c;
4339 uint8_t *buf, *frm;
4340 uint16_t rxchain;
4341 uint8_t txant;
4342 int buflen, error;
4343
4344 buf = malloc(IWN_SCAN_MAXSZ, M_DEVBUF, M_NOWAIT | M_ZERO);
4345 if (buf == NULL) {
4346 aprint_error_dev(sc->sc_dev,
4347 "could not allocate buffer for scan command\n");
4348 return ENOMEM;
4349 }
4350 hdr = (struct iwn_scan_hdr *)buf;
4351 /*
4352 * Move to the next channel if no frames are received within 10ms
4353 * after sending the probe request.
4354 */
4355 hdr->quiet_time = htole16(10); /* timeout in milliseconds */
4356 hdr->quiet_threshold = htole16(1); /* min # of packets */
4357
4358 /* Select antennas for scanning. */
4359 rxchain =
4360 IWN_RXCHAIN_VALID(sc->rxchainmask) |
4361 IWN_RXCHAIN_FORCE_MIMO_SEL(sc->rxchainmask) |
4362 IWN_RXCHAIN_DRIVER_FORCE;
4363 if ((flags & IEEE80211_CHAN_5GHZ) &&
4364 sc->hw_type == IWN_HW_REV_TYPE_4965) {
4365 /* Ant A must be avoided in 5GHz because of an HW bug. */
4366 rxchain |= IWN_RXCHAIN_FORCE_SEL(IWN_ANT_BC);
4367 } else /* Use all available RX antennas. */
4368 rxchain |= IWN_RXCHAIN_FORCE_SEL(sc->rxchainmask);
4369 hdr->rxchain = htole16(rxchain);
4370 hdr->filter = htole32(IWN_FILTER_MULTICAST | IWN_FILTER_BEACON);
4371
4372 tx = (struct iwn_cmd_data *)(hdr + 1);
4373 tx->flags = htole32(IWN_TX_AUTO_SEQ);
4374 tx->id = sc->broadcast_id;
4375 tx->lifetime = htole32(IWN_LIFETIME_INFINITE);
4376
4377 if (flags & IEEE80211_CHAN_5GHZ) {
4378 hdr->crc_threshold = 0xffff;
4379 /* Send probe requests at 6Mbps. */
4380 tx->plcp = iwn_rates[IWN_RIDX_OFDM6].plcp;
4381 rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
4382 } else {
4383 hdr->flags = htole32(IWN_RXON_24GHZ | IWN_RXON_AUTO);
4384 /* Send probe requests at 1Mbps. */
4385 tx->plcp = iwn_rates[IWN_RIDX_CCK1].plcp;
4386 tx->rflags = IWN_RFLAG_CCK;
4387 rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
4388 }
4389 /* Use the first valid TX antenna. */
4390 txant = IWN_LSB(sc->txchainmask);
4391 tx->rflags |= IWN_RFLAG_ANT(txant);
4392
4393 essid = (struct iwn_scan_essid *)(tx + 1);
4394 if (ic->ic_des_esslen != 0) {
4395 essid[0].id = IEEE80211_ELEMID_SSID;
4396 essid[0].len = ic->ic_des_esslen;
4397 memcpy(essid[0].data, ic->ic_des_essid, ic->ic_des_esslen);
4398 }
4399 /*
4400 * Build a probe request frame. Most of the following code is a
4401 * copy & paste of what is done in net80211.
4402 */
4403 wh = (struct ieee80211_frame *)(essid + 20);
4404 wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
4405 IEEE80211_FC0_SUBTYPE_PROBE_REQ;
4406 wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
4407 IEEE80211_ADDR_COPY(wh->i_addr1, etherbroadcastaddr);
4408 IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_myaddr);
4409 IEEE80211_ADDR_COPY(wh->i_addr3, etherbroadcastaddr);
4410 *(uint16_t *)&wh->i_dur[0] = 0; /* filled by HW */
4411 *(uint16_t *)&wh->i_seq[0] = 0; /* filled by HW */
4412
4413 frm = (uint8_t *)(wh + 1);
4414 frm = ieee80211_add_ssid(frm, NULL, 0);
4415 frm = ieee80211_add_rates(frm, rs);
4416 #ifndef IEEE80211_NO_HT
4417 if (ic->ic_flags & IEEE80211_F_HTON)
4418 frm = ieee80211_add_htcaps(frm, ic);
4419 #endif
4420 if (rs->rs_nrates > IEEE80211_RATE_SIZE)
4421 frm = ieee80211_add_xrates(frm, rs);
4422
4423 /* Set length of probe request. */
4424 tx->len = htole16(frm - (uint8_t *)wh);
4425
4426 chan = (struct iwn_scan_chan *)frm;
4427 for (c = &ic->ic_channels[1];
4428 c <= &ic->ic_channels[IEEE80211_CHAN_MAX]; c++) {
4429 if ((c->ic_flags & flags) != flags)
4430 continue;
4431
4432 chan->chan = htole16(ieee80211_chan2ieee(ic, c));
4433 DPRINTFN(2, ("adding channel %d\n", chan->chan));
4434 chan->flags = 0;
4435 if (!(c->ic_flags & IEEE80211_CHAN_PASSIVE))
4436 chan->flags |= htole32(IWN_CHAN_ACTIVE);
4437 if (ic->ic_des_esslen != 0)
4438 chan->flags |= htole32(IWN_CHAN_NPBREQS(1));
4439 chan->dsp_gain = 0x6e;
4440 if (IEEE80211_IS_CHAN_5GHZ(c)) {
4441 chan->rf_gain = 0x3b;
4442 chan->active = htole16(24);
4443 chan->passive = htole16(110);
4444 } else {
4445 chan->rf_gain = 0x28;
4446 chan->active = htole16(36);
4447 chan->passive = htole16(120);
4448 }
4449 hdr->nchan++;
4450 chan++;
4451 }
4452
4453 buflen = (uint8_t *)chan - buf;
4454 hdr->len = htole16(buflen);
4455
4456 DPRINTF(("sending scan command nchan=%d\n", hdr->nchan));
4457 error = iwn_cmd(sc, IWN_CMD_SCAN, buf, buflen, 1);
4458 free(buf, M_DEVBUF);
4459 return error;
4460 }
4461
4462 static int
4463 iwn_auth(struct iwn_softc *sc)
4464 {
4465 struct iwn_ops *ops = &sc->ops;
4466 struct ieee80211com *ic = &sc->sc_ic;
4467 struct ieee80211_node *ni = ic->ic_bss;
4468 int error;
4469
4470 /* Update adapter configuration. */
4471 IEEE80211_ADDR_COPY(sc->rxon.bssid, ni->ni_bssid);
4472 sc->rxon.chan = ieee80211_chan2ieee(ic, ni->ni_chan);
4473 sc->rxon.flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF);
4474 if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan))
4475 sc->rxon.flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ);
4476 if (ic->ic_flags & IEEE80211_F_SHSLOT)
4477 sc->rxon.flags |= htole32(IWN_RXON_SHSLOT);
4478 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
4479 sc->rxon.flags |= htole32(IWN_RXON_SHPREAMBLE);
4480 switch (ic->ic_curmode) {
4481 case IEEE80211_MODE_11A:
4482 sc->rxon.cck_mask = 0;
4483 sc->rxon.ofdm_mask = 0x15;
4484 break;
4485 case IEEE80211_MODE_11B:
4486 sc->rxon.cck_mask = 0x03;
4487 sc->rxon.ofdm_mask = 0;
4488 break;
4489 default: /* Assume 802.11b/g. */
4490 sc->rxon.cck_mask = 0x0f;
4491 sc->rxon.ofdm_mask = 0x15;
4492 }
4493 DPRINTF(("rxon chan %d flags %x cck %x ofdm %x\n", sc->rxon.chan,
4494 sc->rxon.flags, sc->rxon.cck_mask, sc->rxon.ofdm_mask));
4495 error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 1);
4496 if (error != 0) {
4497 aprint_error_dev(sc->sc_dev,
4498 "RXON command failed\n");
4499 return error;
4500 }
4501
4502 /* Configuration has changed, set TX power accordingly. */
4503 if ((error = ops->set_txpower(sc, 1)) != 0) {
4504 aprint_error_dev(sc->sc_dev,
4505 "could not set TX power\n");
4506 return error;
4507 }
4508 /*
4509 * Reconfiguring RXON clears the firmware nodes table so we must
4510 * add the broadcast node again.
4511 */
4512 if ((error = iwn_add_broadcast_node(sc, 1)) != 0) {
4513 aprint_error_dev(sc->sc_dev,
4514 "could not add broadcast node\n");
4515 return error;
4516 }
4517 return 0;
4518 }
4519
4520 static int
4521 iwn_run(struct iwn_softc *sc)
4522 {
4523 struct iwn_ops *ops = &sc->ops;
4524 struct ieee80211com *ic = &sc->sc_ic;
4525 struct ieee80211_node *ni = ic->ic_bss;
4526 struct iwn_node_info node;
4527 int error;
4528
4529 if (ic->ic_opmode == IEEE80211_M_MONITOR) {
4530 /* Link LED blinks while monitoring. */
4531 iwn_set_led(sc, IWN_LED_LINK, 5, 5);
4532 return 0;
4533 }
4534 if ((error = iwn_set_timing(sc, ni)) != 0) {
4535 aprint_error_dev(sc->sc_dev,
4536 "could not set timing\n");
4537 return error;
4538 }
4539
4540 /* Update adapter configuration. */
4541 sc->rxon.associd = htole16(IEEE80211_AID(ni->ni_associd));
4542 /* Short preamble and slot time are negotiated when associating. */
4543 sc->rxon.flags &= ~htole32(IWN_RXON_SHPREAMBLE | IWN_RXON_SHSLOT);
4544 if (ic->ic_flags & IEEE80211_F_SHSLOT)
4545 sc->rxon.flags |= htole32(IWN_RXON_SHSLOT);
4546 if (ic->ic_flags & IEEE80211_F_SHPREAMBLE)
4547 sc->rxon.flags |= htole32(IWN_RXON_SHPREAMBLE);
4548 sc->rxon.filter |= htole32(IWN_FILTER_BSS);
4549 DPRINTF(("rxon chan %d flags %x\n", sc->rxon.chan, sc->rxon.flags));
4550 error = iwn_cmd(sc, IWN_CMD_RXON, &sc->rxon, sc->rxonsz, 1);
4551 if (error != 0) {
4552 aprint_error_dev(sc->sc_dev,
4553 "could not update configuration\n");
4554 return error;
4555 }
4556
4557 /* Configuration has changed, set TX power accordingly. */
4558 if ((error = ops->set_txpower(sc, 1)) != 0) {
4559 aprint_error_dev(sc->sc_dev,
4560 "could not set TX power\n");
4561 return error;
4562 }
4563
4564 /* Fake a join to initialize the TX rate. */
4565 ((struct iwn_node *)ni)->id = IWN_ID_BSS;
4566 iwn_newassoc(ni, 1);
4567
4568 /* Add BSS node. */
4569 memset(&node, 0, sizeof node);
4570 IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr);
4571 node.id = IWN_ID_BSS;
4572 #ifdef notyet
4573 node.htflags = htole32(IWN_AMDPU_SIZE_FACTOR(3) |
4574 IWN_AMDPU_DENSITY(5)); /* 2us */
4575 #endif
4576 DPRINTF(("adding BSS node\n"));
4577 error = ops->add_node(sc, &node, 1);
4578 if (error != 0) {
4579 aprint_error_dev(sc->sc_dev,
4580 "could not add BSS node\n");
4581 return error;
4582 }
4583 DPRINTF(("setting link quality for node %d\n", node.id));
4584 if ((error = iwn_set_link_quality(sc, ni)) != 0) {
4585 aprint_error_dev(sc->sc_dev,
4586 "could not setup link quality for node %d\n", node.id);
4587 return error;
4588 }
4589
4590 if ((error = iwn_init_sensitivity(sc)) != 0) {
4591 aprint_error_dev(sc->sc_dev,
4592 "could not set sensitivity\n");
4593 return error;
4594 }
4595 /* Start periodic calibration timer. */
4596 sc->calib.state = IWN_CALIB_STATE_ASSOC;
4597 sc->calib_cnt = 0;
4598 callout_schedule(&sc->calib_to, hz/2);
4599
4600 /* Link LED always on while associated. */
4601 iwn_set_led(sc, IWN_LED_LINK, 0, 1);
4602 return 0;
4603 }
4604
4605 #ifdef IWN_HWCRYPTO
4606 /*
4607 * We support CCMP hardware encryption/decryption of unicast frames only.
4608 * HW support for TKIP really sucks. We should let TKIP die anyway.
4609 */
4610 static int
4611 iwn_set_key(struct ieee80211com *ic, struct ieee80211_node *ni,
4612 struct ieee80211_key *k)
4613 {
4614 struct iwn_softc *sc = ic->ic_softc;
4615 struct iwn_ops *ops = &sc->ops;
4616 struct iwn_node *wn = (void *)ni;
4617 struct iwn_node_info node;
4618 uint16_t kflags;
4619
4620 if ((k->k_flags & IEEE80211_KEY_GROUP) ||
4621 k->k_cipher != IEEE80211_CIPHER_CCMP)
4622 return ieee80211_set_key(ic, ni, k);
4623
4624 kflags = IWN_KFLAG_CCMP | IWN_KFLAG_MAP | IWN_KFLAG_KID(k->k_id);
4625 if (k->k_flags & IEEE80211_KEY_GROUP)
4626 kflags |= IWN_KFLAG_GROUP;
4627
4628 memset(&node, 0, sizeof node);
4629 node.id = (k->k_flags & IEEE80211_KEY_GROUP) ?
4630 sc->broadcast_id : wn->id;
4631 node.control = IWN_NODE_UPDATE;
4632 node.flags = IWN_FLAG_SET_KEY;
4633 node.kflags = htole16(kflags);
4634 node.kid = k->k_id;
4635 memcpy(node.key, k->k_key, k->k_len);
4636 DPRINTF(("set key id=%d for node %d\n", k->k_id, node.id));
4637 return ops->add_node(sc, &node, 1);
4638 }
4639
4640 static void
4641 iwn_delete_key(struct ieee80211com *ic, struct ieee80211_node *ni,
4642 struct ieee80211_key *k)
4643 {
4644 struct iwn_softc *sc = ic->ic_softc;
4645 struct iwn_ops *ops = &sc->ops;
4646 struct iwn_node *wn = (void *)ni;
4647 struct iwn_node_info node;
4648
4649 if ((k->k_flags & IEEE80211_KEY_GROUP) ||
4650 k->k_cipher != IEEE80211_CIPHER_CCMP) {
4651 /* See comment about other ciphers above. */
4652 ieee80211_delete_key(ic, ni, k);
4653 return;
4654 }
4655 if (ic->ic_state != IEEE80211_S_RUN)
4656 return; /* Nothing to do. */
4657 memset(&node, 0, sizeof node);
4658 node.id = (k->k_flags & IEEE80211_KEY_GROUP) ?
4659 sc->broadcast_id : wn->id;
4660 node.control = IWN_NODE_UPDATE;
4661 node.flags = IWN_FLAG_SET_KEY;
4662 node.kflags = htole16(IWN_KFLAG_INVALID);
4663 node.kid = 0xff;
4664 DPRINTF(("delete keys for node %d\n", node.id));
4665 (void)ops->add_node(sc, &node, 1);
4666 }
4667 #endif
4668
4669 /* XXX Added for NetBSD (copied from rev 1.39). */
4670
4671 static int
4672 iwn_wme_update(struct ieee80211com *ic)
4673 {
4674 #define IWN_EXP2(v) htole16((1 << (v)) - 1)
4675 #define IWN_USEC(v) htole16(IEEE80211_TXOP_TO_US(v))
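/*
 * IWN_EXP2 converts a logarithmic contention window value into the actual
 * window size, e.g. logcwmin = 4 gives a cwmin of (1 << 4) - 1 = 15.
 */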
4676 struct iwn_softc *sc = ic->ic_ifp->if_softc;
4677 const struct wmeParams *wmep;
4678 struct iwn_edca_params cmd;
4679 int ac;
4680
4681 	/* Don't override default WME values if WME is not actually enabled. */
4682 if (!(ic->ic_flags & IEEE80211_F_WME))
4683 return 0;
4684 cmd.flags = 0;
4685 for (ac = 0; ac < WME_NUM_AC; ac++) {
4686 wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac];
4687 cmd.ac[ac].aifsn = wmep->wmep_aifsn;
4688 cmd.ac[ac].cwmin = IWN_EXP2(wmep->wmep_logcwmin);
4689 cmd.ac[ac].cwmax = IWN_EXP2(wmep->wmep_logcwmax);
4690 cmd.ac[ac].txoplimit = IWN_USEC(wmep->wmep_txopLimit);
4691
4692 DPRINTF(("setting WME for queue %d aifsn=%d cwmin=%d cwmax=%d "
4693 "txop=%d\n", ac, cmd.ac[ac].aifsn,
4694 cmd.ac[ac].cwmin,
4695 cmd.ac[ac].cwmax, cmd.ac[ac].txoplimit));
4696 }
4697 return iwn_cmd(sc, IWN_CMD_EDCA_PARAMS, &cmd, sizeof cmd, 1);
4698 #undef IWN_USEC
4699 #undef IWN_EXP2
4700 }
4701
4702 #ifndef IEEE80211_NO_HT
4703 /*
4704 * This function is called by upper layer when an ADDBA request is received
4705 * from another STA and before the ADDBA response is sent.
4706 */
4707 static int
4708 iwn_ampdu_rx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
4709 uint8_t tid)
4710 {
4711 struct ieee80211_rx_ba *ba = &ni->ni_rx_ba[tid];
4712 struct iwn_softc *sc = ic->ic_softc;
4713 struct iwn_ops *ops = &sc->ops;
4714 struct iwn_node *wn = (void *)ni;
4715 struct iwn_node_info node;
4716
4717 memset(&node, 0, sizeof node);
4718 node.id = wn->id;
4719 node.control = IWN_NODE_UPDATE;
4720 node.flags = IWN_FLAG_SET_ADDBA;
4721 node.addba_tid = tid;
4722 node.addba_ssn = htole16(ba->ba_winstart);
4723 DPRINTFN(2, ("ADDBA RA=%d TID=%d SSN=%d\n", wn->id, tid,
4724 ba->ba_winstart));
4725 return ops->add_node(sc, &node, 1);
4726 }
4727
4728 /*
4729 * This function is called by upper layer on teardown of an HT-immediate
4730 * Block Ack agreement (e.g. upon receipt of a DELBA frame).
4731 */
4732 static void
4733 iwn_ampdu_rx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
4734 uint8_t tid)
4735 {
4736 struct iwn_softc *sc = ic->ic_softc;
4737 struct iwn_ops *ops = &sc->ops;
4738 struct iwn_node *wn = (void *)ni;
4739 struct iwn_node_info node;
4740
4741 memset(&node, 0, sizeof node);
4742 node.id = wn->id;
4743 node.control = IWN_NODE_UPDATE;
4744 node.flags = IWN_FLAG_SET_DELBA;
4745 node.delba_tid = tid;
4746 DPRINTFN(2, ("DELBA RA=%d TID=%d\n", wn->id, tid));
4747 (void)ops->add_node(sc, &node, 1);
4748 }
4749
4750 /*
4751 * This function is called by upper layer when an ADDBA response is received
4752 * from another STA.
4753 */
4754 static int
4755 iwn_ampdu_tx_start(struct ieee80211com *ic, struct ieee80211_node *ni,
4756 uint8_t tid)
4757 {
4758 struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid];
4759 struct iwn_softc *sc = ic->ic_softc;
4760 struct iwn_ops *ops = &sc->ops;
4761 struct iwn_node *wn = (void *)ni;
4762 struct iwn_node_info node;
4763 int error;
4764
4765 /* Enable TX for the specified RA/TID. */
4766 wn->disable_tid &= ~(1 << tid);
4767 memset(&node, 0, sizeof node);
4768 node.id = wn->id;
4769 node.control = IWN_NODE_UPDATE;
4770 node.flags = IWN_FLAG_SET_DISABLE_TID;
4771 node.disable_tid = htole16(wn->disable_tid);
4772 error = ops->add_node(sc, &node, 1);
4773 if (error != 0)
4774 return error;
4775
4776 if ((error = iwn_nic_lock(sc)) != 0)
4777 return error;
4778 ops->ampdu_tx_start(sc, ni, tid, ba->ba_winstart);
4779 iwn_nic_unlock(sc);
4780 return 0;
4781 }
4782
4783 static void
4784 iwn_ampdu_tx_stop(struct ieee80211com *ic, struct ieee80211_node *ni,
4785 uint8_t tid)
4786 {
4787 struct ieee80211_tx_ba *ba = &ni->ni_tx_ba[tid];
4788 struct iwn_softc *sc = ic->ic_softc;
4789 struct iwn_ops *ops = &sc->ops;
4790
4791 if (iwn_nic_lock(sc) != 0)
4792 return;
4793 ops->ampdu_tx_stop(sc, tid, ba->ba_winstart);
4794 iwn_nic_unlock(sc);
4795 }
4796
4797 static void
4798 iwn4965_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni,
4799 uint8_t tid, uint16_t ssn)
4800 {
4801 struct iwn_node *wn = (void *)ni;
4802 int qid = 7 + tid;
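	/*
	 * On the 4965, aggregation queues follow the 7 standard TX queues
	 * (4 EDCA + command + 2 HCCA), so TID n maps to TX queue 7 + n.
	 */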
4803
4804 /* Stop TX scheduler while we're changing its configuration. */
4805 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
4806 IWN4965_TXQ_STATUS_CHGACT);
4807
4808 /* Assign RA/TID translation to the queue. */
4809 iwn_mem_write_2(sc, sc->sched_base + IWN4965_SCHED_TRANS_TBL(qid),
4810 wn->id << 4 | tid);
4811
4812 /* Enable chain-building mode for the queue. */
4813 iwn_prph_setbits(sc, IWN4965_SCHED_QCHAIN_SEL, 1 << qid);
4814
4815 /* Set starting sequence number from the ADDBA request. */
4816 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
4817 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn);
4818
4819 /* Set scheduler window size. */
4820 iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid),
4821 IWN_SCHED_WINSZ);
4822 /* Set scheduler frame limit. */
4823 iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid) + 4,
4824 IWN_SCHED_LIMIT << 16);
4825
4826 /* Enable interrupts for the queue. */
4827 iwn_prph_setbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid);
4828
4829 /* Mark the queue as active. */
4830 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
4831 IWN4965_TXQ_STATUS_ACTIVE | IWN4965_TXQ_STATUS_AGGR_ENA |
4832 iwn_tid2fifo[tid] << 1);
4833 }
4834
4835 static void
4836 iwn4965_ampdu_tx_stop(struct iwn_softc *sc, uint8_t tid, uint16_t ssn)
4837 {
4838 int qid = 7 + tid;
4839
4840 /* Stop TX scheduler while we're changing its configuration. */
4841 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
4842 IWN4965_TXQ_STATUS_CHGACT);
4843
4844 /* Set starting sequence number from the ADDBA request. */
4845 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
4846 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn);
4847
4848 /* Disable interrupts for the queue. */
4849 iwn_prph_clrbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid);
4850
4851 /* Mark the queue as inactive. */
4852 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
4853 IWN4965_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid] << 1);
4854 }
4855
4856 static void
4857 iwn5000_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni,
4858 uint8_t tid, uint16_t ssn)
4859 {
4860 struct iwn_node *wn = (void *)ni;
4861 int qid = 10 + tid;
4862
4863 /* Stop TX scheduler while we're changing its configuration. */
4864 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
4865 IWN5000_TXQ_STATUS_CHGACT);
4866
4867 /* Assign RA/TID translation to the queue. */
4868 iwn_mem_write_2(sc, sc->sched_base + IWN5000_SCHED_TRANS_TBL(qid),
4869 wn->id << 4 | tid);
4870
4871 /* Enable chain-building mode for the queue. */
4872 iwn_prph_setbits(sc, IWN5000_SCHED_QCHAIN_SEL, 1 << qid);
4873
4874 /* Enable aggregation for the queue. */
4875 iwn_prph_setbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid);
4876
4877 /* Set starting sequence number from the ADDBA request. */
4878 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
4879 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn);
4880
4881 /* Set scheduler window size and frame limit. */
4882 iwn_mem_write(sc, sc->sched_base + IWN5000_SCHED_QUEUE_OFFSET(qid) + 4,
4883 IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ);
4884
4885 /* Enable interrupts for the queue. */
4886 iwn_prph_setbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid);
4887
4888 /* Mark the queue as active. */
4889 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
4890 IWN5000_TXQ_STATUS_ACTIVE | iwn_tid2fifo[tid]);
4891 }
4892
4893 static void
4894 iwn5000_ampdu_tx_stop(struct iwn_softc *sc, uint8_t tid, uint16_t ssn)
4895 {
4896 int qid = 10 + tid;
4897
4898 /* Stop TX scheduler while we're changing its configuration. */
4899 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
4900 IWN5000_TXQ_STATUS_CHGACT);
4901
4902 /* Disable aggregation for the queue. */
4903 iwn_prph_clrbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid);
4904
4905 /* Set starting sequence number from the ADDBA request. */
4906 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff));
4907 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn);
4908
4909 /* Disable interrupts for the queue. */
4910 iwn_prph_clrbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid);
4911
4912 /* Mark the queue as inactive. */
4913 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
4914 IWN5000_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid]);
4915 }
4916 #endif /* !IEEE80211_NO_HT */
4917
4918 /*
4919 * Query calibration tables from the initialization firmware. We do this
4920 * only once at first boot. Called from a process context.
4921 */
4922 static int
4923 iwn5000_query_calibration(struct iwn_softc *sc)
4924 {
4925 struct iwn5000_calib_config cmd;
4926 int error;
4927
4928 memset(&cmd, 0, sizeof cmd);
4929 cmd.ucode.once.enable = 0xffffffff;
4930 cmd.ucode.once.start = 0xffffffff;
4931 cmd.ucode.once.send = 0xffffffff;
4932 cmd.ucode.flags = 0xffffffff;
4933 DPRINTF(("sending calibration query\n"));
4934 error = iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof cmd, 0);
4935 if (error != 0)
4936 return error;
4937
4938 /* Wait at most two seconds for calibration to complete. */
4939 if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE))
4940 error = tsleep(sc, PCATCH, "iwncal", 2 * hz);
4941 return error;
4942 }
4943
4944 /*
4945 * Send calibration results to the runtime firmware. These results were
4946 * obtained on first boot from the initialization firmware.
4947 */
4948 static int
4949 iwn5000_send_calibration(struct iwn_softc *sc)
4950 {
4951 int idx, error;
4952
4953 for (idx = 0; idx < 5; idx++) {
4954 if (sc->calibcmd[idx].buf == NULL)
4955 continue; /* No results available. */
4956 DPRINTF(("send calibration result idx=%d len=%d\n",
4957 idx, sc->calibcmd[idx].len));
4958 error = iwn_cmd(sc, IWN_CMD_PHY_CALIB, sc->calibcmd[idx].buf,
4959 sc->calibcmd[idx].len, 0);
4960 if (error != 0) {
4961 aprint_error_dev(sc->sc_dev,
4962 "could not send calibration result\n");
4963 return error;
4964 }
4965 }
4966 return 0;
4967 }
4968
4969 static int
4970 iwn5000_send_wimax_coex(struct iwn_softc *sc)
4971 {
4972 struct iwn5000_wimax_coex wimax;
4973
4974 #ifdef notyet
4975 if (sc->hw_type == IWN_HW_REV_TYPE_6050) {
4976 /* Enable WiMAX coexistence for combo adapters. */
4977 wimax.flags =
4978 IWN_WIMAX_COEX_ASSOC_WA_UNMASK |
4979 IWN_WIMAX_COEX_UNASSOC_WA_UNMASK |
4980 IWN_WIMAX_COEX_STA_TABLE_VALID |
4981 IWN_WIMAX_COEX_ENABLE;
4982 memcpy(wimax.events, iwn6050_wimax_events,
4983 sizeof iwn6050_wimax_events);
4984 } else
4985 #endif
4986 {
4987 /* Disable WiMAX coexistence. */
4988 wimax.flags = 0;
4989 memset(wimax.events, 0, sizeof wimax.events);
4990 }
4991 DPRINTF(("Configuring WiMAX coexistence\n"));
4992 return iwn_cmd(sc, IWN5000_CMD_WIMAX_COEX, &wimax, sizeof wimax, 0);
4993 }
4994
4995 /*
4996 * This function is called after the runtime firmware notifies us of its
4997 * readiness (called in a process context).
4998 */
4999 static int
5000 iwn4965_post_alive(struct iwn_softc *sc)
5001 {
5002 int error, qid;
5003
5004 if ((error = iwn_nic_lock(sc)) != 0)
5005 return error;
5006
5007 /* Clear TX scheduler state in SRAM. */
5008 sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR);
5009 iwn_mem_set_region_4(sc, sc->sched_base + IWN4965_SCHED_CTX_OFF, 0,
5010 IWN4965_SCHED_CTX_LEN / sizeof (uint32_t));
5011
5012 /* Set physical address of TX scheduler rings (1KB aligned). */
5013 iwn_prph_write(sc, IWN4965_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10);
5014
5015 IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY);
5016
5017 /* Disable chain mode for all our 16 queues. */
5018 iwn_prph_write(sc, IWN4965_SCHED_QCHAIN_SEL, 0);
5019
5020 for (qid = 0; qid < IWN4965_NTXQUEUES; qid++) {
5021 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), 0);
5022 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0);
5023
5024 /* Set scheduler window size. */
5025 iwn_mem_write(sc, sc->sched_base +
5026 IWN4965_SCHED_QUEUE_OFFSET(qid), IWN_SCHED_WINSZ);
5027 /* Set scheduler frame limit. */
5028 iwn_mem_write(sc, sc->sched_base +
5029 IWN4965_SCHED_QUEUE_OFFSET(qid) + 4,
5030 IWN_SCHED_LIMIT << 16);
5031 }
5032
5033 /* Enable interrupts for all our 16 queues. */
5034 iwn_prph_write(sc, IWN4965_SCHED_INTR_MASK, 0xffff);
5035 /* Identify TX FIFO rings (0-7). */
5036 iwn_prph_write(sc, IWN4965_SCHED_TXFACT, 0xff);
5037
5038 /* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */
5039 for (qid = 0; qid < 7; qid++) {
5040 static uint8_t qid2fifo[] = { 3, 2, 1, 0, 4, 5, 6 };
5041 iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid),
5042 IWN4965_TXQ_STATUS_ACTIVE | qid2fifo[qid] << 1);
5043 }
5044 iwn_nic_unlock(sc);
5045 return 0;
5046 }
5047
5048 /*
5049 * This function is called after the initialization or runtime firmware
5050 * notifies us of its readiness (called in a process context).
5051 */
5052 static int
5053 iwn5000_post_alive(struct iwn_softc *sc)
5054 {
5055 int error, qid;
5056
5057 /* Switch to using ICT interrupt mode. */
5058 iwn5000_ict_reset(sc);
5059
5060 if ((error = iwn_nic_lock(sc)) != 0)
5061 return error;
5062
5063 /* Clear TX scheduler state in SRAM. */
5064 sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR);
5065 iwn_mem_set_region_4(sc, sc->sched_base + IWN5000_SCHED_CTX_OFF, 0,
5066 IWN5000_SCHED_CTX_LEN / sizeof (uint32_t));
5067
5068 /* Set physical address of TX scheduler rings (1KB aligned). */
5069 iwn_prph_write(sc, IWN5000_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10);
5070
5071 IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY);
5072
5073 	/* Enable chain mode for all queues except the command queue. */
5074 iwn_prph_write(sc, IWN5000_SCHED_QCHAIN_SEL, 0xfffef);
5075 iwn_prph_write(sc, IWN5000_SCHED_AGGR_SEL, 0);
5076
5077 for (qid = 0; qid < IWN5000_NTXQUEUES; qid++) {
5078 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), 0);
5079 IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0);
5080
5081 iwn_mem_write(sc, sc->sched_base +
5082 IWN5000_SCHED_QUEUE_OFFSET(qid), 0);
5083 /* Set scheduler window size and frame limit. */
5084 iwn_mem_write(sc, sc->sched_base +
5085 IWN5000_SCHED_QUEUE_OFFSET(qid) + 4,
5086 IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ);
5087 }
5088
5089 /* Enable interrupts for all our 20 queues. */
5090 iwn_prph_write(sc, IWN5000_SCHED_INTR_MASK, 0xfffff);
5091 /* Identify TX FIFO rings (0-7). */
5092 iwn_prph_write(sc, IWN5000_SCHED_TXFACT, 0xff);
5093
5094 /* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */
5095 for (qid = 0; qid < 7; qid++) {
5096 static uint8_t qid2fifo[] = { 3, 2, 1, 0, 7, 5, 6 };
5097 iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid),
5098 IWN5000_TXQ_STATUS_ACTIVE | qid2fifo[qid]);
5099 }
5100 iwn_nic_unlock(sc);
5101
5102 /* Configure WiMAX coexistence for combo adapters. */
5103 error = iwn5000_send_wimax_coex(sc);
5104 if (error != 0) {
5105 aprint_error_dev(sc->sc_dev,
5106 "could not configure WiMAX coexistence\n");
5107 return error;
5108 }
5109 if (sc->hw_type != IWN_HW_REV_TYPE_5150) {
5110 struct iwn5000_phy_calib_crystal cmd;
5111
5112 /* Perform crystal calibration. */
5113 memset(&cmd, 0, sizeof cmd);
5114 cmd.code = IWN5000_PHY_CALIB_CRYSTAL;
5115 cmd.ngroups = 1;
5116 cmd.isvalid = 1;
5117 cmd.cap_pin[0] = le32toh(sc->eeprom_crystal) & 0xff;
5118 cmd.cap_pin[1] = (le32toh(sc->eeprom_crystal) >> 16) & 0xff;
5119 DPRINTF(("sending crystal calibration %d, %d\n",
5120 cmd.cap_pin[0], cmd.cap_pin[1]));
5121 error = iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0);
5122 if (error != 0) {
5123 aprint_error_dev(sc->sc_dev,
5124 "crystal calibration failed\n");
5125 return error;
5126 }
5127 }
5128 if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE)) {
5129 /* Query calibration from the initialization firmware. */
5130 if ((error = iwn5000_query_calibration(sc)) != 0) {
5131 aprint_error_dev(sc->sc_dev,
5132 "could not query calibration\n");
5133 return error;
5134 }
5135 /*
5136 		 * We have the calibration results now; reboot with the
5137 		 * runtime firmware (call ourselves recursively).
5138 */
5139 iwn_hw_stop(sc);
5140 error = iwn_hw_init(sc);
5141 } else {
5142 /* Send calibration results to runtime firmware. */
5143 error = iwn5000_send_calibration(sc);
5144 }
5145 return error;
5146 }
5147
5148 /*
5149 * The firmware boot code is small and is intended to be copied directly into
5150 * the NIC internal memory (no DMA transfer).
5151 */
5152 static int
5153 iwn4965_load_bootcode(struct iwn_softc *sc, const uint8_t *ucode, int size)
5154 {
5155 int error, ntries;
5156
5157 size /= sizeof (uint32_t);
5158
5159 if ((error = iwn_nic_lock(sc)) != 0)
5160 return error;
5161
5162 /* Copy microcode image into NIC memory. */
5163 iwn_prph_write_region_4(sc, IWN_BSM_SRAM_BASE,
5164 (const uint32_t *)ucode, size);
5165
5166 iwn_prph_write(sc, IWN_BSM_WR_MEM_SRC, 0);
5167 iwn_prph_write(sc, IWN_BSM_WR_MEM_DST, IWN_FW_TEXT_BASE);
5168 iwn_prph_write(sc, IWN_BSM_WR_DWCOUNT, size);
5169
5170 /* Start boot load now. */
5171 iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START);
5172
5173 /* Wait for transfer to complete. */
5174 for (ntries = 0; ntries < 1000; ntries++) {
5175 if (!(iwn_prph_read(sc, IWN_BSM_WR_CTRL) &
5176 IWN_BSM_WR_CTRL_START))
5177 break;
5178 DELAY(10);
5179 }
5180 if (ntries == 1000) {
5181 aprint_error_dev(sc->sc_dev,
5182 "could not load boot firmware\n");
5183 iwn_nic_unlock(sc);
5184 return ETIMEDOUT;
5185 }
5186
5187 /* Enable boot after power up. */
5188 iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START_EN);
5189
5190 iwn_nic_unlock(sc);
5191 return 0;
5192 }
5193
5194 static int
5195 iwn4965_load_firmware(struct iwn_softc *sc)
5196 {
5197 struct iwn_fw_info *fw = &sc->fw;
5198 struct iwn_dma_info *dma = &sc->fw_dma;
5199 int error;
5200
5201 /* Copy initialization sections into pre-allocated DMA-safe memory. */
5202 memcpy(dma->vaddr, fw->init.data, fw->init.datasz);
5203 bus_dmamap_sync(sc->sc_dmat, dma->map, 0, fw->init.datasz,
5204 BUS_DMASYNC_PREWRITE);
5205 memcpy((char *)dma->vaddr + IWN4965_FW_DATA_MAXSZ,
5206 fw->init.text, fw->init.textsz);
5207 bus_dmamap_sync(sc->sc_dmat, dma->map, IWN4965_FW_DATA_MAXSZ,
5208 fw->init.textsz, BUS_DMASYNC_PREWRITE);
5209
5210 /* Tell adapter where to find initialization sections. */
5211 if ((error = iwn_nic_lock(sc)) != 0)
5212 return error;
5213 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4);
5214 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->init.datasz);
5215 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR,
5216 (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4);
5217 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE, fw->init.textsz);
5218 iwn_nic_unlock(sc);
5219
5220 /* Load firmware boot code. */
5221 error = iwn4965_load_bootcode(sc, fw->boot.text, fw->boot.textsz);
5222 if (error != 0) {
5223 aprint_error_dev(sc->sc_dev,
5224 "could not load boot firmware\n");
5225 return error;
5226 }
5227 /* Now press "execute". */
5228 IWN_WRITE(sc, IWN_RESET, 0);
5229
5230 /* Wait at most one second for first alive notification. */
5231 if ((error = tsleep(sc, PCATCH, "iwninit", hz)) != 0) {
5232 aprint_error_dev(sc->sc_dev,
5233 "timeout waiting for adapter to initialize\n");
5234 return error;
5235 }
5236
5237 /* Retrieve current temperature for initial TX power calibration. */
5238 sc->rawtemp = sc->ucode_info.temp[3].chan20MHz;
5239 sc->temp = iwn4965_get_temperature(sc);
5240
5241 /* Copy runtime sections into pre-allocated DMA-safe memory. */
5242 memcpy(dma->vaddr, fw->main.data, fw->main.datasz);
5243 bus_dmamap_sync(sc->sc_dmat, dma->map, 0, fw->main.datasz,
5244 BUS_DMASYNC_PREWRITE);
5245 memcpy((char *)dma->vaddr + IWN4965_FW_DATA_MAXSZ,
5246 fw->main.text, fw->main.textsz);
5247 bus_dmamap_sync(sc->sc_dmat, dma->map, IWN4965_FW_DATA_MAXSZ,
5248 fw->main.textsz, BUS_DMASYNC_PREWRITE);
5249
5250 /* Tell adapter where to find runtime sections. */
5251 if ((error = iwn_nic_lock(sc)) != 0)
5252 return error;
5253 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4);
5254 iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->main.datasz);
5255 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR,
5256 (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4);
5257 iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE,
5258 IWN_FW_UPDATED | fw->main.textsz);
5259 iwn_nic_unlock(sc);
5260
5261 return 0;
5262 }
5263
5264 static int
5265 iwn5000_load_firmware_section(struct iwn_softc *sc, uint32_t dst,
5266 const uint8_t *section, int size)
5267 {
5268 struct iwn_dma_info *dma = &sc->fw_dma;
5269 int error;
5270
5271 /* Copy firmware section into pre-allocated DMA-safe memory. */
5272 memcpy(dma->vaddr, section, size);
5273 bus_dmamap_sync(sc->sc_dmat, dma->map, 0, size, BUS_DMASYNC_PREWRITE);
5274
5275 if ((error = iwn_nic_lock(sc)) != 0)
5276 return error;
5277
5278 IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL),
5279 IWN_FH_TX_CONFIG_DMA_PAUSE);
5280
5281 IWN_WRITE(sc, IWN_FH_SRAM_ADDR(IWN_SRVC_DMACHNL), dst);
5282 IWN_WRITE(sc, IWN_FH_TFBD_CTRL0(IWN_SRVC_DMACHNL),
5283 IWN_LOADDR(dma->paddr));
5284 IWN_WRITE(sc, IWN_FH_TFBD_CTRL1(IWN_SRVC_DMACHNL),
5285 IWN_HIADDR(dma->paddr) << 28 | size);
5286 IWN_WRITE(sc, IWN_FH_TXBUF_STATUS(IWN_SRVC_DMACHNL),
5287 IWN_FH_TXBUF_STATUS_TBNUM(1) |
5288 IWN_FH_TXBUF_STATUS_TBIDX(1) |
5289 IWN_FH_TXBUF_STATUS_TFBD_VALID);
5290
5291 /* Kick Flow Handler to start DMA transfer. */
5292 IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL),
5293 IWN_FH_TX_CONFIG_DMA_ENA | IWN_FH_TX_CONFIG_CIRQ_HOST_ENDTFD);
5294
5295 iwn_nic_unlock(sc);
5296
5297 /* Wait at most five seconds for FH DMA transfer to complete. */
5298 return tsleep(sc, PCATCH, "iwninit", 5 * hz);
5299 }
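
/*
 * Note (an interpretation of the code above): the section is copied
 * through the service DMA channel using a single TX buffer descriptor,
 * and the final tsleep() is expected to be ended by a wakeup(sc) from
 * the interrupt handler once the flow handler signals completion; a
 * return value of 0 therefore means the section has been written to
 * SRAM at "dst".
 */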
5300
5301 static int
5302 iwn5000_load_firmware(struct iwn_softc *sc)
5303 {
5304 struct iwn_fw_part *fw;
5305 int error;
5306
5307 /* Load the initialization firmware on first boot only. */
5308 fw = (sc->sc_flags & IWN_FLAG_CALIB_DONE) ?
5309 &sc->fw.main : &sc->fw.init;
5310
5311 error = iwn5000_load_firmware_section(sc, IWN_FW_TEXT_BASE,
5312 fw->text, fw->textsz);
5313 if (error != 0) {
5314 aprint_error_dev(sc->sc_dev,
5315 "could not load firmware %s section\n", ".text");
5316 return error;
5317 }
5318 error = iwn5000_load_firmware_section(sc, IWN_FW_DATA_BASE,
5319 fw->data, fw->datasz);
5320 if (error != 0) {
5321 aprint_error_dev(sc->sc_dev,
5322 "could not load firmware %s section\n", ".data");
5323 return error;
5324 }
5325
5326 /* Now press "execute". */
5327 IWN_WRITE(sc, IWN_RESET, 0);
5328 return 0;
5329 }
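
/*
 * Flow note: the init image runs the runtime calibration; once the
 * calibration results have been received, IWN_FLAG_CALIB_DONE is set
 * elsewhere in the driver, so subsequent loads performed by this
 * function use the runtime image directly.
 */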
5330
5331 /*
5332 * Extract text and data sections from a legacy firmware image.
5333 */
5334 static int
5335 iwn_read_firmware_leg(struct iwn_softc *sc, struct iwn_fw_info *fw)
5336 {
5337 const uint32_t *ptr;
5338 size_t hdrlen = 24;
5339 uint32_t rev;
5340
5341 ptr = (const uint32_t *)fw->data;
5342 rev = le32toh(*ptr++);
5343
5344 /* Check firmware API version. */
5345 if (IWN_FW_API(rev) <= 1) {
5346 aprint_error_dev(sc->sc_dev,
5347 "bad firmware, need API version >=2\n");
5348 return EINVAL;
5349 }
5350 if (IWN_FW_API(rev) >= 3) {
5351 /* Skip build number (version 2 header). */
5352 hdrlen += 4;
5353 ptr++;
5354 }
5355 if (fw->size < hdrlen) {
5356 aprint_error_dev(sc->sc_dev,
5357 "firmware too short: %zu bytes\n", fw->size);
5358 return EINVAL;
5359 }
5360 fw->main.textsz = le32toh(*ptr++);
5361 fw->main.datasz = le32toh(*ptr++);
5362 fw->init.textsz = le32toh(*ptr++);
5363 fw->init.datasz = le32toh(*ptr++);
5364 fw->boot.textsz = le32toh(*ptr++);
5365
5366 /* Check that all firmware sections fit. */
5367 if (fw->size < hdrlen + fw->main.textsz + fw->main.datasz +
5368 fw->init.textsz + fw->init.datasz + fw->boot.textsz) {
5369 aprint_error_dev(sc->sc_dev,
5370 "firmware too short: %zu bytes\n", fw->size);
5371 return EINVAL;
5372 }
5373
5374 /* Get pointers to firmware sections. */
5375 fw->main.text = (const uint8_t *)ptr;
5376 fw->main.data = fw->main.text + fw->main.textsz;
5377 fw->init.text = fw->main.data + fw->main.datasz;
5378 fw->init.data = fw->init.text + fw->init.textsz;
5379 fw->boot.text = fw->init.data + fw->init.datasz;
5380 return 0;
5381 }
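
/*
 * For reference, the legacy image layout consumed above is, roughly
 * (a sketch only; no such structure is declared by this driver):
 *
 *	uint32_t rev;		API version (and related flags)
 *	uint32_t build;		present only when API version >= 3
 *	uint32_t main_textsz, main_datasz;
 *	uint32_t init_textsz, init_datasz;
 *	uint32_t boot_textsz;
 *	followed by the sections back to back, in the order
 *	main.text, main.data, init.text, init.data, boot.text.
 */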
5382
5383 /*
5384 * Extract text and data sections from a TLV firmware image.
5385 */
5386 static int
5387 iwn_read_firmware_tlv(struct iwn_softc *sc, struct iwn_fw_info *fw,
5388 uint16_t alt)
5389 {
5390 const struct iwn_fw_tlv_hdr *hdr;
5391 const struct iwn_fw_tlv *tlv;
5392 const uint8_t *ptr, *end;
5393 uint64_t altmask;
5394 uint32_t len;
5395
5396 if (fw->size < sizeof (*hdr)) {
5397 aprint_error_dev(sc->sc_dev,
5398 "firmware too short: %zu bytes\n", fw->size);
5399 return EINVAL;
5400 }
5401 hdr = (const struct iwn_fw_tlv_hdr *)fw->data;
5402 if (hdr->signature != htole32(IWN_FW_SIGNATURE)) {
5403 aprint_error_dev(sc->sc_dev,
5404 "bad firmware signature 0x%08x\n", le32toh(hdr->signature));
5405 return EINVAL;
5406 }
5407 DPRINTF(("FW: \"%.64s\", build 0x%x\n", hdr->descr,
5408 le32toh(hdr->build)));
5409
5410 /*
5411 * Select the closest supported alternative that is less than
5412 * or equal to the specified one.
5413 */
5414 altmask = le64toh(hdr->altmask);
5415 while (alt > 0 && !(altmask & (1ULL << alt)))
5416 alt--; /* Downgrade. */
5417 DPRINTF(("using alternative %d\n", alt));
5418
5419 ptr = (const uint8_t *)(hdr + 1);
5420 end = (const uint8_t *)(fw->data + fw->size);
5421
5422 /* Parse type-length-value fields. */
5423 while (ptr + sizeof (*tlv) <= end) {
5424 tlv = (const struct iwn_fw_tlv *)ptr;
5425 len = le32toh(tlv->len);
5426
5427 ptr += sizeof (*tlv);
5428 if (ptr + len > end) {
5429 aprint_error_dev(sc->sc_dev,
5430 "firmware too short: %zu bytes\n", fw->size);
5431 return EINVAL;
5432 }
5433 /* Skip other alternatives. */
5434 if (tlv->alt != 0 && tlv->alt != htole16(alt))
5435 goto next;
5436
5437 switch (le16toh(tlv->type)) {
5438 case IWN_FW_TLV_MAIN_TEXT:
5439 fw->main.text = ptr;
5440 fw->main.textsz = len;
5441 break;
5442 case IWN_FW_TLV_MAIN_DATA:
5443 fw->main.data = ptr;
5444 fw->main.datasz = len;
5445 break;
5446 case IWN_FW_TLV_INIT_TEXT:
5447 fw->init.text = ptr;
5448 fw->init.textsz = len;
5449 break;
5450 case IWN_FW_TLV_INIT_DATA:
5451 fw->init.data = ptr;
5452 fw->init.datasz = len;
5453 break;
5454 case IWN_FW_TLV_BOOT_TEXT:
5455 fw->boot.text = ptr;
5456 fw->boot.textsz = len;
5457 break;
5458 default:
5459 DPRINTF(("TLV type %d not handled\n",
5460 le16toh(tlv->type)));
5461 break;
5462 }
5463 next: /* TLV fields are 32-bit aligned. */
5464 ptr += (len + 3) & ~3;
5465 }
5466 return 0;
5467 }
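
/*
 * For reference, a TLV image as parsed above consists of a
 * struct iwn_fw_tlv_hdr (signature, description string, build number,
 * alternatives mask) followed by a sequence of struct iwn_fw_tlv
 * records, each carrying a type, an alternative number and a length,
 * with every payload padded to a 32-bit boundary ((len + 3) & ~3, as
 * in the loop above).  Records whose alternative is neither 0 nor the
 * selected one are ignored.
 */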
5468
5469 static int
5470 iwn_read_firmware(struct iwn_softc *sc)
5471 {
5472 struct iwn_fw_info *fw = &sc->fw;
5473 firmware_handle_t fwh;
5474 int error;
5475
5476 /* Initialize for error returns */
5477 fw->data = NULL;
5478 fw->size = 0;
5479
5480 /* Open firmware image. */
5481 if ((error = firmware_open("if_iwn", sc->fwname, &fwh)) != 0) {
5482 aprint_error_dev(sc->sc_dev,
5483 "could not get firmware handle %s\n", sc->fwname);
5484 return error;
5485 }
5486 fw->size = firmware_get_size(fwh);
5487 if (fw->size < sizeof (uint32_t)) {
5488 aprint_error_dev(sc->sc_dev,
5489 "firmware too short: %zu bytes\n", fw->size);
5490 firmware_close(fwh);
5491 return EINVAL;
5492 }
5493
5494 /* Read the firmware. */
5495 fw->data = firmware_malloc(fw->size);
5496 if (fw->data == NULL) {
5497 aprint_error_dev(sc->sc_dev,
5498 "not enough memory to store firmware %s\n", sc->fwname);
5499 firmware_close(fwh);
5500 return ENOMEM;
5501 }
5502 error = firmware_read(fwh, 0, fw->data, fw->size);
5503 firmware_close(fwh);
5504 if (error != 0) {
5505 aprint_error_dev(sc->sc_dev,
5506 "could not read firmware %s\n", sc->fwname);
5507 goto out;
5508 }
5509
5510 /* Retrieve text and data sections. */
5511 if (*(const uint32_t *)fw->data != 0) /* Legacy image. */
5512 error = iwn_read_firmware_leg(sc, fw);
5513 else
5514 error = iwn_read_firmware_tlv(sc, fw, 1);
5515 if (error != 0) {
5516 aprint_error_dev(sc->sc_dev,
5517 "could not read firmware sections\n");
5518 goto out;
5519 }
5520
5521 /* Make sure text and data sections fit in hardware memory. */
5522 if (fw->main.textsz > sc->fw_text_maxsz ||
5523 fw->main.datasz > sc->fw_data_maxsz ||
5524 fw->init.textsz > sc->fw_text_maxsz ||
5525 fw->init.datasz > sc->fw_data_maxsz ||
5526 fw->boot.textsz > IWN_FW_BOOT_TEXT_MAXSZ ||
5527 (fw->boot.textsz & 3) != 0) {
5528 aprint_error_dev(sc->sc_dev,
5529 "firmware sections too large\n");
5530 goto out;
5531 }
5532
5533 /* We can proceed with loading the firmware. */
5534 return 0;
5535 out:
5536 firmware_free(fw->data, fw->size);
5537 fw->data = NULL;
5538 fw->size = 0;
5539 return error ? error : EINVAL;
5540 }
5541
5542 static int
5543 iwn_clock_wait(struct iwn_softc *sc)
5544 {
5545 int ntries;
5546
5547 /* Set "initialization complete" bit. */
5548 IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE);
5549
5550 /* Wait for clock stabilization. */
5551 for (ntries = 0; ntries < 2500; ntries++) {
5552 if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_MAC_CLOCK_READY)
5553 return 0;
5554 DELAY(10);
5555 }
5556 aprint_error_dev(sc->sc_dev,
5557 "timeout waiting for clock stabilization\n");
5558 return ETIMEDOUT;
5559 }
5560
5561 static int
5562 iwn_apm_init(struct iwn_softc *sc)
5563 {
5564 pcireg_t reg;
5565 int error;
5566
5567 /* Disable L0s exit timer (NMI bug workaround). */
5568 IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_DIS_L0S_TIMER);
5569 /* Don't wait for ICH L0s (ICH bug workaround). */
5570 IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_L1A_NO_L0S_RX);
5571
5572 /* Set FH wait threshold to max (HW bug under stress workaround). */
5573 IWN_SETBITS(sc, IWN_DBG_HPET_MEM, 0xffff0000);
5574
5575 /* Enable HAP INTA to move adapter from L1a to L0s. */
5576 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_HAP_WAKE_L1A);
5577
5578 /* Retrieve PCIe Active State Power Management (ASPM). */
5579 reg = pci_conf_read(sc->sc_pct, sc->sc_pcitag,
5580 sc->sc_cap_off + PCIE_LCSR);
5581 /* Workaround for HW instability in PCIe L0->L0s->L1 transition. */
5582 if (reg & PCIE_LCSR_ASPM_L1) /* L1 Entry enabled. */
5583 IWN_SETBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA);
5584 else
5585 IWN_CLRBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA);
5586
5587 if (sc->hw_type != IWN_HW_REV_TYPE_4965 &&
5588 sc->hw_type <= IWN_HW_REV_TYPE_1000)
5589 IWN_SETBITS(sc, IWN_ANA_PLL, IWN_ANA_PLL_INIT);
5590
5591 /* Wait for clock stabilization before accessing prph. */
5592 if ((error = iwn_clock_wait(sc)) != 0)
5593 return error;
5594
5595 if ((error = iwn_nic_lock(sc)) != 0)
5596 return error;
5597 if (sc->hw_type == IWN_HW_REV_TYPE_4965) {
5598 /* Enable DMA and BSM (Bootstrap State Machine). */
5599 iwn_prph_write(sc, IWN_APMG_CLK_EN,
5600 IWN_APMG_CLK_CTRL_DMA_CLK_RQT |
5601 IWN_APMG_CLK_CTRL_BSM_CLK_RQT);
5602 } else {
5603 /* Enable DMA. */
5604 iwn_prph_write(sc, IWN_APMG_CLK_EN,
5605 IWN_APMG_CLK_CTRL_DMA_CLK_RQT);
5606 }
5607 DELAY(20);
5608 /* Disable L1-Active. */
5609 iwn_prph_setbits(sc, IWN_APMG_PCI_STT, IWN_APMG_PCI_STT_L1A_DIS);
5610 iwn_nic_unlock(sc);
5611
5612 return 0;
5613 }
5614
5615 static void
5616 iwn_apm_stop_master(struct iwn_softc *sc)
5617 {
5618 int ntries;
5619
5620 /* Stop busmaster DMA activity. */
5621 IWN_SETBITS(sc, IWN_RESET, IWN_RESET_STOP_MASTER);
5622 for (ntries = 0; ntries < 100; ntries++) {
5623 if (IWN_READ(sc, IWN_RESET) & IWN_RESET_MASTER_DISABLED)
5624 return;
5625 DELAY(10);
5626 }
5627 aprint_error_dev(sc->sc_dev,
5628 "timeout waiting for master\n");
5629 }
5630
5631 static void
5632 iwn_apm_stop(struct iwn_softc *sc)
5633 {
5634 iwn_apm_stop_master(sc);
5635
5636 /* Reset the entire device. */
5637 IWN_SETBITS(sc, IWN_RESET, IWN_RESET_SW);
5638 DELAY(10);
5639 /* Clear "initialization complete" bit. */
5640 IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE);
5641 }
5642
5643 static int
5644 iwn4965_nic_config(struct iwn_softc *sc)
5645 {
5646 if (IWN_RFCFG_TYPE(sc->rfcfg) == 1) {
5647 /*
5648 * I don't believe this to be correct but this is what the
5649 * vendor driver is doing. Probably the bits should not be
5650 * shifted in IWN_RFCFG_*.
5651 */
5652 IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
5653 IWN_RFCFG_TYPE(sc->rfcfg) |
5654 IWN_RFCFG_STEP(sc->rfcfg) |
5655 IWN_RFCFG_DASH(sc->rfcfg));
5656 }
5657 IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
5658 IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI);
5659 return 0;
5660 }
5661
5662 static int
5663 iwn5000_nic_config(struct iwn_softc *sc)
5664 {
5665 uint32_t tmp;
5666 int error;
5667
5668 if (IWN_RFCFG_TYPE(sc->rfcfg) < 3) {
5669 IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
5670 IWN_RFCFG_TYPE(sc->rfcfg) |
5671 IWN_RFCFG_STEP(sc->rfcfg) |
5672 IWN_RFCFG_DASH(sc->rfcfg));
5673 }
5674 IWN_SETBITS(sc, IWN_HW_IF_CONFIG,
5675 IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI);
5676
5677 if ((error = iwn_nic_lock(sc)) != 0)
5678 return error;
5679 iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_EARLY_PWROFF_DIS);
5680
5681 if (sc->hw_type == IWN_HW_REV_TYPE_1000) {
5682 /*
5683 * Select first Switching Voltage Regulator (1.32V) to
5684 * solve a stability issue related to noisy DC2DC line
5685 * in the silicon of 1000 Series.
5686 */
5687 tmp = iwn_prph_read(sc, IWN_APMG_DIGITAL_SVR);
5688 tmp &= ~IWN_APMG_DIGITAL_SVR_VOLTAGE_MASK;
5689 tmp |= IWN_APMG_DIGITAL_SVR_VOLTAGE_1_32;
5690 iwn_prph_write(sc, IWN_APMG_DIGITAL_SVR, tmp);
5691 }
5692 iwn_nic_unlock(sc);
5693
5694 if (sc->sc_flags & IWN_FLAG_INTERNAL_PA) {
5695 /* Use internal power amplifier only. */
5696 IWN_WRITE(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_RADIO_2X2_IPA);
5697 }
5698 if ((sc->hw_type == IWN_HW_REV_TYPE_6050 ||
5699 sc->hw_type == IWN_HW_REV_TYPE_6005) && sc->calib_ver >= 6) {
5700 /* Indicate that ROM calibration version is >=6. */
5701 IWN_SETBITS(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_CALIB_VER6);
5702 }
5703 if (sc->hw_type == IWN_HW_REV_TYPE_6005)
5704 IWN_SETBITS(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_6050_1X2);
5705 return 0;
5706 }
5707
5708 /*
5709 * Take NIC ownership over Intel Active Management Technology (AMT).
5710 */
5711 static int
5712 iwn_hw_prepare(struct iwn_softc *sc)
5713 {
5714 int ntries;
5715
5716 /* Check if hardware is ready. */
5717 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY);
5718 for (ntries = 0; ntries < 5; ntries++) {
5719 if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
5720 IWN_HW_IF_CONFIG_NIC_READY)
5721 return 0;
5722 DELAY(10);
5723 }
5724
5725 /* Hardware not ready, force into ready state. */
5726 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_PREPARE);
5727 for (ntries = 0; ntries < 15000; ntries++) {
5728 if (!(IWN_READ(sc, IWN_HW_IF_CONFIG) &
5729 IWN_HW_IF_CONFIG_PREPARE_DONE))
5730 break;
5731 DELAY(10);
5732 }
5733 if (ntries == 15000)
5734 return ETIMEDOUT;
5735
5736 /* Hardware should be ready now. */
5737 IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY);
5738 for (ntries = 0; ntries < 5; ntries++) {
5739 if (IWN_READ(sc, IWN_HW_IF_CONFIG) &
5740 IWN_HW_IF_CONFIG_NIC_READY)
5741 return 0;
5742 DELAY(10);
5743 }
5744 return ETIMEDOUT;
5745 }
5746
5747 static int
5748 iwn_hw_init(struct iwn_softc *sc)
5749 {
5750 struct iwn_ops *ops = &sc->ops;
5751 int error, chnl, qid;
5752
5753 /* Clear pending interrupts. */
5754 IWN_WRITE(sc, IWN_INT, 0xffffffff);
5755
5756 if ((error = iwn_apm_init(sc)) != 0) {
5757 aprint_error_dev(sc->sc_dev,
5758 "could not power ON adapter\n");
5759 return error;
5760 }
5761
5762 /* Select VMAIN power source. */
5763 if ((error = iwn_nic_lock(sc)) != 0)
5764 return error;
5765 iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_PWR_SRC_MASK);
5766 iwn_nic_unlock(sc);
5767
5768 /* Perform adapter-specific initialization. */
5769 if ((error = ops->nic_config(sc)) != 0)
5770 return error;
5771
5772 /* Initialize RX ring. */
5773 if ((error = iwn_nic_lock(sc)) != 0)
5774 return error;
5775 IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0);
5776 IWN_WRITE(sc, IWN_FH_RX_WPTR, 0);
5777 /* Set physical address of RX ring (256-byte aligned). */
5778 IWN_WRITE(sc, IWN_FH_RX_BASE, sc->rxq.desc_dma.paddr >> 8);
5779 /* Set physical address of RX status (16-byte aligned). */
5780 IWN_WRITE(sc, IWN_FH_STATUS_WPTR, sc->rxq.stat_dma.paddr >> 4);
5781 /* Enable RX. */
5782 IWN_WRITE(sc, IWN_FH_RX_CONFIG,
5783 IWN_FH_RX_CONFIG_ENA |
5784 IWN_FH_RX_CONFIG_IGN_RXF_EMPTY | /* HW bug workaround */
5785 IWN_FH_RX_CONFIG_IRQ_DST_HOST |
5786 IWN_FH_RX_CONFIG_SINGLE_FRAME |
5787 IWN_FH_RX_CONFIG_RB_TIMEOUT(0) |
5788 IWN_FH_RX_CONFIG_NRBD(IWN_RX_RING_COUNT_LOG));
5789 iwn_nic_unlock(sc);
5790 IWN_WRITE(sc, IWN_FH_RX_WPTR, (IWN_RX_RING_COUNT - 1) & ~7);
5791
5792 if ((error = iwn_nic_lock(sc)) != 0)
5793 return error;
5794
5795 /* Initialize TX scheduler. */
5796 iwn_prph_write(sc, sc->sched_txfact_addr, 0);
5797
5798 /* Set physical address of "keep warm" page (16-byte aligned). */
5799 IWN_WRITE(sc, IWN_FH_KW_ADDR, sc->kw_dma.paddr >> 4);
5800
5801 /* Initialize TX rings. */
5802 for (qid = 0; qid < sc->ntxqs; qid++) {
5803 struct iwn_tx_ring *txq = &sc->txq[qid];
5804
5805 /* Set physical address of TX ring (256-byte aligned). */
5806 IWN_WRITE(sc, IWN_FH_CBBC_QUEUE(qid),
5807 txq->desc_dma.paddr >> 8);
5808 }
5809 iwn_nic_unlock(sc);
5810
5811 /* Enable DMA channels. */
5812 for (chnl = 0; chnl < sc->ndmachnls; chnl++) {
5813 IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl),
5814 IWN_FH_TX_CONFIG_DMA_ENA |
5815 IWN_FH_TX_CONFIG_DMA_CREDIT_ENA);
5816 }
5817
5818 /* Clear "radio off" and "commands blocked" bits. */
5819 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
5820 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CMD_BLOCKED);
5821
5822 /* Clear pending interrupts. */
5823 IWN_WRITE(sc, IWN_INT, 0xffffffff);
5824 /* Enable interrupt coalescing. */
5825 IWN_WRITE(sc, IWN_INT_COALESCING, 512 / 8);
5826 /* Enable interrupts. */
5827 IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask);
5828
5829 /* _Really_ make sure "radio off" bit is cleared! */
5830 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
5831 IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL);
5832
5833 /* Enable shadow registers. */
5834 if (sc->hw_type >= IWN_HW_REV_TYPE_6000)
5835 IWN_SETBITS(sc, IWN_SHADOW_REG_CTRL, 0x800fffff);
5836
5837 if ((error = ops->load_firmware(sc)) != 0) {
5838 aprint_error_dev(sc->sc_dev,
5839 "could not load firmware\n");
5840 return error;
5841 }
5842 /* Wait at most one second for firmware alive notification. */
5843 if ((error = tsleep(sc, PCATCH, "iwninit", hz)) != 0) {
5844 aprint_error_dev(sc->sc_dev,
5845 "timeout waiting for adapter to initialize\n");
5846 return error;
5847 }
5848 /* Do post-firmware initialization. */
5849 return ops->post_alive(sc);
5850 }
5851
5852 static void
5853 iwn_hw_stop(struct iwn_softc *sc)
5854 {
5855 int chnl, qid, ntries;
5856
5857 IWN_WRITE(sc, IWN_RESET, IWN_RESET_NEVO);
5858
5859 /* Disable interrupts. */
5860 IWN_WRITE(sc, IWN_INT_MASK, 0);
5861 IWN_WRITE(sc, IWN_INT, 0xffffffff);
5862 IWN_WRITE(sc, IWN_FH_INT, 0xffffffff);
5863 sc->sc_flags &= ~IWN_FLAG_USE_ICT;
5864
5865 /* Make sure we no longer hold the NIC lock. */
5866 iwn_nic_unlock(sc);
5867
5868 /* Stop TX scheduler. */
5869 iwn_prph_write(sc, sc->sched_txfact_addr, 0);
5870
5871 /* Stop all DMA channels. */
5872 if (iwn_nic_lock(sc) == 0) {
5873 for (chnl = 0; chnl < sc->ndmachnls; chnl++) {
5874 IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl), 0);
5875 for (ntries = 0; ntries < 200; ntries++) {
5876 if (IWN_READ(sc, IWN_FH_TX_STATUS) &
5877 IWN_FH_TX_STATUS_IDLE(chnl))
5878 break;
5879 DELAY(10);
5880 }
5881 }
5882 iwn_nic_unlock(sc);
5883 }
5884
5885 /* Stop RX ring. */
5886 iwn_reset_rx_ring(sc, &sc->rxq);
5887
5888 /* Reset all TX rings. */
5889 for (qid = 0; qid < sc->ntxqs; qid++)
5890 iwn_reset_tx_ring(sc, &sc->txq[qid]);
5891
5892 if (iwn_nic_lock(sc) == 0) {
5893 iwn_prph_write(sc, IWN_APMG_CLK_DIS,
5894 IWN_APMG_CLK_CTRL_DMA_CLK_RQT);
5895 iwn_nic_unlock(sc);
5896 }
5897 DELAY(5);
5898 /* Power OFF adapter. */
5899 iwn_apm_stop(sc);
5900 }
5901
5902 static int
5903 iwn_init(struct ifnet *ifp)
5904 {
5905 struct iwn_softc *sc = ifp->if_softc;
5906 struct ieee80211com *ic = &sc->sc_ic;
5907 int error;
5908
5909 mutex_enter(&sc->sc_mtx);
5910 if (sc->sc_flags & IWN_FLAG_HW_INITED)
5911 goto out;
5912 if ((error = iwn_hw_prepare(sc)) != 0) {
5913 aprint_error_dev(sc->sc_dev,
5914 "hardware not ready\n");
5915 goto fail;
5916 }
5917
5918 /* Check that the radio is not disabled by hardware switch. */
5919 if (!(IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL)) {
5920 aprint_error_dev(sc->sc_dev,
5921 "radio is disabled by hardware switch\n");
5922 error = EPERM; /* :-) */
5923 goto fail;
5924 }
5925
5926 /* Read firmware images from the filesystem. */
5927 if ((error = iwn_read_firmware(sc)) != 0) {
5928 aprint_error_dev(sc->sc_dev,
5929 "could not read firmware\n");
5930 goto fail;
5931 }
5932
5933 /* Initialize interrupt mask to default value. */
5934 sc->int_mask = IWN_INT_MASK_DEF;
5935 sc->sc_flags &= ~IWN_FLAG_USE_ICT;
5936
5937 /* Initialize hardware and upload firmware. */
5938 KASSERT(sc->fw.data != NULL && sc->fw.size > 0);
5939 error = iwn_hw_init(sc);
5940 firmware_free(sc->fw.data, sc->fw.size);
5941 sc->fw.data = NULL;
5942 sc->fw.size = 0;
5943 if (error != 0) {
5944 aprint_error_dev(sc->sc_dev,
5945 "could not initialize hardware\n");
5946 goto fail;
5947 }
5948
5949 /* Configure adapter now that it is ready. */
5950 if ((error = iwn_config(sc)) != 0) {
5951 aprint_error_dev(sc->sc_dev,
5952 "could not configure device\n");
5953 goto fail;
5954 }
5955
5956 ifp->if_flags &= ~IFF_OACTIVE;
5957 ifp->if_flags |= IFF_RUNNING;
5958
5959 if (ic->ic_opmode != IEEE80211_M_MONITOR)
5960 ieee80211_begin_scan(ic, 0);
5961 else
5962 ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
5963
5964 sc->sc_flags |= IWN_FLAG_HW_INITED;
5965 out:
5966 mutex_exit(&sc->sc_mtx);
5967 return 0;
5968
5969 fail: mutex_exit(&sc->sc_mtx);
5970 iwn_stop(ifp, 1);
5971 return error;
5972 }
5973
5974 static void
5975 iwn_stop(struct ifnet *ifp, int disable)
5976 {
5977 struct iwn_softc *sc = ifp->if_softc;
5978 struct ieee80211com *ic = &sc->sc_ic;
5979
5980 if (!disable)
5981 mutex_enter(&sc->sc_mtx);
5982 sc->sc_flags &= ~IWN_FLAG_HW_INITED;
5983 ifp->if_timer = sc->sc_tx_timer = 0;
5984 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
5985
5986 ieee80211_new_state(ic, IEEE80211_S_INIT, -1);
5987
5988 /* Power OFF hardware. */
5989 iwn_hw_stop(sc);
5990
5991 if (!disable)
5992 mutex_exit(&sc->sc_mtx);
5993 }
5994
5995 /*
5996 * XXX MCLGETI alternative
5997 *
5998 * With IWN_USE_RBUF defined, receive buffers come from the local rbuf
5999 * cache while free entries remain, with MEXTMALLOC used as a fallback.
6000 * Without IWN_USE_RBUF defined, MEXTMALLOC is used exclusively.
6001 * The MCLGET4K code is used for testing an alternative mbuf cache.
6002 */
6003
6004 static struct mbuf *
6005 MCLGETIalt(struct iwn_softc *sc, int how,
6006 struct ifnet *ifp __unused, u_int size)
6007 {
6008 struct mbuf *m;
6009 #ifdef IWN_USE_RBUF
6010 struct iwn_rbuf *rbuf;
6011 #endif
6012
6013 MGETHDR(m, how, MT_DATA);
6014 if (m == NULL)
6015 return NULL;
6016
6017 #ifdef IWN_USE_RBUF
6018 if (sc->rxq.nb_free_entries > 0 &&
6019 (rbuf = iwn_alloc_rbuf(sc)) != NULL) {
6020 /* Attach buffer to mbuf header. */
6021 MEXTADD(m, rbuf->vaddr, size, 0, iwn_free_rbuf, rbuf);
6022 m->m_flags |= M_EXT_RW;
6023 } else {
6025 MEXTMALLOC(m, size, how);
6026 if ((m->m_flags & M_EXT) == 0) {
6027 m_freem(m);
6028 return NULL;
6029 }
6030 }
6031
6032 #else
6033 #ifdef MCLGET4K
6034 if (size == 4096)
6035 MCLGET4K(m, how);
6036 else
6037 panic("size must be 4k");
6038 #else
6039 MEXTMALLOC(m, size, how);
6040 #endif
6041 if ((m->m_flags & M_EXT) == 0) {
6042 m_freem(m);
6043 return NULL;
6044 }
6045 #endif
6046
6047 return m;
6048 }
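
#if 0
/*
 * Example use of MCLGETIalt() (a sketch only, not compiled): allocating
 * a replacement RX buffer of IWN_RBUF_SIZE bytes, as the RX path does.
 * The caller remains responsible for loading the mbuf into a DMA map
 * and installing it in the RX descriptor.
 */
static struct mbuf *
iwn_rx_replenish_example(struct iwn_softc *sc)
{
	struct mbuf *m;

	m = MCLGETIalt(sc, M_DONTWAIT, NULL, IWN_RBUF_SIZE);
	if (m == NULL)
		return NULL;	/* out of buffers; retry later */
	/*
	 * bus_dmamap_load() of mtod(m, void *) and the RX descriptor
	 * update would follow here.
	 */
	return m;
}
#endif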
6049
6050 #ifdef IWN_USE_RBUF
6051 static struct iwn_rbuf *
6052 iwn_alloc_rbuf(struct iwn_softc *sc)
6053 {
6054 struct iwn_rbuf *rbuf;
6055 mutex_enter(&sc->rxq.freelist_mtx);
6056
6057 rbuf = SLIST_FIRST(&sc->rxq.freelist);
6058 if (rbuf != NULL) {
6059 SLIST_REMOVE_HEAD(&sc->rxq.freelist, next);
6060 sc->rxq.nb_free_entries--;
6061 }
6062 mutex_exit(&sc->rxq.freelist_mtx);
6063 return rbuf;
6064 }
6065
6066 /*
6067 * This is called automatically by the network stack when the mbuf to which
6068 * our RX buffer is attached is freed.
6069 */
6070 static void
6071 iwn_free_rbuf(struct mbuf *m, void *buf, size_t size, void *arg)
6072 {
6073 struct iwn_rbuf *rbuf = arg;
6074 struct iwn_softc *sc = rbuf->sc;
6075
6076 /* Put the RX buffer back in the free list. */
6077 mutex_enter(&sc->rxq.freelist_mtx);
6078 SLIST_INSERT_HEAD(&sc->rxq.freelist, rbuf, next);
6079 sc->rxq.nb_free_entries++;
6080 mutex_exit(&sc->rxq.freelist_mtx);
6081
6082 if (__predict_true(m != NULL))
6083 pool_cache_put(mb_cache, m);
6084 }
6085
6086 static int
6087 iwn_alloc_rpool(struct iwn_softc *sc)
6088 {
6089 struct iwn_rx_ring *ring = &sc->rxq;
6090 struct iwn_rbuf *rbuf;
6091 int i, error;
6092
6093 mutex_init(&ring->freelist_mtx, MUTEX_DEFAULT, IPL_NET);
6094
6095 /* Allocate a big chunk of DMA'able memory... */
6096 error = iwn_dma_contig_alloc(sc->sc_dmat, &ring->buf_dma, NULL,
6097 IWN_RBUF_COUNT * IWN_RBUF_SIZE, PAGE_SIZE);
6098 if (error != 0) {
6099 aprint_error_dev(sc->sc_dev,
6100 "could not allocate RX buffers DMA memory\n");
6101 return error;
6102 }
6103 /* ...and split it into chunks of IWN_RBUF_SIZE bytes. */
6104 SLIST_INIT(&ring->freelist);
6105 for (i = 0; i < IWN_RBUF_COUNT; i++) {
6106 rbuf = &ring->rbuf[i];
6107
6108 rbuf->sc = sc; /* Backpointer for callbacks. */
6109 rbuf->vaddr = (void *)((vaddr_t)ring->buf_dma.vaddr + i * IWN_RBUF_SIZE);
6110 rbuf->paddr = ring->buf_dma.paddr + i * IWN_RBUF_SIZE;
6111
6112 SLIST_INSERT_HEAD(&ring->freelist, rbuf, next);
6113 }
6114 ring->nb_free_entries = IWN_RBUF_COUNT;
6115 return 0;
6116 }
6117
6118 static void
6119 iwn_free_rpool(struct iwn_softc *sc)
6120 {
6121 iwn_dma_contig_free(&sc->rxq.buf_dma);
6122 }
6123 #endif
6124
6125 /*
6126 * XXX code from OpenBSD src/sys/net80211/ieee80211_output.c
6127 * Copyright (c) 2001 Atsushi Onoe
6128 * Copyright (c) 2002, 2003 Sam Leffler, Errno Consulting
6129 * Copyright (c) 2007-2009 Damien Bergamini
6130 * All rights reserved.
6131 */
6132
6133 /*
6134 * Add an SSID element to a frame (see 7.3.2.1).
6135 */
6136 static u_int8_t *
6137 ieee80211_add_ssid(u_int8_t *frm, const u_int8_t *ssid, u_int len)
6138 {
6139 *frm++ = IEEE80211_ELEMID_SSID;
6140 *frm++ = len;
6141 memcpy(frm, ssid, len);
6142 return frm + len;
6143 }
6144
6145 /*
6146 * Add a supported rates element to a frame (see 7.3.2.2).
6147 */
6148 static u_int8_t *
6149 ieee80211_add_rates(u_int8_t *frm, const struct ieee80211_rateset *rs)
6150 {
6151 int nrates;
6152
6153 *frm++ = IEEE80211_ELEMID_RATES;
6154 nrates = min(rs->rs_nrates, IEEE80211_RATE_SIZE);
6155 *frm++ = nrates;
6156 memcpy(frm, rs->rs_rates, nrates);
6157 return frm + nrates;
6158 }
6159
6160 /*
6161 * Add an extended supported rates element to a frame (see 7.3.2.14).
6162 */
6163 static u_int8_t *
6164 ieee80211_add_xrates(u_int8_t *frm, const struct ieee80211_rateset *rs)
6165 {
6166 int nrates;
6167
6168 KASSERT(rs->rs_nrates > IEEE80211_RATE_SIZE);
6169
6170 *frm++ = IEEE80211_ELEMID_XRATES;
6171 nrates = rs->rs_nrates - IEEE80211_RATE_SIZE;
6172 *frm++ = nrates;
6173 memcpy(frm, rs->rs_rates + IEEE80211_RATE_SIZE, nrates);
6174 return frm + nrates;
6175 }
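
#if 0
/*
 * Example use of the helpers above (a sketch only, not compiled):
 * appending the SSID, supported rates and, when there are more than
 * IEEE80211_RATE_SIZE rates, extended rates elements, much as the
 * driver's scan code does.  "ic" and "rs" are assumed to be set up by
 * the caller and "frm" to point into a sufficiently large buffer.
 */
static u_int8_t *
iwn_add_scan_ies_example(struct ieee80211com *ic,
    const struct ieee80211_rateset *rs, u_int8_t *frm)
{
	frm = ieee80211_add_ssid(frm, ic->ic_des_essid, ic->ic_des_esslen);
	frm = ieee80211_add_rates(frm, rs);
	if (rs->rs_nrates > IEEE80211_RATE_SIZE)
		frm = ieee80211_add_xrates(frm, rs);
	return frm;
}
#endif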
6176
6177 /*
6178 * XXX: Hack to set the current channel to the value advertised in beacons or
6179 * probe responses. Only used during AP detection.
6180 * XXX: Duplicated from if_iwi.c
6181 */
6182 static void
6183 iwn_fix_channel(struct ieee80211com *ic, struct mbuf *m)
6184 {
6185 struct ieee80211_frame *wh;
6186 uint8_t subtype;
6187 uint8_t *frm, *efrm;
6188
6189 wh = mtod(m, struct ieee80211_frame *);
6190
6191 if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_MGT)
6192 return;
6193
6194 subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
6195
6196 if (subtype != IEEE80211_FC0_SUBTYPE_BEACON &&
6197 subtype != IEEE80211_FC0_SUBTYPE_PROBE_RESP)
6198 return;
6199
6200 frm = (uint8_t *)(wh + 1);
6201 efrm = mtod(m, uint8_t *) + m->m_len;
6202
6203 frm += 12; /* skip tstamp, bintval and capinfo fields */
6204 while (frm < efrm) {
6205 if (*frm == IEEE80211_ELEMID_DSPARMS)
6206 #if IEEE80211_CHAN_MAX < 255
6207 if (frm[2] <= IEEE80211_CHAN_MAX)
6208 #endif
6209 ic->ic_curchan = &ic->ic_channels[frm[2]];
6210
6211 frm += frm[1] + 2;
6212 }
6213 }
6214
6215 #ifdef notyetMODULE
6216
6217 MODULE(MODULE_CLASS_DRIVER, if_iwn, "pci");
6218
6219 #ifdef _MODULE
6220 #include "ioconf.c"
6221 #endif
6222
6223 static int
6224 if_iwn_modcmd(modcmd_t cmd, void *data)
6225 {
6226 int error = 0;
6227
6228 switch (cmd) {
6229 case MODULE_CMD_INIT:
6230 #ifdef _MODULE
6231 error = config_init_component(cfdriver_ioconf_if_iwn,
6232 cfattach_ioconf_if_iwn, cfdata_ioconf_if_iwn);
6233 #endif
6234 return error;
6235 case MODULE_CMD_FINI:
6236 #ifdef _MODULE
6237 error = config_fini_component(cfdriver_ioconf_if_iwn,
6238 cfattach_ioconf_if_iwn, cfdata_ioconf_if_iwn);
6239 #endif
6240 return error;
6241 case MODULE_CMD_AUTOUNLOAD:
6242 #ifdef _MODULE
6243 /* XXX This is not optional! */
6244 #endif
6245 return error;
6246 default:
6247 return ENOTTY;
6248 }
6249 }
6250 #endif
6251