/* $NetBSD: if_bwfm_pci.c,v 1.1.2.4 2018/10/20 06:58:31 pgoyette Exp $ */
/* $OpenBSD: if_bwfm_pci.c,v 1.18 2018/02/08 05:00:38 patrick Exp $ */
/*
 * Copyright (c) 2010-2016 Broadcom Corporation
 * Copyright (c) 2017 Patrick Wildt <patrick@blueri.se>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/device.h>
#include <sys/pool.h>
#include <sys/workqueue.h>
#include <sys/socket.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_ether.h>
#include <net/if_media.h>

#include <netinet/in.h>

#include <net80211/ieee80211_var.h>

#include <dev/firmload.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/ic/bwfmvar.h>
#include <dev/ic/bwfmreg.h>
#include <dev/pci/if_bwfm_pci.h>

#define BWFM_DMA_D2H_SCRATCH_BUF_LEN	8
#define BWFM_DMA_D2H_RINGUPD_BUF_LEN	1024
#define BWFM_DMA_H2D_IOCTL_BUF_LEN	ETHER_MAX_LEN

#define BWFM_NUM_TX_MSGRINGS		2
#define BWFM_NUM_RX_MSGRINGS		3

#define BWFM_NUM_TX_PKTIDS		2048
#define BWFM_NUM_RX_PKTIDS		1024

#define BWFM_NUM_TX_DESCS		1
#define BWFM_NUM_RX_DESCS		1

#ifdef BWFM_DEBUG
#define DPRINTF(x)	do { if (bwfm_debug > 0) printf x; } while (0)
#define DPRINTFN(n, x)	do { if (bwfm_debug >= (n)) printf x; } while (0)
static int bwfm_debug = 2;
#else
#define DPRINTF(x)	do { ; } while (0)
#define DPRINTFN(n, x)	do { ; } while (0)
#endif

#define DEVNAME(sc)	device_xname((sc)->sc_sc.sc_dev)
#define letoh16		htole16
#define letoh32		htole32
#define nitems(x)	__arraycount(x)

enum ring_status {
	RING_CLOSED,
	RING_CLOSING,
	RING_OPEN,
	RING_OPENING,
};

struct bwfm_pci_msgring {
	uint32_t		w_idx_addr;
	uint32_t		r_idx_addr;
	uint32_t		w_ptr;
	uint32_t		r_ptr;
	int			nitem;
	int			itemsz;
	enum ring_status	status;
	struct bwfm_pci_dmamem	*ring;
	struct mbuf		*m;

	int			fifo;
	uint8_t			mac[ETHER_ADDR_LEN];
};

struct bwfm_pci_buf {
	bus_dmamap_t	bb_map;
	struct mbuf	*bb_m;
};

struct bwfm_pci_pkts {
	struct bwfm_pci_buf	*pkts;
	uint32_t		npkt;
	int			last;
};

struct if_rxring {
	u_int	rxr_total;
	u_int	rxr_inuse;
};

struct bwfm_cmd_flowring_create {
	struct work		wq_cookie;
	struct bwfm_pci_softc	*sc;
	struct mbuf		*m;
	int			flowid;
	int			prio;
};

struct bwfm_pci_softc {
	struct bwfm_softc	sc_sc;
	pci_chipset_tag_t	sc_pc;
	pcitag_t		sc_tag;
	pcireg_t		sc_id;
	void			*sc_ih;
	pci_intr_handle_t	*sc_pihp;

	bus_space_tag_t		sc_reg_iot;
	bus_space_handle_t	sc_reg_ioh;
	bus_size_t		sc_reg_ios;

	bus_space_tag_t		sc_tcm_iot;
	bus_space_handle_t	sc_tcm_ioh;
	bus_size_t		sc_tcm_ios;

	bus_dma_tag_t		sc_dmat;

	uint32_t		sc_shared_address;
	uint32_t		sc_shared_flags;
	uint8_t			sc_shared_version;

	uint8_t			sc_dma_idx_sz;
	struct bwfm_pci_dmamem	*sc_dma_idx_buf;
	size_t			sc_dma_idx_bufsz;

	uint16_t		sc_max_rxbufpost;
	uint32_t		sc_rx_dataoffset;
	uint32_t		sc_htod_mb_data_addr;
	uint32_t		sc_dtoh_mb_data_addr;
	uint32_t		sc_ring_info_addr;

	uint32_t		sc_console_base_addr;
	uint32_t		sc_console_buf_addr;
	uint32_t		sc_console_buf_size;
	uint32_t		sc_console_readidx;

	struct pool		sc_flowring_pool;
	struct workqueue	*flowring_wq;

	uint16_t		sc_max_flowrings;
	uint16_t		sc_max_submissionrings;
	uint16_t		sc_max_completionrings;

	struct bwfm_pci_msgring	sc_ctrl_submit;
	struct bwfm_pci_msgring	sc_rxpost_submit;
	struct bwfm_pci_msgring	sc_ctrl_complete;
	struct bwfm_pci_msgring	sc_tx_complete;
	struct bwfm_pci_msgring	sc_rx_complete;
	struct bwfm_pci_msgring	*sc_flowrings;

	struct bwfm_pci_dmamem	*sc_scratch_buf;
	struct bwfm_pci_dmamem	*sc_ringupd_buf;

	struct bwfm_pci_dmamem	*sc_ioctl_buf;
	int			sc_ioctl_reqid;
	uint32_t		sc_ioctl_resp_pktid;
	uint32_t		sc_ioctl_resp_ret_len;
	uint32_t		sc_ioctl_resp_status;
	int			sc_ioctl_poll;

	struct if_rxring	sc_ioctl_ring;
	struct if_rxring	sc_event_ring;
	struct if_rxring	sc_rxbuf_ring;

	struct bwfm_pci_pkts	sc_rx_pkts;
	struct bwfm_pci_pkts	sc_tx_pkts;
	int			sc_tx_pkts_full;
};

struct bwfm_pci_dmamem {
	bus_dmamap_t		bdm_map;
	bus_dma_segment_t	bdm_seg;
	size_t			bdm_size;
	char			*bdm_kva;
};

#define BWFM_PCI_DMA_MAP(_bdm)	((_bdm)->bdm_map)
#define BWFM_PCI_DMA_LEN(_bdm)	((_bdm)->bdm_size)
#define BWFM_PCI_DMA_DVA(_bdm)	((_bdm)->bdm_map->dm_segs[0].ds_addr)
#define BWFM_PCI_DMA_KVA(_bdm)	((_bdm)->bdm_kva)

static u_int	if_rxr_get(struct if_rxring *rxr, unsigned int max);
static void	if_rxr_put(struct if_rxring *rxr, unsigned int n);
static void	if_rxr_init(struct if_rxring *rxr, unsigned int lwm,
		    unsigned int hwm);

int		 bwfm_pci_match(device_t parent, cfdata_t match, void *aux);
void		 bwfm_pci_attachhook(device_t);
void		 bwfm_pci_attach(device_t, device_t, void *);
int		 bwfm_pci_detach(device_t, int);

int		 bwfm_pci_intr(void *);
void		 bwfm_pci_intr_enable(struct bwfm_pci_softc *);
void		 bwfm_pci_intr_disable(struct bwfm_pci_softc *);
int		 bwfm_pci_load_microcode(struct bwfm_pci_softc *,
		     const u_char *, size_t);
void		 bwfm_pci_select_core(struct bwfm_pci_softc *, int);

struct bwfm_pci_dmamem *
		 bwfm_pci_dmamem_alloc(struct bwfm_pci_softc *, bus_size_t,
		     bus_size_t);
void		 bwfm_pci_dmamem_free(struct bwfm_pci_softc *,
		     struct bwfm_pci_dmamem *);
int		 bwfm_pci_pktid_avail(struct bwfm_pci_softc *,
		     struct bwfm_pci_pkts *);
int		 bwfm_pci_pktid_new(struct bwfm_pci_softc *,
		     struct bwfm_pci_pkts *, struct mbuf **,
		     uint32_t *, paddr_t *);
struct mbuf *	 bwfm_pci_pktid_free(struct bwfm_pci_softc *,
		     struct bwfm_pci_pkts *, uint32_t);
void		 bwfm_pci_fill_rx_ioctl_ring(struct bwfm_pci_softc *,
		     struct if_rxring *, uint32_t);
void		 bwfm_pci_fill_rx_buf_ring(struct bwfm_pci_softc *);
void		 bwfm_pci_fill_rx_rings(struct bwfm_pci_softc *);
int		 bwfm_pci_setup_ring(struct bwfm_pci_softc *,
		     struct bwfm_pci_msgring *, int, size_t, uint32_t,
		     uint32_t, int, uint32_t, uint32_t *);
int		 bwfm_pci_setup_flowring(struct bwfm_pci_softc *,
		     struct bwfm_pci_msgring *, int, size_t);

void		 bwfm_pci_ring_bell(struct bwfm_pci_softc *,
		     struct bwfm_pci_msgring *);
void		 bwfm_pci_ring_update_rptr(struct bwfm_pci_softc *,
		     struct bwfm_pci_msgring *);
void		 bwfm_pci_ring_update_wptr(struct bwfm_pci_softc *,
		     struct bwfm_pci_msgring *);
void		 bwfm_pci_ring_write_rptr(struct bwfm_pci_softc *,
		     struct bwfm_pci_msgring *);
void		 bwfm_pci_ring_write_wptr(struct bwfm_pci_softc *,
		     struct bwfm_pci_msgring *);
void		*bwfm_pci_ring_write_reserve(struct bwfm_pci_softc *,
		     struct bwfm_pci_msgring *);
void		*bwfm_pci_ring_write_reserve_multi(struct bwfm_pci_softc *,
		     struct bwfm_pci_msgring *, int, int *);
void		*bwfm_pci_ring_read_avail(struct bwfm_pci_softc *,
		     struct bwfm_pci_msgring *, int *);
void		 bwfm_pci_ring_read_commit(struct bwfm_pci_softc *,
		     struct bwfm_pci_msgring *, int);
void		 bwfm_pci_ring_write_commit(struct bwfm_pci_softc *,
		     struct bwfm_pci_msgring *);
void		 bwfm_pci_ring_write_cancel(struct bwfm_pci_softc *,
		     struct bwfm_pci_msgring *, int);

void		 bwfm_pci_ring_rx(struct bwfm_pci_softc *,
		     struct bwfm_pci_msgring *);
void		 bwfm_pci_msg_rx(struct bwfm_pci_softc *, void *);

uint32_t	 bwfm_pci_buscore_read(struct bwfm_softc *, uint32_t);
void		 bwfm_pci_buscore_write(struct bwfm_softc *, uint32_t,
		     uint32_t);
int		 bwfm_pci_buscore_prepare(struct bwfm_softc *);
int		 bwfm_pci_buscore_reset(struct bwfm_softc *);
void		 bwfm_pci_buscore_activate(struct bwfm_softc *, const uint32_t);

int		 bwfm_pci_flowring_lookup(struct bwfm_pci_softc *,
		     struct mbuf *);
void		 bwfm_pci_flowring_create(struct bwfm_pci_softc *,
		     struct mbuf *);
void		 bwfm_pci_flowring_create_cb(struct work *, void *);
void		 bwfm_pci_flowring_delete(struct bwfm_pci_softc *, int);

void		 bwfm_pci_stop(struct bwfm_softc *);
int		 bwfm_pci_txcheck(struct bwfm_softc *);
int		 bwfm_pci_txdata(struct bwfm_softc *, struct mbuf **);

#ifdef BWFM_DEBUG
void		 bwfm_pci_debug_console(struct bwfm_pci_softc *);
#endif

int		 bwfm_pci_msgbuf_query_dcmd(struct bwfm_softc *, int,
		     int, char *, size_t *);
int		 bwfm_pci_msgbuf_set_dcmd(struct bwfm_softc *, int,
		     int, char *, size_t);

struct bwfm_buscore_ops bwfm_pci_buscore_ops = {
	.bc_read = bwfm_pci_buscore_read,
	.bc_write = bwfm_pci_buscore_write,
	.bc_prepare = bwfm_pci_buscore_prepare,
	.bc_reset = bwfm_pci_buscore_reset,
	.bc_setup = NULL,
	.bc_activate = bwfm_pci_buscore_activate,
};

struct bwfm_bus_ops bwfm_pci_bus_ops = {
	.bs_init = NULL,
	.bs_stop = bwfm_pci_stop,
	.bs_txcheck = bwfm_pci_txcheck,
	.bs_txdata = bwfm_pci_txdata,
	.bs_txctl = NULL,
	.bs_rxctl = NULL,
};

struct bwfm_proto_ops bwfm_pci_msgbuf_ops = {
	.proto_query_dcmd = bwfm_pci_msgbuf_query_dcmd,
	.proto_set_dcmd = bwfm_pci_msgbuf_set_dcmd,
};

CFATTACH_DECL_NEW(bwfm_pci, sizeof(struct bwfm_pci_softc),
    bwfm_pci_match, bwfm_pci_attach, bwfm_pci_detach, NULL);

static const struct bwfm_pci_matchid {
	pci_vendor_id_t		bwfm_vendor;
	pci_product_id_t	bwfm_product;
} bwfm_pci_devices[] = {
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM43602 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4350 },
};

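/*
 * Compatibility shim: OpenBSD allocates RX mbufs with MCLGETI(), which
 * NetBSD does not provide.  Emulate it with a packet header mbuf plus an
 * external buffer of the requested size.
 */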
static struct mbuf *
MCLGETI(struct bwfm_pci_softc *sc __unused, int how,
    struct ifnet *ifp __unused, u_int size)
{
	struct mbuf *m;

	MGETHDR(m, how, MT_DATA);
	if (m == NULL)
		return NULL;

	MEXTMALLOC(m, size, how);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return NULL;
	}
	return m;
}

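/* Match a supported Broadcom FullMAC PCIe device from the table above. */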
int
bwfm_pci_match(device_t parent, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_BROADCOM)
		return 0;

	for (size_t i = 0; i < __arraycount(bwfm_pci_devices); i++)
		if (PCI_PRODUCT(pa->pa_id) == bwfm_pci_devices[i].bwfm_product)
			return 1;

	return 0;
}

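/*
 * Map BAR0 (function registers) and BAR1 (tightly coupled memory),
 * choose a DMA tag and establish the interrupt.  Firmware loading is
 * deferred to bwfm_pci_attachhook() so that the firmware file can be
 * read from the root file system.
 */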
void
bwfm_pci_attach(device_t parent, device_t self, void *aux)
{
	struct bwfm_pci_softc *sc = device_private(self);
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	const char *intrstr;
	char intrbuf[PCI_INTRSTR_LEN];

	sc->sc_sc.sc_dev = self;

	if (pci_mapreg_map(pa, PCI_MAPREG_START + 0x00,
	    PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->sc_reg_iot, &sc->sc_reg_ioh,
	    NULL, &sc->sc_reg_ios)) {
		printf(": can't map bar0\n");
		return;
	}

	if (pci_mapreg_map(pa, PCI_MAPREG_START + 0x08,
	    PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->sc_tcm_iot, &sc->sc_tcm_ioh,
	    NULL, &sc->sc_tcm_ios)) {
		printf(": can't map bar1\n");
		goto bar0;
	}

	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;
	sc->sc_id = pa->pa_id;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	/* Map and establish the interrupt. */
	if (pci_intr_alloc(pa, &sc->sc_pihp, NULL, 0) != 0) {
		printf(": couldn't map interrupt\n");
		goto bar1;
	}
	intrstr = pci_intr_string(pa->pa_pc, sc->sc_pihp[0], intrbuf,
	    sizeof(intrbuf));

	sc->sc_ih = pci_intr_establish(pa->pa_pc, sc->sc_pihp[0], IPL_NET,
	    bwfm_pci_intr, sc);
	if (sc->sc_ih == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto bar1;
	}
	printf(": %s\n", intrstr);

	config_mountroot(self, bwfm_pci_attachhook);
	return;

bar1:
	bus_space_unmap(sc->sc_tcm_iot, sc->sc_tcm_ioh, sc->sc_tcm_ios);
bar0:
	bus_space_unmap(sc->sc_reg_iot, sc->sc_reg_ioh, sc->sc_reg_ios);
}

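/*
 * Runs once the root file system is mounted: load the chip firmware,
 * parse the shared memory layout the firmware exports, set up the
 * control, RX-post and completion rings, and finish the generic
 * bwfm(4) attach.
 */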
void
bwfm_pci_attachhook(device_t self)
{
	struct bwfm_pci_softc *sc = device_private(self);
	struct bwfm_softc *bwfm = (void *)sc;
	struct bwfm_pci_ringinfo ringinfo;
	const char *name = NULL;
	firmware_handle_t fwh;
	u_char *ucode;
	size_t size;
	uint32_t d2h_w_idx_ptr, d2h_r_idx_ptr;
	uint32_t h2d_w_idx_ptr, h2d_r_idx_ptr;
	uint32_t idx_offset, reg;
	int i;
	int error;

	sc->sc_sc.sc_buscore_ops = &bwfm_pci_buscore_ops;
	if (bwfm_chip_attach(&sc->sc_sc) != 0) {
		printf("%s: cannot attach chip\n", DEVNAME(sc));
		return;
	}

	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_PCIE2);
	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_PCI_PCIE2REG_CONFIGADDR, 0x4e0);
	reg = bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_PCI_PCIE2REG_CONFIGDATA);
	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_PCI_PCIE2REG_CONFIGDATA, reg);

	switch (bwfm->sc_chip.ch_chip)
	{
	case BRCM_CC_4350_CHIP_ID:
		if (bwfm->sc_chip.ch_chiprev > 7)
			name = "brcmfmac4350-pcie.bin";
		else
			name = "brcmfmac4350c2-pcie.bin";
		break;
	case BRCM_CC_43602_CHIP_ID:
		name = "brcmfmac43602-pcie.bin";
		break;
	default:
		printf("%s: unknown firmware for chip %s\n",
		    DEVNAME(sc), bwfm->sc_chip.ch_name);
		return;
	}

	if (firmware_open("if_bwfm", name, &fwh) != 0) {
		printf("%s: failed firmware_open of file %s\n",
		    DEVNAME(sc), name);
		return;
	}
	size = firmware_get_size(fwh);
	ucode = firmware_malloc(size);
	if (ucode == NULL) {
		printf("%s: failed to allocate firmware memory\n",
		    DEVNAME(sc));
		firmware_close(fwh);
		return;
	}
	error = firmware_read(fwh, 0, ucode, size);
	firmware_close(fwh);
	if (error != 0) {
		printf("%s: failed to read firmware (error %d)\n",
		    DEVNAME(sc), error);
		firmware_free(ucode, size);
		return;
	}

	/* Retrieve RAM size from firmware. */
	if (size >= BWFM_RAMSIZE + 8) {
		uint32_t *ramsize = (uint32_t *)&ucode[BWFM_RAMSIZE];
		if (letoh32(ramsize[0]) == BWFM_RAMSIZE_MAGIC)
			bwfm->sc_chip.ch_ramsize = letoh32(ramsize[1]);
	}

	if (bwfm_pci_load_microcode(sc, ucode, size) != 0) {
		printf("%s: could not load microcode\n",
		    DEVNAME(sc));
		firmware_free(ucode, size);
		return;
	}

	firmware_free(ucode, size);

	sc->sc_shared_flags = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_INFO);
	sc->sc_shared_version = sc->sc_shared_flags;
	if (sc->sc_shared_version > BWFM_SHARED_INFO_MAX_VERSION ||
	    sc->sc_shared_version < BWFM_SHARED_INFO_MIN_VERSION) {
		printf("%s: PCIe version %d unsupported\n",
		    DEVNAME(sc), sc->sc_shared_version);
		return;
	}

	if (sc->sc_shared_flags & BWFM_SHARED_INFO_DMA_INDEX) {
		if (sc->sc_shared_flags & BWFM_SHARED_INFO_DMA_2B_IDX)
			sc->sc_dma_idx_sz = sizeof(uint16_t);
		else
			sc->sc_dma_idx_sz = sizeof(uint32_t);
	}

	/* Maximum RX data buffers in the ring. */
	sc->sc_max_rxbufpost = bus_space_read_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_MAX_RXBUFPOST);
	if (sc->sc_max_rxbufpost == 0)
		sc->sc_max_rxbufpost = BWFM_SHARED_MAX_RXBUFPOST_DEFAULT;

	/* Alternative offset of data in a packet */
	sc->sc_rx_dataoffset = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_RX_DATAOFFSET);

	/* For Power Management */
	sc->sc_htod_mb_data_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_HTOD_MB_DATA_ADDR);
	sc->sc_dtoh_mb_data_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DTOH_MB_DATA_ADDR);

	/* Ring information */
	sc->sc_ring_info_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_RING_INFO_ADDR);

	/* Firmware's "dmesg" */
	sc->sc_console_base_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_CONSOLE_ADDR);
	sc->sc_console_buf_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_console_base_addr + BWFM_CONSOLE_BUFADDR);
	sc->sc_console_buf_size = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_console_base_addr + BWFM_CONSOLE_BUFSIZE);

	/* Read ring information. */
	bus_space_read_region_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_ring_info_addr, (void *)&ringinfo, sizeof(ringinfo));

	if (sc->sc_shared_version >= 6) {
		sc->sc_max_submissionrings = le16toh(ringinfo.max_submissionrings);
		sc->sc_max_flowrings = le16toh(ringinfo.max_flowrings);
		sc->sc_max_completionrings = le16toh(ringinfo.max_completionrings);
	} else {
		sc->sc_max_submissionrings = le16toh(ringinfo.max_flowrings);
		sc->sc_max_flowrings = sc->sc_max_submissionrings -
		    BWFM_NUM_TX_MSGRINGS;
		sc->sc_max_completionrings = BWFM_NUM_RX_MSGRINGS;
	}

	if (sc->sc_dma_idx_sz == 0) {
		d2h_w_idx_ptr = letoh32(ringinfo.d2h_w_idx_ptr);
		d2h_r_idx_ptr = letoh32(ringinfo.d2h_r_idx_ptr);
		h2d_w_idx_ptr = letoh32(ringinfo.h2d_w_idx_ptr);
		h2d_r_idx_ptr = letoh32(ringinfo.h2d_r_idx_ptr);
		idx_offset = sizeof(uint32_t);
	} else {
		uint64_t address;

		/* Each TX/RX Ring has a Read and Write Ptr */
		sc->sc_dma_idx_bufsz = (sc->sc_max_submissionrings +
		    sc->sc_max_completionrings) * sc->sc_dma_idx_sz * 2;
		sc->sc_dma_idx_buf = bwfm_pci_dmamem_alloc(sc,
		    sc->sc_dma_idx_bufsz, 8);
		if (sc->sc_dma_idx_buf == NULL) {
			/* XXX: Fallback to TCM? */
			printf("%s: cannot allocate idx buf\n",
			    DEVNAME(sc));
			return;
		}

		idx_offset = sc->sc_dma_idx_sz;
		h2d_w_idx_ptr = 0;
		address = BWFM_PCI_DMA_DVA(sc->sc_dma_idx_buf);
		ringinfo.h2d_w_idx_hostaddr_low =
		    htole32(address & 0xffffffff);
		ringinfo.h2d_w_idx_hostaddr_high =
		    htole32(address >> 32);

		h2d_r_idx_ptr = h2d_w_idx_ptr +
		    sc->sc_max_submissionrings * idx_offset;
		address += sc->sc_max_submissionrings * idx_offset;
		ringinfo.h2d_r_idx_hostaddr_low =
		    htole32(address & 0xffffffff);
		ringinfo.h2d_r_idx_hostaddr_high =
		    htole32(address >> 32);

		d2h_w_idx_ptr = h2d_r_idx_ptr +
		    sc->sc_max_submissionrings * idx_offset;
		address += sc->sc_max_submissionrings * idx_offset;
		ringinfo.d2h_w_idx_hostaddr_low =
		    htole32(address & 0xffffffff);
		ringinfo.d2h_w_idx_hostaddr_high =
		    htole32(address >> 32);

		d2h_r_idx_ptr = d2h_w_idx_ptr +
		    sc->sc_max_completionrings * idx_offset;
		address += sc->sc_max_completionrings * idx_offset;
		ringinfo.d2h_r_idx_hostaddr_low =
		    htole32(address & 0xffffffff);
		ringinfo.d2h_r_idx_hostaddr_high =
		    htole32(address >> 32);

		bus_space_write_region_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    sc->sc_ring_info_addr, (void *)&ringinfo, sizeof(ringinfo));
	}

	uint32_t ring_mem_ptr = letoh32(ringinfo.ringmem);
	/* TX ctrl ring: Send ctrl buffers, send IOCTLs */
	if (bwfm_pci_setup_ring(sc, &sc->sc_ctrl_submit, 64, 40,
	    h2d_w_idx_ptr, h2d_r_idx_ptr, 0, idx_offset,
	    &ring_mem_ptr))
		goto cleanup;
	/* TX rxpost ring: Send clean data mbufs for RX */
	if (bwfm_pci_setup_ring(sc, &sc->sc_rxpost_submit, 512, 32,
	    h2d_w_idx_ptr, h2d_r_idx_ptr, 1, idx_offset,
	    &ring_mem_ptr))
		goto cleanup;
	/* RX completion rings: recv our filled buffers back */
	if (bwfm_pci_setup_ring(sc, &sc->sc_ctrl_complete, 64, 24,
	    d2h_w_idx_ptr, d2h_r_idx_ptr, 0, idx_offset,
	    &ring_mem_ptr))
		goto cleanup;
	if (bwfm_pci_setup_ring(sc, &sc->sc_tx_complete, 1024, 16,
	    d2h_w_idx_ptr, d2h_r_idx_ptr, 1, idx_offset,
	    &ring_mem_ptr))
		goto cleanup;
	if (bwfm_pci_setup_ring(sc, &sc->sc_rx_complete, 512, 32,
	    d2h_w_idx_ptr, d2h_r_idx_ptr, 2, idx_offset,
	    &ring_mem_ptr))
		goto cleanup;

	/* Dynamic TX rings for actual data */
	sc->sc_flowrings = kmem_zalloc(sc->sc_max_flowrings *
	    sizeof(struct bwfm_pci_msgring), KM_SLEEP);
	for (i = 0; i < sc->sc_max_flowrings; i++) {
		struct bwfm_pci_msgring *ring = &sc->sc_flowrings[i];
		ring->w_idx_addr = h2d_w_idx_ptr + (i + 2) * idx_offset;
		ring->r_idx_addr = h2d_r_idx_ptr + (i + 2) * idx_offset;
	}

	pool_init(&sc->sc_flowring_pool, sizeof(struct bwfm_cmd_flowring_create),
	    0, 0, 0, "bwfmpl", NULL, IPL_NET);

	/* Scratch and ring update buffers for firmware */
	if ((sc->sc_scratch_buf = bwfm_pci_dmamem_alloc(sc,
	    BWFM_DMA_D2H_SCRATCH_BUF_LEN, 8)) == NULL)
		goto cleanup;
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DMA_SCRATCH_ADDR_LOW,
	    BWFM_PCI_DMA_DVA(sc->sc_scratch_buf) & 0xffffffff);
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DMA_SCRATCH_ADDR_HIGH,
	    BWFM_PCI_DMA_DVA(sc->sc_scratch_buf) >> 32);
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DMA_SCRATCH_LEN,
	    BWFM_DMA_D2H_SCRATCH_BUF_LEN);

	if ((sc->sc_ringupd_buf = bwfm_pci_dmamem_alloc(sc,
	    BWFM_DMA_D2H_RINGUPD_BUF_LEN, 8)) == NULL)
		goto cleanup;
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DMA_RINGUPD_ADDR_LOW,
	    BWFM_PCI_DMA_DVA(sc->sc_ringupd_buf) & 0xffffffff);
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DMA_RINGUPD_ADDR_HIGH,
	    BWFM_PCI_DMA_DVA(sc->sc_ringupd_buf) >> 32);
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DMA_RINGUPD_LEN,
	    BWFM_DMA_D2H_RINGUPD_BUF_LEN);

	if ((sc->sc_ioctl_buf = bwfm_pci_dmamem_alloc(sc,
	    BWFM_DMA_H2D_IOCTL_BUF_LEN, 8)) == NULL)
		goto cleanup;

	if (workqueue_create(&sc->flowring_wq, "bwfmflow",
	    bwfm_pci_flowring_create_cb, sc, PRI_SOFTNET, IPL_NET, 0))
		goto cleanup;

	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_PCIE2);
	bwfm_pci_intr_enable(sc);

	/* Maps RX mbufs to a packet id and back. */
	sc->sc_rx_pkts.npkt = BWFM_NUM_RX_PKTIDS;
	sc->sc_rx_pkts.pkts = kmem_zalloc(BWFM_NUM_RX_PKTIDS *
	    sizeof(struct bwfm_pci_buf), KM_SLEEP);
	for (i = 0; i < BWFM_NUM_RX_PKTIDS; i++)
		bus_dmamap_create(sc->sc_dmat, MSGBUF_MAX_PKT_SIZE,
		    BWFM_NUM_RX_DESCS, MSGBUF_MAX_PKT_SIZE, 0, BUS_DMA_WAITOK,
		    &sc->sc_rx_pkts.pkts[i].bb_map);

	/* Maps TX mbufs to a packet id and back. */
	sc->sc_tx_pkts.npkt = BWFM_NUM_TX_PKTIDS;
	sc->sc_tx_pkts.pkts = kmem_zalloc(BWFM_NUM_TX_PKTIDS
	    * sizeof(struct bwfm_pci_buf), KM_SLEEP);
	for (i = 0; i < BWFM_NUM_TX_PKTIDS; i++)
		bus_dmamap_create(sc->sc_dmat, MSGBUF_MAX_PKT_SIZE,
		    BWFM_NUM_TX_DESCS, MSGBUF_MAX_PKT_SIZE, 0, BUS_DMA_WAITOK,
		    &sc->sc_tx_pkts.pkts[i].bb_map);

	/*
	 * For whatever reason (it could also be a bug somewhere in this
	 * driver), the firmware needs a bunch of RX buffers, otherwise
	 * it won't send any RX complete messages.  64 buffers don't
	 * suffice, but 128 buffers are enough.
	 */
	if_rxr_init(&sc->sc_rxbuf_ring, 128, sc->sc_max_rxbufpost);
	if_rxr_init(&sc->sc_ioctl_ring, 8, 8);
	if_rxr_init(&sc->sc_event_ring, 8, 8);
	bwfm_pci_fill_rx_rings(sc);

#ifdef BWFM_DEBUG
	sc->sc_console_readidx = 0;
	bwfm_pci_debug_console(sc);
#endif

	sc->sc_ioctl_poll = 1;
	sc->sc_sc.sc_bus_ops = &bwfm_pci_bus_ops;
	sc->sc_sc.sc_proto_ops = &bwfm_pci_msgbuf_ops;
	bwfm_attach(&sc->sc_sc);
	sc->sc_ioctl_poll = 0;
	return;

cleanup:
	if (sc->flowring_wq != NULL)
		workqueue_destroy(sc->flowring_wq);
	if (sc->sc_ih != NULL) {
		pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
		pci_intr_release(sc->sc_pc, sc->sc_pihp, 1);
	}
	if (sc->sc_ioctl_buf)
		bwfm_pci_dmamem_free(sc, sc->sc_ioctl_buf);
	if (sc->sc_ringupd_buf)
		bwfm_pci_dmamem_free(sc, sc->sc_ringupd_buf);
	if (sc->sc_scratch_buf)
		bwfm_pci_dmamem_free(sc, sc->sc_scratch_buf);
	if (sc->sc_rx_complete.ring)
		bwfm_pci_dmamem_free(sc, sc->sc_rx_complete.ring);
	if (sc->sc_tx_complete.ring)
		bwfm_pci_dmamem_free(sc, sc->sc_tx_complete.ring);
	if (sc->sc_ctrl_complete.ring)
		bwfm_pci_dmamem_free(sc, sc->sc_ctrl_complete.ring);
	if (sc->sc_rxpost_submit.ring)
		bwfm_pci_dmamem_free(sc, sc->sc_rxpost_submit.ring);
	if (sc->sc_ctrl_submit.ring)
		bwfm_pci_dmamem_free(sc, sc->sc_ctrl_submit.ring);
	if (sc->sc_dma_idx_buf)
		bwfm_pci_dmamem_free(sc, sc->sc_dma_idx_buf);
}

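/*
 * Copy the firmware image into device RAM and kick off the ARM core.
 * The last word of RAM is cleared first; once the firmware is up it
 * replaces it with a pointer to its shared memory area, which we poll
 * for here (40 tries, 50 ms apart).
 */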
int
bwfm_pci_load_microcode(struct bwfm_pci_softc *sc, const u_char *ucode,
    size_t size)
{
	struct bwfm_softc *bwfm = (void *)sc;
	struct bwfm_core *core;
	uint32_t shared;
	int i;

	if (bwfm->sc_chip.ch_chip == BRCM_CC_43602_CHIP_ID) {
		bwfm_pci_select_core(sc, BWFM_AGENT_CORE_ARM_CR4);
		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
		    BWFM_PCI_ARMCR4REG_BANKIDX, 5);
		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
		    BWFM_PCI_ARMCR4REG_BANKPDA, 0);
		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
		    BWFM_PCI_ARMCR4REG_BANKIDX, 7);
		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
		    BWFM_PCI_ARMCR4REG_BANKPDA, 0);
	}

	for (i = 0; i < size; i++)
		bus_space_write_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    bwfm->sc_chip.ch_rambase + i, ucode[i]);

	/* Firmware replaces this with a pointer once up. */
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    bwfm->sc_chip.ch_rambase + bwfm->sc_chip.ch_ramsize - 4, 0);

	/* TODO: restore NVRAM */

	/* Load reset vector from firmware and kickstart core. */
	if (bwfm->sc_chip.ch_chip == BRCM_CC_43602_CHIP_ID) {
		core = bwfm_chip_get_core(bwfm, BWFM_AGENT_INTERNAL_MEM);
		bwfm->sc_chip.ch_core_reset(bwfm, core, 0, 0, 0);
	}
	bwfm_chip_set_active(bwfm, *(const uint32_t *)ucode);

	for (i = 0; i < 40; i++) {
		delay(50 * 1000);
		shared = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    bwfm->sc_chip.ch_rambase + bwfm->sc_chip.ch_ramsize - 4);
		if (shared)
			break;
	}
	if (!shared) {
		printf("%s: firmware did not come up\n", DEVNAME(sc));
		return 1;
	}

	sc->sc_shared_address = shared;
	return 0;
}

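/*
 * Tear down in roughly the reverse order of attach; per the FIXMEs
 * below, the posted RX/TX packet buffers are not released yet.
 */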
int
bwfm_pci_detach(device_t self, int flags)
{
	struct bwfm_pci_softc *sc = device_private(self);

	bwfm_detach(&sc->sc_sc, flags);

	/* FIXME: free RX buffers */
	/* FIXME: free TX buffers */
	/* FIXME: free more memory */

	kmem_free(sc->sc_flowrings, sc->sc_max_flowrings
	    * sizeof(struct bwfm_pci_msgring));
	pool_destroy(&sc->sc_flowring_pool);

	workqueue_destroy(sc->flowring_wq);
	pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
	pci_intr_release(sc->sc_pc, sc->sc_pihp, 1);
	bwfm_pci_dmamem_free(sc, sc->sc_ioctl_buf);
	bwfm_pci_dmamem_free(sc, sc->sc_ringupd_buf);
	bwfm_pci_dmamem_free(sc, sc->sc_scratch_buf);
	bwfm_pci_dmamem_free(sc, sc->sc_rx_complete.ring);
	bwfm_pci_dmamem_free(sc, sc->sc_tx_complete.ring);
	bwfm_pci_dmamem_free(sc, sc->sc_ctrl_complete.ring);
	bwfm_pci_dmamem_free(sc, sc->sc_rxpost_submit.ring);
	bwfm_pci_dmamem_free(sc, sc->sc_ctrl_submit.ring);
	bwfm_pci_dmamem_free(sc, sc->sc_dma_idx_buf);
	return 0;
}

/* DMA code */
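/*
 * Allocate a single-segment DMA buffer, map it coherently into kernel
 * virtual memory and zero it.  Used for the message rings and the
 * index/scratch/ringupdate/ioctl buffers shared with the firmware.
 */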
struct bwfm_pci_dmamem *
bwfm_pci_dmamem_alloc(struct bwfm_pci_softc *sc, bus_size_t size,
    bus_size_t align)
{
	struct bwfm_pci_dmamem *bdm;
	int nsegs;

	bdm = kmem_zalloc(sizeof(*bdm), KM_SLEEP);
	bdm->bdm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &bdm->bdm_map) != 0)
		goto bdmfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, align, 0, &bdm->bdm_seg, 1,
	    &nsegs, BUS_DMA_WAITOK) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &bdm->bdm_seg, nsegs, size,
	    (void **)&bdm->bdm_kva, BUS_DMA_WAITOK | BUS_DMA_COHERENT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, bdm->bdm_map, bdm->bdm_kva, size,
	    NULL, BUS_DMA_WAITOK) != 0)
		goto unmap;

	bzero(bdm->bdm_kva, size);

	return (bdm);

unmap:
	bus_dmamem_unmap(sc->sc_dmat, bdm->bdm_kva, size);
free:
	bus_dmamem_free(sc->sc_dmat, &bdm->bdm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, bdm->bdm_map);
bdmfree:
	kmem_free(bdm, sizeof(*bdm));

	return (NULL);
}

void
bwfm_pci_dmamem_free(struct bwfm_pci_softc *sc, struct bwfm_pci_dmamem *bdm)
{
	bus_dmamem_unmap(sc->sc_dmat, bdm->bdm_kva, bdm->bdm_size);
	bus_dmamem_free(sc->sc_dmat, &bdm->bdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, bdm->bdm_map);
	kmem_free(bdm, sizeof(*bdm));
}

/*
 * We need a simple mapping from a packet ID to mbufs, because when
 * a transfer completes, we only know the ID, so we have to look up
 * the memory for the ID.  This simply looks for an empty slot.
 */
int
bwfm_pci_pktid_avail(struct bwfm_pci_softc *sc, struct bwfm_pci_pkts *pkts)
{
	int i, idx;

	idx = pkts->last + 1;
	for (i = 0; i < pkts->npkt; i++) {
		if (idx == pkts->npkt)
			idx = 0;
		if (pkts->pkts[idx].bb_m == NULL)
			return 0;
		idx++;
	}
	return ENOBUFS;
}

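/*
 * Claim a free packet ID for an mbuf and load the mbuf into the slot's
 * DMA map.  If the chain has too many segments for the map, compact it
 * into a single freshly allocated buffer and retry once.
 */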
int
bwfm_pci_pktid_new(struct bwfm_pci_softc *sc, struct bwfm_pci_pkts *pkts,
    struct mbuf **mp, uint32_t *pktid, paddr_t *paddr)
{
	int i, idx;

	idx = pkts->last + 1;
	for (i = 0; i < pkts->npkt; i++) {
		if (idx == pkts->npkt)
			idx = 0;
		if (pkts->pkts[idx].bb_m == NULL) {
			if (bus_dmamap_load_mbuf(sc->sc_dmat,
			    pkts->pkts[idx].bb_map, *mp, BUS_DMA_NOWAIT) != 0) {
				/*
				 * Didn't fit.  Maybe it has too many
				 * segments.  If it has only one
				 * segment, fail; otherwise try to
				 * compact it into a single mbuf
				 * segment.
				 */
				if ((*mp)->m_next == NULL)
					return ENOBUFS;
				struct mbuf *m0 = MCLGETI(NULL, M_DONTWAIT,
				    NULL, MSGBUF_MAX_PKT_SIZE);
				if (m0 == NULL)
					return ENOBUFS;
				m_copydata(*mp, 0, (*mp)->m_pkthdr.len,
				    mtod(m0, void *));
				m0->m_pkthdr.len = m0->m_len =
				    (*mp)->m_pkthdr.len;
				m_freem(*mp);
				*mp = m0;
				if (bus_dmamap_load_mbuf(sc->sc_dmat,
				    pkts->pkts[idx].bb_map, *mp,
				    BUS_DMA_NOWAIT) != 0)
					return EFBIG;
			}
			bus_dmamap_sync(sc->sc_dmat, pkts->pkts[idx].bb_map,
			    0, pkts->pkts[idx].bb_map->dm_mapsize,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			pkts->last = idx;
			pkts->pkts[idx].bb_m = *mp;
			*pktid = idx;
			*paddr = pkts->pkts[idx].bb_map->dm_segs[0].ds_addr;
			return 0;
		}
		idx++;
	}
	return ENOBUFS;
}

struct mbuf *
bwfm_pci_pktid_free(struct bwfm_pci_softc *sc, struct bwfm_pci_pkts *pkts,
    uint32_t pktid)
{
	struct mbuf *m;

	if (pktid >= pkts->npkt || pkts->pkts[pktid].bb_m == NULL)
		return NULL;
	bus_dmamap_sync(sc->sc_dmat, pkts->pkts[pktid].bb_map, 0,
	    pkts->pkts[pktid].bb_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, pkts->pkts[pktid].bb_map);
	m = pkts->pkts[pktid].bb_m;
	pkts->pkts[pktid].bb_m = NULL;
	return m;
}

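/*
 * Post fresh RX buffers to the firmware: data buffers via the rxpost
 * ring and ioctl-response/event buffers via the control submit ring.
 */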
void
bwfm_pci_fill_rx_rings(struct bwfm_pci_softc *sc)
{
	bwfm_pci_fill_rx_buf_ring(sc);
	bwfm_pci_fill_rx_ioctl_ring(sc, &sc->sc_ioctl_ring,
	    MSGBUF_TYPE_IOCTLRESP_BUF_POST);
	bwfm_pci_fill_rx_ioctl_ring(sc, &sc->sc_event_ring,
	    MSGBUF_TYPE_EVENT_BUF_POST);
}

void
bwfm_pci_fill_rx_ioctl_ring(struct bwfm_pci_softc *sc, struct if_rxring *rxring,
    uint32_t msgtype)
{
	struct msgbuf_rx_ioctl_resp_or_event *req;
	struct mbuf *m;
	uint32_t pktid;
	paddr_t paddr;
	int s, slots;

	s = splnet();
	for (slots = if_rxr_get(rxring, 8); slots > 0; slots--) {
		if (bwfm_pci_pktid_avail(sc, &sc->sc_rx_pkts))
			break;
		req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
		if (req == NULL)
			break;
		m = MCLGETI(NULL, M_DONTWAIT, NULL, MSGBUF_MAX_PKT_SIZE);
		if (m == NULL) {
			bwfm_pci_ring_write_cancel(sc, &sc->sc_ctrl_submit, 1);
			break;
		}
		m->m_len = m->m_pkthdr.len = MSGBUF_MAX_PKT_SIZE;
		if (bwfm_pci_pktid_new(sc, &sc->sc_rx_pkts, &m, &pktid, &paddr)) {
			bwfm_pci_ring_write_cancel(sc, &sc->sc_ctrl_submit, 1);
			m_freem(m);
			break;
		}
		memset(req, 0, sizeof(*req));
		req->msg.msgtype = msgtype;
		req->msg.request_id = htole32(pktid);
		req->host_buf_len = htole16(MSGBUF_MAX_PKT_SIZE);
		req->host_buf_addr.high_addr = htole32(paddr >> 32);
		req->host_buf_addr.low_addr = htole32(paddr & 0xffffffff);
		bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);
	}
	if_rxr_put(rxring, slots);
	splx(s);
}

void
bwfm_pci_fill_rx_buf_ring(struct bwfm_pci_softc *sc)
{
	struct msgbuf_rx_bufpost *req;
	struct mbuf *m;
	uint32_t pktid;
	paddr_t paddr;
	int s, slots;

	s = splnet();
	for (slots = if_rxr_get(&sc->sc_rxbuf_ring, sc->sc_max_rxbufpost);
	    slots > 0; slots--) {
		if (bwfm_pci_pktid_avail(sc, &sc->sc_rx_pkts))
			break;
		req = bwfm_pci_ring_write_reserve(sc, &sc->sc_rxpost_submit);
		if (req == NULL)
			break;
		m = MCLGETI(NULL, M_DONTWAIT, NULL, MSGBUF_MAX_PKT_SIZE);
		if (m == NULL) {
			bwfm_pci_ring_write_cancel(sc, &sc->sc_rxpost_submit, 1);
			break;
		}
		m->m_len = m->m_pkthdr.len = MSGBUF_MAX_PKT_SIZE;
		if (bwfm_pci_pktid_new(sc, &sc->sc_rx_pkts, &m, &pktid, &paddr)) {
			bwfm_pci_ring_write_cancel(sc, &sc->sc_rxpost_submit, 1);
			m_freem(m);
			break;
		}
		memset(req, 0, sizeof(*req));
		req->msg.msgtype = MSGBUF_TYPE_RXBUF_POST;
		req->msg.request_id = htole32(pktid);
		req->data_buf_len = htole16(MSGBUF_MAX_PKT_SIZE);
		req->data_buf_addr.high_addr = htole32(paddr >> 32);
		req->data_buf_addr.low_addr = htole32(paddr & 0xffffffff);
		bwfm_pci_ring_write_commit(sc, &sc->sc_rxpost_submit);
	}
	if_rxr_put(&sc->sc_rxbuf_ring, slots);
	splx(s);
}

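/*
 * Describe one statically allocated ring to the firmware: record the
 * read/write index locations, allocate the ring DMA memory and write
 * its address, item count and item size into the ring memory area,
 * then advance *ring_mem past this ring's descriptor.
 */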
int
bwfm_pci_setup_ring(struct bwfm_pci_softc *sc, struct bwfm_pci_msgring *ring,
    int nitem, size_t itemsz, uint32_t w_idx, uint32_t r_idx,
    int idx, uint32_t idx_off, uint32_t *ring_mem)
{
	ring->w_idx_addr = w_idx + idx * idx_off;
	ring->r_idx_addr = r_idx + idx * idx_off;
	ring->nitem = nitem;
	ring->itemsz = itemsz;
	bwfm_pci_ring_write_rptr(sc, ring);
	bwfm_pci_ring_write_wptr(sc, ring);

	ring->ring = bwfm_pci_dmamem_alloc(sc, nitem * itemsz, 8);
	if (ring->ring == NULL)
		return ENOMEM;
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    *ring_mem + BWFM_RING_MEM_BASE_ADDR_LOW,
	    BWFM_PCI_DMA_DVA(ring->ring) & 0xffffffff);
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    *ring_mem + BWFM_RING_MEM_BASE_ADDR_HIGH,
	    BWFM_PCI_DMA_DVA(ring->ring) >> 32);
	bus_space_write_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    *ring_mem + BWFM_RING_MAX_ITEM, nitem);
	bus_space_write_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    *ring_mem + BWFM_RING_LEN_ITEMS, itemsz);
	*ring_mem = *ring_mem + BWFM_RING_MEM_SZ;
	return 0;
}

int
bwfm_pci_setup_flowring(struct bwfm_pci_softc *sc, struct bwfm_pci_msgring *ring,
    int nitem, size_t itemsz)
{
	ring->w_ptr = 0;
	ring->r_ptr = 0;
	ring->nitem = nitem;
	ring->itemsz = itemsz;
	bwfm_pci_ring_write_rptr(sc, ring);
	bwfm_pci_ring_write_wptr(sc, ring);

	ring->ring = bwfm_pci_dmamem_alloc(sc, nitem * itemsz, 8);
	if (ring->ring == NULL)
		return ENOMEM;
	return 0;
}

/* Ring helpers */
void
bwfm_pci_ring_bell(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_PCI_PCIE2REG_H2D_MAILBOX, 1);
}

void
bwfm_pci_ring_update_rptr(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	if (sc->sc_dma_idx_sz == 0) {
		ring->r_ptr = bus_space_read_2(sc->sc_tcm_iot,
		    sc->sc_tcm_ioh, ring->r_idx_addr);
	} else {
		bus_dmamap_sync(sc->sc_dmat,
		    BWFM_PCI_DMA_MAP(sc->sc_dma_idx_buf), ring->r_idx_addr,
		    sizeof(uint16_t),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		ring->r_ptr = *(uint16_t *)(BWFM_PCI_DMA_KVA(sc->sc_dma_idx_buf)
		    + ring->r_idx_addr);
	}
}

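/*
 * Minimal stand-ins for OpenBSD's if_rxring(9) accounting: they only
 * track how many RX buffers are outstanding so that we never post
 * more than the firmware allows.
 */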
static u_int
if_rxr_get(struct if_rxring *rxr, unsigned int max)
{
	u_int taken = MIN(max, (rxr->rxr_total - rxr->rxr_inuse));

	KASSERTMSG(rxr->rxr_inuse + taken <= rxr->rxr_total,
	    "rxr->rxr_inuse: %d\n"
	    "taken: %d\n"
	    "rxr->rxr_total: %d\n",
	    rxr->rxr_inuse, taken, rxr->rxr_total);
	rxr->rxr_inuse += taken;

	return taken;
}

static void
if_rxr_put(struct if_rxring *rxr, unsigned int n)
{
	KASSERTMSG(rxr->rxr_inuse >= n,
	    "rxr->rxr_inuse: %d\n"
	    "n: %d\n"
	    "rxr->rxr_total: %d\n",
	    rxr->rxr_inuse, n, rxr->rxr_total);

	rxr->rxr_inuse -= n;
}

static void
if_rxr_init(struct if_rxring *rxr, unsigned int lwm __unused, unsigned int hwm)
{
	(void)lwm;

	rxr->rxr_total = hwm;
	rxr->rxr_inuse = 0;
}

void
bwfm_pci_ring_update_wptr(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	if (sc->sc_dma_idx_sz == 0) {
		ring->w_ptr = bus_space_read_2(sc->sc_tcm_iot,
		    sc->sc_tcm_ioh, ring->w_idx_addr);
	} else {
		/* Sync before reading the index the device wrote. */
		bus_dmamap_sync(sc->sc_dmat,
		    BWFM_PCI_DMA_MAP(sc->sc_dma_idx_buf), ring->w_idx_addr,
		    sizeof(uint16_t),
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		ring->w_ptr = *(uint16_t *)(BWFM_PCI_DMA_KVA(sc->sc_dma_idx_buf)
		    + ring->w_idx_addr);
	}
}

void
bwfm_pci_ring_write_rptr(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	if (sc->sc_dma_idx_sz == 0) {
		bus_space_write_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    ring->r_idx_addr, ring->r_ptr);
	} else {
		*(uint16_t *)(BWFM_PCI_DMA_KVA(sc->sc_dma_idx_buf)
		    + ring->r_idx_addr) = ring->r_ptr;
		bus_dmamap_sync(sc->sc_dmat,
		    BWFM_PCI_DMA_MAP(sc->sc_dma_idx_buf), ring->r_idx_addr,
		    sizeof(uint16_t),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
}

void
bwfm_pci_ring_write_wptr(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	if (sc->sc_dma_idx_sz == 0) {
		bus_space_write_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    ring->w_idx_addr, ring->w_ptr);
	} else {
		*(uint16_t *)(BWFM_PCI_DMA_KVA(sc->sc_dma_idx_buf)
		    + ring->w_idx_addr) = ring->w_ptr;
		bus_dmamap_sync(sc->sc_dmat,
		    BWFM_PCI_DMA_MAP(sc->sc_dma_idx_buf), ring->w_idx_addr,
		    sizeof(uint16_t),
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
}

/*
 * Retrieve a free descriptor to put new stuff in, but don't commit
 * to it yet so we can roll back later if any error occurs.
 */
void *
bwfm_pci_ring_write_reserve(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	int available;
	char *ret;

	bwfm_pci_ring_update_rptr(sc, ring);

	if (ring->r_ptr > ring->w_ptr)
		available = ring->r_ptr - ring->w_ptr;
	else
		available = ring->r_ptr + (ring->nitem - ring->w_ptr);

	if (available < 1)
		return NULL;

	ret = BWFM_PCI_DMA_KVA(ring->ring) + (ring->w_ptr * ring->itemsz);
	ring->w_ptr += 1;
	if (ring->w_ptr == ring->nitem)
		ring->w_ptr = 0;
	return ret;
}

void *
bwfm_pci_ring_write_reserve_multi(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring, int count, int *avail)
{
	int available;
	char *ret;

	bwfm_pci_ring_update_rptr(sc, ring);

	if (ring->r_ptr > ring->w_ptr)
		available = ring->r_ptr - ring->w_ptr;
	else
		available = ring->r_ptr + (ring->nitem - ring->w_ptr);

	if (available < 1)
		return NULL;

	ret = BWFM_PCI_DMA_KVA(ring->ring) + (ring->w_ptr * ring->itemsz);
	*avail = uimin(count, available - 1);
	if (*avail + ring->w_ptr > ring->nitem)
		*avail = ring->nitem - ring->w_ptr;
	ring->w_ptr += *avail;
	if (ring->w_ptr == ring->nitem)
		ring->w_ptr = 0;
	return ret;
}

/*
 * Read number of descriptors available (submitted by the firmware)
 * and retrieve pointer to first descriptor.
 */
void *
bwfm_pci_ring_read_avail(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring, int *avail)
{
	bwfm_pci_ring_update_wptr(sc, ring);

	if (ring->w_ptr >= ring->r_ptr)
		*avail = ring->w_ptr - ring->r_ptr;
	else
		*avail = ring->nitem - ring->r_ptr;

	if (*avail == 0)
		return NULL;
	bus_dmamap_sync(sc->sc_dmat, BWFM_PCI_DMA_MAP(ring->ring),
	    ring->r_ptr * ring->itemsz, *avail * ring->itemsz,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	return BWFM_PCI_DMA_KVA(ring->ring) + (ring->r_ptr * ring->itemsz);
}

/*
 * Let firmware know we read N descriptors.
 */
void
bwfm_pci_ring_read_commit(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring, int nitem)
{
	ring->r_ptr += nitem;
	if (ring->r_ptr == ring->nitem)
		ring->r_ptr = 0;
	bwfm_pci_ring_write_rptr(sc, ring);
}

/*
 * Let firmware know that we submitted some descriptors.
 */
void
bwfm_pci_ring_write_commit(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	bus_dmamap_sync(sc->sc_dmat, BWFM_PCI_DMA_MAP(ring->ring),
	    0, BWFM_PCI_DMA_LEN(ring->ring), BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);
	bwfm_pci_ring_write_wptr(sc, ring);
	bwfm_pci_ring_bell(sc, ring);
}

/*
 * Roll back N descriptors in case we don't actually want
 * to commit to them.
 */
void
bwfm_pci_ring_write_cancel(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring, int nitem)
{
	if (ring->w_ptr == 0)
		ring->w_ptr = ring->nitem - nitem;
	else
		ring->w_ptr -= nitem;
}

/*
 * For each written descriptor on the ring, pass the descriptor to
 * a message handler and let the firmware know we handled it.
 */
void
bwfm_pci_ring_rx(struct bwfm_pci_softc *sc, struct bwfm_pci_msgring *ring)
{
	char *buf;
	int avail, processed;

again:
	buf = bwfm_pci_ring_read_avail(sc, ring, &avail);
	if (buf == NULL)
		return;

	processed = 0;
	while (avail) {
		bwfm_pci_msg_rx(sc, buf + sc->sc_rx_dataoffset);
		buf += ring->itemsz;
		processed++;
		if (processed == 48) {
			bwfm_pci_ring_read_commit(sc, ring, processed);
			processed = 0;
		}
		avail--;
	}
	if (processed)
		bwfm_pci_ring_read_commit(sc, ring, processed);
	if (ring->r_ptr == 0)
		goto again;
}

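/*
 * Dispatch a single completion ring message: flowring create/delete
 * responses, ioctl completions, firmware events, TX status and RX
 * completions.
 */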
void
bwfm_pci_msg_rx(struct bwfm_pci_softc *sc, void *buf)
{
	struct ifnet *ifp = sc->sc_sc.sc_ic.ic_ifp;
	struct msgbuf_ioctl_resp_hdr *resp;
	struct msgbuf_tx_status *tx;
	struct msgbuf_rx_complete *rx;
	struct msgbuf_rx_event *event;
	struct msgbuf_common_hdr *msg;
	struct msgbuf_flowring_create_resp *fcr;
	struct msgbuf_flowring_delete_resp *fdr;
	struct bwfm_pci_msgring *ring;
	struct mbuf *m;
	int flowid;

	msg = (struct msgbuf_common_hdr *)buf;
	switch (msg->msgtype)
	{
	case MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT:
		fcr = (struct msgbuf_flowring_create_resp *)buf;
		flowid = letoh16(fcr->compl_hdr.flow_ring_id);
		if (flowid < 2)
			break;
		flowid -= 2;
		if (flowid >= sc->sc_max_flowrings)
			break;
		ring = &sc->sc_flowrings[flowid];
		if (ring->status != RING_OPENING)
			break;
		if (fcr->compl_hdr.status) {
			printf("%s: failed to open flowring %d\n",
			    DEVNAME(sc), flowid);
			ring->status = RING_CLOSED;
			if (ring->m) {
				m_freem(ring->m);
				ring->m = NULL;
			}
			ifp->if_flags &= ~IFF_OACTIVE;
			ifp->if_start(ifp);
			break;
		}
		ring->status = RING_OPEN;
		if (ring->m != NULL) {
			m = ring->m;
			ring->m = NULL;
			if (bwfm_pci_txdata(&sc->sc_sc, &m))
				m_freem(m);
		}
		ifp->if_flags &= ~IFF_OACTIVE;
		ifp->if_start(ifp);
		break;
	case MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT:
		fdr = (struct msgbuf_flowring_delete_resp *)buf;
		flowid = letoh16(fdr->compl_hdr.flow_ring_id);
		if (flowid < 2)
			break;
		flowid -= 2;
		if (flowid >= sc->sc_max_flowrings)
			break;
		ring = &sc->sc_flowrings[flowid];
		if (ring->status != RING_CLOSING)
			break;
		if (fdr->compl_hdr.status) {
			printf("%s: failed to delete flowring %d\n",
			    DEVNAME(sc), flowid);
			break;
		}
		bwfm_pci_dmamem_free(sc, ring->ring);
		ring->status = RING_CLOSED;
		break;
	case MSGBUF_TYPE_IOCTLPTR_REQ_ACK:
		break;
	case MSGBUF_TYPE_IOCTL_CMPLT:
		resp = (struct msgbuf_ioctl_resp_hdr *)buf;
		sc->sc_ioctl_resp_pktid = letoh32(resp->msg.request_id);
		sc->sc_ioctl_resp_ret_len = letoh16(resp->resp_len);
		sc->sc_ioctl_resp_status = letoh16(resp->compl_hdr.status);
		if_rxr_put(&sc->sc_ioctl_ring, 1);
		bwfm_pci_fill_rx_rings(sc);
		wakeup(&sc->sc_ioctl_buf);
		break;
	case MSGBUF_TYPE_WL_EVENT:
		event = (struct msgbuf_rx_event *)buf;
		m = bwfm_pci_pktid_free(sc, &sc->sc_rx_pkts,
		    letoh32(event->msg.request_id));
		if (m == NULL)
			break;
		m_adj(m, sc->sc_rx_dataoffset);
		m->m_len = m->m_pkthdr.len = letoh16(event->event_data_len);
		bwfm_rx(&sc->sc_sc, m);
		if_rxr_put(&sc->sc_event_ring, 1);
		bwfm_pci_fill_rx_rings(sc);
		break;
	case MSGBUF_TYPE_TX_STATUS:
		tx = (struct msgbuf_tx_status *)buf;
		m = bwfm_pci_pktid_free(sc, &sc->sc_tx_pkts,
		    letoh32(tx->msg.request_id));
		if (m == NULL)
			break;
		m_freem(m);
		if (sc->sc_tx_pkts_full) {
			sc->sc_tx_pkts_full = 0;
			ifp->if_flags &= ~IFF_OACTIVE;
			ifp->if_start(ifp);
		}
		break;
	case MSGBUF_TYPE_RX_CMPLT:
		rx = (struct msgbuf_rx_complete *)buf;
		m = bwfm_pci_pktid_free(sc, &sc->sc_rx_pkts,
		    letoh32(rx->msg.request_id));
		if (m == NULL)
			break;
		if (letoh16(rx->data_offset))
			m_adj(m, letoh16(rx->data_offset));
		else if (sc->sc_rx_dataoffset)
			m_adj(m, sc->sc_rx_dataoffset);
		m->m_len = m->m_pkthdr.len = letoh16(rx->data_len);
		bwfm_rx(&sc->sc_sc, m);
		if_rxr_put(&sc->sc_rxbuf_ring, 1);
		bwfm_pci_fill_rx_rings(sc);
		break;
	default:
		printf("%s: msgtype 0x%08x\n", __func__, msg->msgtype);
		break;
	}
}

/* Bus core helpers */
void
bwfm_pci_select_core(struct bwfm_pci_softc *sc, int id)
{
	struct bwfm_softc *bwfm = (void *)sc;
	struct bwfm_core *core;

	core = bwfm_chip_get_core(bwfm, id);
	if (core == NULL) {
		printf("%s: could not find core to select\n", DEVNAME(sc));
		return;
	}

	pci_conf_write(sc->sc_pc, sc->sc_tag,
	    BWFM_PCI_BAR0_WINDOW, core->co_base);
	if (pci_conf_read(sc->sc_pc, sc->sc_tag,
	    BWFM_PCI_BAR0_WINDOW) != core->co_base)
		pci_conf_write(sc->sc_pc, sc->sc_tag,
		    BWFM_PCI_BAR0_WINDOW, core->co_base);
}

uint32_t
bwfm_pci_buscore_read(struct bwfm_softc *bwfm, uint32_t reg)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	uint32_t page, offset;

	page = reg & ~(BWFM_PCI_BAR0_REG_SIZE - 1);
	offset = reg & (BWFM_PCI_BAR0_REG_SIZE - 1);
	pci_conf_write(sc->sc_pc, sc->sc_tag, BWFM_PCI_BAR0_WINDOW, page);
	return bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh, offset);
}

void
bwfm_pci_buscore_write(struct bwfm_softc *bwfm, uint32_t reg, uint32_t val)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	uint32_t page, offset;

	page = reg & ~(BWFM_PCI_BAR0_REG_SIZE - 1);
	offset = reg & (BWFM_PCI_BAR0_REG_SIZE - 1);
	pci_conf_write(sc->sc_pc, sc->sc_tag, BWFM_PCI_BAR0_WINDOW, page);
	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh, offset, val);
}

int
bwfm_pci_buscore_prepare(struct bwfm_softc *bwfm)
{
	return 0;
}

int
bwfm_pci_buscore_reset(struct bwfm_softc *bwfm)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	struct bwfm_core *core;
	uint32_t reg;
	int i;

	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_PCIE2);
	reg = pci_conf_read(sc->sc_pc, sc->sc_tag,
	    BWFM_PCI_CFGREG_LINK_STATUS_CTRL);
	pci_conf_write(sc->sc_pc, sc->sc_tag, BWFM_PCI_CFGREG_LINK_STATUS_CTRL,
	    reg & ~BWFM_PCI_CFGREG_LINK_STATUS_CTRL_ASPM_ENAB);

	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_CHIPCOMMON);
	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_CHIP_REG_WATCHDOG, 4);
	delay(100 * 1000);

	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_PCIE2);
	pci_conf_write(sc->sc_pc, sc->sc_tag,
	    BWFM_PCI_CFGREG_LINK_STATUS_CTRL, reg);

	core = bwfm_chip_get_core(bwfm, BWFM_AGENT_CORE_PCIE2);
	if (core->co_rev <= 13) {
		uint16_t cfg_offset[] = {
			BWFM_PCI_CFGREG_STATUS_CMD,
			BWFM_PCI_CFGREG_PM_CSR,
			BWFM_PCI_CFGREG_MSI_CAP,
			BWFM_PCI_CFGREG_MSI_ADDR_L,
			BWFM_PCI_CFGREG_MSI_ADDR_H,
			BWFM_PCI_CFGREG_MSI_DATA,
			BWFM_PCI_CFGREG_LINK_STATUS_CTRL2,
			BWFM_PCI_CFGREG_RBAR_CTRL,
			BWFM_PCI_CFGREG_PML1_SUB_CTRL1,
			BWFM_PCI_CFGREG_REG_BAR2_CONFIG,
			BWFM_PCI_CFGREG_REG_BAR3_CONFIG,
		};

		for (i = 0; i < nitems(cfg_offset); i++) {
			bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
			    BWFM_PCI_PCIE2REG_CONFIGADDR, cfg_offset[i]);
			reg = bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
			    BWFM_PCI_PCIE2REG_CONFIGDATA);
			DPRINTFN(3, ("%s: config offset 0x%04x, value 0x%04x\n",
			    DEVNAME(sc), cfg_offset[i], reg));
			bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
			    BWFM_PCI_PCIE2REG_CONFIGDATA, reg);
		}
	}

	reg = bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_PCI_PCIE2REG_MAILBOXINT);
	if (reg != 0xffffffff)
		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
		    BWFM_PCI_PCIE2REG_MAILBOXINT, reg);

	return 0;
}

void
bwfm_pci_buscore_activate(struct bwfm_softc *bwfm, const uint32_t rstvec)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh, 0, rstvec);
}

static int bwfm_pci_prio2fifo[8] = {
	1, /* best effort */
	0, /* IPTOS_PREC_IMMEDIATE */
	0, /* IPTOS_PREC_PRIORITY */
	1, /* IPTOS_PREC_FLASH */
	2, /* IPTOS_PREC_FLASHOVERRIDE */
	2, /* IPTOS_PREC_CRITIC_ECP */
	3, /* IPTOS_PREC_INTERNETCONTROL */
	3, /* IPTOS_PREC_NETCONTROL */
};

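/*
 * Find the flowring an outgoing frame maps to.  The ring is keyed by
 * WME access category (and destination MAC in AP mode) and found by
 * linear probing; returns -1 if no matching ring is open yet.
 */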
int
bwfm_pci_flowring_lookup(struct bwfm_pci_softc *sc, struct mbuf *m)
{
	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
	uint8_t *da = mtod(m, uint8_t *);
	struct ether_header *eh;
	int flowid, prio, fifo;
	int i, found, ac;

	/* No QoS for EAPOL frames. */
	eh = mtod(m, struct ether_header *);
	ac = (eh->ether_type != htons(ETHERTYPE_PAE)) ?
	    M_WME_GETAC(m) : WME_AC_BE;

	prio = ac;
	fifo = bwfm_pci_prio2fifo[prio];

	switch (ic->ic_opmode)
	{
	case IEEE80211_M_STA:
		flowid = fifo;
		break;
#ifndef IEEE80211_STA_ONLY
	case IEEE80211_M_HOSTAP:
		if (ETHER_IS_MULTICAST(da))
			da = __UNCONST(etherbroadcastaddr);
		flowid = da[5] * 2 + fifo;
		break;
#endif
	default:
		printf("%s: state not supported\n", DEVNAME(sc));
		return ENOBUFS;
	}

	found = 0;
	flowid = flowid % sc->sc_max_flowrings;
	for (i = 0; i < sc->sc_max_flowrings; i++) {
		if (ic->ic_opmode == IEEE80211_M_STA &&
		    sc->sc_flowrings[flowid].status >= RING_OPEN &&
		    sc->sc_flowrings[flowid].fifo == fifo) {
			found = 1;
			break;
		}
#ifndef IEEE80211_STA_ONLY
		if (ic->ic_opmode == IEEE80211_M_HOSTAP &&
		    sc->sc_flowrings[flowid].status >= RING_OPEN &&
		    sc->sc_flowrings[flowid].fifo == fifo &&
		    !memcmp(sc->sc_flowrings[flowid].mac, da, ETHER_ADDR_LEN)) {
			found = 1;
			break;
		}
#endif
		flowid = (flowid + 1) % sc->sc_max_flowrings;
	}

	if (found)
		return flowid;

	return -1;
}

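/*
 * Reserve a closed flowring for this frame and defer the actual ring
 * creation to the workqueue, where it is allowed to sleep for the
 * ring's DMA memory.
 */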
void
bwfm_pci_flowring_create(struct bwfm_pci_softc *sc, struct mbuf *m)
{
	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
	struct bwfm_cmd_flowring_create *cmd;
	uint8_t *da = mtod(m, uint8_t *);
	struct ether_header *eh;
	struct bwfm_pci_msgring *ring;
	int flowid, prio, fifo;
	int i, found, ac;

	cmd = pool_get(&sc->sc_flowring_pool, PR_NOWAIT);
	if (__predict_false(cmd == NULL))
		return;

	/* No QoS for EAPOL frames. */
	eh = mtod(m, struct ether_header *);
	ac = (eh->ether_type != htons(ETHERTYPE_PAE)) ?
	    M_WME_GETAC(m) : WME_AC_BE;

	prio = ac;
	fifo = bwfm_pci_prio2fifo[prio];

	switch (ic->ic_opmode)
	{
	case IEEE80211_M_STA:
		flowid = fifo;
		break;
#ifndef IEEE80211_STA_ONLY
	case IEEE80211_M_HOSTAP:
		if (ETHER_IS_MULTICAST(da))
			da = __UNCONST(etherbroadcastaddr);
		flowid = da[5] * 2 + fifo;
		break;
#endif
	default:
		printf("%s: state not supported\n", DEVNAME(sc));
		pool_put(&sc->sc_flowring_pool, cmd);
		return;
	}

	found = 0;
	flowid = flowid % sc->sc_max_flowrings;
	for (i = 0; i < sc->sc_max_flowrings; i++) {
		ring = &sc->sc_flowrings[flowid];
		if (ring->status == RING_CLOSED) {
			ring->status = RING_OPENING;
			found = 1;
			break;
		}
		flowid = (flowid + 1) % sc->sc_max_flowrings;
	}

	/*
	 * We cannot recover from this so far.  Only a stop/init
	 * cycle can revive it, if it ever happens at all.
	 */
	if (!found) {
		printf("%s: no flowring available\n", DEVNAME(sc));
		pool_put(&sc->sc_flowring_pool, cmd);
		return;
	}

	cmd->sc = sc;
	cmd->m = m;
	cmd->prio = prio;
	cmd->flowid = flowid;
	workqueue_enqueue(sc->flowring_wq, &cmd->wq_cookie, NULL);
}

void
bwfm_pci_flowring_create_cb(struct work *wk, void *arg)
{
	struct bwfm_cmd_flowring_create *cmd =
	    container_of(wk, struct bwfm_cmd_flowring_create, wq_cookie);
	struct bwfm_pci_softc *sc = cmd->sc;
1776 struct ieee80211com *ic = &sc->sc_sc.sc_ic;
1777 struct msgbuf_tx_flowring_create_req *req;
1778 struct bwfm_pci_msgring *ring;
1779 uint8_t *da, *sa;
1780
1781 da = mtod(cmd->m, char *) + 0 * ETHER_ADDR_LEN;
1782 sa = mtod(cmd->m, char *) + 1 * ETHER_ADDR_LEN;
1783
1784 ring = &sc->sc_flowrings[cmd->flowid];
1785 if (ring->status != RING_OPENING) {
1786 printf("%s: flowring not opening\n", DEVNAME(sc));
1787 return;
1788 }
1789
1790 if (bwfm_pci_setup_flowring(sc, ring, 512, 48)) {
1791 printf("%s: cannot setup flowring\n", DEVNAME(sc));
1792 return;
1793 }
1794
1795 req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
1796 if (req == NULL) {
1797 printf("%s: cannot reserve for flowring\n", DEVNAME(sc));
1798 return;
1799 }
1800
1801 ring->status = RING_OPENING;
1802 ring->fifo = bwfm_pci_prio2fifo[cmd->prio];
1803 ring->m = cmd->m;
1804 memcpy(ring->mac, da, ETHER_ADDR_LEN);
1805 #ifndef IEEE80211_STA_ONLY
1806 if (ic->ic_opmode == IEEE80211_M_HOSTAP && ETHER_IS_MULTICAST(da))
1807 memcpy(ring->mac, etherbroadcastaddr, ETHER_ADDR_LEN);
1808 #endif
1809
1810 req->msg.msgtype = MSGBUF_TYPE_FLOW_RING_CREATE;
1811 req->msg.ifidx = 0;
1812 req->msg.request_id = 0;
1813 req->tid = bwfm_pci_prio2fifo[cmd->prio];
1814 req->flow_ring_id = letoh16(cmd->flowid + 2);
1815 memcpy(req->da, da, ETHER_ADDR_LEN);
1816 memcpy(req->sa, sa, ETHER_ADDR_LEN);
1817 req->flow_ring_addr.high_addr =
1818 letoh32((uint64_t)BWFM_PCI_DMA_DVA(ring->ring) >> 32);
1819 req->flow_ring_addr.low_addr =
1820 letoh32((uint64_t)BWFM_PCI_DMA_DVA(ring->ring) & 0xffffffff);
1821 req->max_items = letoh16(512);
1822 req->len_item = letoh16(48);
1823
1824 bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);
1825 pool_put(&sc->sc_flowring_pool, cmd);
1826 }
1827
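/*
 * Ask the firmware to tear a flowring down.  The ring is only marked
 * CLOSING here; it transitions to RING_CLOSED once the delete
 * response arrives on the control complete ring, so the slot must
 * not be reused before then.  The on-wire id uses the same +2 offset
 * as flowring creation.
 */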
1828 void
1829 bwfm_pci_flowring_delete(struct bwfm_pci_softc *sc, int flowid)
1830 {
1831 struct msgbuf_tx_flowring_delete_req *req;
1832 struct bwfm_pci_msgring *ring;
1833
1834 ring = &sc->sc_flowrings[flowid];
1835 if (ring->status != RING_OPEN) {
1836 printf("%s: flowring not open\n", DEVNAME(sc));
1837 return;
1838 }
1839
1840 req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
1841 if (req == NULL) {
1842 printf("%s: cannot reserve for flowring\n", DEVNAME(sc));
1843 return;
1844 }
1845
1846 ring->status = RING_CLOSING;
1847
1848 req->msg.msgtype = MSGBUF_TYPE_FLOW_RING_DELETE;
1849 req->msg.ifidx = 0;
1850 req->msg.request_id = 0;
1851 req->flow_ring_id = letoh16(flowid + 2);
1852 req->reason = 0;
1853
1854 bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);
1855 }
1856
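/*
 * Interface stop: request deletion of every open flowring; the
 * confirmations arrive asynchronously on the control complete ring.
 */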
1857 void
1858 bwfm_pci_stop(struct bwfm_softc *bwfm)
1859 {
1860 struct bwfm_pci_softc *sc = (void *)bwfm;
1861 struct bwfm_pci_msgring *ring;
1862 int i;
1863
1864 for (i = 0; i < sc->sc_max_flowrings; i++) {
1865 ring = &sc->sc_flowrings[i];
1866 if (ring->status == RING_OPEN)
1867 bwfm_pci_flowring_delete(sc, i);
1868 }
1869 }
1870
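/*
 * Backpressure check for the network stack: returning ENOBUFS while
 * any flowring is still being created, or while no TX pktid is free,
 * makes the upper layer hold its queue until a completion restarts it.
 */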
1871 int
1872 bwfm_pci_txcheck(struct bwfm_softc *bwfm)
1873 {
1874 struct bwfm_pci_softc *sc = (void *)bwfm;
1875 struct bwfm_pci_msgring *ring;
1876 int i;
1877
1878 /* If any flowring is transitioning, we cannot send. */
1879 for (i = 0; i < sc->sc_max_flowrings; i++) {
1880 ring = &sc->sc_flowrings[i];
1881 if (ring->status == RING_OPENING)
1882 return ENOBUFS;
1883 }
1884
1885 if (bwfm_pci_pktid_avail(sc, &sc->sc_tx_pkts)) {
1886 sc->sc_tx_pkts_full = 1;
1887 return ENOBUFS;
1888 }
1889
1890 return 0;
1891 }
1892
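/*
 * Post a packet on its flowring.  The TX work item carries the 802.3
 * header inline in txhdr while the payload is handed to the device by
 * DMA address, which is why paddr is advanced past ETHER_HDR_LEN below
 * and data_len excludes the header.
 */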
1893 int
1894 bwfm_pci_txdata(struct bwfm_softc *bwfm, struct mbuf **mp)
1895 {
1896 struct bwfm_pci_softc *sc = (void *)bwfm;
1897 struct bwfm_pci_msgring *ring;
1898 struct msgbuf_tx_msghdr *tx;
1899 uint32_t pktid;
1900 paddr_t paddr;
1901 struct ether_header *eh;
1902 int flowid, ret, ac;
1903
1904 flowid = bwfm_pci_flowring_lookup(sc, *mp);
1905 if (flowid < 0) {
1906 /*
1907 * We cannot send the packet right now as there is
1908 * no flowring yet. The flowring will be created
1909 * asynchronously. While the ring is transitioning
1910 * the TX check will tell the upper layers that we
1911 * cannot send packets right now. When the flowring
1912 * is created the queue will be restarted and this
1913 * mbuf will be transmitted.
1914 */
1915 bwfm_pci_flowring_create(sc, *mp);
1916 return 0;
1917 }
1918
1919 ring = &sc->sc_flowrings[flowid];
1920 if (ring->status == RING_OPENING ||
1921 ring->status == RING_CLOSING) {
1922 printf("%s: tried to use flowring %d which is "
1923 "transitioning, status %d\n",
1924 DEVNAME(sc), flowid, ring->status);
1925 return ENOBUFS;
1926 }
1927
1928 tx = bwfm_pci_ring_write_reserve(sc, ring);
1929 if (tx == NULL)
1930 return ENOBUFS;
1931
1932 /* No QoS for EAPOL frames. */
1933 eh = mtod(*mp, struct ether_header *);
1934 ac = (eh->ether_type != htons(ETHERTYPE_PAE)) ?
1935 M_WME_GETAC(*mp) : WME_AC_BE;
1936
1937 memset(tx, 0, sizeof(*tx));
1938 tx->msg.msgtype = MSGBUF_TYPE_TX_POST;
1939 tx->msg.ifidx = 0;
1940 tx->flags = BWFM_MSGBUF_PKT_FLAGS_FRAME_802_3;
1941 tx->flags |= ac << BWFM_MSGBUF_PKT_FLAGS_PRIO_SHIFT;
1942 tx->seg_cnt = 1;
1943 memcpy(tx->txhdr, mtod(*mp, char *), ETHER_HDR_LEN);
1944
1945 ret = bwfm_pci_pktid_new(sc, &sc->sc_tx_pkts, mp, &pktid, &paddr);
1946 if (ret) {
1947 if (ret == ENOBUFS) {
1948 printf("%s: no pktid available for TX\n",
1949 DEVNAME(sc));
1950 sc->sc_tx_pkts_full = 1;
1951 }
1952 bwfm_pci_ring_write_cancel(sc, ring, 1);
1953 return ret;
1954 }
1955 paddr += ETHER_HDR_LEN;
1956
1957 tx->msg.request_id = htole32(pktid);
1958 tx->data_len = htole16((*mp)->m_len - ETHER_HDR_LEN);
1959 tx->data_buf_addr.high_addr = htole32((uint64_t)paddr >> 32);
1960 tx->data_buf_addr.low_addr = htole32((uint64_t)paddr & 0xffffffff);
1961
1962 bwfm_pci_ring_write_commit(sc, ring);
1963 return 0;
1964 }
1965
1966 #ifdef BWFM_DEBUG
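/*
 * Drain the firmware console, a ring buffer in device TCM whose base
 * address and size were read from the shared info area earlier.  Only
 * the host-side read index lives in sc_console_readidx; the firmware
 * advances the write index.
 */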
1967 void
1968 bwfm_pci_debug_console(struct bwfm_pci_softc *sc)
1969 {
1970 uint32_t newidx = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
1971 sc->sc_console_base_addr + BWFM_CONSOLE_WRITEIDX);
1972
1973 if (newidx != sc->sc_console_readidx)
1974 DPRINTFN(3, ("BWFM CONSOLE: "));
1975 while (newidx != sc->sc_console_readidx) {
1976 uint8_t ch = bus_space_read_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
1977 sc->sc_console_buf_addr + sc->sc_console_readidx);
1978 sc->sc_console_readidx++;
1979 if (sc->sc_console_readidx == sc->sc_console_buf_size)
1980 sc->sc_console_readidx = 0;
1981 if (ch == '\r')
1982 continue;
1983 DPRINTFN(3, ("%c", ch));
1984 }
1985 }
1986 #endif
1987
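/*
 * Interrupt handler: read and ack the mailbox interrupt status, mask
 * interrupts while the D2H completion rings are drained, then unmask.
 * A single doorbell bit covers all three completion rings, so all of
 * them are polled on every interrupt.
 */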
1988 int
1989 bwfm_pci_intr(void *v)
1990 {
1991 struct bwfm_pci_softc *sc = v;
1992 uint32_t status;
1993
1994 if ((status = bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
1995 BWFM_PCI_PCIE2REG_MAILBOXINT)) == 0)
1996 return 0;
1997
1998 bwfm_pci_intr_disable(sc);
1999 bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2000 BWFM_PCI_PCIE2REG_MAILBOXINT, status);
2001
2002 if (status & (BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_FN0_0 |
2003 BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_FN0_1))
2004 printf("%s: handle MB data\n", __func__);
2005
2006 if (status & BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_D2H_DB) {
2007 bwfm_pci_ring_rx(sc, &sc->sc_rx_complete);
2008 bwfm_pci_ring_rx(sc, &sc->sc_tx_complete);
2009 bwfm_pci_ring_rx(sc, &sc->sc_ctrl_complete);
2010 }
2011
2012 #ifdef BWFM_DEBUG
2013 bwfm_pci_debug_console(sc);
2014 #endif
2015
2016 bwfm_pci_intr_enable(sc);
2017 return 1;
2018 }
2019
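/* Unmask the function 0 mailbox interrupts and the D2H doorbell. */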
2020 void
2021 bwfm_pci_intr_enable(struct bwfm_pci_softc *sc)
2022 {
2023 bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2024 BWFM_PCI_PCIE2REG_MAILBOXMASK,
2025 BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_FN0_0 |
2026 BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_FN0_1 |
2027 BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_D2H_DB);
2028 }
2029
2030 void
2031 bwfm_pci_intr_disable(struct bwfm_pci_softc *sc)
2032 {
2033 bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2034 BWFM_PCI_PCIE2REG_MAILBOXMASK, 0);
2035 }
2036
2037 /* Msgbuf protocol implementation */
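/*
 * Ioctls are sent as IOCTLPTR requests on the control submit ring:
 * the command and the device address of the preallocated ioctl DMA
 * buffer are posted, then the caller sleeps until the completion path
 * stores the response pktid in sc_ioctl_resp_pktid and wakes it up.
 * The ifidx argument is currently unused; requests always target
 * interface 0.
 */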
2038 int
2039 bwfm_pci_msgbuf_query_dcmd(struct bwfm_softc *bwfm, int ifidx,
2040 int cmd, char *buf, size_t *len)
2041 {
2042 struct bwfm_pci_softc *sc = (void *)bwfm;
2043 struct msgbuf_ioctl_req_hdr *req;
2044 struct mbuf *m;
2045 size_t buflen;
2046 int s;
2047
2048 s = splnet();
2049 sc->sc_ioctl_resp_pktid = -1;
2050 req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
2051 if (req == NULL) {
2052 printf("%s: cannot reserve for write\n", DEVNAME(sc));
2053 splx(s);
2054 return 1;
2055 }
2056 req->msg.msgtype = MSGBUF_TYPE_IOCTLPTR_REQ;
2057 req->msg.ifidx = 0;
2058 req->msg.flags = 0;
2059 req->msg.request_id = htole32(MSGBUF_IOCTL_REQ_PKTID);
2060 req->cmd = htole32(cmd);
2061 req->output_buf_len = htole16(*len);
2062 req->trans_id = htole16(sc->sc_ioctl_reqid++);
2063
2064 buflen = uimin(*len, BWFM_DMA_H2D_IOCTL_BUF_LEN);
2065 req->input_buf_len = htole16(buflen);
2066 req->req_buf_addr.high_addr =
2067 htole32((uint64_t)BWFM_PCI_DMA_DVA(sc->sc_ioctl_buf) >> 32);
2068 req->req_buf_addr.low_addr =
2069 htole32((uint64_t)BWFM_PCI_DMA_DVA(sc->sc_ioctl_buf) & 0xffffffff);
2070 if (buf)
2071 memcpy(BWFM_PCI_DMA_KVA(sc->sc_ioctl_buf), buf, buflen);
2072 else
2073 memset(BWFM_PCI_DMA_KVA(sc->sc_ioctl_buf), 0, buflen);
2074
2075 bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);
2077 
/*
* Keep the spl held until we are done with the shared pktid
* state; tsleep() drops it while we sleep and restores it on
* wakeup, so the interrupt handler can still run.
*/
2078 if (tsleep(&sc->sc_ioctl_buf, PCATCH, "bwfm", hz)) {
2079 printf("%s: timeout waiting for ioctl response\n",
2080 DEVNAME(sc));
splx(s);
2081 return 1;
2082 }
2083 
2084 m = bwfm_pci_pktid_free(sc, &sc->sc_rx_pkts, sc->sc_ioctl_resp_pktid);
2085 if (m == NULL) {
splx(s);
2086 return 1;
}
2087
2088 *len = uimin(buflen, sc->sc_ioctl_resp_ret_len);
2089 if (buf)
2090 memcpy(buf, mtod(m, char *), *len);
2091 m_freem(m);
2092 splx(s);
2093
2094 return 0;
2095 }
2096
2097 int
2098 bwfm_pci_msgbuf_set_dcmd(struct bwfm_softc *bwfm, int ifidx,
2099 int cmd, char *buf, size_t len)
2100 {
2101 return bwfm_pci_msgbuf_query_dcmd(bwfm, ifidx, cmd, buf, &len);
2102 }
2103