/* $NetBSD: if_bwfm_pci.c,v 1.8 2020/05/30 13:41:58 jdolecek Exp $ */
/* $OpenBSD: if_bwfm_pci.c,v 1.18 2018/02/08 05:00:38 patrick Exp $ */
/*
 * Copyright (c) 2010-2016 Broadcom Corporation
 * Copyright (c) 2017 Patrick Wildt <patrick@blueri.se>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/device.h>
#include <sys/pool.h>
#include <sys/workqueue.h>
#include <sys/socket.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_ether.h>
#include <net/if_media.h>

#include <netinet/in.h>

#include <net80211/ieee80211_var.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/ic/bwfmvar.h>
#include <dev/ic/bwfmreg.h>
#include <dev/pci/if_bwfm_pci.h>

#define BWFM_DMA_D2H_SCRATCH_BUF_LEN	8
#define BWFM_DMA_D2H_RINGUPD_BUF_LEN	1024
#define BWFM_DMA_H2D_IOCTL_BUF_LEN	ETHER_MAX_LEN

#define BWFM_NUM_TX_MSGRINGS		2
#define BWFM_NUM_RX_MSGRINGS		3

#define BWFM_NUM_TX_PKTIDS		2048
#define BWFM_NUM_RX_PKTIDS		1024

#define BWFM_NUM_TX_DESCS		1
#define BWFM_NUM_RX_DESCS		1

#ifdef BWFM_DEBUG
#define DPRINTF(x)	do { if (bwfm_debug > 0) printf x; } while (0)
#define DPRINTFN(n, x)	do { if (bwfm_debug >= (n)) printf x; } while (0)
static int bwfm_debug = 2;
#else
#define DPRINTF(x)	do { ; } while (0)
#define DPRINTFN(n, x)	do { ; } while (0)
#endif

#define DEVNAME(sc)	device_xname((sc)->sc_sc.sc_dev)
#define letoh16		htole16
#define letoh32		htole32
#define nitems(x)	__arraycount(x)

enum ring_status {
	RING_CLOSED,
	RING_CLOSING,
	RING_OPEN,
	RING_OPENING,
};
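
/*
 * A flowring moves through these states as completion messages
 * arrive in bwfm_pci_msg_rx(): CLOSED -> OPENING (create request
 * submitted) -> OPEN (create completed) -> CLOSING (delete request
 * submitted) -> back to CLOSED (delete completed).
 */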

struct bwfm_pci_msgring {
	uint32_t		 w_idx_addr;
	uint32_t		 r_idx_addr;
	uint32_t		 w_ptr;
	uint32_t		 r_ptr;
	int			 nitem;
	int			 itemsz;
	enum ring_status	 status;
	struct bwfm_pci_dmamem	*ring;
	struct mbuf		*m;

	int			 fifo;
	uint8_t			 mac[ETHER_ADDR_LEN];
};

struct bwfm_pci_buf {
	bus_dmamap_t	 bb_map;
	struct mbuf	*bb_m;
};

struct bwfm_pci_pkts {
	struct bwfm_pci_buf	*pkts;
	uint32_t		 npkt;
	int			 last;
};

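/*
 * Minimal local stand-in for OpenBSD's if_rxring: we only track the
 * total number of buffers that may be posted and how many are
 * currently in flight; see if_rxr_get(), if_rxr_put() and
 * if_rxr_init() below.
 */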
struct if_rxring {
	u_int	rxr_total;
	u_int	rxr_inuse;
};

struct bwfm_cmd_flowring_create {
	struct work		 wq_cookie;
	struct bwfm_pci_softc	*sc;
	struct mbuf		*m;
	int			 flowid;
	int			 prio;
};

struct bwfm_pci_softc {
	struct bwfm_softc	 sc_sc;
	pci_chipset_tag_t	 sc_pc;
	pcitag_t		 sc_tag;
	pcireg_t		 sc_id;
	void			*sc_ih;
	pci_intr_handle_t	*sc_pihp;

	bus_space_tag_t		 sc_reg_iot;
	bus_space_handle_t	 sc_reg_ioh;
	bus_size_t		 sc_reg_ios;

	bus_space_tag_t		 sc_tcm_iot;
	bus_space_handle_t	 sc_tcm_ioh;
	bus_size_t		 sc_tcm_ios;

	bus_dma_tag_t		 sc_dmat;

	uint32_t		 sc_shared_address;
	uint32_t		 sc_shared_flags;
	uint8_t			 sc_shared_version;

	uint8_t			 sc_dma_idx_sz;
	struct bwfm_pci_dmamem	*sc_dma_idx_buf;
	size_t			 sc_dma_idx_bufsz;

	uint16_t		 sc_max_rxbufpost;
	uint32_t		 sc_rx_dataoffset;
	uint32_t		 sc_htod_mb_data_addr;
	uint32_t		 sc_dtoh_mb_data_addr;
	uint32_t		 sc_ring_info_addr;

	uint32_t		 sc_console_base_addr;
	uint32_t		 sc_console_buf_addr;
	uint32_t		 sc_console_buf_size;
	uint32_t		 sc_console_readidx;

	struct pool		 sc_flowring_pool;
	struct workqueue	*flowring_wq;

	uint16_t		 sc_max_flowrings;
	uint16_t		 sc_max_submissionrings;
	uint16_t		 sc_max_completionrings;

	struct bwfm_pci_msgring	 sc_ctrl_submit;
	struct bwfm_pci_msgring	 sc_rxpost_submit;
	struct bwfm_pci_msgring	 sc_ctrl_complete;
	struct bwfm_pci_msgring	 sc_tx_complete;
	struct bwfm_pci_msgring	 sc_rx_complete;
	struct bwfm_pci_msgring	*sc_flowrings;

	struct bwfm_pci_dmamem	*sc_scratch_buf;
	struct bwfm_pci_dmamem	*sc_ringupd_buf;

	struct bwfm_pci_dmamem	*sc_ioctl_buf;
	int			 sc_ioctl_reqid;
	uint32_t		 sc_ioctl_resp_pktid;
	uint32_t		 sc_ioctl_resp_ret_len;
	uint32_t		 sc_ioctl_resp_status;
	int			 sc_ioctl_poll;

	struct if_rxring	 sc_ioctl_ring;
	struct if_rxring	 sc_event_ring;
	struct if_rxring	 sc_rxbuf_ring;

	struct bwfm_pci_pkts	 sc_rx_pkts;
	struct bwfm_pci_pkts	 sc_tx_pkts;
	int			 sc_tx_pkts_full;
};

struct bwfm_pci_dmamem {
	bus_dmamap_t		 bdm_map;
	bus_dma_segment_t	 bdm_seg;
	size_t			 bdm_size;
	char			*bdm_kva;
};

#define BWFM_PCI_DMA_MAP(_bdm)	((_bdm)->bdm_map)
#define BWFM_PCI_DMA_LEN(_bdm)	((_bdm)->bdm_size)
#define BWFM_PCI_DMA_DVA(_bdm)	(uint64_t)((_bdm)->bdm_map->dm_segs[0].ds_addr)
#define BWFM_PCI_DMA_KVA(_bdm)	((_bdm)->bdm_kva)
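
/*
 * The device takes 64-bit DMA addresses as two 32-bit halves, so a
 * typical use of these macros looks like:
 *
 *	bus_space_write_4(..., ..._ADDR_LOW,
 *	    BWFM_PCI_DMA_DVA(bdm) & 0xffffffff);
 *	bus_space_write_4(..., ..._ADDR_HIGH,
 *	    BWFM_PCI_DMA_DVA(bdm) >> 32);
 */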

static u_int	if_rxr_get(struct if_rxring *rxr, unsigned int max);
static void	if_rxr_put(struct if_rxring *rxr, unsigned int n);
static void	if_rxr_init(struct if_rxring *rxr, unsigned int lwm, unsigned int hwm);

int		 bwfm_pci_match(device_t parent, cfdata_t match, void *aux);
void		 bwfm_pci_attachhook(device_t);
void		 bwfm_pci_attach(device_t, device_t, void *);
int		 bwfm_pci_detach(device_t, int);

int		 bwfm_pci_intr(void *);
void		 bwfm_pci_intr_enable(struct bwfm_pci_softc *);
void		 bwfm_pci_intr_disable(struct bwfm_pci_softc *);
int		 bwfm_pci_load_microcode(struct bwfm_pci_softc *, const u_char *,
		    size_t);
void		 bwfm_pci_select_core(struct bwfm_pci_softc *, int);

struct bwfm_pci_dmamem *
		 bwfm_pci_dmamem_alloc(struct bwfm_pci_softc *, bus_size_t,
		    bus_size_t);
void		 bwfm_pci_dmamem_free(struct bwfm_pci_softc *, struct bwfm_pci_dmamem *);
int		 bwfm_pci_pktid_avail(struct bwfm_pci_softc *,
		    struct bwfm_pci_pkts *);
int		 bwfm_pci_pktid_new(struct bwfm_pci_softc *,
		    struct bwfm_pci_pkts *, struct mbuf **,
		    uint32_t *, paddr_t *);
struct mbuf	*bwfm_pci_pktid_free(struct bwfm_pci_softc *,
		    struct bwfm_pci_pkts *, uint32_t);
void		 bwfm_pci_fill_rx_ioctl_ring(struct bwfm_pci_softc *,
		    struct if_rxring *, uint32_t);
void		 bwfm_pci_fill_rx_buf_ring(struct bwfm_pci_softc *);
void		 bwfm_pci_fill_rx_rings(struct bwfm_pci_softc *);
int		 bwfm_pci_setup_ring(struct bwfm_pci_softc *, struct bwfm_pci_msgring *,
		    int, size_t, uint32_t, uint32_t, int, uint32_t, uint32_t *);
int		 bwfm_pci_setup_flowring(struct bwfm_pci_softc *, struct bwfm_pci_msgring *,
		    int, size_t);

void		 bwfm_pci_ring_bell(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *);
void		 bwfm_pci_ring_update_rptr(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *);
void		 bwfm_pci_ring_update_wptr(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *);
void		 bwfm_pci_ring_write_rptr(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *);
void		 bwfm_pci_ring_write_wptr(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *);
void		*bwfm_pci_ring_write_reserve(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *);
void		*bwfm_pci_ring_write_reserve_multi(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *, int, int *);
void		*bwfm_pci_ring_read_avail(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *, int *);
void		 bwfm_pci_ring_read_commit(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *, int);
void		 bwfm_pci_ring_write_commit(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *);
void		 bwfm_pci_ring_write_cancel(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *, int);

void		 bwfm_pci_ring_rx(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *);
void		 bwfm_pci_msg_rx(struct bwfm_pci_softc *, void *);

uint32_t	 bwfm_pci_buscore_read(struct bwfm_softc *, uint32_t);
void		 bwfm_pci_buscore_write(struct bwfm_softc *, uint32_t,
		    uint32_t);
int		 bwfm_pci_buscore_prepare(struct bwfm_softc *);
int		 bwfm_pci_buscore_reset(struct bwfm_softc *);
void		 bwfm_pci_buscore_activate(struct bwfm_softc *, const uint32_t);

int		 bwfm_pci_flowring_lookup(struct bwfm_pci_softc *,
		    struct mbuf *);
void		 bwfm_pci_flowring_create(struct bwfm_pci_softc *,
		    struct mbuf *);
void		 bwfm_pci_flowring_create_cb(struct work *, void *);
void		 bwfm_pci_flowring_delete(struct bwfm_pci_softc *, int);

void		 bwfm_pci_stop(struct bwfm_softc *);
int		 bwfm_pci_txcheck(struct bwfm_softc *);
int		 bwfm_pci_txdata(struct bwfm_softc *, struct mbuf **);

#ifdef BWFM_DEBUG
void		 bwfm_pci_debug_console(struct bwfm_pci_softc *);
#endif

int		 bwfm_pci_msgbuf_query_dcmd(struct bwfm_softc *, int,
		    int, char *, size_t *);
int		 bwfm_pci_msgbuf_set_dcmd(struct bwfm_softc *, int,
		    int, char *, size_t);

static const struct bwfm_buscore_ops bwfm_pci_buscore_ops = {
	.bc_read = bwfm_pci_buscore_read,
	.bc_write = bwfm_pci_buscore_write,
	.bc_prepare = bwfm_pci_buscore_prepare,
	.bc_reset = bwfm_pci_buscore_reset,
	.bc_setup = NULL,
	.bc_activate = bwfm_pci_buscore_activate,
};

static const struct bwfm_bus_ops bwfm_pci_bus_ops = {
	.bs_init = NULL,
	.bs_stop = bwfm_pci_stop,
	.bs_txcheck = bwfm_pci_txcheck,
	.bs_txdata = bwfm_pci_txdata,
	.bs_txctl = NULL,
	.bs_rxctl = NULL,
};

static const struct bwfm_proto_ops bwfm_pci_msgbuf_ops = {
	.proto_query_dcmd = bwfm_pci_msgbuf_query_dcmd,
	.proto_set_dcmd = bwfm_pci_msgbuf_set_dcmd,
};

CFATTACH_DECL_NEW(bwfm_pci, sizeof(struct bwfm_pci_softc),
	bwfm_pci_match, bwfm_pci_attach, bwfm_pci_detach, NULL);

static const struct bwfm_firmware_selector bwfm_pci_fwtab[] = {
	BWFM_FW_ENTRY(BRCM_CC_43602_CHIP_ID,
		      BWFM_FWSEL_ALLREVS, "brcmfmac43602-pcie"),

	BWFM_FW_ENTRY(BRCM_CC_43465_CHIP_ID,
		      BWFM_FWSEL_REV_GE(4), "brcmfmac4366c-pcie"),

	BWFM_FW_ENTRY(BRCM_CC_4350_CHIP_ID,
		      BWFM_FWSEL_REV_LE(7), "brcmfmac4350c2-pcie"),
	BWFM_FW_ENTRY(BRCM_CC_4350_CHIP_ID,
		      BWFM_FWSEL_REV_GE(8), "brcmfmac4350-pcie"),

	BWFM_FW_ENTRY(BRCM_CC_43525_CHIP_ID,
		      BWFM_FWSEL_REV_GE(4), "brcmfmac4365c-pcie"),

	BWFM_FW_ENTRY(BRCM_CC_4356_CHIP_ID,
		      BWFM_FWSEL_ALLREVS, "brcmfmac4356-pcie"),

	BWFM_FW_ENTRY(BRCM_CC_43567_CHIP_ID,
		      BWFM_FWSEL_ALLREVS, "brcmfmac43570-pcie"),
	BWFM_FW_ENTRY(BRCM_CC_43569_CHIP_ID,
		      BWFM_FWSEL_ALLREVS, "brcmfmac43570-pcie"),
	BWFM_FW_ENTRY(BRCM_CC_43570_CHIP_ID,
		      BWFM_FWSEL_ALLREVS, "brcmfmac43570-pcie"),

	BWFM_FW_ENTRY(BRCM_CC_4358_CHIP_ID,
		      BWFM_FWSEL_ALLREVS, "brcmfmac4358-pcie"),

	BWFM_FW_ENTRY(BRCM_CC_4359_CHIP_ID,
		      BWFM_FWSEL_ALLREVS, "brcmfmac4359-pcie"),

	BWFM_FW_ENTRY(BRCM_CC_4365_CHIP_ID,
		      BWFM_FWSEL_REV_LE(3), "brcmfmac4365b-pcie"),
	BWFM_FW_ENTRY(BRCM_CC_4365_CHIP_ID,
		      BWFM_FWSEL_REV_GE(4), "brcmfmac4365c-pcie"),

	BWFM_FW_ENTRY(BRCM_CC_4366_CHIP_ID,
		      BWFM_FWSEL_REV_LE(3), "brcmfmac4366b-pcie"),
	BWFM_FW_ENTRY(BRCM_CC_4366_CHIP_ID,
		      BWFM_FWSEL_REV_GE(4), "brcmfmac4366c-pcie"),
	BWFM_FW_ENTRY(BRCM_CC_43664_CHIP_ID,
		      BWFM_FWSEL_REV_GE(4), "brcmfmac4366c-pcie"),

	BWFM_FW_ENTRY(BRCM_CC_4371_CHIP_ID,
		      BWFM_FWSEL_ALLREVS, "brcmfmac4371-pcie"),

	BWFM_FW_ENTRY_END
};

static const struct bwfm_pci_matchid {
	pci_vendor_id_t		bwfm_vendor;
	pci_product_id_t	bwfm_product;
} bwfm_pci_devices[] = {
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM43602 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4350 },
};

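/*
 * Local compatibility shim for OpenBSD's MCLGETI(): allocate a
 * packet header mbuf with external storage of the requested size.
 * The softc and ifp arguments are unused and only kept so calls
 * read like the OpenBSD original.
 */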
static struct mbuf *
MCLGETI(struct bwfm_pci_softc *sc __unused, int how,
    struct ifnet *ifp __unused, u_int size)
{
	struct mbuf *m;

	MGETHDR(m, how, MT_DATA);
	if (m == NULL)
		return NULL;

	MEXTMALLOC(m, size, how);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return NULL;
	}
	return m;
}

int
bwfm_pci_match(device_t parent, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_BROADCOM)
		return 0;

	for (size_t i = 0; i < __arraycount(bwfm_pci_devices); i++)
		if (PCI_PRODUCT(pa->pa_id) == bwfm_pci_devices[i].bwfm_product)
			return 1;

	return 0;
}

void
bwfm_pci_attach(device_t parent, device_t self, void *aux)
{
	struct bwfm_pci_softc *sc = device_private(self);
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	const char *intrstr;
	char intrbuf[PCI_INTRSTR_LEN];

	sc->sc_sc.sc_dev = self;

	if (pci_mapreg_map(pa, PCI_MAPREG_START + 0x00,
	    PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->sc_reg_iot, &sc->sc_reg_ioh,
	    NULL, &sc->sc_reg_ios)) {
		printf(": can't map bar0\n");
		return;
	}

	if (pci_mapreg_map(pa, PCI_MAPREG_START + 0x08,
	    PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->sc_tcm_iot, &sc->sc_tcm_ioh,
	    NULL, &sc->sc_tcm_ios)) {
		printf(": can't map bar1\n");
		goto bar0;
	}

	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;
	sc->sc_id = pa->pa_id;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	/* Map and establish the interrupt. */
	if (pci_intr_alloc(pa, &sc->sc_pihp, NULL, 0) != 0) {
		printf(": couldn't map interrupt\n");
		goto bar1;
	}
	intrstr = pci_intr_string(pa->pa_pc, sc->sc_pihp[0], intrbuf,
	    sizeof(intrbuf));

	sc->sc_ih = pci_intr_establish_xname(pa->pa_pc, sc->sc_pihp[0], IPL_NET,
	    bwfm_pci_intr, sc, device_xname(self));
	if (sc->sc_ih == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto bar1;
	}
	printf(": %s\n", intrstr);

	config_mountroot(self, bwfm_pci_attachhook);
	return;

bar1:
	bus_space_unmap(sc->sc_tcm_iot, sc->sc_tcm_ioh, sc->sc_tcm_ios);
bar0:
	bus_space_unmap(sc->sc_reg_iot, sc->sc_reg_ioh, sc->sc_reg_ios);
}

void
bwfm_pci_attachhook(device_t self)
{
	struct bwfm_pci_softc *sc = device_private(self);
	struct bwfm_softc *bwfm = (void *)sc;
	struct bwfm_pci_ringinfo ringinfo;
	struct bwfm_firmware_context fwctx;
	uint8_t *ucode;
	size_t ucsize;
	uint32_t d2h_w_idx_ptr, d2h_r_idx_ptr;
	uint32_t h2d_w_idx_ptr, h2d_r_idx_ptr;
	uint32_t idx_offset, reg;
	int i;

	sc->sc_sc.sc_buscore_ops = &bwfm_pci_buscore_ops;
	if (bwfm_chip_attach(&sc->sc_sc) != 0) {
		aprint_error_dev(bwfm->sc_dev, "cannot attach chip\n");
		return;
	}

	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_PCIE2);
	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_PCI_PCIE2REG_CONFIGADDR, 0x4e0);
	reg = bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_PCI_PCIE2REG_CONFIGDATA);
	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_PCI_PCIE2REG_CONFIGDATA, reg);

	bwfm_firmware_context_init(&fwctx,
	    bwfm->sc_chip.ch_chip, bwfm->sc_chip.ch_chiprev, NULL,
	    BWFM_FWREQ(BWFM_FILETYPE_UCODE));

	if (!bwfm_firmware_open(bwfm, bwfm_pci_fwtab, &fwctx)) {
		/* Error message already displayed. */
		goto err;
	}

	ucode = bwfm_firmware_data(&fwctx, BWFM_FILETYPE_UCODE, &ucsize);
	KASSERT(ucode != NULL);

	/* Retrieve RAM size from firmware. */
	if (ucsize >= BWFM_RAMSIZE + 8) {
		uint32_t *ramsize = (uint32_t *)&ucode[BWFM_RAMSIZE];
		if (letoh32(ramsize[0]) == BWFM_RAMSIZE_MAGIC)
			bwfm->sc_chip.ch_ramsize = letoh32(ramsize[1]);
	}

	if (bwfm_pci_load_microcode(sc, ucode, ucsize) != 0) {
		aprint_error_dev(bwfm->sc_dev, "could not load microcode\n");
		goto err;
	}

	bwfm_firmware_close(&fwctx);

	sc->sc_shared_flags = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_INFO);
	sc->sc_shared_version = sc->sc_shared_flags;
	if (sc->sc_shared_version > BWFM_SHARED_INFO_MAX_VERSION ||
	    sc->sc_shared_version < BWFM_SHARED_INFO_MIN_VERSION) {
		aprint_error_dev(bwfm->sc_dev,
		    "PCIe version %d unsupported\n", sc->sc_shared_version);
		return;
	}

	if (sc->sc_shared_flags & BWFM_SHARED_INFO_DMA_INDEX) {
		if (sc->sc_shared_flags & BWFM_SHARED_INFO_DMA_2B_IDX)
			sc->sc_dma_idx_sz = sizeof(uint16_t);
		else
			sc->sc_dma_idx_sz = sizeof(uint32_t);
	}

	/* Maximum RX data buffers in the ring. */
	sc->sc_max_rxbufpost = bus_space_read_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_MAX_RXBUFPOST);
	if (sc->sc_max_rxbufpost == 0)
		sc->sc_max_rxbufpost = BWFM_SHARED_MAX_RXBUFPOST_DEFAULT;

	/* Alternative offset of data in a packet */
	sc->sc_rx_dataoffset = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_RX_DATAOFFSET);

	/* For Power Management */
	sc->sc_htod_mb_data_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_HTOD_MB_DATA_ADDR);
	sc->sc_dtoh_mb_data_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DTOH_MB_DATA_ADDR);

	/* Ring information */
	sc->sc_ring_info_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_RING_INFO_ADDR);

	/* Firmware's "dmesg" */
	sc->sc_console_base_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_CONSOLE_ADDR);
	sc->sc_console_buf_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_console_base_addr + BWFM_CONSOLE_BUFADDR);
	sc->sc_console_buf_size = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_console_base_addr + BWFM_CONSOLE_BUFSIZE);

	/* Read ring information. */
	bus_space_read_region_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_ring_info_addr, (void *)&ringinfo, sizeof(ringinfo));

	if (sc->sc_shared_version >= 6) {
		sc->sc_max_submissionrings = le16toh(ringinfo.max_submissionrings);
		sc->sc_max_flowrings = le16toh(ringinfo.max_flowrings);
		sc->sc_max_completionrings = le16toh(ringinfo.max_completionrings);
	} else {
		sc->sc_max_submissionrings = le16toh(ringinfo.max_flowrings);
		sc->sc_max_flowrings = sc->sc_max_submissionrings -
		    BWFM_NUM_TX_MSGRINGS;
		sc->sc_max_completionrings = BWFM_NUM_RX_MSGRINGS;
	}

	if (sc->sc_dma_idx_sz == 0) {
		d2h_w_idx_ptr = letoh32(ringinfo.d2h_w_idx_ptr);
		d2h_r_idx_ptr = letoh32(ringinfo.d2h_r_idx_ptr);
		h2d_w_idx_ptr = letoh32(ringinfo.h2d_w_idx_ptr);
		h2d_r_idx_ptr = letoh32(ringinfo.h2d_r_idx_ptr);
		idx_offset = sizeof(uint32_t);
	} else {
		uint64_t address;

		/* Each TX/RX Ring has a Read and Write Ptr */
		sc->sc_dma_idx_bufsz = (sc->sc_max_submissionrings +
		    sc->sc_max_completionrings) * sc->sc_dma_idx_sz * 2;
		sc->sc_dma_idx_buf = bwfm_pci_dmamem_alloc(sc,
		    sc->sc_dma_idx_bufsz, 8);
		if (sc->sc_dma_idx_buf == NULL) {
			/* XXX: Fallback to TCM? */
			aprint_error_dev(bwfm->sc_dev,
			    "cannot allocate idx buf\n");
			return;
		}

		idx_offset = sc->sc_dma_idx_sz;
		h2d_w_idx_ptr = 0;
		address = BWFM_PCI_DMA_DVA(sc->sc_dma_idx_buf);
		ringinfo.h2d_w_idx_hostaddr_low =
		    htole32(address & 0xffffffff);
		ringinfo.h2d_w_idx_hostaddr_high =
		    htole32(address >> 32);

		h2d_r_idx_ptr = h2d_w_idx_ptr +
		    sc->sc_max_submissionrings * idx_offset;
		address += sc->sc_max_submissionrings * idx_offset;
		ringinfo.h2d_r_idx_hostaddr_low =
		    htole32(address & 0xffffffff);
		ringinfo.h2d_r_idx_hostaddr_high =
		    htole32(address >> 32);

		d2h_w_idx_ptr = h2d_r_idx_ptr +
		    sc->sc_max_submissionrings * idx_offset;
		address += sc->sc_max_submissionrings * idx_offset;
		ringinfo.d2h_w_idx_hostaddr_low =
		    htole32(address & 0xffffffff);
		ringinfo.d2h_w_idx_hostaddr_high =
		    htole32(address >> 32);

		d2h_r_idx_ptr = d2h_w_idx_ptr +
		    sc->sc_max_completionrings * idx_offset;
		address += sc->sc_max_completionrings * idx_offset;
		ringinfo.d2h_r_idx_hostaddr_low =
		    htole32(address & 0xffffffff);
		ringinfo.d2h_r_idx_hostaddr_high =
		    htole32(address >> 32);

		bus_space_write_region_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    sc->sc_ring_info_addr, (void *)&ringinfo, sizeof(ringinfo));
	}
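
	/*
	 * In DMA-index mode the buffer set up above holds every ring
	 * index in host memory, sc_dma_idx_sz bytes per entry:
	 *
	 *	[ h2d write indices: sc_max_submissionrings entries ]
	 *	[ h2d read  indices: sc_max_submissionrings entries ]
	 *	[ d2h write indices: sc_max_completionrings entries ]
	 *	[ d2h read  indices: sc_max_completionrings entries ]
	 *
	 * and the h2d/d2h_*_idx_ptr variables are byte offsets into it
	 * rather than TCM addresses.
	 */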

	uint32_t ring_mem_ptr = letoh32(ringinfo.ringmem);
	/* TX ctrl ring: Send ctrl buffers, send IOCTLs */
	if (bwfm_pci_setup_ring(sc, &sc->sc_ctrl_submit, 64, 40,
	    h2d_w_idx_ptr, h2d_r_idx_ptr, 0, idx_offset,
	    &ring_mem_ptr))
		goto cleanup;
	/* TX rxpost ring: Send clean data mbufs for RX */
	if (bwfm_pci_setup_ring(sc, &sc->sc_rxpost_submit, 512, 32,
	    h2d_w_idx_ptr, h2d_r_idx_ptr, 1, idx_offset,
	    &ring_mem_ptr))
		goto cleanup;
	/* RX completion rings: recv our filled buffers back */
	if (bwfm_pci_setup_ring(sc, &sc->sc_ctrl_complete, 64, 24,
	    d2h_w_idx_ptr, d2h_r_idx_ptr, 0, idx_offset,
	    &ring_mem_ptr))
		goto cleanup;
	if (bwfm_pci_setup_ring(sc, &sc->sc_tx_complete, 1024, 16,
	    d2h_w_idx_ptr, d2h_r_idx_ptr, 1, idx_offset,
	    &ring_mem_ptr))
		goto cleanup;
	if (bwfm_pci_setup_ring(sc, &sc->sc_rx_complete, 512, 32,
	    d2h_w_idx_ptr, d2h_r_idx_ptr, 2, idx_offset,
	    &ring_mem_ptr))
		goto cleanup;

	/* Dynamic TX rings for actual data */
	sc->sc_flowrings = kmem_zalloc(sc->sc_max_flowrings *
	    sizeof(struct bwfm_pci_msgring), KM_SLEEP);
	for (i = 0; i < sc->sc_max_flowrings; i++) {
		struct bwfm_pci_msgring *ring = &sc->sc_flowrings[i];
		ring->w_idx_addr = h2d_w_idx_ptr + (i + 2) * idx_offset;
		ring->r_idx_addr = h2d_r_idx_ptr + (i + 2) * idx_offset;
	}
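
	/*
	 * The "+ 2" above skips index slots 0 and 1, which belong to
	 * the control and rxpost submit rings set up earlier.
	 */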

	pool_init(&sc->sc_flowring_pool, sizeof(struct bwfm_cmd_flowring_create),
	    0, 0, 0, "bwfmpl", NULL, IPL_NET);

	/* Scratch and ring update buffers for firmware */
	if ((sc->sc_scratch_buf = bwfm_pci_dmamem_alloc(sc,
	    BWFM_DMA_D2H_SCRATCH_BUF_LEN, 8)) == NULL)
		goto cleanup;
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DMA_SCRATCH_ADDR_LOW,
	    BWFM_PCI_DMA_DVA(sc->sc_scratch_buf) & 0xffffffff);
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DMA_SCRATCH_ADDR_HIGH,
	    BWFM_PCI_DMA_DVA(sc->sc_scratch_buf) >> 32);
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DMA_SCRATCH_LEN,
	    BWFM_DMA_D2H_SCRATCH_BUF_LEN);

	if ((sc->sc_ringupd_buf = bwfm_pci_dmamem_alloc(sc,
	    BWFM_DMA_D2H_RINGUPD_BUF_LEN, 8)) == NULL)
		goto cleanup;
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DMA_RINGUPD_ADDR_LOW,
	    BWFM_PCI_DMA_DVA(sc->sc_ringupd_buf) & 0xffffffff);
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DMA_RINGUPD_ADDR_HIGH,
	    BWFM_PCI_DMA_DVA(sc->sc_ringupd_buf) >> 32);
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DMA_RINGUPD_LEN,
	    BWFM_DMA_D2H_RINGUPD_BUF_LEN);

	if ((sc->sc_ioctl_buf = bwfm_pci_dmamem_alloc(sc,
	    BWFM_DMA_H2D_IOCTL_BUF_LEN, 8)) == NULL)
		goto cleanup;

	if (workqueue_create(&sc->flowring_wq, "bwfmflow",
	    bwfm_pci_flowring_create_cb, sc, PRI_SOFTNET, IPL_NET, 0))
		goto cleanup;

	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_PCIE2);
	bwfm_pci_intr_enable(sc);

	/* Maps RX mbufs to a packet id and back. */
	sc->sc_rx_pkts.npkt = BWFM_NUM_RX_PKTIDS;
	sc->sc_rx_pkts.pkts = kmem_zalloc(BWFM_NUM_RX_PKTIDS *
	    sizeof(struct bwfm_pci_buf), KM_SLEEP);
	for (i = 0; i < BWFM_NUM_RX_PKTIDS; i++)
		bus_dmamap_create(sc->sc_dmat, MSGBUF_MAX_PKT_SIZE,
		    BWFM_NUM_RX_DESCS, MSGBUF_MAX_PKT_SIZE, 0, BUS_DMA_WAITOK,
		    &sc->sc_rx_pkts.pkts[i].bb_map);

	/* Maps TX mbufs to a packet id and back. */
	sc->sc_tx_pkts.npkt = BWFM_NUM_TX_PKTIDS;
	sc->sc_tx_pkts.pkts = kmem_zalloc(BWFM_NUM_TX_PKTIDS
	    * sizeof(struct bwfm_pci_buf), KM_SLEEP);
	for (i = 0; i < BWFM_NUM_TX_PKTIDS; i++)
		bus_dmamap_create(sc->sc_dmat, MSGBUF_MAX_PKT_SIZE,
		    BWFM_NUM_TX_DESCS, MSGBUF_MAX_PKT_SIZE, 0, BUS_DMA_WAITOK,
		    &sc->sc_tx_pkts.pkts[i].bb_map);

	/*
	 * For whatever reason, could also be a bug somewhere in this
	 * driver, the firmware needs a bunch of RX buffers otherwise
	 * it won't send any RX complete messages.  64 buffers don't
	 * suffice, but 128 buffers are enough.
	 */
	if_rxr_init(&sc->sc_rxbuf_ring, 128, sc->sc_max_rxbufpost);
	if_rxr_init(&sc->sc_ioctl_ring, 8, 8);
	if_rxr_init(&sc->sc_event_ring, 8, 8);
	bwfm_pci_fill_rx_rings(sc);

#ifdef BWFM_DEBUG
	sc->sc_console_readidx = 0;
	bwfm_pci_debug_console(sc);
#endif

	sc->sc_ioctl_poll = 1;
	sc->sc_sc.sc_bus_ops = &bwfm_pci_bus_ops;
	sc->sc_sc.sc_proto_ops = &bwfm_pci_msgbuf_ops;
	bwfm_attach(&sc->sc_sc);
	sc->sc_ioctl_poll = 0;
	return;

cleanup:
	if (sc->flowring_wq != NULL)
		workqueue_destroy(sc->flowring_wq);
	if (sc->sc_ih != NULL) {
		pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
		pci_intr_release(sc->sc_pc, sc->sc_pihp, 1);
	}
	if (sc->sc_ioctl_buf)
		bwfm_pci_dmamem_free(sc, sc->sc_ioctl_buf);
	if (sc->sc_ringupd_buf)
		bwfm_pci_dmamem_free(sc, sc->sc_ringupd_buf);
	if (sc->sc_scratch_buf)
		bwfm_pci_dmamem_free(sc, sc->sc_scratch_buf);
	if (sc->sc_rx_complete.ring)
		bwfm_pci_dmamem_free(sc, sc->sc_rx_complete.ring);
	if (sc->sc_tx_complete.ring)
		bwfm_pci_dmamem_free(sc, sc->sc_tx_complete.ring);
	if (sc->sc_ctrl_complete.ring)
		bwfm_pci_dmamem_free(sc, sc->sc_ctrl_complete.ring);
	if (sc->sc_rxpost_submit.ring)
		bwfm_pci_dmamem_free(sc, sc->sc_rxpost_submit.ring);
	if (sc->sc_ctrl_submit.ring)
		bwfm_pci_dmamem_free(sc, sc->sc_ctrl_submit.ring);
	if (sc->sc_dma_idx_buf)
		bwfm_pci_dmamem_free(sc, sc->sc_dma_idx_buf);

err:
	bwfm_firmware_close(&fwctx);
}

int
bwfm_pci_load_microcode(struct bwfm_pci_softc *sc, const u_char *ucode, size_t size)
{
	struct bwfm_softc *bwfm = (void *)sc;
	struct bwfm_core *core;
	uint32_t shared;
	int i;

	if (bwfm->sc_chip.ch_chip == BRCM_CC_43602_CHIP_ID) {
		bwfm_pci_select_core(sc, BWFM_AGENT_CORE_ARM_CR4);
		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
		    BWFM_PCI_ARMCR4REG_BANKIDX, 5);
		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
		    BWFM_PCI_ARMCR4REG_BANKPDA, 0);
		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
		    BWFM_PCI_ARMCR4REG_BANKIDX, 7);
		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
		    BWFM_PCI_ARMCR4REG_BANKPDA, 0);
	}

	for (i = 0; i < size; i++)
		bus_space_write_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    bwfm->sc_chip.ch_rambase + i, ucode[i]);

	/* Firmware replaces this with a pointer once up. */
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    bwfm->sc_chip.ch_rambase + bwfm->sc_chip.ch_ramsize - 4, 0);

	/* TODO: restore NVRAM */

	/* Load reset vector from firmware and kickstart core. */
	if (bwfm->sc_chip.ch_chip == BRCM_CC_43602_CHIP_ID) {
		core = bwfm_chip_get_core(bwfm, BWFM_AGENT_INTERNAL_MEM);
		bwfm->sc_chip.ch_core_reset(bwfm, core, 0, 0, 0);
	}
	bwfm_chip_set_active(bwfm, *(const uint32_t *)ucode);

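	/*
	 * Poll for up to two seconds (40 * 50 ms) for the firmware to
	 * replace the zero written into the last word of RAM above
	 * with the address of its shared info area.
	 */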
	for (i = 0; i < 40; i++) {
		delay(50 * 1000);
		shared = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    bwfm->sc_chip.ch_rambase + bwfm->sc_chip.ch_ramsize - 4);
		if (shared)
			break;
	}
	if (!shared) {
		printf("%s: firmware did not come up\n", DEVNAME(sc));
		return 1;
	}

	sc->sc_shared_address = shared;
	return 0;
}

int
bwfm_pci_detach(device_t self, int flags)
{
	struct bwfm_pci_softc *sc = device_private(self);

	bwfm_detach(&sc->sc_sc, flags);

	/* FIXME: free RX buffers */
	/* FIXME: free TX buffers */
	/* FIXME: free more memory */

	kmem_free(sc->sc_flowrings, sc->sc_max_flowrings
	    * sizeof(struct bwfm_pci_msgring));
	pool_destroy(&sc->sc_flowring_pool);

	workqueue_destroy(sc->flowring_wq);
	pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
	pci_intr_release(sc->sc_pc, sc->sc_pihp, 1);
	bwfm_pci_dmamem_free(sc, sc->sc_ioctl_buf);
	bwfm_pci_dmamem_free(sc, sc->sc_ringupd_buf);
	bwfm_pci_dmamem_free(sc, sc->sc_scratch_buf);
	bwfm_pci_dmamem_free(sc, sc->sc_rx_complete.ring);
	bwfm_pci_dmamem_free(sc, sc->sc_tx_complete.ring);
	bwfm_pci_dmamem_free(sc, sc->sc_ctrl_complete.ring);
	bwfm_pci_dmamem_free(sc, sc->sc_rxpost_submit.ring);
	bwfm_pci_dmamem_free(sc, sc->sc_ctrl_submit.ring);
	bwfm_pci_dmamem_free(sc, sc->sc_dma_idx_buf);
	return 0;
}

/* DMA code */
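/*
 * bwfm_pci_dmamem_alloc() below follows the canonical bus_dma(9)
 * sequence: create the map, allocate and map the memory, then load
 * the map; the error paths and bwfm_pci_dmamem_free() unwind in
 * reverse order.
 */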
struct bwfm_pci_dmamem *
bwfm_pci_dmamem_alloc(struct bwfm_pci_softc *sc, bus_size_t size, bus_size_t align)
{
	struct bwfm_pci_dmamem *bdm;
	int nsegs;

	bdm = kmem_zalloc(sizeof(*bdm), KM_SLEEP);
	bdm->bdm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &bdm->bdm_map) != 0)
		goto bdmfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, align, 0, &bdm->bdm_seg, 1,
	    &nsegs, BUS_DMA_WAITOK) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &bdm->bdm_seg, nsegs, size,
	    (void **)&bdm->bdm_kva, BUS_DMA_WAITOK | BUS_DMA_COHERENT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, bdm->bdm_map, bdm->bdm_kva, size,
	    NULL, BUS_DMA_WAITOK) != 0)
		goto unmap;

	bzero(bdm->bdm_kva, size);

	return (bdm);

unmap:
	bus_dmamem_unmap(sc->sc_dmat, bdm->bdm_kva, size);
free:
	bus_dmamem_free(sc->sc_dmat, &bdm->bdm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, bdm->bdm_map);
bdmfree:
	kmem_free(bdm, sizeof(*bdm));

	return (NULL);
}

void
bwfm_pci_dmamem_free(struct bwfm_pci_softc *sc, struct bwfm_pci_dmamem *bdm)
{
	bus_dmamem_unmap(sc->sc_dmat, bdm->bdm_kva, bdm->bdm_size);
	bus_dmamem_free(sc->sc_dmat, &bdm->bdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, bdm->bdm_map);
	kmem_free(bdm, sizeof(*bdm));
}

/*
 * We need a simple mapping from a packet ID to mbufs, because when
 * a transfer completed, we only know the ID so we have to look up
 * the memory for the ID.  This simply looks for an empty slot.
 */
int
bwfm_pci_pktid_avail(struct bwfm_pci_softc *sc, struct bwfm_pci_pkts *pkts)
{
	int i, idx;

	idx = pkts->last + 1;
	for (i = 0; i < pkts->npkt; i++) {
		if (idx == pkts->npkt)
			idx = 0;
		if (pkts->pkts[idx].bb_m == NULL)
			return 0;
		idx++;
	}
	return ENOBUFS;
}

int
bwfm_pci_pktid_new(struct bwfm_pci_softc *sc, struct bwfm_pci_pkts *pkts,
    struct mbuf **mp, uint32_t *pktid, paddr_t *paddr)
{
	int i, idx;

	idx = pkts->last + 1;
	for (i = 0; i < pkts->npkt; i++) {
		if (idx == pkts->npkt)
			idx = 0;
		if (pkts->pkts[idx].bb_m == NULL) {
			if (bus_dmamap_load_mbuf(sc->sc_dmat,
			    pkts->pkts[idx].bb_map, *mp, BUS_DMA_NOWAIT) != 0) {
				/*
				 * Didn't fit.  Maybe it has too many
				 * segments.  If it has only one
				 * segment, fail; otherwise try to
				 * compact it into a single mbuf
				 * segment.
				 */
				if ((*mp)->m_next == NULL)
					return ENOBUFS;
				struct mbuf *m0 = MCLGETI(NULL, M_DONTWAIT,
				    NULL, MSGBUF_MAX_PKT_SIZE);
				if (m0 == NULL)
					return ENOBUFS;
				m_copydata(*mp, 0, (*mp)->m_pkthdr.len,
				    mtod(m0, void *));
				m0->m_pkthdr.len = m0->m_len =
				    (*mp)->m_pkthdr.len;
				m_freem(*mp);
				*mp = m0;
				if (bus_dmamap_load_mbuf(sc->sc_dmat,
				    pkts->pkts[idx].bb_map, *mp, BUS_DMA_NOWAIT) != 0)
					return EFBIG;
			}
			bus_dmamap_sync(sc->sc_dmat, pkts->pkts[idx].bb_map,
			    0, pkts->pkts[idx].bb_map->dm_mapsize,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			pkts->last = idx;
			pkts->pkts[idx].bb_m = *mp;
			*pktid = idx;
			*paddr = pkts->pkts[idx].bb_map->dm_segs[0].ds_addr;
			return 0;
		}
		idx++;
	}
	return ENOBUFS;
}

struct mbuf *
bwfm_pci_pktid_free(struct bwfm_pci_softc *sc, struct bwfm_pci_pkts *pkts,
    uint32_t pktid)
{
	struct mbuf *m;

	if (pktid >= pkts->npkt || pkts->pkts[pktid].bb_m == NULL)
		return NULL;
	bus_dmamap_sync(sc->sc_dmat, pkts->pkts[pktid].bb_map, 0,
	    pkts->pkts[pktid].bb_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, pkts->pkts[pktid].bb_map);
	m = pkts->pkts[pktid].bb_m;
	pkts->pkts[pktid].bb_m = NULL;
	return m;
}

void
bwfm_pci_fill_rx_rings(struct bwfm_pci_softc *sc)
{
	bwfm_pci_fill_rx_buf_ring(sc);
	bwfm_pci_fill_rx_ioctl_ring(sc, &sc->sc_ioctl_ring,
	    MSGBUF_TYPE_IOCTLRESP_BUF_POST);
	bwfm_pci_fill_rx_ioctl_ring(sc, &sc->sc_event_ring,
	    MSGBUF_TYPE_EVENT_BUF_POST);
}

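/*
 * Both fill routines below follow the same pattern: reserve a slot
 * on the submit ring, allocate an mbuf, assign it a packet id, fill
 * in the request and commit it.  On any failure the reservation is
 * cancelled and the remaining slots are returned via if_rxr_put().
 */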
void
bwfm_pci_fill_rx_ioctl_ring(struct bwfm_pci_softc *sc, struct if_rxring *rxring,
    uint32_t msgtype)
{
	struct msgbuf_rx_ioctl_resp_or_event *req;
	struct mbuf *m;
	uint32_t pktid;
	paddr_t paddr;
	int s, slots;
	uint64_t devaddr;

	s = splnet();
	for (slots = if_rxr_get(rxring, 8); slots > 0; slots--) {
		if (bwfm_pci_pktid_avail(sc, &sc->sc_rx_pkts))
			break;
		req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
		if (req == NULL)
			break;
		m = MCLGETI(NULL, M_DONTWAIT, NULL, MSGBUF_MAX_PKT_SIZE);
		if (m == NULL) {
			bwfm_pci_ring_write_cancel(sc, &sc->sc_ctrl_submit, 1);
			break;
		}
		m->m_len = m->m_pkthdr.len = MSGBUF_MAX_PKT_SIZE;
		if (bwfm_pci_pktid_new(sc, &sc->sc_rx_pkts, &m, &pktid, &paddr)) {
			bwfm_pci_ring_write_cancel(sc, &sc->sc_ctrl_submit, 1);
			m_freem(m);
			break;
		}
		devaddr = paddr;
		memset(req, 0, sizeof(*req));
		req->msg.msgtype = msgtype;
		req->msg.request_id = htole32(pktid);
		req->host_buf_len = htole16(MSGBUF_MAX_PKT_SIZE);
		req->host_buf_addr.high_addr = htole32(devaddr >> 32);
		req->host_buf_addr.low_addr = htole32(devaddr & 0xffffffff);
		bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);
	}
	if_rxr_put(rxring, slots);
	splx(s);
}

void
bwfm_pci_fill_rx_buf_ring(struct bwfm_pci_softc *sc)
{
	struct msgbuf_rx_bufpost *req;
	struct mbuf *m;
	uint32_t pktid;
	paddr_t paddr;
	int s, slots;
	uint64_t devaddr;

	s = splnet();
	for (slots = if_rxr_get(&sc->sc_rxbuf_ring, sc->sc_max_rxbufpost);
	    slots > 0; slots--) {
		if (bwfm_pci_pktid_avail(sc, &sc->sc_rx_pkts))
			break;
		req = bwfm_pci_ring_write_reserve(sc, &sc->sc_rxpost_submit);
		if (req == NULL)
			break;
		m = MCLGETI(NULL, M_DONTWAIT, NULL, MSGBUF_MAX_PKT_SIZE);
		if (m == NULL) {
			bwfm_pci_ring_write_cancel(sc, &sc->sc_rxpost_submit, 1);
			break;
		}
		m->m_len = m->m_pkthdr.len = MSGBUF_MAX_PKT_SIZE;
		if (bwfm_pci_pktid_new(sc, &sc->sc_rx_pkts, &m, &pktid, &paddr)) {
			bwfm_pci_ring_write_cancel(sc, &sc->sc_rxpost_submit, 1);
			m_freem(m);
			break;
		}
		devaddr = paddr;
		memset(req, 0, sizeof(*req));
		req->msg.msgtype = MSGBUF_TYPE_RXBUF_POST;
		req->msg.request_id = htole32(pktid);
		req->data_buf_len = htole16(MSGBUF_MAX_PKT_SIZE);
		req->data_buf_addr.high_addr = htole32(devaddr >> 32);
		req->data_buf_addr.low_addr = htole32(devaddr & 0xffffffff);
		bwfm_pci_ring_write_commit(sc, &sc->sc_rxpost_submit);
	}
	if_rxr_put(&sc->sc_rxbuf_ring, slots);
	splx(s);
}

int
bwfm_pci_setup_ring(struct bwfm_pci_softc *sc, struct bwfm_pci_msgring *ring,
    int nitem, size_t itemsz, uint32_t w_idx, uint32_t r_idx,
    int idx, uint32_t idx_off, uint32_t *ring_mem)
{
	ring->w_idx_addr = w_idx + idx * idx_off;
	ring->r_idx_addr = r_idx + idx * idx_off;
	ring->nitem = nitem;
	ring->itemsz = itemsz;
	bwfm_pci_ring_write_rptr(sc, ring);
	bwfm_pci_ring_write_wptr(sc, ring);

	ring->ring = bwfm_pci_dmamem_alloc(sc, nitem * itemsz, 8);
	if (ring->ring == NULL)
		return ENOMEM;
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    *ring_mem + BWFM_RING_MEM_BASE_ADDR_LOW,
	    BWFM_PCI_DMA_DVA(ring->ring) & 0xffffffff);
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    *ring_mem + BWFM_RING_MEM_BASE_ADDR_HIGH,
	    BWFM_PCI_DMA_DVA(ring->ring) >> 32);
	bus_space_write_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    *ring_mem + BWFM_RING_MAX_ITEM, nitem);
	bus_space_write_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    *ring_mem + BWFM_RING_LEN_ITEMS, itemsz);
	*ring_mem = *ring_mem + BWFM_RING_MEM_SZ;
	return 0;
}

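/*
 * Unlike bwfm_pci_setup_ring(), a flowring's ring memory address is
 * not written into TCM here; it is carried to the firmware inside
 * the MSGBUF_TYPE_FLOW_RING_CREATE request instead (see
 * bwfm_pci_flowring_create_cb()).
 */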
int
bwfm_pci_setup_flowring(struct bwfm_pci_softc *sc, struct bwfm_pci_msgring *ring,
    int nitem, size_t itemsz)
{
	ring->w_ptr = 0;
	ring->r_ptr = 0;
	ring->nitem = nitem;
	ring->itemsz = itemsz;
	bwfm_pci_ring_write_rptr(sc, ring);
	bwfm_pci_ring_write_wptr(sc, ring);

	ring->ring = bwfm_pci_dmamem_alloc(sc, nitem * itemsz, 8);
	if (ring->ring == NULL)
		return ENOMEM;
	return 0;
}

/* Ring helpers */
void
bwfm_pci_ring_bell(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_PCI_PCIE2REG_H2D_MAILBOX, 1);
}

void
bwfm_pci_ring_update_rptr(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	if (sc->sc_dma_idx_sz == 0) {
		ring->r_ptr = bus_space_read_2(sc->sc_tcm_iot,
		    sc->sc_tcm_ioh, ring->r_idx_addr);
	} else {
		bus_dmamap_sync(sc->sc_dmat,
		    BWFM_PCI_DMA_MAP(sc->sc_dma_idx_buf), ring->r_idx_addr,
		    sizeof(uint16_t), BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		ring->r_ptr = *(uint16_t *)(BWFM_PCI_DMA_KVA(sc->sc_dma_idx_buf)
		    + ring->r_idx_addr);
	}
}

static u_int
if_rxr_get(struct if_rxring *rxr, unsigned int max)
{
	u_int taken = MIN(max, (rxr->rxr_total - rxr->rxr_inuse));

	KASSERTMSG(rxr->rxr_inuse + taken <= rxr->rxr_total,
	    "rxr->rxr_inuse: %d\n"
	    "taken: %d\n"
	    "rxr->rxr_total: %d\n",
	    rxr->rxr_inuse, taken, rxr->rxr_total);
	rxr->rxr_inuse += taken;

	return taken;
}

static void
if_rxr_put(struct if_rxring *rxr, unsigned int n)
{
	KASSERTMSG(rxr->rxr_inuse >= n,
	    "rxr->rxr_inuse: %d\n"
	    "n: %d\n"
	    "rxr->rxr_total: %d\n",
	    rxr->rxr_inuse, n, rxr->rxr_total);

	rxr->rxr_inuse -= n;
}

static void
if_rxr_init(struct if_rxring *rxr, unsigned int lwm __unused, unsigned int hwm)
{
	(void)lwm;

	rxr->rxr_total = hwm;
	rxr->rxr_inuse = 0;
}

void
bwfm_pci_ring_update_wptr(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	if (sc->sc_dma_idx_sz == 0) {
		ring->w_ptr = bus_space_read_2(sc->sc_tcm_iot,
		    sc->sc_tcm_ioh, ring->w_idx_addr);
	} else {
		/* Sync before reading, as in bwfm_pci_ring_update_rptr(). */
		bus_dmamap_sync(sc->sc_dmat,
		    BWFM_PCI_DMA_MAP(sc->sc_dma_idx_buf), ring->w_idx_addr,
		    sizeof(uint16_t), BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		ring->w_ptr = *(uint16_t *)(BWFM_PCI_DMA_KVA(sc->sc_dma_idx_buf)
		    + ring->w_idx_addr);
	}
}

void
bwfm_pci_ring_write_rptr(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	if (sc->sc_dma_idx_sz == 0) {
		bus_space_write_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    ring->r_idx_addr, ring->r_ptr);
	} else {
		*(uint16_t *)(BWFM_PCI_DMA_KVA(sc->sc_dma_idx_buf)
		    + ring->r_idx_addr) = ring->r_ptr;
		bus_dmamap_sync(sc->sc_dmat,
		    BWFM_PCI_DMA_MAP(sc->sc_dma_idx_buf), ring->r_idx_addr,
		    sizeof(uint16_t), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
}

void
bwfm_pci_ring_write_wptr(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	if (sc->sc_dma_idx_sz == 0) {
		bus_space_write_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    ring->w_idx_addr, ring->w_ptr);
	} else {
		*(uint16_t *)(BWFM_PCI_DMA_KVA(sc->sc_dma_idx_buf)
		    + ring->w_idx_addr) = ring->w_ptr;
		bus_dmamap_sync(sc->sc_dmat,
		    BWFM_PCI_DMA_MAP(sc->sc_dma_idx_buf), ring->w_idx_addr,
		    sizeof(uint16_t), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
}

/*
 * Retrieve a free descriptor to put new stuff in, but don't commit
 * to it yet so we can rollback later if any error occurs.
 */
void *
bwfm_pci_ring_write_reserve(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	int available;
	char *ret;

	bwfm_pci_ring_update_rptr(sc, ring);

	if (ring->r_ptr > ring->w_ptr)
		available = ring->r_ptr - ring->w_ptr;
	else
		available = ring->r_ptr + (ring->nitem - ring->w_ptr);

	if (available < 1)
		return NULL;

	ret = BWFM_PCI_DMA_KVA(ring->ring) + (ring->w_ptr * ring->itemsz);
	ring->w_ptr += 1;
	if (ring->w_ptr == ring->nitem)
		ring->w_ptr = 0;
	return ret;
}

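/*
 * Like bwfm_pci_ring_write_reserve(), but reserve up to "count"
 * descriptors.  Only a contiguous run is handed out (a reservation
 * never wraps past the end of the ring), and one slot is always kept
 * free so the write pointer cannot catch up with the read pointer.
 */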
void *
bwfm_pci_ring_write_reserve_multi(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring, int count, int *avail)
{
	int available;
	char *ret;

	bwfm_pci_ring_update_rptr(sc, ring);

	if (ring->r_ptr > ring->w_ptr)
		available = ring->r_ptr - ring->w_ptr;
	else
		available = ring->r_ptr + (ring->nitem - ring->w_ptr);

	if (available < 1)
		return NULL;

	ret = BWFM_PCI_DMA_KVA(ring->ring) + (ring->w_ptr * ring->itemsz);
	*avail = uimin(count, available - 1);
	if (*avail + ring->w_ptr > ring->nitem)
		*avail = ring->nitem - ring->w_ptr;
	ring->w_ptr += *avail;
	if (ring->w_ptr == ring->nitem)
		ring->w_ptr = 0;
	return ret;
}

/*
 * Read number of descriptors available (submitted by the firmware)
 * and retrieve pointer to first descriptor.
 */
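/*
 * Only the contiguous run up to the end of the ring buffer is
 * returned; bwfm_pci_ring_rx() restarts after the read pointer
 * wraps to pick up the remainder.
 */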
void *
bwfm_pci_ring_read_avail(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring, int *avail)
{
	bwfm_pci_ring_update_wptr(sc, ring);

	if (ring->w_ptr >= ring->r_ptr)
		*avail = ring->w_ptr - ring->r_ptr;
	else
		*avail = ring->nitem - ring->r_ptr;

	if (*avail == 0)
		return NULL;
	bus_dmamap_sync(sc->sc_dmat, BWFM_PCI_DMA_MAP(ring->ring),
	    ring->r_ptr * ring->itemsz, *avail * ring->itemsz,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	return BWFM_PCI_DMA_KVA(ring->ring) + (ring->r_ptr * ring->itemsz);
}

/*
 * Let firmware know we read N descriptors.
 */
void
bwfm_pci_ring_read_commit(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring, int nitem)
{
	ring->r_ptr += nitem;
	if (ring->r_ptr == ring->nitem)
		ring->r_ptr = 0;
	bwfm_pci_ring_write_rptr(sc, ring);
}

/*
 * Let firmware know that we submitted some descriptors.
 */
void
bwfm_pci_ring_write_commit(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	bus_dmamap_sync(sc->sc_dmat, BWFM_PCI_DMA_MAP(ring->ring),
	    0, BWFM_PCI_DMA_LEN(ring->ring), BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);
	bwfm_pci_ring_write_wptr(sc, ring);
	bwfm_pci_ring_bell(sc, ring);
}

/*
 * Rollback N descriptors in case we don't actually want
 * to commit to it.
 */
void
bwfm_pci_ring_write_cancel(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring, int nitem)
{
	if (ring->w_ptr == 0)
		ring->w_ptr = ring->nitem - nitem;
	else
		ring->w_ptr -= nitem;
}

/*
 * Foreach written descriptor on the ring, pass the descriptor to
 * a message handler and let the firmware know we handled it.
 */
void
bwfm_pci_ring_rx(struct bwfm_pci_softc *sc, struct bwfm_pci_msgring *ring)
{
	char *buf;
	int avail, processed;

again:
	buf = bwfm_pci_ring_read_avail(sc, ring, &avail);
	if (buf == NULL)
		return;

	processed = 0;
	while (avail) {
		bwfm_pci_msg_rx(sc, buf + sc->sc_rx_dataoffset);
		buf += ring->itemsz;
		processed++;
		if (processed == 48) {
			bwfm_pci_ring_read_commit(sc, ring, processed);
			processed = 0;
		}
		avail--;
	}
	if (processed)
		bwfm_pci_ring_read_commit(sc, ring, processed);
	if (ring->r_ptr == 0)
		goto again;
}

void
bwfm_pci_msg_rx(struct bwfm_pci_softc *sc, void *buf)
{
	struct ifnet *ifp = sc->sc_sc.sc_ic.ic_ifp;
	struct msgbuf_ioctl_resp_hdr *resp;
	struct msgbuf_tx_status *tx;
	struct msgbuf_rx_complete *rx;
	struct msgbuf_rx_event *event;
	struct msgbuf_common_hdr *msg;
	struct msgbuf_flowring_create_resp *fcr;
	struct msgbuf_flowring_delete_resp *fdr;
	struct bwfm_pci_msgring *ring;
	struct mbuf *m;
	int flowid;

	msg = (struct msgbuf_common_hdr *)buf;
	switch (msg->msgtype)
	{
	case MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT:
		fcr = (struct msgbuf_flowring_create_resp *)buf;
		flowid = letoh16(fcr->compl_hdr.flow_ring_id);
		if (flowid < 2)
			break;
		flowid -= 2;
		if (flowid >= sc->sc_max_flowrings)
			break;
		ring = &sc->sc_flowrings[flowid];
		if (ring->status != RING_OPENING)
			break;
		if (fcr->compl_hdr.status) {
			printf("%s: failed to open flowring %d\n",
			    DEVNAME(sc), flowid);
			ring->status = RING_CLOSED;
			if (ring->m) {
				m_freem(ring->m);
				ring->m = NULL;
			}
			ifp->if_flags &= ~IFF_OACTIVE;
			ifp->if_start(ifp);
			break;
		}
		ring->status = RING_OPEN;
		if (ring->m != NULL) {
			m = ring->m;
			ring->m = NULL;
			if (bwfm_pci_txdata(&sc->sc_sc, &m))
				m_freem(m);
		}
		ifp->if_flags &= ~IFF_OACTIVE;
		ifp->if_start(ifp);
		break;
	case MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT:
		fdr = (struct msgbuf_flowring_delete_resp *)buf;
		flowid = letoh16(fdr->compl_hdr.flow_ring_id);
		if (flowid < 2)
			break;
		flowid -= 2;
		if (flowid >= sc->sc_max_flowrings)
			break;
		ring = &sc->sc_flowrings[flowid];
		if (ring->status != RING_CLOSING)
			break;
		if (fdr->compl_hdr.status) {
			printf("%s: failed to delete flowring %d\n",
			    DEVNAME(sc), flowid);
			break;
		}
		bwfm_pci_dmamem_free(sc, ring->ring);
		ring->status = RING_CLOSED;
		break;
	case MSGBUF_TYPE_IOCTLPTR_REQ_ACK:
		break;
	case MSGBUF_TYPE_IOCTL_CMPLT:
		resp = (struct msgbuf_ioctl_resp_hdr *)buf;
		sc->sc_ioctl_resp_pktid = letoh32(resp->msg.request_id);
		sc->sc_ioctl_resp_ret_len = letoh16(resp->resp_len);
		sc->sc_ioctl_resp_status = letoh16(resp->compl_hdr.status);
		if_rxr_put(&sc->sc_ioctl_ring, 1);
		bwfm_pci_fill_rx_rings(sc);
		wakeup(&sc->sc_ioctl_buf);
		break;
	case MSGBUF_TYPE_WL_EVENT:
		event = (struct msgbuf_rx_event *)buf;
		m = bwfm_pci_pktid_free(sc, &sc->sc_rx_pkts,
		    letoh32(event->msg.request_id));
		if (m == NULL)
			break;
		m_adj(m, sc->sc_rx_dataoffset);
		m->m_len = m->m_pkthdr.len = letoh16(event->event_data_len);
		bwfm_rx(&sc->sc_sc, m);
		if_rxr_put(&sc->sc_event_ring, 1);
		bwfm_pci_fill_rx_rings(sc);
		break;
	case MSGBUF_TYPE_TX_STATUS:
		tx = (struct msgbuf_tx_status *)buf;
		m = bwfm_pci_pktid_free(sc, &sc->sc_tx_pkts,
		    letoh32(tx->msg.request_id));
		if (m == NULL)
			break;
		m_freem(m);
		if (sc->sc_tx_pkts_full) {
			sc->sc_tx_pkts_full = 0;
			ifp->if_flags &= ~IFF_OACTIVE;
			ifp->if_start(ifp);
		}
		break;
	case MSGBUF_TYPE_RX_CMPLT:
		rx = (struct msgbuf_rx_complete *)buf;
		m = bwfm_pci_pktid_free(sc, &sc->sc_rx_pkts,
		    letoh32(rx->msg.request_id));
		if (m == NULL)
			break;
		if (letoh16(rx->data_offset))
			m_adj(m, letoh16(rx->data_offset));
		else if (sc->sc_rx_dataoffset)
			m_adj(m, sc->sc_rx_dataoffset);
		m->m_len = m->m_pkthdr.len = letoh16(rx->data_len);
		bwfm_rx(&sc->sc_sc, m);
		if_rxr_put(&sc->sc_rxbuf_ring, 1);
		bwfm_pci_fill_rx_rings(sc);
		break;
	default:
		printf("%s: msgtype 0x%08x\n", __func__, msg->msgtype);
		break;
	}
}

/* Bus core helpers */
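/*
 * Chip registers beyond what BAR0 can hold are reached through a
 * sliding window: the BAR0 window register selects which
 * BWFM_PCI_BAR0_REG_SIZE-sized page of the chip's address space is
 * visible, and the low bits of the target address index into that
 * page.
 */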
void
bwfm_pci_select_core(struct bwfm_pci_softc *sc, int id)
{
	struct bwfm_softc *bwfm = (void *)sc;
	struct bwfm_core *core;

	core = bwfm_chip_get_core(bwfm, id);
	if (core == NULL) {
		printf("%s: could not find core to select\n", DEVNAME(sc));
		return;
	}

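	/*
	 * Write the BAR0 window register and read it back once: the
	 * write does not always take effect on the first attempt, so
	 * repeat it if the readback differs.
	 */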
	pci_conf_write(sc->sc_pc, sc->sc_tag,
	    BWFM_PCI_BAR0_WINDOW, core->co_base);
	if (pci_conf_read(sc->sc_pc, sc->sc_tag,
	    BWFM_PCI_BAR0_WINDOW) != core->co_base)
		pci_conf_write(sc->sc_pc, sc->sc_tag,
		    BWFM_PCI_BAR0_WINDOW, core->co_base);
}

uint32_t
bwfm_pci_buscore_read(struct bwfm_softc *bwfm, uint32_t reg)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	uint32_t page, offset;

	page = reg & ~(BWFM_PCI_BAR0_REG_SIZE - 1);
	offset = reg & (BWFM_PCI_BAR0_REG_SIZE - 1);
	pci_conf_write(sc->sc_pc, sc->sc_tag, BWFM_PCI_BAR0_WINDOW, page);
	return bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh, offset);
}

void
bwfm_pci_buscore_write(struct bwfm_softc *bwfm, uint32_t reg, uint32_t val)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	uint32_t page, offset;

	page = reg & ~(BWFM_PCI_BAR0_REG_SIZE - 1);
	offset = reg & (BWFM_PCI_BAR0_REG_SIZE - 1);
	pci_conf_write(sc->sc_pc, sc->sc_tag, BWFM_PCI_BAR0_WINDOW, page);
	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh, offset, val);
}

int
bwfm_pci_buscore_prepare(struct bwfm_softc *bwfm)
{
	return 0;
}

int
bwfm_pci_buscore_reset(struct bwfm_softc *bwfm)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	struct bwfm_core *core;
	uint32_t reg;
	int i;

	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_PCIE2);
	reg = pci_conf_read(sc->sc_pc, sc->sc_tag,
	    BWFM_PCI_CFGREG_LINK_STATUS_CTRL);
	pci_conf_write(sc->sc_pc, sc->sc_tag, BWFM_PCI_CFGREG_LINK_STATUS_CTRL,
	    reg & ~BWFM_PCI_CFGREG_LINK_STATUS_CTRL_ASPM_ENAB);

	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_CHIPCOMMON);
	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_CHIP_REG_WATCHDOG, 4);
	delay(100 * 1000);

	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_PCIE2);
	pci_conf_write(sc->sc_pc, sc->sc_tag,
	    BWFM_PCI_CFGREG_LINK_STATUS_CTRL, reg);

	core = bwfm_chip_get_core(bwfm, BWFM_AGENT_CORE_PCIE2);
	if (core->co_rev <= 13) {
		uint16_t cfg_offset[] = {
			BWFM_PCI_CFGREG_STATUS_CMD,
			BWFM_PCI_CFGREG_PM_CSR,
			BWFM_PCI_CFGREG_MSI_CAP,
			BWFM_PCI_CFGREG_MSI_ADDR_L,
			BWFM_PCI_CFGREG_MSI_ADDR_H,
			BWFM_PCI_CFGREG_MSI_DATA,
			BWFM_PCI_CFGREG_LINK_STATUS_CTRL2,
			BWFM_PCI_CFGREG_RBAR_CTRL,
			BWFM_PCI_CFGREG_PML1_SUB_CTRL1,
			BWFM_PCI_CFGREG_REG_BAR2_CONFIG,
			BWFM_PCI_CFGREG_REG_BAR3_CONFIG,
		};

		for (i = 0; i < nitems(cfg_offset); i++) {
			bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
			    BWFM_PCI_PCIE2REG_CONFIGADDR, cfg_offset[i]);
			reg = bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
			    BWFM_PCI_PCIE2REG_CONFIGDATA);
			DPRINTFN(3, ("%s: config offset 0x%04x, value 0x%04x\n",
			    DEVNAME(sc), cfg_offset[i], reg));
			bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
			    BWFM_PCI_PCIE2REG_CONFIGDATA, reg);
		}
	}

	reg = bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_PCI_PCIE2REG_MAILBOXINT);
	if (reg != 0xffffffff)
		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
		    BWFM_PCI_PCIE2REG_MAILBOXINT, reg);

	return 0;
}

void
bwfm_pci_buscore_activate(struct bwfm_softc *bwfm, const uint32_t rstvec)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh, 0, rstvec);
}

static int bwfm_pci_prio2fifo[8] = {
	1, /* best effort */
	0, /* IPTOS_PREC_IMMEDIATE */
	0, /* IPTOS_PREC_PRIORITY */
	1, /* IPTOS_PREC_FLASH */
	2, /* IPTOS_PREC_FLASHOVERRIDE */
	2, /* IPTOS_PREC_CRITIC_ECP */
	3, /* IPTOS_PREC_INTERNETCONTROL */
	3, /* IPTOS_PREC_NETCONTROL */
};
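
/*
 * prio is the WME access category from M_WME_GETAC() (0-3), so only
 * the first four entries are reached by the lookups below; the table
 * is nevertheless sized for the full 3-bit 802.1D priority space.
 */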

int
bwfm_pci_flowring_lookup(struct bwfm_pci_softc *sc, struct mbuf *m)
{
	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
	uint8_t *da = mtod(m, uint8_t *);
	struct ether_header *eh;
	int flowid, prio, fifo;
	int i, found, ac;

	/* No QoS for EAPOL frames. */
	eh = mtod(m, struct ether_header *);
	ac = (eh->ether_type != htons(ETHERTYPE_PAE)) ?
	    M_WME_GETAC(m) : WME_AC_BE;

	prio = ac;
	fifo = bwfm_pci_prio2fifo[prio];

	switch (ic->ic_opmode)
	{
	case IEEE80211_M_STA:
		flowid = fifo;
		break;
#ifndef IEEE80211_STA_ONLY
	case IEEE80211_M_HOSTAP:
		if (ETHER_IS_MULTICAST(da))
			da = __UNCONST(etherbroadcastaddr);
		flowid = da[5] * 2 + fifo;
		break;
#endif
	default:
		printf("%s: state not supported\n", DEVNAME(sc));
		return ENOBUFS;
	}

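	/*
	 * Linear-probe from the hashed slot for an already-open ring
	 * bound to the same FIFO (and, in AP mode, to the same
	 * destination address).
	 */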
	found = 0;
	flowid = flowid % sc->sc_max_flowrings;
	for (i = 0; i < sc->sc_max_flowrings; i++) {
		if (ic->ic_opmode == IEEE80211_M_STA &&
		    sc->sc_flowrings[flowid].status >= RING_OPEN &&
		    sc->sc_flowrings[flowid].fifo == fifo) {
			found = 1;
			break;
		}
#ifndef IEEE80211_STA_ONLY
		if (ic->ic_opmode == IEEE80211_M_HOSTAP &&
		    sc->sc_flowrings[flowid].status >= RING_OPEN &&
		    sc->sc_flowrings[flowid].fifo == fifo &&
		    !memcmp(sc->sc_flowrings[flowid].mac, da, ETHER_ADDR_LEN)) {
			found = 1;
			break;
		}
#endif
		flowid = (flowid + 1) % sc->sc_max_flowrings;
	}

	if (found)
		return flowid;

	return -1;
}

void
bwfm_pci_flowring_create(struct bwfm_pci_softc *sc, struct mbuf *m)
{
	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
	struct bwfm_cmd_flowring_create *cmd;
	uint8_t *da = mtod(m, uint8_t *);
	struct ether_header *eh;
	struct bwfm_pci_msgring *ring;
	int flowid, prio, fifo;
	int i, found, ac;

	cmd = pool_get(&sc->sc_flowring_pool, PR_NOWAIT);
	if (__predict_false(cmd == NULL))
		return;

	/* No QoS for EAPOL frames. */
	eh = mtod(m, struct ether_header *);
	ac = (eh->ether_type != htons(ETHERTYPE_PAE)) ?
	    M_WME_GETAC(m) : WME_AC_BE;

	prio = ac;
	fifo = bwfm_pci_prio2fifo[prio];

	switch (ic->ic_opmode)
	{
	case IEEE80211_M_STA:
		flowid = fifo;
		break;
#ifndef IEEE80211_STA_ONLY
	case IEEE80211_M_HOSTAP:
		if (ETHER_IS_MULTICAST(da))
			da = __UNCONST(etherbroadcastaddr);
		flowid = da[5] * 2 + fifo;
		break;
#endif
	default:
		printf("%s: state not supported\n", DEVNAME(sc));
		pool_put(&sc->sc_flowring_pool, cmd);
		return;
	}

	found = 0;
	flowid = flowid % sc->sc_max_flowrings;
	for (i = 0; i < sc->sc_max_flowrings; i++) {
		ring = &sc->sc_flowrings[flowid];
		if (ring->status == RING_CLOSED) {
			ring->status = RING_OPENING;
			found = 1;
			break;
		}
		flowid = (flowid + 1) % sc->sc_max_flowrings;
	}

	/*
	 * We cannot recover from that so far.  Only a stop/init
	 * cycle can revive this if it ever happens at all.
	 */
	if (!found) {
		printf("%s: no flowring available\n", DEVNAME(sc));
		pool_put(&sc->sc_flowring_pool, cmd);
		return;
	}

	cmd->sc = sc;
	cmd->m = m;
	cmd->prio = prio;
	cmd->flowid = flowid;
	workqueue_enqueue(sc->flowring_wq, &cmd->wq_cookie, NULL);
}

void
bwfm_pci_flowring_create_cb(struct work *wk, void *arg)
{
	struct bwfm_cmd_flowring_create *cmd = container_of(wk,
	    struct bwfm_cmd_flowring_create, wq_cookie);
	struct bwfm_pci_softc *sc = cmd->sc;
1800 struct ieee80211com *ic = &sc->sc_sc.sc_ic;
1801 struct msgbuf_tx_flowring_create_req *req;
1802 struct bwfm_pci_msgring *ring;
1803 uint8_t *da, *sa;
1804
1805 da = mtod(cmd->m, char *) + 0 * ETHER_ADDR_LEN;
1806 sa = mtod(cmd->m, char *) + 1 * ETHER_ADDR_LEN;
1807
1808 ring = &sc->sc_flowrings[cmd->flowid];
1809 if (ring->status != RING_OPENING) {
1810 printf("%s: flowring not opening\n", DEVNAME(sc));
1811 return;
1812 }
1813
	if (bwfm_pci_setup_flowring(sc, ring, 512, 48)) {
		printf("%s: cannot setup flowring\n", DEVNAME(sc));
		m_freem(cmd->m);
		pool_put(&sc->sc_flowring_pool, cmd);
		return;
	}

	req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
	if (req == NULL) {
		printf("%s: cannot reserve for flowring\n", DEVNAME(sc));
		m_freem(cmd->m);
		pool_put(&sc->sc_flowring_pool, cmd);
		return;
	}

	ring->status = RING_OPENING;
	ring->fifo = bwfm_pci_prio2fifo[cmd->prio];
	ring->m = cmd->m;
	memcpy(ring->mac, da, ETHER_ADDR_LEN);
#ifndef IEEE80211_STA_ONLY
	if (ic->ic_opmode == IEEE80211_M_HOSTAP && ETHER_IS_MULTICAST(da))
		memcpy(ring->mac, etherbroadcastaddr, ETHER_ADDR_LEN);
#endif

	req->msg.msgtype = MSGBUF_TYPE_FLOW_RING_CREATE;
	req->msg.ifidx = 0;
	req->msg.request_id = 0;
	req->tid = bwfm_pci_prio2fifo[cmd->prio];
	req->flow_ring_id = letoh16(cmd->flowid + 2);
	memcpy(req->da, da, ETHER_ADDR_LEN);
	memcpy(req->sa, sa, ETHER_ADDR_LEN);
	req->flow_ring_addr.high_addr =
	    letoh32(BWFM_PCI_DMA_DVA(ring->ring) >> 32);
	req->flow_ring_addr.low_addr =
	    letoh32(BWFM_PCI_DMA_DVA(ring->ring) & 0xffffffff);
	req->max_items = letoh16(512);
	req->len_item = letoh16(48);

	bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);
	pool_put(&sc->sc_flowring_pool, cmd);
}

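/*
 * Ask the dongle to tear down an open flowring.  The ring is marked
 * RING_CLOSING here; the transition to RING_CLOSED happens when the
 * delete response arrives on the control complete ring (not shown
 * here).
 */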
void
bwfm_pci_flowring_delete(struct bwfm_pci_softc *sc, int flowid)
{
	struct msgbuf_tx_flowring_delete_req *req;
	struct bwfm_pci_msgring *ring;

	ring = &sc->sc_flowrings[flowid];
	if (ring->status != RING_OPEN) {
		printf("%s: flowring not open\n", DEVNAME(sc));
		return;
	}

	req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
	if (req == NULL) {
		printf("%s: cannot reserve for flowring\n", DEVNAME(sc));
		return;
	}

	ring->status = RING_CLOSING;

	req->msg.msgtype = MSGBUF_TYPE_FLOW_RING_DELETE;
	req->msg.ifidx = 0;
	req->msg.request_id = 0;
	req->flow_ring_id = letoh16(flowid + 2);
	req->reason = 0;

	bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);
}

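/*
 * Stop hook: request deletion of every flowring that is currently
 * open.
 */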
void
bwfm_pci_stop(struct bwfm_softc *bwfm)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	struct bwfm_pci_msgring *ring;
	int i;

	for (i = 0; i < sc->sc_max_flowrings; i++) {
		ring = &sc->sc_flowrings[i];
		if (ring->status == RING_OPEN)
			bwfm_pci_flowring_delete(sc, i);
	}
}

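/*
 * Check whether we can transmit right now: fail if any flowring is
 * still being created, or if no packet IDs are left for TX.
 */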
int
bwfm_pci_txcheck(struct bwfm_softc *bwfm)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	struct bwfm_pci_msgring *ring;
	int i;

	/* If we are transitioning, we cannot send. */
	for (i = 0; i < sc->sc_max_flowrings; i++) {
		ring = &sc->sc_flowrings[i];
		if (ring->status == RING_OPENING)
			return ENOBUFS;
	}

	if (bwfm_pci_pktid_avail(sc, &sc->sc_tx_pkts)) {
		sc->sc_tx_pkts_full = 1;
		return ENOBUFS;
	}

	return 0;
}

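/*
 * Post an mbuf on its flowring.  The Ethernet header is copied into
 * the TX descriptor itself (tx->txhdr); only the payload behind it
 * is handed to the dongle by DMA address.
 */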
int
bwfm_pci_txdata(struct bwfm_softc *bwfm, struct mbuf **mp)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	struct bwfm_pci_msgring *ring;
	struct msgbuf_tx_msghdr *tx;
	uint32_t pktid;
	paddr_t paddr;
	uint64_t devaddr;
	struct ether_header *eh;
	int flowid, ret, ac;

	flowid = bwfm_pci_flowring_lookup(sc, *mp);
	if (flowid < 0) {
		/*
		 * We cannot send the packet right now as there is
		 * no flowring yet.  The flowring will be created
		 * asynchronously.  While the ring is transitioning
		 * the TX check will tell the upper layers that we
		 * cannot send packets right now.  When the flowring
		 * is created the queue will be restarted and this
		 * mbuf will be transmitted.
		 */
		bwfm_pci_flowring_create(sc, *mp);
		return 0;
	}

	ring = &sc->sc_flowrings[flowid];
	if (ring->status == RING_OPENING ||
	    ring->status == RING_CLOSING) {
		printf("%s: tried to use a flow that was "
		    "transitioning in status %d\n",
		    DEVNAME(sc), ring->status);
		return ENOBUFS;
	}

	tx = bwfm_pci_ring_write_reserve(sc, ring);
	if (tx == NULL)
		return ENOBUFS;

	/* No QoS for EAPOL frames. */
	eh = mtod(*mp, struct ether_header *);
	ac = (eh->ether_type != htons(ETHERTYPE_PAE)) ?
	    M_WME_GETAC(*mp) : WME_AC_BE;

	memset(tx, 0, sizeof(*tx));
	tx->msg.msgtype = MSGBUF_TYPE_TX_POST;
	tx->msg.ifidx = 0;
	tx->flags = BWFM_MSGBUF_PKT_FLAGS_FRAME_802_3;
	tx->flags |= ac << BWFM_MSGBUF_PKT_FLAGS_PRIO_SHIFT;
	tx->seg_cnt = 1;
	memcpy(tx->txhdr, mtod(*mp, char *), ETHER_HDR_LEN);

	ret = bwfm_pci_pktid_new(sc, &sc->sc_tx_pkts, mp, &pktid, &paddr);
	if (ret) {
		if (ret == ENOBUFS) {
			printf("%s: no pktid available for TX\n",
			    DEVNAME(sc));
			sc->sc_tx_pkts_full = 1;
		}
		bwfm_pci_ring_write_cancel(sc, ring, 1);
		return ret;
	}
	devaddr = paddr + ETHER_HDR_LEN;

	tx->msg.request_id = htole32(pktid);
	tx->data_len = htole16((*mp)->m_len - ETHER_HDR_LEN);
	tx->data_buf_addr.high_addr = htole32(devaddr >> 32);
	tx->data_buf_addr.low_addr = htole32(devaddr & 0xffffffff);

	bwfm_pci_ring_write_commit(sc, ring);
	return 0;
}

#ifdef BWFM_DEBUG
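/*
 * Drain the firmware's console ring buffer in TCM and echo each new
 * character via DPRINTFN, dropping carriage returns.
 */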
void
bwfm_pci_debug_console(struct bwfm_pci_softc *sc)
{
	uint32_t newidx = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_console_base_addr + BWFM_CONSOLE_WRITEIDX);

	if (newidx != sc->sc_console_readidx)
		DPRINTFN(3, ("BWFM CONSOLE: "));
	while (newidx != sc->sc_console_readidx) {
		uint8_t ch = bus_space_read_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    sc->sc_console_buf_addr + sc->sc_console_readidx);
		sc->sc_console_readidx++;
		if (sc->sc_console_readidx == sc->sc_console_buf_size)
			sc->sc_console_readidx = 0;
		if (ch == '\r')
			continue;
		DPRINTFN(3, ("%c", ch));
	}
}
#endif

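/*
 * Interrupt handler.  Interrupts are masked while the mailbox status
 * is acknowledged and the D2H completion rings are drained, then
 * re-enabled before returning.
 */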
int
bwfm_pci_intr(void *v)
{
	struct bwfm_pci_softc *sc = (void *)v;
	uint32_t status;

	if ((status = bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_PCI_PCIE2REG_MAILBOXINT)) == 0)
		return 0;

	bwfm_pci_intr_disable(sc);
	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_PCI_PCIE2REG_MAILBOXINT, status);

	if (status & (BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_FN0_0 |
	    BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_FN0_1))
		printf("%s: handle MB data\n", __func__);

	if (status & BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_D2H_DB) {
		bwfm_pci_ring_rx(sc, &sc->sc_rx_complete);
		bwfm_pci_ring_rx(sc, &sc->sc_tx_complete);
		bwfm_pci_ring_rx(sc, &sc->sc_ctrl_complete);
	}

#ifdef BWFM_DEBUG
	bwfm_pci_debug_console(sc);
#endif

	bwfm_pci_intr_enable(sc);
	return 1;
}

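/*
 * Unmask the function 0 mailbox and D2H doorbell interrupts;
 * bwfm_pci_intr_disable() masks everything again.
 */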
void
bwfm_pci_intr_enable(struct bwfm_pci_softc *sc)
{
	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_PCI_PCIE2REG_MAILBOXMASK,
	    BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_FN0_0 |
	    BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_FN0_1 |
	    BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_D2H_DB);
}

void
bwfm_pci_intr_disable(struct bwfm_pci_softc *sc)
{
	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_PCI_PCIE2REG_MAILBOXMASK, 0);
}

/* Msgbuf protocol implementation */
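/*
 * Issue an ioctl/query dcmd to the dongle over the control submit
 * ring and sleep until the response arrives on the control complete
 * ring.  The request payload travels through the shared sc_ioctl_buf
 * DMA buffer; the response is delivered in one of the posted RX
 * buffers, identified by sc_ioctl_resp_pktid.
 */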
int
bwfm_pci_msgbuf_query_dcmd(struct bwfm_softc *bwfm, int ifidx,
    int cmd, char *buf, size_t *len)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	struct msgbuf_ioctl_req_hdr *req;
	struct mbuf *m;
	size_t buflen;
	int s;

	s = splnet();
	sc->sc_ioctl_resp_pktid = -1;
	req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
	if (req == NULL) {
		printf("%s: cannot reserve for write\n", DEVNAME(sc));
		splx(s);
		return 1;
	}
	req->msg.msgtype = MSGBUF_TYPE_IOCTLPTR_REQ;
	req->msg.ifidx = 0;
	req->msg.flags = 0;
	req->msg.request_id = htole32(MSGBUF_IOCTL_REQ_PKTID);
	req->cmd = htole32(cmd);
	req->output_buf_len = htole16(*len);
	req->trans_id = htole16(sc->sc_ioctl_reqid++);

	buflen = uimin(*len, BWFM_DMA_H2D_IOCTL_BUF_LEN);
	req->input_buf_len = htole16(buflen);
	req->req_buf_addr.high_addr =
	    htole32((uint64_t)BWFM_PCI_DMA_DVA(sc->sc_ioctl_buf) >> 32);
	req->req_buf_addr.low_addr =
	    htole32((uint64_t)BWFM_PCI_DMA_DVA(sc->sc_ioctl_buf) & 0xffffffff);
	if (buf)
		memcpy(BWFM_PCI_DMA_KVA(sc->sc_ioctl_buf), buf, buflen);
	else
		memset(BWFM_PCI_DMA_KVA(sc->sc_ioctl_buf), 0, buflen);

	bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);

	/*
	 * Stay at splnet() across the tsleep() so the response interrupt
	 * cannot fire (and its wakeup be lost) before we are asleep.
	 */
	if (tsleep(&sc->sc_ioctl_buf, PCATCH, "bwfm", hz)) {
		printf("%s: timeout waiting for ioctl response\n",
		    DEVNAME(sc));
		splx(s);
		return 1;
	}

	m = bwfm_pci_pktid_free(sc, &sc->sc_rx_pkts, sc->sc_ioctl_resp_pktid);
	if (m == NULL) {
		splx(s);
		return 1;
	}

	*len = uimin(buflen, sc->sc_ioctl_resp_ret_len);
	if (buf)
		memcpy(buf, mtod(m, char *), *len);
	m_freem(m);
	splx(s);

	return 0;
}

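/*
 * Set dcmds reuse the query path: the ioctl transaction is the same
 * in both directions, the caller merely discards the returned length.
 */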
int
bwfm_pci_msgbuf_set_dcmd(struct bwfm_softc *bwfm, int ifidx,
    int cmd, char *buf, size_t len)
{
	return bwfm_pci_msgbuf_query_dcmd(bwfm, ifidx, cmd, buf, &len);
}