/*	$NetBSD: if_bwfm_pci.c,v 1.11 2021/08/26 21:33:36 andvar Exp $	*/
/*	$OpenBSD: if_bwfm_pci.c,v 1.18 2018/02/08 05:00:38 patrick Exp $	*/
/*
 * Copyright (c) 2010-2016 Broadcom Corporation
 * Copyright (c) 2017 Patrick Wildt <patrick@blueri.se>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_bwfm_pci.c,v 1.11 2021/08/26 21:33:36 andvar Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/device.h>
#include <sys/pool.h>
#include <sys/workqueue.h>
#include <sys/socket.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_ether.h>
#include <net/if_media.h>

#include <netinet/in.h>

#include <net80211/ieee80211_var.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/ic/bwfmreg.h>
#include <dev/ic/bwfmvar.h>
#include <dev/pci/if_bwfm_pci.h>

#define BWFM_DMA_D2H_SCRATCH_BUF_LEN	8
#define BWFM_DMA_D2H_RINGUPD_BUF_LEN	1024
#define BWFM_DMA_H2D_IOCTL_BUF_LEN	ETHER_MAX_LEN

#define BWFM_NUM_TX_MSGRINGS		2
#define BWFM_NUM_RX_MSGRINGS		3

#define BWFM_NUM_TX_PKTIDS		2048
#define BWFM_NUM_RX_PKTIDS		1024

#define BWFM_NUM_TX_DESCS		1
#define BWFM_NUM_RX_DESCS		1
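/*
 * Each packet DMA map is created with BWFM_NUM_TX_DESCS/BWFM_NUM_RX_DESCS
 * (i.e. one) segment (see the bus_dmamap_create() calls below), so every
 * TX/RX packet must fit in a single contiguous DMA segment of at most
 * MSGBUF_MAX_PKT_SIZE bytes; bwfm_pci_pktid_new() compacts mbuf chains
 * that do not.
 */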

#ifdef BWFM_DEBUG
#define DPRINTF(x)	do { if (bwfm_debug > 0) printf x; } while (0)
#define DPRINTFN(n, x)	do { if (bwfm_debug >= (n)) printf x; } while (0)
static int bwfm_debug = 2;
#else
#define DPRINTF(x)	do { ; } while (0)
#define DPRINTFN(n, x)	do { ; } while (0)
#endif

#define DEVNAME(sc)	device_xname((sc)->sc_sc.sc_dev)
#define letoh16		htole16
#define letoh32		htole32
#define nitems(x)	__arraycount(x)

enum ring_status {
	RING_CLOSED,
	RING_CLOSING,
	RING_OPEN,
	RING_OPENING,
};

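/*
 * Common state for the five static message rings and the dynamically
 * created flowrings.  w_ptr/r_ptr cache the ring's write/read indices;
 * w_idx_addr/r_idx_addr locate the shared copies of those indices,
 * either in device TCM or in the host-resident DMA index buffer
 * (sc_dma_idx_buf).  fifo and mac are only used by flowrings.
 */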
struct bwfm_pci_msgring {
	uint32_t		 w_idx_addr;
	uint32_t		 r_idx_addr;
	uint32_t		 w_ptr;
	uint32_t		 r_ptr;
	int			 nitem;
	int			 itemsz;
	enum ring_status	 status;
	struct bwfm_pci_dmamem	*ring;
	struct mbuf		*m;

	int			 fifo;
	uint8_t			 mac[ETHER_ADDR_LEN];
};

struct bwfm_pci_buf {
	bus_dmamap_t	 bb_map;
	struct mbuf	*bb_m;
};

struct bwfm_pci_pkts {
	struct bwfm_pci_buf	*pkts;
	uint32_t		 npkt;
	int			 last;
};

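/*
 * Minimal stand-in for OpenBSD's if_rxring(9) accounting: only the
 * total/in-use counters needed by this driver are kept.  See the
 * if_rxr_get()/if_rxr_put()/if_rxr_init() helpers further down.
 */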
struct if_rxring {
	u_int	rxr_total;
	u_int	rxr_inuse;
};

struct bwfm_cmd_flowring_create {
	struct work		 wq_cookie;
	struct bwfm_pci_softc	*sc;
	struct mbuf		*m;
	int			 flowid;
	int			 prio;
};

struct bwfm_pci_softc {
	struct bwfm_softc	 sc_sc;
	pci_chipset_tag_t	 sc_pc;
	pcitag_t		 sc_tag;
	pcireg_t		 sc_id;
	void			*sc_ih;
	pci_intr_handle_t	*sc_pihp;

	bus_space_tag_t		 sc_reg_iot;
	bus_space_handle_t	 sc_reg_ioh;
	bus_size_t		 sc_reg_ios;

	bus_space_tag_t		 sc_tcm_iot;
	bus_space_handle_t	 sc_tcm_ioh;
	bus_size_t		 sc_tcm_ios;

	bus_dma_tag_t		 sc_dmat;

	uint32_t		 sc_shared_address;
	uint32_t		 sc_shared_flags;
	uint8_t			 sc_shared_version;

	uint8_t			 sc_dma_idx_sz;
	struct bwfm_pci_dmamem	*sc_dma_idx_buf;
	size_t			 sc_dma_idx_bufsz;

	uint16_t		 sc_max_rxbufpost;
	uint32_t		 sc_rx_dataoffset;
	uint32_t		 sc_htod_mb_data_addr;
	uint32_t		 sc_dtoh_mb_data_addr;
	uint32_t		 sc_ring_info_addr;

	uint32_t		 sc_console_base_addr;
	uint32_t		 sc_console_buf_addr;
	uint32_t		 sc_console_buf_size;
	uint32_t		 sc_console_readidx;

	struct pool		 sc_flowring_pool;
	struct workqueue	*flowring_wq;

	uint16_t		 sc_max_flowrings;
	uint16_t		 sc_max_submissionrings;
	uint16_t		 sc_max_completionrings;

	struct bwfm_pci_msgring	 sc_ctrl_submit;
	struct bwfm_pci_msgring	 sc_rxpost_submit;
	struct bwfm_pci_msgring	 sc_ctrl_complete;
	struct bwfm_pci_msgring	 sc_tx_complete;
	struct bwfm_pci_msgring	 sc_rx_complete;
	struct bwfm_pci_msgring	*sc_flowrings;

	struct bwfm_pci_dmamem	*sc_scratch_buf;
	struct bwfm_pci_dmamem	*sc_ringupd_buf;

	struct bwfm_pci_dmamem	*sc_ioctl_buf;
	int			 sc_ioctl_reqid;
	uint32_t		 sc_ioctl_resp_pktid;
	uint32_t		 sc_ioctl_resp_ret_len;
	uint32_t		 sc_ioctl_resp_status;
	int			 sc_ioctl_poll;

	struct if_rxring	 sc_ioctl_ring;
	struct if_rxring	 sc_event_ring;
	struct if_rxring	 sc_rxbuf_ring;

	struct bwfm_pci_pkts	 sc_rx_pkts;
	struct bwfm_pci_pkts	 sc_tx_pkts;
	int			 sc_tx_pkts_full;
};

struct bwfm_pci_dmamem {
	bus_dmamap_t		 bdm_map;
	bus_dma_segment_t	 bdm_seg;
	size_t			 bdm_size;
	char			*bdm_kva;
};

#define BWFM_PCI_DMA_MAP(_bdm)	((_bdm)->bdm_map)
#define BWFM_PCI_DMA_LEN(_bdm)	((_bdm)->bdm_size)
#define BWFM_PCI_DMA_DVA(_bdm)	(uint64_t)((_bdm)->bdm_map->dm_segs[0].ds_addr)
#define BWFM_PCI_DMA_KVA(_bdm)	((_bdm)->bdm_kva)

static u_int	 if_rxr_get(struct if_rxring *rxr, unsigned int max);
static void	 if_rxr_put(struct if_rxring *rxr, unsigned int n);
static void	 if_rxr_init(struct if_rxring *rxr, unsigned int lwm, unsigned int hwm);

int		 bwfm_pci_match(device_t parent, cfdata_t match, void *aux);
void		 bwfm_pci_attachhook(device_t);
void		 bwfm_pci_attach(device_t, device_t, void *);
int		 bwfm_pci_detach(device_t, int);

int		 bwfm_pci_intr(void *);
void		 bwfm_pci_intr_enable(struct bwfm_pci_softc *);
void		 bwfm_pci_intr_disable(struct bwfm_pci_softc *);
int		 bwfm_pci_load_microcode(struct bwfm_pci_softc *, const u_char *,
		    size_t);
void		 bwfm_pci_select_core(struct bwfm_pci_softc *, int);

struct bwfm_pci_dmamem *
		 bwfm_pci_dmamem_alloc(struct bwfm_pci_softc *, bus_size_t,
		    bus_size_t);
void		 bwfm_pci_dmamem_free(struct bwfm_pci_softc *, struct bwfm_pci_dmamem *);
int		 bwfm_pci_pktid_avail(struct bwfm_pci_softc *,
		    struct bwfm_pci_pkts *);
int		 bwfm_pci_pktid_new(struct bwfm_pci_softc *,
		    struct bwfm_pci_pkts *, struct mbuf **,
		    uint32_t *, paddr_t *);
struct mbuf *	 bwfm_pci_pktid_free(struct bwfm_pci_softc *,
		    struct bwfm_pci_pkts *, uint32_t);
void		 bwfm_pci_fill_rx_ioctl_ring(struct bwfm_pci_softc *,
		    struct if_rxring *, uint32_t);
void		 bwfm_pci_fill_rx_buf_ring(struct bwfm_pci_softc *);
void		 bwfm_pci_fill_rx_rings(struct bwfm_pci_softc *);
int		 bwfm_pci_setup_ring(struct bwfm_pci_softc *, struct bwfm_pci_msgring *,
		    int, size_t, uint32_t, uint32_t, int, uint32_t, uint32_t *);
int		 bwfm_pci_setup_flowring(struct bwfm_pci_softc *, struct bwfm_pci_msgring *,
		    int, size_t);

void		 bwfm_pci_ring_bell(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *);
void		 bwfm_pci_ring_update_rptr(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *);
void		 bwfm_pci_ring_update_wptr(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *);
void		 bwfm_pci_ring_write_rptr(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *);
void		 bwfm_pci_ring_write_wptr(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *);
void *		 bwfm_pci_ring_write_reserve(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *);
void *		 bwfm_pci_ring_write_reserve_multi(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *, int, int *);
void *		 bwfm_pci_ring_read_avail(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *, int *);
void		 bwfm_pci_ring_read_commit(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *, int);
void		 bwfm_pci_ring_write_commit(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *);
void		 bwfm_pci_ring_write_cancel(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *, int);

void		 bwfm_pci_ring_rx(struct bwfm_pci_softc *,
		    struct bwfm_pci_msgring *);
void		 bwfm_pci_msg_rx(struct bwfm_pci_softc *, void *);

uint32_t	 bwfm_pci_buscore_read(struct bwfm_softc *, uint32_t);
void		 bwfm_pci_buscore_write(struct bwfm_softc *, uint32_t,
		    uint32_t);
int		 bwfm_pci_buscore_prepare(struct bwfm_softc *);
int		 bwfm_pci_buscore_reset(struct bwfm_softc *);
void		 bwfm_pci_buscore_activate(struct bwfm_softc *, const uint32_t);

int		 bwfm_pci_flowring_lookup(struct bwfm_pci_softc *,
		    struct mbuf *);
void		 bwfm_pci_flowring_create(struct bwfm_pci_softc *,
		    struct mbuf *);
void		 bwfm_pci_flowring_create_cb(struct work *, void *);
void		 bwfm_pci_flowring_delete(struct bwfm_pci_softc *, int);

void		 bwfm_pci_stop(struct bwfm_softc *);
int		 bwfm_pci_txcheck(struct bwfm_softc *);
int		 bwfm_pci_txdata(struct bwfm_softc *, struct mbuf **);

#ifdef BWFM_DEBUG
void		 bwfm_pci_debug_console(struct bwfm_pci_softc *);
#endif

int		 bwfm_pci_msgbuf_query_dcmd(struct bwfm_softc *, int,
		    int, char *, size_t *);
int		 bwfm_pci_msgbuf_set_dcmd(struct bwfm_softc *, int,
		    int, char *, size_t);

static const struct bwfm_buscore_ops bwfm_pci_buscore_ops = {
	.bc_read = bwfm_pci_buscore_read,
	.bc_write = bwfm_pci_buscore_write,
	.bc_prepare = bwfm_pci_buscore_prepare,
	.bc_reset = bwfm_pci_buscore_reset,
	.bc_setup = NULL,
	.bc_activate = bwfm_pci_buscore_activate,
};

static const struct bwfm_bus_ops bwfm_pci_bus_ops = {
	.bs_init = NULL,
	.bs_stop = bwfm_pci_stop,
	.bs_txcheck = bwfm_pci_txcheck,
	.bs_txdata = bwfm_pci_txdata,
	.bs_txctl = NULL,
	.bs_rxctl = NULL,
};

static const struct bwfm_proto_ops bwfm_pci_msgbuf_ops = {
	.proto_query_dcmd = bwfm_pci_msgbuf_query_dcmd,
	.proto_set_dcmd = bwfm_pci_msgbuf_set_dcmd,
};

CFATTACH_DECL_NEW(bwfm_pci, sizeof(struct bwfm_pci_softc),
    bwfm_pci_match, bwfm_pci_attach, bwfm_pci_detach, NULL);

static const struct bwfm_firmware_selector bwfm_pci_fwtab[] = {
	BWFM_FW_ENTRY(BRCM_CC_43602_CHIP_ID,
		      BWFM_FWSEL_ALLREVS, "brcmfmac43602-pcie"),

	BWFM_FW_ENTRY(BRCM_CC_43465_CHIP_ID,
		      BWFM_FWSEL_REV_GE(4), "brcmfmac4366c-pcie"),

	BWFM_FW_ENTRY(BRCM_CC_4350_CHIP_ID,
		      BWFM_FWSEL_REV_LE(7), "brcmfmac4350c2-pcie"),
	BWFM_FW_ENTRY(BRCM_CC_4350_CHIP_ID,
		      BWFM_FWSEL_REV_GE(8), "brcmfmac4350-pcie"),

	BWFM_FW_ENTRY(BRCM_CC_43525_CHIP_ID,
		      BWFM_FWSEL_REV_GE(4), "brcmfmac4365c-pcie"),

	BWFM_FW_ENTRY(BRCM_CC_4356_CHIP_ID,
		      BWFM_FWSEL_ALLREVS, "brcmfmac4356-pcie"),

	BWFM_FW_ENTRY(BRCM_CC_43567_CHIP_ID,
		      BWFM_FWSEL_ALLREVS, "brcmfmac43570-pcie"),
	BWFM_FW_ENTRY(BRCM_CC_43569_CHIP_ID,
		      BWFM_FWSEL_ALLREVS, "brcmfmac43570-pcie"),
	BWFM_FW_ENTRY(BRCM_CC_43570_CHIP_ID,
		      BWFM_FWSEL_ALLREVS, "brcmfmac43570-pcie"),

	BWFM_FW_ENTRY(BRCM_CC_4358_CHIP_ID,
		      BWFM_FWSEL_ALLREVS, "brcmfmac4358-pcie"),

	BWFM_FW_ENTRY(BRCM_CC_4359_CHIP_ID,
		      BWFM_FWSEL_ALLREVS, "brcmfmac4359-pcie"),

	BWFM_FW_ENTRY(BRCM_CC_4365_CHIP_ID,
		      BWFM_FWSEL_REV_LE(3), "brcmfmac4365b-pcie"),
	BWFM_FW_ENTRY(BRCM_CC_4365_CHIP_ID,
		      BWFM_FWSEL_REV_GE(4), "brcmfmac4365c-pcie"),

	BWFM_FW_ENTRY(BRCM_CC_4366_CHIP_ID,
		      BWFM_FWSEL_REV_LE(3), "brcmfmac4366b-pcie"),
	BWFM_FW_ENTRY(BRCM_CC_4366_CHIP_ID,
		      BWFM_FWSEL_REV_GE(4), "brcmfmac4366c-pcie"),
	BWFM_FW_ENTRY(BRCM_CC_43664_CHIP_ID,
		      BWFM_FWSEL_REV_GE(4), "brcmfmac4366c-pcie"),

	BWFM_FW_ENTRY(BRCM_CC_4371_CHIP_ID,
		      BWFM_FWSEL_ALLREVS, "brcmfmac4371-pcie"),

	BWFM_FW_ENTRY_END
};

static const struct device_compatible_entry compat_data[] = {
	{ .id = PCI_ID_CODE(PCI_VENDOR_BROADCOM,
			    PCI_PRODUCT_BROADCOM_BCM43602), },

	{ .id = PCI_ID_CODE(PCI_VENDOR_BROADCOM,
			    PCI_PRODUCT_BROADCOM_BCM4350), },

	PCI_COMPAT_EOL
};

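/*
 * Compatibility shim for OpenBSD's MCLGETI(): allocate a packet header
 * mbuf with external storage of the requested size.  The softc and ifp
 * arguments are unused and kept only to match the call sites inherited
 * from the OpenBSD driver.
 */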
static struct mbuf *
MCLGETI(struct bwfm_pci_softc *sc __unused, int how,
    struct ifnet *ifp __unused, u_int size)
{
	struct mbuf *m;

	MGETHDR(m, how, MT_DATA);
	if (m == NULL)
		return NULL;

	MEXTMALLOC(m, size, how);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return NULL;
	}
	return m;
}

int
bwfm_pci_match(device_t parent, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = aux;

	return pci_compatible_match(pa, compat_data);
}

void
bwfm_pci_attach(device_t parent, device_t self, void *aux)
{
	struct bwfm_pci_softc *sc = device_private(self);
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	const char *intrstr;
	char intrbuf[PCI_INTRSTR_LEN];

	sc->sc_sc.sc_dev = self;

	if (pci_mapreg_map(pa, PCI_MAPREG_START + 0x00,
	    PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->sc_reg_iot, &sc->sc_reg_ioh,
	    NULL, &sc->sc_reg_ios)) {
		printf(": can't map bar0\n");
		return;
	}

	if (pci_mapreg_map(pa, PCI_MAPREG_START + 0x08,
	    PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->sc_tcm_iot, &sc->sc_tcm_ioh,
	    NULL, &sc->sc_tcm_ios)) {
		printf(": can't map bar1\n");
		goto bar0;
	}

	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;
	sc->sc_id = pa->pa_id;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	/* Map and establish the interrupt. */
	if (pci_intr_alloc(pa, &sc->sc_pihp, NULL, 0) != 0) {
		printf(": couldn't map interrupt\n");
		goto bar1;
	}
	intrstr = pci_intr_string(pa->pa_pc, sc->sc_pihp[0], intrbuf,
	    sizeof(intrbuf));

	sc->sc_ih = pci_intr_establish_xname(pa->pa_pc, sc->sc_pihp[0],
	    IPL_NET, bwfm_pci_intr, sc, device_xname(self));
	if (sc->sc_ih == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto bar1;
	}
	printf(": %s\n", intrstr);

	config_mountroot(self, bwfm_pci_attachhook);
	return;

bar1:
	bus_space_unmap(sc->sc_tcm_iot, sc->sc_tcm_ioh, sc->sc_tcm_ios);
bar0:
	bus_space_unmap(sc->sc_reg_iot, sc->sc_reg_ioh, sc->sc_reg_ios);
}

void
bwfm_pci_attachhook(device_t self)
{
	struct bwfm_pci_softc *sc = device_private(self);
	struct bwfm_softc *bwfm = (void *)sc;
	struct bwfm_pci_ringinfo ringinfo;
	struct bwfm_firmware_context fwctx;
	uint8_t *ucode;
	size_t ucsize;
	uint32_t d2h_w_idx_ptr, d2h_r_idx_ptr;
	uint32_t h2d_w_idx_ptr, h2d_r_idx_ptr;
	uint32_t idx_offset, reg;
	int i;

	sc->sc_sc.sc_buscore_ops = &bwfm_pci_buscore_ops;
	if (bwfm_chip_attach(&sc->sc_sc) != 0) {
		aprint_error_dev(bwfm->sc_dev, "cannot attach chip\n");
		return;
	}

	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_PCIE2);
	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_PCI_PCIE2REG_CONFIGADDR, 0x4e0);
	reg = bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_PCI_PCIE2REG_CONFIGDATA);
	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_PCI_PCIE2REG_CONFIGDATA, reg);

	bwfm_firmware_context_init(&fwctx,
	    bwfm->sc_chip.ch_chip, bwfm->sc_chip.ch_chiprev, NULL,
	    BWFM_FWREQ(BWFM_FILETYPE_UCODE));

	if (!bwfm_firmware_open(bwfm, bwfm_pci_fwtab, &fwctx)) {
		/* Error message already displayed. */
		goto err;
	}

	ucode = bwfm_firmware_data(&fwctx, BWFM_FILETYPE_UCODE, &ucsize);
	KASSERT(ucode != NULL);

	/* Retrieve RAM size from firmware. */
	if (ucsize >= BWFM_RAMSIZE + 8) {
		uint32_t *ramsize = (uint32_t *)&ucode[BWFM_RAMSIZE];
		if (letoh32(ramsize[0]) == BWFM_RAMSIZE_MAGIC)
			bwfm->sc_chip.ch_ramsize = letoh32(ramsize[1]);
	}

	if (bwfm_pci_load_microcode(sc, ucode, ucsize) != 0) {
		aprint_error_dev(bwfm->sc_dev, "could not load microcode\n");
		goto err;
	}

	bwfm_firmware_close(&fwctx);

	sc->sc_shared_flags = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_INFO);
	sc->sc_shared_version = sc->sc_shared_flags;
	if (sc->sc_shared_version > BWFM_SHARED_INFO_MAX_VERSION ||
	    sc->sc_shared_version < BWFM_SHARED_INFO_MIN_VERSION) {
		aprint_error_dev(bwfm->sc_dev,
		    "PCIe version %d unsupported\n", sc->sc_shared_version);
		return;
	}

	if (sc->sc_shared_flags & BWFM_SHARED_INFO_DMA_INDEX) {
		if (sc->sc_shared_flags & BWFM_SHARED_INFO_DMA_2B_IDX)
			sc->sc_dma_idx_sz = sizeof(uint16_t);
		else
			sc->sc_dma_idx_sz = sizeof(uint32_t);
	}

	/* Maximum RX data buffers in the ring. */
	sc->sc_max_rxbufpost = bus_space_read_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_MAX_RXBUFPOST);
	if (sc->sc_max_rxbufpost == 0)
		sc->sc_max_rxbufpost = BWFM_SHARED_MAX_RXBUFPOST_DEFAULT;

	/* Alternative offset of data in a packet */
	sc->sc_rx_dataoffset = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_RX_DATAOFFSET);

	/* For Power Management */
	sc->sc_htod_mb_data_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_HTOD_MB_DATA_ADDR);
	sc->sc_dtoh_mb_data_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DTOH_MB_DATA_ADDR);

	/* Ring information */
	sc->sc_ring_info_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_RING_INFO_ADDR);

	/* Firmware's "dmesg" */
	sc->sc_console_base_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_CONSOLE_ADDR);
	sc->sc_console_buf_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_console_base_addr + BWFM_CONSOLE_BUFADDR);
	sc->sc_console_buf_size = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_console_base_addr + BWFM_CONSOLE_BUFSIZE);

	/* Read ring information. */
	bus_space_read_region_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_ring_info_addr, (void *)&ringinfo, sizeof(ringinfo));

	if (sc->sc_shared_version >= 6) {
		sc->sc_max_submissionrings = le16toh(ringinfo.max_submissionrings);
		sc->sc_max_flowrings = le16toh(ringinfo.max_flowrings);
		sc->sc_max_completionrings = le16toh(ringinfo.max_completionrings);
	} else {
		sc->sc_max_submissionrings = le16toh(ringinfo.max_flowrings);
		sc->sc_max_flowrings = sc->sc_max_submissionrings -
		    BWFM_NUM_TX_MSGRINGS;
		sc->sc_max_completionrings = BWFM_NUM_RX_MSGRINGS;
	}

	if (sc->sc_dma_idx_sz == 0) {
		d2h_w_idx_ptr = letoh32(ringinfo.d2h_w_idx_ptr);
		d2h_r_idx_ptr = letoh32(ringinfo.d2h_r_idx_ptr);
		h2d_w_idx_ptr = letoh32(ringinfo.h2d_w_idx_ptr);
		h2d_r_idx_ptr = letoh32(ringinfo.h2d_r_idx_ptr);
		idx_offset = sizeof(uint32_t);
	} else {
		uint64_t address;

		/* Each TX/RX Ring has a Read and Write Ptr */
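		/*
		 * The index buffer is carved into four consecutive
		 * arrays of indices, in this order: h2d write, h2d
		 * read (sc_max_submissionrings entries each), then
		 * d2h write, d2h read (sc_max_completionrings entries
		 * each).  The host addresses of the four arrays are
		 * handed to the firmware through ringinfo below.
		 */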
		sc->sc_dma_idx_bufsz = (sc->sc_max_submissionrings +
		    sc->sc_max_completionrings) * sc->sc_dma_idx_sz * 2;
		sc->sc_dma_idx_buf = bwfm_pci_dmamem_alloc(sc,
		    sc->sc_dma_idx_bufsz, 8);
		if (sc->sc_dma_idx_buf == NULL) {
			/* XXX: Fallback to TCM? */
			aprint_error_dev(bwfm->sc_dev,
			    "cannot allocate idx buf\n");
			return;
		}

		idx_offset = sc->sc_dma_idx_sz;
		h2d_w_idx_ptr = 0;
		address = BWFM_PCI_DMA_DVA(sc->sc_dma_idx_buf);
		ringinfo.h2d_w_idx_hostaddr_low =
		    htole32(address & 0xffffffff);
		ringinfo.h2d_w_idx_hostaddr_high =
		    htole32(address >> 32);

		h2d_r_idx_ptr = h2d_w_idx_ptr +
		    sc->sc_max_submissionrings * idx_offset;
		address += sc->sc_max_submissionrings * idx_offset;
		ringinfo.h2d_r_idx_hostaddr_low =
		    htole32(address & 0xffffffff);
		ringinfo.h2d_r_idx_hostaddr_high =
		    htole32(address >> 32);

		d2h_w_idx_ptr = h2d_r_idx_ptr +
		    sc->sc_max_submissionrings * idx_offset;
		address += sc->sc_max_submissionrings * idx_offset;
		ringinfo.d2h_w_idx_hostaddr_low =
		    htole32(address & 0xffffffff);
		ringinfo.d2h_w_idx_hostaddr_high =
		    htole32(address >> 32);

		d2h_r_idx_ptr = d2h_w_idx_ptr +
		    sc->sc_max_completionrings * idx_offset;
		address += sc->sc_max_completionrings * idx_offset;
		ringinfo.d2h_r_idx_hostaddr_low =
		    htole32(address & 0xffffffff);
		ringinfo.d2h_r_idx_hostaddr_high =
		    htole32(address >> 32);

		bus_space_write_region_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    sc->sc_ring_info_addr, (void *)&ringinfo, sizeof(ringinfo));
	}

	uint32_t ring_mem_ptr = letoh32(ringinfo.ringmem);
	/* TX ctrl ring: Send ctrl buffers, send IOCTLs */
	if (bwfm_pci_setup_ring(sc, &sc->sc_ctrl_submit, 64, 40,
	    h2d_w_idx_ptr, h2d_r_idx_ptr, 0, idx_offset,
	    &ring_mem_ptr))
		goto cleanup;
	/* TX rxpost ring: Send clean data mbufs for RX */
	if (bwfm_pci_setup_ring(sc, &sc->sc_rxpost_submit, 512, 32,
	    h2d_w_idx_ptr, h2d_r_idx_ptr, 1, idx_offset,
	    &ring_mem_ptr))
		goto cleanup;
	/* RX completion rings: recv our filled buffers back */
	if (bwfm_pci_setup_ring(sc, &sc->sc_ctrl_complete, 64, 24,
	    d2h_w_idx_ptr, d2h_r_idx_ptr, 0, idx_offset,
	    &ring_mem_ptr))
		goto cleanup;
	if (bwfm_pci_setup_ring(sc, &sc->sc_tx_complete, 1024, 16,
	    d2h_w_idx_ptr, d2h_r_idx_ptr, 1, idx_offset,
	    &ring_mem_ptr))
		goto cleanup;
	if (bwfm_pci_setup_ring(sc, &sc->sc_rx_complete, 512, 32,
	    d2h_w_idx_ptr, d2h_r_idx_ptr, 2, idx_offset,
	    &ring_mem_ptr))
		goto cleanup;

	/* Dynamic TX rings for actual data */
	sc->sc_flowrings = kmem_zalloc(sc->sc_max_flowrings *
	    sizeof(struct bwfm_pci_msgring), KM_SLEEP);
	for (i = 0; i < sc->sc_max_flowrings; i++) {
		struct bwfm_pci_msgring *ring = &sc->sc_flowrings[i];
		ring->w_idx_addr = h2d_w_idx_ptr + (i + 2) * idx_offset;
		ring->r_idx_addr = h2d_r_idx_ptr + (i + 2) * idx_offset;
	}

	pool_init(&sc->sc_flowring_pool, sizeof(struct bwfm_cmd_flowring_create),
	    0, 0, 0, "bwfmpl", NULL, IPL_NET);

	/* Scratch and ring update buffers for firmware */
	if ((sc->sc_scratch_buf = bwfm_pci_dmamem_alloc(sc,
	    BWFM_DMA_D2H_SCRATCH_BUF_LEN, 8)) == NULL)
		goto cleanup;
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DMA_SCRATCH_ADDR_LOW,
	    BWFM_PCI_DMA_DVA(sc->sc_scratch_buf) & 0xffffffff);
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DMA_SCRATCH_ADDR_HIGH,
	    BWFM_PCI_DMA_DVA(sc->sc_scratch_buf) >> 32);
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DMA_SCRATCH_LEN,
	    BWFM_DMA_D2H_SCRATCH_BUF_LEN);

	if ((sc->sc_ringupd_buf = bwfm_pci_dmamem_alloc(sc,
	    BWFM_DMA_D2H_RINGUPD_BUF_LEN, 8)) == NULL)
		goto cleanup;
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DMA_RINGUPD_ADDR_LOW,
	    BWFM_PCI_DMA_DVA(sc->sc_ringupd_buf) & 0xffffffff);
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DMA_RINGUPD_ADDR_HIGH,
	    BWFM_PCI_DMA_DVA(sc->sc_ringupd_buf) >> 32);
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DMA_RINGUPD_LEN,
	    BWFM_DMA_D2H_RINGUPD_BUF_LEN);

	if ((sc->sc_ioctl_buf = bwfm_pci_dmamem_alloc(sc,
	    BWFM_DMA_H2D_IOCTL_BUF_LEN, 8)) == NULL)
		goto cleanup;

	if (workqueue_create(&sc->flowring_wq, "bwfmflow",
	    bwfm_pci_flowring_create_cb, sc, PRI_SOFTNET, IPL_NET, 0))
		goto cleanup;

	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_PCIE2);
	bwfm_pci_intr_enable(sc);

	/* Maps RX mbufs to a packet id and back. */
	sc->sc_rx_pkts.npkt = BWFM_NUM_RX_PKTIDS;
	sc->sc_rx_pkts.pkts = kmem_zalloc(BWFM_NUM_RX_PKTIDS *
	    sizeof(struct bwfm_pci_buf), KM_SLEEP);
	for (i = 0; i < BWFM_NUM_RX_PKTIDS; i++)
		bus_dmamap_create(sc->sc_dmat, MSGBUF_MAX_PKT_SIZE,
		    BWFM_NUM_RX_DESCS, MSGBUF_MAX_PKT_SIZE, 0, BUS_DMA_WAITOK,
		    &sc->sc_rx_pkts.pkts[i].bb_map);

	/* Maps TX mbufs to a packet id and back. */
	sc->sc_tx_pkts.npkt = BWFM_NUM_TX_PKTIDS;
	sc->sc_tx_pkts.pkts = kmem_zalloc(BWFM_NUM_TX_PKTIDS
	    * sizeof(struct bwfm_pci_buf), KM_SLEEP);
	for (i = 0; i < BWFM_NUM_TX_PKTIDS; i++)
		bus_dmamap_create(sc->sc_dmat, MSGBUF_MAX_PKT_SIZE,
		    BWFM_NUM_TX_DESCS, MSGBUF_MAX_PKT_SIZE, 0, BUS_DMA_WAITOK,
		    &sc->sc_tx_pkts.pkts[i].bb_map);

	/*
	 * For whatever reason (it may also be a bug somewhere in this
	 * driver), the firmware needs a bunch of RX buffers, otherwise
	 * it won't send any RX complete messages.  64 buffers don't
	 * suffice, but 128 buffers are enough.
	 */
	if_rxr_init(&sc->sc_rxbuf_ring, 128, sc->sc_max_rxbufpost);
	if_rxr_init(&sc->sc_ioctl_ring, 8, 8);
	if_rxr_init(&sc->sc_event_ring, 8, 8);
	bwfm_pci_fill_rx_rings(sc);

#ifdef BWFM_DEBUG
	sc->sc_console_readidx = 0;
	bwfm_pci_debug_console(sc);
#endif

	sc->sc_ioctl_poll = 1;
	sc->sc_sc.sc_bus_ops = &bwfm_pci_bus_ops;
	sc->sc_sc.sc_proto_ops = &bwfm_pci_msgbuf_ops;
	bwfm_attach(&sc->sc_sc);
	sc->sc_ioctl_poll = 0;
	return;

cleanup:
	if (sc->flowring_wq != NULL)
		workqueue_destroy(sc->flowring_wq);
	if (sc->sc_ih != NULL) {
		pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
		pci_intr_release(sc->sc_pc, sc->sc_pihp, 1);
	}
	if (sc->sc_ioctl_buf)
		bwfm_pci_dmamem_free(sc, sc->sc_ioctl_buf);
	if (sc->sc_ringupd_buf)
		bwfm_pci_dmamem_free(sc, sc->sc_ringupd_buf);
	if (sc->sc_scratch_buf)
		bwfm_pci_dmamem_free(sc, sc->sc_scratch_buf);
	if (sc->sc_rx_complete.ring)
		bwfm_pci_dmamem_free(sc, sc->sc_rx_complete.ring);
	if (sc->sc_tx_complete.ring)
		bwfm_pci_dmamem_free(sc, sc->sc_tx_complete.ring);
	if (sc->sc_ctrl_complete.ring)
		bwfm_pci_dmamem_free(sc, sc->sc_ctrl_complete.ring);
	if (sc->sc_rxpost_submit.ring)
		bwfm_pci_dmamem_free(sc, sc->sc_rxpost_submit.ring);
	if (sc->sc_ctrl_submit.ring)
		bwfm_pci_dmamem_free(sc, sc->sc_ctrl_submit.ring);
	if (sc->sc_dma_idx_buf)
		bwfm_pci_dmamem_free(sc, sc->sc_dma_idx_buf);

err:
	bwfm_firmware_close(&fwctx);
}

int
bwfm_pci_load_microcode(struct bwfm_pci_softc *sc, const u_char *ucode,
    size_t size)
{
	struct bwfm_softc *bwfm = (void *)sc;
	struct bwfm_core *core;
	uint32_t shared;
	int i;

	if (bwfm->sc_chip.ch_chip == BRCM_CC_43602_CHIP_ID) {
		bwfm_pci_select_core(sc, BWFM_AGENT_CORE_ARM_CR4);
		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
		    BWFM_PCI_ARMCR4REG_BANKIDX, 5);
		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
		    BWFM_PCI_ARMCR4REG_BANKPDA, 0);
		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
		    BWFM_PCI_ARMCR4REG_BANKIDX, 7);
		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
		    BWFM_PCI_ARMCR4REG_BANKPDA, 0);
	}

	for (i = 0; i < size; i++)
		bus_space_write_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    bwfm->sc_chip.ch_rambase + i, ucode[i]);

	/* Firmware replaces this with a pointer once up. */
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    bwfm->sc_chip.ch_rambase + bwfm->sc_chip.ch_ramsize - 4, 0);

	/* TODO: restore NVRAM */

	/* Load reset vector from firmware and kickstart core. */
	if (bwfm->sc_chip.ch_chip == BRCM_CC_43602_CHIP_ID) {
		core = bwfm_chip_get_core(bwfm, BWFM_AGENT_INTERNAL_MEM);
		bwfm->sc_chip.ch_core_reset(bwfm, core, 0, 0, 0);
	}
	bwfm_chip_set_active(bwfm, *(const uint32_t *)ucode);

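	/*
	 * Poll for up to two seconds (40 * 50ms) until the firmware
	 * overwrites the last word of RAM with a pointer to its
	 * shared info structure.
	 */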
	for (i = 0; i < 40; i++) {
		delay(50 * 1000);
		shared = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    bwfm->sc_chip.ch_rambase + bwfm->sc_chip.ch_ramsize - 4);
		if (shared)
			break;
	}
	if (!shared) {
		printf("%s: firmware did not come up\n", DEVNAME(sc));
		return 1;
	}

	sc->sc_shared_address = shared;
	return 0;
}

int
bwfm_pci_detach(device_t self, int flags)
{
	struct bwfm_pci_softc *sc = device_private(self);

	bwfm_detach(&sc->sc_sc, flags);

	/* FIXME: free RX buffers */
	/* FIXME: free TX buffers */
	/* FIXME: free more memory */

	kmem_free(sc->sc_flowrings, sc->sc_max_flowrings
	    * sizeof(struct bwfm_pci_msgring));
	pool_destroy(&sc->sc_flowring_pool);

	workqueue_destroy(sc->flowring_wq);
	pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
	pci_intr_release(sc->sc_pc, sc->sc_pihp, 1);
	bwfm_pci_dmamem_free(sc, sc->sc_ioctl_buf);
	bwfm_pci_dmamem_free(sc, sc->sc_ringupd_buf);
	bwfm_pci_dmamem_free(sc, sc->sc_scratch_buf);
	bwfm_pci_dmamem_free(sc, sc->sc_rx_complete.ring);
	bwfm_pci_dmamem_free(sc, sc->sc_tx_complete.ring);
	bwfm_pci_dmamem_free(sc, sc->sc_ctrl_complete.ring);
	bwfm_pci_dmamem_free(sc, sc->sc_rxpost_submit.ring);
	bwfm_pci_dmamem_free(sc, sc->sc_ctrl_submit.ring);
	bwfm_pci_dmamem_free(sc, sc->sc_dma_idx_buf);
	return 0;
}

/* DMA code */
struct bwfm_pci_dmamem *
bwfm_pci_dmamem_alloc(struct bwfm_pci_softc *sc, bus_size_t size,
    bus_size_t align)
{
	struct bwfm_pci_dmamem *bdm;
	int nsegs;

	bdm = kmem_zalloc(sizeof(*bdm), KM_SLEEP);
	bdm->bdm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &bdm->bdm_map) != 0)
		goto bdmfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, align, 0, &bdm->bdm_seg, 1,
	    &nsegs, BUS_DMA_WAITOK) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &bdm->bdm_seg, nsegs, size,
	    (void **)&bdm->bdm_kva, BUS_DMA_WAITOK | BUS_DMA_COHERENT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, bdm->bdm_map, bdm->bdm_kva, size,
	    NULL, BUS_DMA_WAITOK) != 0)
		goto unmap;

	bzero(bdm->bdm_kva, size);

	return (bdm);

unmap:
	bus_dmamem_unmap(sc->sc_dmat, bdm->bdm_kva, size);
free:
	bus_dmamem_free(sc->sc_dmat, &bdm->bdm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, bdm->bdm_map);
bdmfree:
	kmem_free(bdm, sizeof(*bdm));

	return (NULL);
}

void
bwfm_pci_dmamem_free(struct bwfm_pci_softc *sc, struct bwfm_pci_dmamem *bdm)
{
	bus_dmamem_unmap(sc->sc_dmat, bdm->bdm_kva, bdm->bdm_size);
	bus_dmamem_free(sc->sc_dmat, &bdm->bdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, bdm->bdm_map);
	kmem_free(bdm, sizeof(*bdm));
}

/*
 * We need a simple mapping from a packet ID to mbufs, because when
 * a transfer completes, we only know the ID, so we have to look up
 * the memory for the ID.  This simply looks for an empty slot.
 */
int
bwfm_pci_pktid_avail(struct bwfm_pci_softc *sc, struct bwfm_pci_pkts *pkts)
{
	int i, idx;

	idx = pkts->last + 1;
	for (i = 0; i < pkts->npkt; i++) {
		if (idx == pkts->npkt)
			idx = 0;
		if (pkts->pkts[idx].bb_m == NULL)
			return 0;
		idx++;
	}
	return ENOBUFS;
}

int
bwfm_pci_pktid_new(struct bwfm_pci_softc *sc, struct bwfm_pci_pkts *pkts,
    struct mbuf **mp, uint32_t *pktid, paddr_t *paddr)
{
	int i, idx;

	idx = pkts->last + 1;
	for (i = 0; i < pkts->npkt; i++) {
		if (idx == pkts->npkt)
			idx = 0;
		if (pkts->pkts[idx].bb_m == NULL) {
			if (bus_dmamap_load_mbuf(sc->sc_dmat,
			    pkts->pkts[idx].bb_map, *mp, BUS_DMA_NOWAIT) != 0) {
				/*
				 * Didn't fit.  Maybe it has too many
				 * segments.  If it has only one
				 * segment, fail; otherwise try to
				 * compact it into a single mbuf
				 * segment.
				 */
				if ((*mp)->m_next == NULL)
					return ENOBUFS;
				struct mbuf *m0 = MCLGETI(NULL, M_DONTWAIT,
				    NULL, MSGBUF_MAX_PKT_SIZE);
				if (m0 == NULL)
					return ENOBUFS;
				m_copydata(*mp, 0, (*mp)->m_pkthdr.len,
				    mtod(m0, void *));
				m0->m_pkthdr.len = m0->m_len =
				    (*mp)->m_pkthdr.len;
				m_freem(*mp);
				*mp = m0;
				if (bus_dmamap_load_mbuf(sc->sc_dmat,
				    pkts->pkts[idx].bb_map, *mp,
				    BUS_DMA_NOWAIT) != 0)
					return EFBIG;
			}
			bus_dmamap_sync(sc->sc_dmat, pkts->pkts[idx].bb_map,
			    0, pkts->pkts[idx].bb_map->dm_mapsize,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			pkts->last = idx;
			pkts->pkts[idx].bb_m = *mp;
			*pktid = idx;
			*paddr = pkts->pkts[idx].bb_map->dm_segs[0].ds_addr;
			return 0;
		}
		idx++;
	}
	return ENOBUFS;
}

struct mbuf *
bwfm_pci_pktid_free(struct bwfm_pci_softc *sc, struct bwfm_pci_pkts *pkts,
    uint32_t pktid)
{
	struct mbuf *m;

	if (pktid >= pkts->npkt || pkts->pkts[pktid].bb_m == NULL)
		return NULL;
	bus_dmamap_sync(sc->sc_dmat, pkts->pkts[pktid].bb_map, 0,
	    pkts->pkts[pktid].bb_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, pkts->pkts[pktid].bb_map);
	m = pkts->pkts[pktid].bb_m;
	pkts->pkts[pktid].bb_m = NULL;
	return m;
}

void
bwfm_pci_fill_rx_rings(struct bwfm_pci_softc *sc)
{
	bwfm_pci_fill_rx_buf_ring(sc);
	bwfm_pci_fill_rx_ioctl_ring(sc, &sc->sc_ioctl_ring,
	    MSGBUF_TYPE_IOCTLRESP_BUF_POST);
	bwfm_pci_fill_rx_ioctl_ring(sc, &sc->sc_event_ring,
	    MSGBUF_TYPE_EVENT_BUF_POST);
}

void
bwfm_pci_fill_rx_ioctl_ring(struct bwfm_pci_softc *sc, struct if_rxring *rxring,
    uint32_t msgtype)
{
	struct msgbuf_rx_ioctl_resp_or_event *req;
	struct mbuf *m;
	uint32_t pktid;
	paddr_t paddr;
	int s, slots;
	uint64_t devaddr;

	s = splnet();
	for (slots = if_rxr_get(rxring, 8); slots > 0; slots--) {
		if (bwfm_pci_pktid_avail(sc, &sc->sc_rx_pkts))
			break;
		req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
		if (req == NULL)
			break;
		m = MCLGETI(NULL, M_DONTWAIT, NULL, MSGBUF_MAX_PKT_SIZE);
		if (m == NULL) {
			bwfm_pci_ring_write_cancel(sc, &sc->sc_ctrl_submit, 1);
			break;
		}
		m->m_len = m->m_pkthdr.len = MSGBUF_MAX_PKT_SIZE;
		if (bwfm_pci_pktid_new(sc, &sc->sc_rx_pkts, &m, &pktid, &paddr)) {
			bwfm_pci_ring_write_cancel(sc, &sc->sc_ctrl_submit, 1);
			m_freem(m);
			break;
		}
		devaddr = paddr;
		memset(req, 0, sizeof(*req));
		req->msg.msgtype = msgtype;
		req->msg.request_id = htole32(pktid);
		req->host_buf_len = htole16(MSGBUF_MAX_PKT_SIZE);
		req->host_buf_addr.high_addr = htole32(devaddr >> 32);
		req->host_buf_addr.low_addr = htole32(devaddr & 0xffffffff);
		bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);
	}
	if_rxr_put(rxring, slots);
	splx(s);
}

void
bwfm_pci_fill_rx_buf_ring(struct bwfm_pci_softc *sc)
{
	struct msgbuf_rx_bufpost *req;
	struct mbuf *m;
	uint32_t pktid;
	paddr_t paddr;
	int s, slots;
	uint64_t devaddr;

	s = splnet();
	for (slots = if_rxr_get(&sc->sc_rxbuf_ring, sc->sc_max_rxbufpost);
	    slots > 0; slots--) {
		if (bwfm_pci_pktid_avail(sc, &sc->sc_rx_pkts))
			break;
		req = bwfm_pci_ring_write_reserve(sc, &sc->sc_rxpost_submit);
		if (req == NULL)
			break;
		m = MCLGETI(NULL, M_DONTWAIT, NULL, MSGBUF_MAX_PKT_SIZE);
		if (m == NULL) {
			bwfm_pci_ring_write_cancel(sc, &sc->sc_rxpost_submit, 1);
			break;
		}
		m->m_len = m->m_pkthdr.len = MSGBUF_MAX_PKT_SIZE;
		if (bwfm_pci_pktid_new(sc, &sc->sc_rx_pkts, &m, &pktid, &paddr)) {
			bwfm_pci_ring_write_cancel(sc, &sc->sc_rxpost_submit, 1);
			m_freem(m);
			break;
		}
		devaddr = paddr;
		memset(req, 0, sizeof(*req));
		req->msg.msgtype = MSGBUF_TYPE_RXBUF_POST;
		req->msg.request_id = htole32(pktid);
		req->data_buf_len = htole16(MSGBUF_MAX_PKT_SIZE);
		req->data_buf_addr.high_addr = htole32(devaddr >> 32);
		req->data_buf_addr.low_addr = htole32(devaddr & 0xffffffff);
		bwfm_pci_ring_write_commit(sc, &sc->sc_rxpost_submit);
	}
	if_rxr_put(&sc->sc_rxbuf_ring, slots);
	splx(s);
}

int
bwfm_pci_setup_ring(struct bwfm_pci_softc *sc, struct bwfm_pci_msgring *ring,
    int nitem, size_t itemsz, uint32_t w_idx, uint32_t r_idx,
    int idx, uint32_t idx_off, uint32_t *ring_mem)
{
	ring->w_idx_addr = w_idx + idx * idx_off;
	ring->r_idx_addr = r_idx + idx * idx_off;
	ring->nitem = nitem;
	ring->itemsz = itemsz;
	bwfm_pci_ring_write_rptr(sc, ring);
	bwfm_pci_ring_write_wptr(sc, ring);

	ring->ring = bwfm_pci_dmamem_alloc(sc, nitem * itemsz, 8);
	if (ring->ring == NULL)
		return ENOMEM;
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    *ring_mem + BWFM_RING_MEM_BASE_ADDR_LOW,
	    BWFM_PCI_DMA_DVA(ring->ring) & 0xffffffff);
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    *ring_mem + BWFM_RING_MEM_BASE_ADDR_HIGH,
	    BWFM_PCI_DMA_DVA(ring->ring) >> 32);
	bus_space_write_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    *ring_mem + BWFM_RING_MAX_ITEM, nitem);
	bus_space_write_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    *ring_mem + BWFM_RING_LEN_ITEMS, itemsz);
	*ring_mem = *ring_mem + BWFM_RING_MEM_SZ;
	return 0;
}

int
bwfm_pci_setup_flowring(struct bwfm_pci_softc *sc, struct bwfm_pci_msgring *ring,
    int nitem, size_t itemsz)
{
	ring->w_ptr = 0;
	ring->r_ptr = 0;
	ring->nitem = nitem;
	ring->itemsz = itemsz;
	bwfm_pci_ring_write_rptr(sc, ring);
	bwfm_pci_ring_write_wptr(sc, ring);

	ring->ring = bwfm_pci_dmamem_alloc(sc, nitem * itemsz, 8);
	if (ring->ring == NULL)
		return ENOMEM;
	return 0;
}

/* Ring helpers */
void
bwfm_pci_ring_bell(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_PCI_PCIE2REG_H2D_MAILBOX, 1);
}

void
bwfm_pci_ring_update_rptr(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	if (sc->sc_dma_idx_sz == 0) {
		ring->r_ptr = bus_space_read_2(sc->sc_tcm_iot,
		    sc->sc_tcm_ioh, ring->r_idx_addr);
	} else {
		bus_dmamap_sync(sc->sc_dmat,
		    BWFM_PCI_DMA_MAP(sc->sc_dma_idx_buf), ring->r_idx_addr,
		    sizeof(uint16_t), BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		ring->r_ptr = *(uint16_t *)(BWFM_PCI_DMA_KVA(sc->sc_dma_idx_buf)
		    + ring->r_idx_addr);
	}
}

static u_int
if_rxr_get(struct if_rxring *rxr, unsigned int max)
{
	u_int taken = MIN(max, (rxr->rxr_total - rxr->rxr_inuse));

	KASSERTMSG(rxr->rxr_inuse + taken <= rxr->rxr_total,
	    "rxr->rxr_inuse: %d\n"
	    "taken: %d\n"
	    "rxr->rxr_total: %d\n",
	    rxr->rxr_inuse, taken, rxr->rxr_total);
	rxr->rxr_inuse += taken;

	return taken;
}

static void
if_rxr_put(struct if_rxring *rxr, unsigned int n)
{
	KASSERTMSG(rxr->rxr_inuse >= n,
	    "rxr->rxr_inuse: %d\n"
	    "n: %d\n"
	    "rxr->rxr_total: %d\n",
	    rxr->rxr_inuse, n, rxr->rxr_total);

	rxr->rxr_inuse -= n;
}

static void
if_rxr_init(struct if_rxring *rxr, unsigned int lwm __unused, unsigned int hwm)
{
	(void) lwm;

	rxr->rxr_total = hwm;
	rxr->rxr_inuse = 0;
}

void
bwfm_pci_ring_update_wptr(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	if (sc->sc_dma_idx_sz == 0) {
		ring->w_ptr = bus_space_read_2(sc->sc_tcm_iot,
		    sc->sc_tcm_ioh, ring->w_idx_addr);
	} else {
		/* Sync the DMA index before the CPU reads it. */
		bus_dmamap_sync(sc->sc_dmat,
		    BWFM_PCI_DMA_MAP(sc->sc_dma_idx_buf), ring->w_idx_addr,
		    sizeof(uint16_t), BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		ring->w_ptr = *(uint16_t *)(BWFM_PCI_DMA_KVA(sc->sc_dma_idx_buf)
		    + ring->w_idx_addr);
	}
}

void
bwfm_pci_ring_write_rptr(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	if (sc->sc_dma_idx_sz == 0) {
		bus_space_write_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    ring->r_idx_addr, ring->r_ptr);
	} else {
		*(uint16_t *)(BWFM_PCI_DMA_KVA(sc->sc_dma_idx_buf)
		    + ring->r_idx_addr) = ring->r_ptr;
		bus_dmamap_sync(sc->sc_dmat,
		    BWFM_PCI_DMA_MAP(sc->sc_dma_idx_buf), ring->r_idx_addr,
		    sizeof(uint16_t), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
}

void
bwfm_pci_ring_write_wptr(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	if (sc->sc_dma_idx_sz == 0) {
		bus_space_write_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    ring->w_idx_addr, ring->w_ptr);
	} else {
		*(uint16_t *)(BWFM_PCI_DMA_KVA(sc->sc_dma_idx_buf)
		    + ring->w_idx_addr) = ring->w_ptr;
		bus_dmamap_sync(sc->sc_dmat,
		    BWFM_PCI_DMA_MAP(sc->sc_dma_idx_buf), ring->w_idx_addr,
		    sizeof(uint16_t), BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
}

/*
 * Retrieve a free descriptor to put new stuff in, but don't commit
 * to it yet so we can roll back later if any error occurs.
 */
void *
bwfm_pci_ring_write_reserve(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	int available;
	char *ret;

	bwfm_pci_ring_update_rptr(sc, ring);

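	/*
	 * Compute the free slots from the freshly cached read
	 * pointer: if the reader is ahead of the writer the free
	 * space is the gap between them, otherwise it wraps around
	 * the end of the ring.
	 */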
	if (ring->r_ptr > ring->w_ptr)
		available = ring->r_ptr - ring->w_ptr;
	else
		available = ring->r_ptr + (ring->nitem - ring->w_ptr);

	if (available < 1)
		return NULL;

	ret = BWFM_PCI_DMA_KVA(ring->ring) + (ring->w_ptr * ring->itemsz);
	ring->w_ptr += 1;
	if (ring->w_ptr == ring->nitem)
		ring->w_ptr = 0;
	return ret;
}

void *
bwfm_pci_ring_write_reserve_multi(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring, int count, int *avail)
{
	int available;
	char *ret;

	bwfm_pci_ring_update_rptr(sc, ring);

	if (ring->r_ptr > ring->w_ptr)
		available = ring->r_ptr - ring->w_ptr;
	else
		available = ring->r_ptr + (ring->nitem - ring->w_ptr);

	if (available < 1)
		return NULL;

	ret = BWFM_PCI_DMA_KVA(ring->ring) + (ring->w_ptr * ring->itemsz);
	*avail = uimin(count, available - 1);
	if (*avail + ring->w_ptr > ring->nitem)
		*avail = ring->nitem - ring->w_ptr;
	ring->w_ptr += *avail;
	if (ring->w_ptr == ring->nitem)
		ring->w_ptr = 0;
	return ret;
}

/*
 * Read number of descriptors available (submitted by the firmware)
 * and retrieve pointer to first descriptor.
 */
void *
bwfm_pci_ring_read_avail(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring, int *avail)
{
	bwfm_pci_ring_update_wptr(sc, ring);

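	/*
	 * On wraparound only the descriptors up to the end of the
	 * ring are returned; bwfm_pci_ring_rx() loops until the read
	 * pointer has wrapped back to the start.
	 */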
	if (ring->w_ptr >= ring->r_ptr)
		*avail = ring->w_ptr - ring->r_ptr;
	else
		*avail = ring->nitem - ring->r_ptr;

	if (*avail == 0)
		return NULL;
	bus_dmamap_sync(sc->sc_dmat, BWFM_PCI_DMA_MAP(ring->ring),
	    ring->r_ptr * ring->itemsz, *avail * ring->itemsz,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	return BWFM_PCI_DMA_KVA(ring->ring) + (ring->r_ptr * ring->itemsz);
}

/*
 * Let firmware know we read N descriptors.
 */
void
bwfm_pci_ring_read_commit(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring, int nitem)
{
	ring->r_ptr += nitem;
	if (ring->r_ptr == ring->nitem)
		ring->r_ptr = 0;
	bwfm_pci_ring_write_rptr(sc, ring);
}

/*
 * Let firmware know that we submitted some descriptors.
 */
void
bwfm_pci_ring_write_commit(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	bus_dmamap_sync(sc->sc_dmat, BWFM_PCI_DMA_MAP(ring->ring),
	    0, BWFM_PCI_DMA_LEN(ring->ring), BUS_DMASYNC_PREREAD |
	    BUS_DMASYNC_PREWRITE);
	bwfm_pci_ring_write_wptr(sc, ring);
	bwfm_pci_ring_bell(sc, ring);
}

/*
 * Roll back N descriptors in case we don't actually want
 * to commit to them.
 */
void
bwfm_pci_ring_write_cancel(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring, int nitem)
{
	if (ring->w_ptr == 0)
		ring->w_ptr = ring->nitem - nitem;
	else
		ring->w_ptr -= nitem;
}

/*
 * For each written descriptor on the ring, pass the descriptor to
 * a message handler and let the firmware know we handled it.
 */
void
bwfm_pci_ring_rx(struct bwfm_pci_softc *sc, struct bwfm_pci_msgring *ring)
{
	char *buf;
	int avail, processed;

again:
	buf = bwfm_pci_ring_read_avail(sc, ring, &avail);
	if (buf == NULL)
		return;

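	/*
	 * Commit in batches of 48 descriptors, presumably so the
	 * firmware regains ring slots while we keep processing.
	 */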
	processed = 0;
	while (avail) {
		bwfm_pci_msg_rx(sc, buf + sc->sc_rx_dataoffset);
		buf += ring->itemsz;
		processed++;
		if (processed == 48) {
			bwfm_pci_ring_read_commit(sc, ring, processed);
			processed = 0;
		}
		avail--;
	}
	if (processed)
		bwfm_pci_ring_read_commit(sc, ring, processed);
	if (ring->r_ptr == 0)
		goto again;
}

void
bwfm_pci_msg_rx(struct bwfm_pci_softc *sc, void *buf)
{
	struct ifnet *ifp = sc->sc_sc.sc_ic.ic_ifp;
	struct msgbuf_ioctl_resp_hdr *resp;
	struct msgbuf_tx_status *tx;
	struct msgbuf_rx_complete *rx;
	struct msgbuf_rx_event *event;
	struct msgbuf_common_hdr *msg;
	struct msgbuf_flowring_create_resp *fcr;
	struct msgbuf_flowring_delete_resp *fdr;
	struct bwfm_pci_msgring *ring;
	struct mbuf *m;
	int flowid;

	msg = (struct msgbuf_common_hdr *)buf;
	switch (msg->msgtype)
	{
	case MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT:
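		/*
		 * Firmware flowring IDs start at 2; IDs 0 and 1 are
		 * the ctrl and rxpost submission rings, so subtract 2
		 * to index into sc_flowrings.
		 */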
		fcr = (struct msgbuf_flowring_create_resp *)buf;
		flowid = letoh16(fcr->compl_hdr.flow_ring_id);
		if (flowid < 2)
			break;
		flowid -= 2;
		if (flowid >= sc->sc_max_flowrings)
			break;
		ring = &sc->sc_flowrings[flowid];
		if (ring->status != RING_OPENING)
			break;
		if (fcr->compl_hdr.status) {
			printf("%s: failed to open flowring %d\n",
			    DEVNAME(sc), flowid);
			ring->status = RING_CLOSED;
			if (ring->m) {
				m_freem(ring->m);
				ring->m = NULL;
			}
			ifp->if_flags &= ~IFF_OACTIVE;
			ifp->if_start(ifp);
			break;
		}
		ring->status = RING_OPEN;
		if (ring->m != NULL) {
			m = ring->m;
			ring->m = NULL;
			if (bwfm_pci_txdata(&sc->sc_sc, &m))
				m_freem(m);
		}
		ifp->if_flags &= ~IFF_OACTIVE;
		ifp->if_start(ifp);
		break;
	case MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT:
		fdr = (struct msgbuf_flowring_delete_resp *)buf;
		flowid = letoh16(fdr->compl_hdr.flow_ring_id);
		if (flowid < 2)
			break;
		flowid -= 2;
		if (flowid >= sc->sc_max_flowrings)
			break;
		ring = &sc->sc_flowrings[flowid];
		if (ring->status != RING_CLOSING)
			break;
		if (fdr->compl_hdr.status) {
			printf("%s: failed to delete flowring %d\n",
			    DEVNAME(sc), flowid);
			break;
		}
		bwfm_pci_dmamem_free(sc, ring->ring);
		ring->status = RING_CLOSED;
		break;
	case MSGBUF_TYPE_IOCTLPTR_REQ_ACK:
		break;
	case MSGBUF_TYPE_IOCTL_CMPLT:
		resp = (struct msgbuf_ioctl_resp_hdr *)buf;
		sc->sc_ioctl_resp_pktid = letoh32(resp->msg.request_id);
		sc->sc_ioctl_resp_ret_len = letoh16(resp->resp_len);
		sc->sc_ioctl_resp_status = letoh16(resp->compl_hdr.status);
		if_rxr_put(&sc->sc_ioctl_ring, 1);
		bwfm_pci_fill_rx_rings(sc);
		wakeup(&sc->sc_ioctl_buf);
		break;
	case MSGBUF_TYPE_WL_EVENT:
		event = (struct msgbuf_rx_event *)buf;
		m = bwfm_pci_pktid_free(sc, &sc->sc_rx_pkts,
		    letoh32(event->msg.request_id));
		if (m == NULL)
			break;
		m_adj(m, sc->sc_rx_dataoffset);
		m->m_len = m->m_pkthdr.len = letoh16(event->event_data_len);
		bwfm_rx(&sc->sc_sc, m);
		if_rxr_put(&sc->sc_event_ring, 1);
		bwfm_pci_fill_rx_rings(sc);
		break;
	case MSGBUF_TYPE_TX_STATUS:
		tx = (struct msgbuf_tx_status *)buf;
		m = bwfm_pci_pktid_free(sc, &sc->sc_tx_pkts,
		    letoh32(tx->msg.request_id));
		if (m == NULL)
			break;
		m_freem(m);
		if (sc->sc_tx_pkts_full) {
			sc->sc_tx_pkts_full = 0;
			ifp->if_flags &= ~IFF_OACTIVE;
			ifp->if_start(ifp);
		}
		break;
	case MSGBUF_TYPE_RX_CMPLT:
		rx = (struct msgbuf_rx_complete *)buf;
		m = bwfm_pci_pktid_free(sc, &sc->sc_rx_pkts,
		    letoh32(rx->msg.request_id));
		if (m == NULL)
			break;
		if (letoh16(rx->data_offset))
			m_adj(m, letoh16(rx->data_offset));
		else if (sc->sc_rx_dataoffset)
			m_adj(m, sc->sc_rx_dataoffset);
		m->m_len = m->m_pkthdr.len = letoh16(rx->data_len);
		bwfm_rx(&sc->sc_sc, m);
		if_rxr_put(&sc->sc_rxbuf_ring, 1);
		bwfm_pci_fill_rx_rings(sc);
		break;
	default:
		printf("%s: msgtype 0x%08x\n", __func__, msg->msgtype);
		break;
	}
}

/* Bus core helpers */
void
bwfm_pci_select_core(struct bwfm_pci_softc *sc, int id)
{
	struct bwfm_softc *bwfm = (void *)sc;
	struct bwfm_core *core;

	core = bwfm_chip_get_core(bwfm, id);
	if (core == NULL) {
		printf("%s: could not find core to select\n", DEVNAME(sc));
		return;
	}

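	/*
	 * Point the BAR0 window at the core's registers; read the
	 * window register back and rewrite it once in case the first
	 * config write did not stick.
	 */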
	pci_conf_write(sc->sc_pc, sc->sc_tag,
	    BWFM_PCI_BAR0_WINDOW, core->co_base);
	if (pci_conf_read(sc->sc_pc, sc->sc_tag,
	    BWFM_PCI_BAR0_WINDOW) != core->co_base)
		pci_conf_write(sc->sc_pc, sc->sc_tag,
		    BWFM_PCI_BAR0_WINDOW, core->co_base);
}

uint32_t
bwfm_pci_buscore_read(struct bwfm_softc *bwfm, uint32_t reg)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	uint32_t page, offset;

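	/*
	 * BAR0 is a sliding window: the high bits of the register
	 * address select the page mapped through the window and the
	 * low bits are the offset within it.
	 */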
	page = reg & ~(BWFM_PCI_BAR0_REG_SIZE - 1);
	offset = reg & (BWFM_PCI_BAR0_REG_SIZE - 1);
	pci_conf_write(sc->sc_pc, sc->sc_tag, BWFM_PCI_BAR0_WINDOW, page);
	return bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh, offset);
}

void
bwfm_pci_buscore_write(struct bwfm_softc *bwfm, uint32_t reg, uint32_t val)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	uint32_t page, offset;

	page = reg & ~(BWFM_PCI_BAR0_REG_SIZE - 1);
	offset = reg & (BWFM_PCI_BAR0_REG_SIZE - 1);
	pci_conf_write(sc->sc_pc, sc->sc_tag, BWFM_PCI_BAR0_WINDOW, page);
	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh, offset, val);
}

int
bwfm_pci_buscore_prepare(struct bwfm_softc *bwfm)
{
	return 0;
}

int
bwfm_pci_buscore_reset(struct bwfm_softc *bwfm)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	struct bwfm_core *core;
	uint32_t reg;
	int i;

	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_PCIE2);
	reg = pci_conf_read(sc->sc_pc, sc->sc_tag,
	    BWFM_PCI_CFGREG_LINK_STATUS_CTRL);
	pci_conf_write(sc->sc_pc, sc->sc_tag, BWFM_PCI_CFGREG_LINK_STATUS_CTRL,
	    reg & ~BWFM_PCI_CFGREG_LINK_STATUS_CTRL_ASPM_ENAB);

	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_CHIPCOMMON);
	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_CHIP_REG_WATCHDOG, 4);
	delay(100 * 1000);

	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_PCIE2);
	pci_conf_write(sc->sc_pc, sc->sc_tag,
	    BWFM_PCI_CFGREG_LINK_STATUS_CTRL, reg);

	core = bwfm_chip_get_core(bwfm, BWFM_AGENT_CORE_PCIE2);
	if (core->co_rev <= 13) {
		uint16_t cfg_offset[] = {
			BWFM_PCI_CFGREG_STATUS_CMD,
			BWFM_PCI_CFGREG_PM_CSR,
			BWFM_PCI_CFGREG_MSI_CAP,
			BWFM_PCI_CFGREG_MSI_ADDR_L,
			BWFM_PCI_CFGREG_MSI_ADDR_H,
			BWFM_PCI_CFGREG_MSI_DATA,
			BWFM_PCI_CFGREG_LINK_STATUS_CTRL2,
			BWFM_PCI_CFGREG_RBAR_CTRL,
			BWFM_PCI_CFGREG_PML1_SUB_CTRL1,
			BWFM_PCI_CFGREG_REG_BAR2_CONFIG,
			BWFM_PCI_CFGREG_REG_BAR3_CONFIG,
		};

		for (i = 0; i < nitems(cfg_offset); i++) {
			bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
			    BWFM_PCI_PCIE2REG_CONFIGADDR, cfg_offset[i]);
			reg = bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
			    BWFM_PCI_PCIE2REG_CONFIGDATA);
			DPRINTFN(3, ("%s: config offset 0x%04x, value 0x%04x\n",
			    DEVNAME(sc), cfg_offset[i], reg));
			bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
			    BWFM_PCI_PCIE2REG_CONFIGDATA, reg);
		}
	}

	reg = bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_PCI_PCIE2REG_MAILBOXINT);
	if (reg != 0xffffffff)
		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
		    BWFM_PCI_PCIE2REG_MAILBOXINT, reg);

	return 0;
}

void
bwfm_pci_buscore_activate(struct bwfm_softc *bwfm, const uint32_t rstvec)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh, 0, rstvec);
}

static int bwfm_pci_prio2fifo[8] = {
	1, /* best effort */
	0, /* IPTOS_PREC_IMMEDIATE */
	0, /* IPTOS_PREC_PRIORITY */
	1, /* IPTOS_PREC_FLASH */
	2, /* IPTOS_PREC_FLASHOVERRIDE */
	2, /* IPTOS_PREC_CRITIC_ECP */
	3, /* IPTOS_PREC_INTERNETCONTROL */
	3, /* IPTOS_PREC_NETCONTROL */
};

int
bwfm_pci_flowring_lookup(struct bwfm_pci_softc *sc, struct mbuf *m)
{
	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
	uint8_t *da = mtod(m, uint8_t *);
	struct ether_header *eh;
	int flowid, prio, fifo;
	int i, found, ac;

	/* No QoS for EAPOL frames. */
	eh = mtod(m, struct ether_header *);
	ac = (eh->ether_type != htons(ETHERTYPE_PAE)) ?
	    M_WME_GETAC(m) : WME_AC_BE;

	prio = ac;
	fifo = bwfm_pci_prio2fifo[prio];

	switch (ic->ic_opmode)
	{
	case IEEE80211_M_STA:
		flowid = fifo;
		break;
#ifndef IEEE80211_STA_ONLY
	case IEEE80211_M_HOSTAP:
		if (ETHER_IS_MULTICAST(da))
			da = __UNCONST(etherbroadcastaddr);
		flowid = da[5] * 2 + fifo;
		break;
#endif
	default:
		printf("%s: state not supported\n", DEVNAME(sc));
		return ENOBUFS;
	}

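	/*
	 * Starting from the slot derived above (the fifo, plus the
	 * last byte of the destination MAC in HostAP mode), probe
	 * linearly for a matching open flowring.
	 */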
	found = 0;
	flowid = flowid % sc->sc_max_flowrings;
	for (i = 0; i < sc->sc_max_flowrings; i++) {
		if (ic->ic_opmode == IEEE80211_M_STA &&
		    sc->sc_flowrings[flowid].status >= RING_OPEN &&
		    sc->sc_flowrings[flowid].fifo == fifo) {
			found = 1;
			break;
		}
#ifndef IEEE80211_STA_ONLY
		if (ic->ic_opmode == IEEE80211_M_HOSTAP &&
		    sc->sc_flowrings[flowid].status >= RING_OPEN &&
		    sc->sc_flowrings[flowid].fifo == fifo &&
		    !memcmp(sc->sc_flowrings[flowid].mac, da, ETHER_ADDR_LEN)) {
			found = 1;
			break;
		}
#endif
		flowid = (flowid + 1) % sc->sc_max_flowrings;
	}

	if (found)
		return flowid;

	return -1;
}

void
bwfm_pci_flowring_create(struct bwfm_pci_softc *sc, struct mbuf *m)
{
	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
	struct bwfm_cmd_flowring_create *cmd;
	uint8_t *da = mtod(m, uint8_t *);
	struct ether_header *eh;
	struct bwfm_pci_msgring *ring;
	int flowid, prio, fifo;
	int i, found, ac;

	cmd = pool_get(&sc->sc_flowring_pool, PR_NOWAIT);
	if (__predict_false(cmd == NULL))
		return;

	/* No QoS for EAPOL frames. */
	eh = mtod(m, struct ether_header *);
	ac = (eh->ether_type != htons(ETHERTYPE_PAE)) ?
	    M_WME_GETAC(m) : WME_AC_BE;

	prio = ac;
	fifo = bwfm_pci_prio2fifo[prio];

	switch (ic->ic_opmode)
	{
	case IEEE80211_M_STA:
		flowid = fifo;
		break;
#ifndef IEEE80211_STA_ONLY
	case IEEE80211_M_HOSTAP:
		if (ETHER_IS_MULTICAST(da))
			da = __UNCONST(etherbroadcastaddr);
		flowid = da[5] * 2 + fifo;
		break;
#endif
	default:
		printf("%s: state not supported\n", DEVNAME(sc));
		pool_put(&sc->sc_flowring_pool, cmd);
		return;
	}

	found = 0;
	flowid = flowid % sc->sc_max_flowrings;
	for (i = 0; i < sc->sc_max_flowrings; i++) {
		ring = &sc->sc_flowrings[flowid];
		if (ring->status == RING_CLOSED) {
			ring->status = RING_OPENING;
			found = 1;
			break;
		}
		flowid = (flowid + 1) % sc->sc_max_flowrings;
	}

	/*
	 * We cannot recover from that so far. Only a stop/init
	 * cycle can revive this if it ever happens at all.
	 */
	if (!found) {
		printf("%s: no flowring available\n", DEVNAME(sc));
		pool_put(&sc->sc_flowring_pool, cmd);
		return;
	}

	cmd->sc = sc;
	cmd->m = m;
	cmd->prio = prio;
	cmd->flowid = flowid;
	workqueue_enqueue(sc->flowring_wq, &cmd->wq_cookie, NULL);
}

void
bwfm_pci_flowring_create_cb(struct work *wk, void *arg)
{
	struct bwfm_cmd_flowring_create *cmd =
	    container_of(wk, struct bwfm_cmd_flowring_create, wq_cookie);
	struct bwfm_pci_softc *sc = cmd->sc;
1798 struct ieee80211com *ic = &sc->sc_sc.sc_ic;
1799 struct msgbuf_tx_flowring_create_req *req;
1800 struct bwfm_pci_msgring *ring;
1801 uint8_t *da, *sa;
1802
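	/*
	 * The cached mbuf still starts with the Ethernet header, so
	 * the destination and source addresses can be read straight
	 * out of it.
	 */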
	da = mtod(cmd->m, uint8_t *) + 0 * ETHER_ADDR_LEN;
	sa = mtod(cmd->m, uint8_t *) + 1 * ETHER_ADDR_LEN;
1805
1806 ring = &sc->sc_flowrings[cmd->flowid];
	if (ring->status != RING_OPENING) {
		printf("%s: flowring not opening\n", DEVNAME(sc));
		m_freem(cmd->m);
		pool_put(&sc->sc_flowring_pool, cmd);
		return;
	}

	if (bwfm_pci_setup_flowring(sc, ring, 512, 48)) {
		printf("%s: cannot setup flowring\n", DEVNAME(sc));
		ring->status = RING_CLOSED;
		m_freem(cmd->m);
		pool_put(&sc->sc_flowring_pool, cmd);
		return;
	}
1816
	req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
	if (req == NULL) {
		printf("%s: cannot reserve for flowring\n", DEVNAME(sc));
		ring->status = RING_CLOSED;
		m_freem(cmd->m);
		pool_put(&sc->sc_flowring_pool, cmd);
		return;
	}
1822
1823 ring->status = RING_OPENING;
1824 ring->fifo = bwfm_pci_prio2fifo[cmd->prio];
1825 ring->m = cmd->m;
1826 memcpy(ring->mac, da, ETHER_ADDR_LEN);
1827 #ifndef IEEE80211_STA_ONLY
1828 if (ic->ic_opmode == IEEE80211_M_HOSTAP && ETHER_IS_MULTICAST(da))
1829 memcpy(ring->mac, etherbroadcastaddr, ETHER_ADDR_LEN);
1830 #endif
1831
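	/*
	 * Flowring IDs are offset by 2: the first two host-to-device
	 * rings are the control and RX post submit rings.
	 */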
1832 req->msg.msgtype = MSGBUF_TYPE_FLOW_RING_CREATE;
1833 req->msg.ifidx = 0;
1834 req->msg.request_id = 0;
1835 req->tid = bwfm_pci_prio2fifo[cmd->prio];
1836 req->flow_ring_id = letoh16(cmd->flowid + 2);
1837 memcpy(req->da, da, ETHER_ADDR_LEN);
1838 memcpy(req->sa, sa, ETHER_ADDR_LEN);
1839 req->flow_ring_addr.high_addr =
1840 letoh32(BWFM_PCI_DMA_DVA(ring->ring) >> 32);
1841 req->flow_ring_addr.low_addr =
1842 letoh32(BWFM_PCI_DMA_DVA(ring->ring) & 0xffffffff);
1843 req->max_items = letoh16(512);
1844 req->len_item = letoh16(48);
1845
1846 bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);
1847 pool_put(&sc->sc_flowring_pool, cmd);
1848 }
1849
1850 void
1851 bwfm_pci_flowring_delete(struct bwfm_pci_softc *sc, int flowid)
1852 {
1853 struct msgbuf_tx_flowring_delete_req *req;
1854 struct bwfm_pci_msgring *ring;
1855
1856 ring = &sc->sc_flowrings[flowid];
1857 if (ring->status != RING_OPEN) {
1858 printf("%s: flowring not open\n", DEVNAME(sc));
1859 return;
1860 }
1861
1862 req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
1863 if (req == NULL) {
1864 printf("%s: cannot reserve for flowring\n", DEVNAME(sc));
1865 return;
1866 }
1867
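	/* The ring stays in RING_CLOSING until the delete response arrives. */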
1868 ring->status = RING_CLOSING;
1869
1870 req->msg.msgtype = MSGBUF_TYPE_FLOW_RING_DELETE;
1871 req->msg.ifidx = 0;
1872 req->msg.request_id = 0;
1873 req->flow_ring_id = letoh16(flowid + 2);
1874 req->reason = 0;
1875
1876 bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);
1877 }
1878
1879 void
1880 bwfm_pci_stop(struct bwfm_softc *bwfm)
1881 {
1882 struct bwfm_pci_softc *sc = (void *)bwfm;
1883 struct bwfm_pci_msgring *ring;
1884 int i;
1885
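	/* Tear down all open flowrings before the interface goes down. */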
1886 for (i = 0; i < sc->sc_max_flowrings; i++) {
1887 ring = &sc->sc_flowrings[i];
1888 if (ring->status == RING_OPEN)
1889 bwfm_pci_flowring_delete(sc, i);
1890 }
1891 }
1892
1893 int
1894 bwfm_pci_txcheck(struct bwfm_softc *bwfm)
1895 {
1896 struct bwfm_pci_softc *sc = (void *)bwfm;
1897 struct bwfm_pci_msgring *ring;
1898 int i;
1899
1900 /* If we are transitioning, we cannot send. */
1901 for (i = 0; i < sc->sc_max_flowrings; i++) {
1902 ring = &sc->sc_flowrings[i];
1903 if (ring->status == RING_OPENING)
1904 return ENOBUFS;
1905 }
1906
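	/*
	 * All TX packet IDs are in use; note it so the queue can be
	 * restarted once completions free some up.
	 */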
1907 if (bwfm_pci_pktid_avail(sc, &sc->sc_tx_pkts)) {
1908 sc->sc_tx_pkts_full = 1;
1909 return ENOBUFS;
1910 }
1911
1912 return 0;
1913 }
1914
1915 int
1916 bwfm_pci_txdata(struct bwfm_softc *bwfm, struct mbuf **mp)
1917 {
1918 struct bwfm_pci_softc *sc = (void *)bwfm;
1919 struct bwfm_pci_msgring *ring;
1920 struct msgbuf_tx_msghdr *tx;
1921 uint32_t pktid;
1922 paddr_t paddr;
1923 uint64_t devaddr;
1924 struct ether_header *eh;
1925 int flowid, ret, ac;
1926
1927 flowid = bwfm_pci_flowring_lookup(sc, *mp);
1928 if (flowid < 0) {
1929 /*
1930 * We cannot send the packet right now as there is
1931 * no flowring yet. The flowring will be created
1932 * asynchronously. While the ring is transitioning
1933 * the TX check will tell the upper layers that we
1934 * cannot send packets right now. When the flowring
1935 * is created the queue will be restarted and this
1936 * mbuf will be transmitted.
1937 */
1938 bwfm_pci_flowring_create(sc, *mp);
1939 return 0;
1940 }
1941
1942 ring = &sc->sc_flowrings[flowid];
1943 if (ring->status == RING_OPENING ||
1944 ring->status == RING_CLOSING) {
		printf("%s: tried to use flowring %d while it is "
		    "transitioning (status %d)\n",
		    DEVNAME(sc), flowid, ring->status);
1948 return ENOBUFS;
1949 }
1950
1951 tx = bwfm_pci_ring_write_reserve(sc, ring);
1952 if (tx == NULL)
1953 return ENOBUFS;
1954
1955 /* No QoS for EAPOL frames. */
1956 eh = mtod(*mp, struct ether_header *);
1957 ac = (eh->ether_type != htons(ETHERTYPE_PAE)) ?
1958 M_WME_GETAC(*mp) : WME_AC_BE;
1959
1960 memset(tx, 0, sizeof(*tx));
1961 tx->msg.msgtype = MSGBUF_TYPE_TX_POST;
1962 tx->msg.ifidx = 0;
1963 tx->flags = BWFM_MSGBUF_PKT_FLAGS_FRAME_802_3;
1964 tx->flags |= ac << BWFM_MSGBUF_PKT_FLAGS_PRIO_SHIFT;
1965 tx->seg_cnt = 1;
1966 memcpy(tx->txhdr, mtod(*mp, char *), ETHER_HDR_LEN);
1967
1968 ret = bwfm_pci_pktid_new(sc, &sc->sc_tx_pkts, mp, &pktid, &paddr);
1969 if (ret) {
1970 if (ret == ENOBUFS) {
1971 printf("%s: no pktid available for TX\n",
1972 DEVNAME(sc));
1973 sc->sc_tx_pkts_full = 1;
1974 }
1975 bwfm_pci_ring_write_cancel(sc, ring, 1);
1976 return ret;
1977 }
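	/*
	 * The Ethernet header travels in the message's txhdr field,
	 * so the DMA address handed to the firmware points past it.
	 */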
1978 devaddr = paddr + ETHER_HDR_LEN;
1979
1980 tx->msg.request_id = htole32(pktid);
1981 tx->data_len = htole16((*mp)->m_len - ETHER_HDR_LEN);
1982 tx->data_buf_addr.high_addr = htole32(devaddr >> 32);
1983 tx->data_buf_addr.low_addr = htole32(devaddr & 0xffffffff);
1984
1985 bwfm_pci_ring_write_commit(sc, ring);
1986 return 0;
1987 }
1988
1989 #ifdef BWFM_DEBUG
1990 void
1991 bwfm_pci_debug_console(struct bwfm_pci_softc *sc)
1992 {
1993 uint32_t newidx = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
1994 sc->sc_console_base_addr + BWFM_CONSOLE_WRITEIDX);
1995
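	/*
	 * Drain any new characters from the firmware's console ring
	 * in TCM, dropping carriage returns along the way.
	 */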
1996 if (newidx != sc->sc_console_readidx)
1997 DPRINTFN(3, ("BWFM CONSOLE: "));
1998 while (newidx != sc->sc_console_readidx) {
1999 uint8_t ch = bus_space_read_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
2000 sc->sc_console_buf_addr + sc->sc_console_readidx);
2001 sc->sc_console_readidx++;
2002 if (sc->sc_console_readidx == sc->sc_console_buf_size)
2003 sc->sc_console_readidx = 0;
2004 if (ch == '\r')
2005 continue;
2006 DPRINTFN(3, ("%c", ch));
2007 }
2008 }
2009 #endif
2010
2011 int
2012 bwfm_pci_intr(void *v)
2013 {
2014 struct bwfm_pci_softc *sc = (void *)v;
2015 uint32_t status;
2016
2017 if ((status = bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2018 BWFM_PCI_PCIE2REG_MAILBOXINT)) == 0)
2019 return 0;
2020
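	/*
	 * Mask further interrupts and acknowledge the pending ones by
	 * writing the status bits back.
	 */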
2021 bwfm_pci_intr_disable(sc);
2022 bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2023 BWFM_PCI_PCIE2REG_MAILBOXINT, status);
2024
2025 if (status & (BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_FN0_0 |
2026 BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_FN0_1))
2027 printf("%s: handle MB data\n", __func__);
2028
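	/* A D2H doorbell means the completion rings may have new entries. */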
2029 if (status & BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_D2H_DB) {
2030 bwfm_pci_ring_rx(sc, &sc->sc_rx_complete);
2031 bwfm_pci_ring_rx(sc, &sc->sc_tx_complete);
2032 bwfm_pci_ring_rx(sc, &sc->sc_ctrl_complete);
2033 }
2034
2035 #ifdef BWFM_DEBUG
2036 bwfm_pci_debug_console(sc);
2037 #endif
2038
2039 bwfm_pci_intr_enable(sc);
2040 return 1;
2041 }
2042
2043 void
2044 bwfm_pci_intr_enable(struct bwfm_pci_softc *sc)
2045 {
2046 bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2047 BWFM_PCI_PCIE2REG_MAILBOXMASK,
2048 BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_FN0_0 |
2049 BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_FN0_1 |
2050 BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_D2H_DB);
2051 }
2052
2053 void
2054 bwfm_pci_intr_disable(struct bwfm_pci_softc *sc)
2055 {
2056 bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2057 BWFM_PCI_PCIE2REG_MAILBOXMASK, 0);
2058 }
2059
2060 /* Msgbuf protocol implementation */
2061 int
2062 bwfm_pci_msgbuf_query_dcmd(struct bwfm_softc *bwfm, int ifidx,
2063 int cmd, char *buf, size_t *len)
2064 {
2065 struct bwfm_pci_softc *sc = (void *)bwfm;
2066 struct msgbuf_ioctl_req_hdr *req;
2067 struct mbuf *m;
2068 size_t buflen;
2069 int s;
2070
2071 s = splnet();
2072 sc->sc_ioctl_resp_pktid = -1;
2073 req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
2074 if (req == NULL) {
2075 printf("%s: cannot reserve for write\n", DEVNAME(sc));
2076 splx(s);
2077 return 1;
2078 }
2079 req->msg.msgtype = MSGBUF_TYPE_IOCTLPTR_REQ;
2080 req->msg.ifidx = 0;
2081 req->msg.flags = 0;
2082 req->msg.request_id = htole32(MSGBUF_IOCTL_REQ_PKTID);
2083 req->cmd = htole32(cmd);
2084 req->output_buf_len = htole16(*len);
2085 req->trans_id = htole16(sc->sc_ioctl_reqid++);
2086
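	/*
	 * Request parameters go through the preallocated DMA-safe
	 * ioctl buffer; cap the copy at the buffer size.
	 */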
2087 buflen = uimin(*len, BWFM_DMA_H2D_IOCTL_BUF_LEN);
2088 req->input_buf_len = htole16(buflen);
2089 req->req_buf_addr.high_addr =
2090 htole32((uint64_t)BWFM_PCI_DMA_DVA(sc->sc_ioctl_buf) >> 32);
2091 req->req_buf_addr.low_addr =
2092 htole32((uint64_t)BWFM_PCI_DMA_DVA(sc->sc_ioctl_buf) & 0xffffffff);
2093 if (buf)
2094 memcpy(BWFM_PCI_DMA_KVA(sc->sc_ioctl_buf), buf, buflen);
2095 else
2096 memset(BWFM_PCI_DMA_KVA(sc->sc_ioctl_buf), 0, buflen);
2097
2098 bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);
2099 splx(s);
2100
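	/*
	 * Wait up to a second for the interrupt path to post the
	 * ioctl completion and wake us up.
	 */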
2101 if (tsleep(&sc->sc_ioctl_buf, PCATCH, "bwfm", hz)) {
2102 printf("%s: timeout waiting for ioctl response\n",
2103 DEVNAME(sc));
2104 return 1;
2105 }
2106
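	/*
	 * The completion handler recorded the packet ID of the
	 * response; reclaim the mbuf carrying the payload.
	 */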
	s = splnet();
	m = bwfm_pci_pktid_free(sc, &sc->sc_rx_pkts, sc->sc_ioctl_resp_pktid);
	if (m == NULL) {
		splx(s);
		return 1;
	}
2110
2111 *len = uimin(buflen, sc->sc_ioctl_resp_ret_len);
2112 if (buf)
2113 memcpy(buf, mtod(m, char *), *len);
2114 m_freem(m);
2115 splx(s);
2116
2117 return 0;
2118 }
2119
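/*
 * A set command is a query whose response payload we do not care
 * about; reuse the query path with the caller's buffer and length.
 */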
2120 int
2121 bwfm_pci_msgbuf_set_dcmd(struct bwfm_softc *bwfm, int ifidx,
2122 int cmd, char *buf, size_t len)
2123 {
2124 return bwfm_pci_msgbuf_query_dcmd(bwfm, ifidx, cmd, buf, &len);
2125 }
2126