/* $NetBSD: if_bwfm_pci.c,v 1.1.2.3 2018/09/06 06:55:51 pgoyette Exp $ */
/* $OpenBSD: if_bwfm_pci.c,v 1.18 2018/02/08 05:00:38 patrick Exp $ */
/*
 * Copyright (c) 2010-2016 Broadcom Corporation
 * Copyright (c) 2017 Patrick Wildt <patrick@blueri.se>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/device.h>
#include <sys/pool.h>
#include <sys/workqueue.h>
#include <sys/socket.h>

#include <net/bpf.h>
#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_ether.h>
#include <net/if_media.h>

#include <netinet/in.h>

#include <net80211/ieee80211_var.h>

#include <dev/firmload.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/ic/bwfmvar.h>
#include <dev/ic/bwfmreg.h>
#include <dev/pci/if_bwfm_pci.h>

#define BWFM_DMA_D2H_SCRATCH_BUF_LEN	8
#define BWFM_DMA_D2H_RINGUPD_BUF_LEN	1024
#define BWFM_DMA_H2D_IOCTL_BUF_LEN	ETHER_MAX_LEN

#define BWFM_NUM_TX_MSGRINGS		2
#define BWFM_NUM_RX_MSGRINGS		3

#define BWFM_NUM_TX_PKTIDS		2048
#define BWFM_NUM_RX_PKTIDS		1024

#define BWFM_NUM_TX_DESCS		1
#define BWFM_NUM_RX_DESCS		1

#ifdef BWFM_DEBUG
#define DPRINTF(x)	do { if (bwfm_debug > 0) printf x; } while (0)
#define DPRINTFN(n, x)	do { if (bwfm_debug >= (n)) printf x; } while (0)
static int bwfm_debug = 2;
#else
#define DPRINTF(x)	do { ; } while (0)
#define DPRINTFN(n, x)	do { ; } while (0)
#endif

#define DEVNAME(sc)	device_xname((sc)->sc_sc.sc_dev)
#define letoh16		htole16
#define letoh32		htole32
#define nitems(x)	__arraycount(x)

enum ring_status {
	RING_CLOSED,
	RING_CLOSING,
	RING_OPEN,
	RING_OPENING,
};

struct bwfm_pci_msgring {
	uint32_t w_idx_addr;
	uint32_t r_idx_addr;
	uint32_t w_ptr;
	uint32_t r_ptr;
	int nitem;
	int itemsz;
	enum ring_status status;
	struct bwfm_pci_dmamem *ring;
	struct mbuf *m;

	int fifo;
	uint8_t mac[ETHER_ADDR_LEN];
};

struct bwfm_pci_buf {
	bus_dmamap_t bb_map;
	struct mbuf *bb_m;
};

struct bwfm_pci_pkts {
	struct bwfm_pci_buf *pkts;
	uint32_t npkt;
	int last;
};

struct if_rxring {
	u_int rxr_total;
	u_int rxr_inuse;
};

struct bwfm_cmd_flowring_create {
	struct work wq_cookie;
	struct bwfm_pci_softc *sc;
	struct mbuf *m;
	int flowid;
	int prio;
};
122
123 struct bwfm_pci_softc {
124 struct bwfm_softc sc_sc;
125 pci_chipset_tag_t sc_pc;
126 pcitag_t sc_tag;
127 pcireg_t sc_id;
128 void *sc_ih;
129 pci_intr_handle_t *sc_pihp;
130
131 bus_space_tag_t sc_reg_iot;
132 bus_space_handle_t sc_reg_ioh;
133 bus_size_t sc_reg_ios;
134
135 bus_space_tag_t sc_tcm_iot;
136 bus_space_handle_t sc_tcm_ioh;
137 bus_size_t sc_tcm_ios;
138
139 bus_dma_tag_t sc_dmat;
140
141 uint32_t sc_shared_address;
142 uint32_t sc_shared_flags;
143 uint8_t sc_shared_version;
144
145 uint8_t sc_dma_idx_sz;
146 struct bwfm_pci_dmamem *sc_dma_idx_buf;
147 size_t sc_dma_idx_bufsz;
148
149 uint16_t sc_max_rxbufpost;
150 uint32_t sc_rx_dataoffset;
151 uint32_t sc_htod_mb_data_addr;
152 uint32_t sc_dtoh_mb_data_addr;
153 uint32_t sc_ring_info_addr;
154
155 uint32_t sc_console_base_addr;
156 uint32_t sc_console_buf_addr;
157 uint32_t sc_console_buf_size;
158 uint32_t sc_console_readidx;
159
160 struct pool sc_flowring_pool;
161 struct workqueue *flowring_wq;
162
163 uint16_t sc_max_flowrings;
164 uint16_t sc_max_submissionrings;
165 uint16_t sc_max_completionrings;
166
167 struct bwfm_pci_msgring sc_ctrl_submit;
168 struct bwfm_pci_msgring sc_rxpost_submit;
169 struct bwfm_pci_msgring sc_ctrl_complete;
170 struct bwfm_pci_msgring sc_tx_complete;
171 struct bwfm_pci_msgring sc_rx_complete;
172 struct bwfm_pci_msgring *sc_flowrings;
173
174 struct bwfm_pci_dmamem *sc_scratch_buf;
175 struct bwfm_pci_dmamem *sc_ringupd_buf;
176
177 struct bwfm_pci_dmamem *sc_ioctl_buf;
178 int sc_ioctl_reqid;
179 uint32_t sc_ioctl_resp_pktid;
180 uint32_t sc_ioctl_resp_ret_len;
181 uint32_t sc_ioctl_resp_status;
182 int sc_ioctl_poll;
183
184 struct if_rxring sc_ioctl_ring;
185 struct if_rxring sc_event_ring;
186 struct if_rxring sc_rxbuf_ring;
187
188 struct bwfm_pci_pkts sc_rx_pkts;
189 struct bwfm_pci_pkts sc_tx_pkts;
190 int sc_tx_pkts_full;
191 };
192
193 struct bwfm_pci_dmamem {
194 bus_dmamap_t bdm_map;
195 bus_dma_segment_t bdm_seg;
196 size_t bdm_size;
197 char * bdm_kva;
198 };
199
200 #define BWFM_PCI_DMA_MAP(_bdm) ((_bdm)->bdm_map)
201 #define BWFM_PCI_DMA_LEN(_bdm) ((_bdm)->bdm_size)
202 #define BWFM_PCI_DMA_DVA(_bdm) ((_bdm)->bdm_map->dm_segs[0].ds_addr)
203 #define BWFM_PCI_DMA_KVA(_bdm) ((_bdm)->bdm_kva)
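
/*
 * Convenience accessors for a bwfm_pci_dmamem: MAP and LEN return the
 * bus_dma map and allocation size, DVA the device (DMA) address of the
 * single segment, and KVA the kernel virtual mapping of the same memory.
 */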

static u_int if_rxr_get(struct if_rxring *rxr, unsigned int max);
static void if_rxr_put(struct if_rxring *rxr, unsigned int n);
static void if_rxr_init(struct if_rxring *rxr, unsigned int lwm, unsigned int hwm);

int bwfm_pci_match(device_t parent, cfdata_t match, void *aux);
void bwfm_pci_attachhook(device_t);
void bwfm_pci_attach(device_t, device_t, void *);
int bwfm_pci_detach(device_t, int);

int bwfm_pci_intr(void *);
void bwfm_pci_intr_enable(struct bwfm_pci_softc *);
void bwfm_pci_intr_disable(struct bwfm_pci_softc *);
int bwfm_pci_load_microcode(struct bwfm_pci_softc *, const u_char *,
	    size_t);
void bwfm_pci_select_core(struct bwfm_pci_softc *, int);

struct bwfm_pci_dmamem *
	bwfm_pci_dmamem_alloc(struct bwfm_pci_softc *, bus_size_t,
	    bus_size_t);
void bwfm_pci_dmamem_free(struct bwfm_pci_softc *, struct bwfm_pci_dmamem *);
int bwfm_pci_pktid_avail(struct bwfm_pci_softc *,
	    struct bwfm_pci_pkts *);
int bwfm_pci_pktid_new(struct bwfm_pci_softc *,
	    struct bwfm_pci_pkts *, struct mbuf **,
	    uint32_t *, paddr_t *);
struct mbuf * bwfm_pci_pktid_free(struct bwfm_pci_softc *,
	    struct bwfm_pci_pkts *, uint32_t);
void bwfm_pci_fill_rx_ioctl_ring(struct bwfm_pci_softc *,
	    struct if_rxring *, uint32_t);
void bwfm_pci_fill_rx_buf_ring(struct bwfm_pci_softc *);
void bwfm_pci_fill_rx_rings(struct bwfm_pci_softc *);
int bwfm_pci_setup_ring(struct bwfm_pci_softc *, struct bwfm_pci_msgring *,
	    int, size_t, uint32_t, uint32_t, int, uint32_t, uint32_t *);
int bwfm_pci_setup_flowring(struct bwfm_pci_softc *, struct bwfm_pci_msgring *,
	    int, size_t);

void bwfm_pci_ring_bell(struct bwfm_pci_softc *,
	    struct bwfm_pci_msgring *);
void bwfm_pci_ring_update_rptr(struct bwfm_pci_softc *,
	    struct bwfm_pci_msgring *);
void bwfm_pci_ring_update_wptr(struct bwfm_pci_softc *,
	    struct bwfm_pci_msgring *);
void bwfm_pci_ring_write_rptr(struct bwfm_pci_softc *,
	    struct bwfm_pci_msgring *);
void bwfm_pci_ring_write_wptr(struct bwfm_pci_softc *,
	    struct bwfm_pci_msgring *);
void * bwfm_pci_ring_write_reserve(struct bwfm_pci_softc *,
	    struct bwfm_pci_msgring *);
void * bwfm_pci_ring_write_reserve_multi(struct bwfm_pci_softc *,
	    struct bwfm_pci_msgring *, int, int *);
void * bwfm_pci_ring_read_avail(struct bwfm_pci_softc *,
	    struct bwfm_pci_msgring *, int *);
void bwfm_pci_ring_read_commit(struct bwfm_pci_softc *,
	    struct bwfm_pci_msgring *, int);
void bwfm_pci_ring_write_commit(struct bwfm_pci_softc *,
	    struct bwfm_pci_msgring *);
void bwfm_pci_ring_write_cancel(struct bwfm_pci_softc *,
	    struct bwfm_pci_msgring *, int);

void bwfm_pci_ring_rx(struct bwfm_pci_softc *,
	    struct bwfm_pci_msgring *);
void bwfm_pci_msg_rx(struct bwfm_pci_softc *, void *);

uint32_t bwfm_pci_buscore_read(struct bwfm_softc *, uint32_t);
void bwfm_pci_buscore_write(struct bwfm_softc *, uint32_t,
	    uint32_t);
int bwfm_pci_buscore_prepare(struct bwfm_softc *);
int bwfm_pci_buscore_reset(struct bwfm_softc *);
void bwfm_pci_buscore_activate(struct bwfm_softc *, const uint32_t);

int bwfm_pci_flowring_lookup(struct bwfm_pci_softc *,
	    struct mbuf *);
void bwfm_pci_flowring_create(struct bwfm_pci_softc *,
	    struct mbuf *);
void bwfm_pci_flowring_create_cb(struct work *, void *);
void bwfm_pci_flowring_delete(struct bwfm_pci_softc *, int);

void bwfm_pci_stop(struct bwfm_softc *);
int bwfm_pci_txcheck(struct bwfm_softc *);
int bwfm_pci_txdata(struct bwfm_softc *, struct mbuf **);

#ifdef BWFM_DEBUG
void bwfm_pci_debug_console(struct bwfm_pci_softc *);
#endif

int bwfm_pci_msgbuf_query_dcmd(struct bwfm_softc *, int,
	    int, char *, size_t *);
int bwfm_pci_msgbuf_set_dcmd(struct bwfm_softc *, int,
	    int, char *, size_t);
struct bwfm_buscore_ops bwfm_pci_buscore_ops = {
	.bc_read = bwfm_pci_buscore_read,
	.bc_write = bwfm_pci_buscore_write,
	.bc_prepare = bwfm_pci_buscore_prepare,
	.bc_reset = bwfm_pci_buscore_reset,
	.bc_setup = NULL,
	.bc_activate = bwfm_pci_buscore_activate,
};

struct bwfm_bus_ops bwfm_pci_bus_ops = {
	.bs_init = NULL,
	.bs_stop = bwfm_pci_stop,
	.bs_txcheck = bwfm_pci_txcheck,
	.bs_txdata = bwfm_pci_txdata,
	.bs_txctl = NULL,
	.bs_rxctl = NULL,
};

struct bwfm_proto_ops bwfm_pci_msgbuf_ops = {
	.proto_query_dcmd = bwfm_pci_msgbuf_query_dcmd,
	.proto_set_dcmd = bwfm_pci_msgbuf_set_dcmd,
};

CFATTACH_DECL_NEW(bwfm_pci, sizeof(struct bwfm_pci_softc),
	bwfm_pci_match, bwfm_pci_attach, bwfm_pci_detach, NULL);

static const struct bwfm_pci_matchid {
	pci_vendor_id_t bwfm_vendor;
	pci_product_id_t bwfm_product;
} bwfm_pci_devices[] = {
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM43602 },
	{ PCI_VENDOR_BROADCOM, PCI_PRODUCT_BROADCOM_BCM4350 },
};

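/*
 * NetBSD has no MCLGETI; this local shim approximates OpenBSD's
 * MCLGETI() by allocating a packet header mbuf with an external
 * buffer of the requested size.
 */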
static struct mbuf *
MCLGETI(struct bwfm_pci_softc *sc __unused, int how,
    struct ifnet *ifp __unused, u_int size)
{
	struct mbuf *m;

	MGETHDR(m, how, MT_DATA);
	if (m == NULL)
		return NULL;

	MEXTMALLOC(m, size, how);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return NULL;
	}
	return m;
}

int
bwfm_pci_match(device_t parent, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_BROADCOM)
		return 0;

	for (size_t i = 0; i < __arraycount(bwfm_pci_devices); i++)
		if (PCI_PRODUCT(pa->pa_id) == bwfm_pci_devices[i].bwfm_product)
			return 1;

	return 0;
}

void
bwfm_pci_attach(device_t parent, device_t self, void *aux)
{
	struct bwfm_pci_softc *sc = device_private(self);
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	const char *intrstr;
	char intrbuf[PCI_INTRSTR_LEN];

	sc->sc_sc.sc_dev = self;

	if (pci_mapreg_map(pa, PCI_MAPREG_START + 0x00,
	    PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->sc_reg_iot, &sc->sc_reg_ioh,
	    NULL, &sc->sc_reg_ios)) {
		printf(": can't map bar0\n");
		return;
	}

	if (pci_mapreg_map(pa, PCI_MAPREG_START + 0x08,
	    PCI_MAPREG_MEM_TYPE_64BIT, 0, &sc->sc_tcm_iot, &sc->sc_tcm_ioh,
	    NULL, &sc->sc_tcm_ios)) {
		printf(": can't map bar1\n");
		goto bar0;
	}

	sc->sc_pc = pa->pa_pc;
	sc->sc_tag = pa->pa_tag;
	sc->sc_id = pa->pa_id;

	if (pci_dma64_available(pa))
		sc->sc_dmat = pa->pa_dmat64;
	else
		sc->sc_dmat = pa->pa_dmat;

	/* Map and establish the interrupt. */
	if (pci_intr_alloc(pa, &sc->sc_pihp, NULL, 0) != 0) {
		printf(": couldn't map interrupt\n");
		goto bar1;
	}
	intrstr = pci_intr_string(pa->pa_pc, sc->sc_pihp[0], intrbuf,
	    sizeof(intrbuf));

	sc->sc_ih = pci_intr_establish(pa->pa_pc, sc->sc_pihp[0], IPL_NET,
	    bwfm_pci_intr, sc);
	if (sc->sc_ih == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto bar1;
	}
	printf(": %s\n", intrstr);

	config_mountroot(self, bwfm_pci_attachhook);
	return;

bar1:
	bus_space_unmap(sc->sc_tcm_iot, sc->sc_tcm_ioh, sc->sc_tcm_ios);
bar0:
	bus_space_unmap(sc->sc_reg_iot, sc->sc_reg_ioh, sc->sc_reg_ios);
}

void
bwfm_pci_attachhook(device_t self)
{
	struct bwfm_pci_softc *sc = device_private(self);
	struct bwfm_softc *bwfm = (void *)sc;
	struct bwfm_pci_ringinfo ringinfo;
	const char *name = NULL;
	firmware_handle_t fwh;
	u_char *ucode;
	size_t size;
	uint32_t d2h_w_idx_ptr, d2h_r_idx_ptr;
	uint32_t h2d_w_idx_ptr, h2d_r_idx_ptr;
	uint32_t idx_offset, reg;
	int i;
	int error;

	sc->sc_sc.sc_buscore_ops = &bwfm_pci_buscore_ops;
	if (bwfm_chip_attach(&sc->sc_sc) != 0) {
		printf("%s: cannot attach chip\n", DEVNAME(sc));
		return;
	}

	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_PCIE2);
	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_PCI_PCIE2REG_CONFIGADDR, 0x4e0);
	reg = bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_PCI_PCIE2REG_CONFIGDATA);
	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_PCI_PCIE2REG_CONFIGDATA, reg);

	switch (bwfm->sc_chip.ch_chip)
	{
	case BRCM_CC_4350_CHIP_ID:
		if (bwfm->sc_chip.ch_chiprev > 7)
			name = "brcmfmac4350-pcie.bin";
		else
			name = "brcmfmac4350c2-pcie.bin";
		break;
	case BRCM_CC_43602_CHIP_ID:
		name = "brcmfmac43602-pcie.bin";
		break;
	default:
		printf("%s: unknown firmware for chip %s\n",
		    DEVNAME(sc), bwfm->sc_chip.ch_name);
		return;
	}

	if (firmware_open("if_bwfm", name, &fwh) != 0) {
		printf("%s: failed firmware_open of file %s\n",
		    DEVNAME(sc), name);
		return;
	}
	size = firmware_get_size(fwh);
	ucode = firmware_malloc(size);
	if (ucode == NULL) {
		printf("%s: failed to allocate firmware memory\n",
		    DEVNAME(sc));
		firmware_close(fwh);
		return;
	}
	error = firmware_read(fwh, 0, ucode, size);
	firmware_close(fwh);
	if (error != 0) {
		printf("%s: failed to read firmware (error %d)\n",
		    DEVNAME(sc), error);
		firmware_free(ucode, size);
		return;
	}

	/* Retrieve RAM size from firmware. */
	if (size >= BWFM_RAMSIZE + 8) {
		uint32_t *ramsize = (uint32_t *)&ucode[BWFM_RAMSIZE];
		if (letoh32(ramsize[0]) == BWFM_RAMSIZE_MAGIC)
			bwfm->sc_chip.ch_ramsize = letoh32(ramsize[1]);
	}

	if (bwfm_pci_load_microcode(sc, ucode, size) != 0) {
		printf("%s: could not load microcode\n",
		    DEVNAME(sc));
		firmware_free(ucode, size);
		return;
	}

	firmware_free(ucode, size);

	sc->sc_shared_flags = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_INFO);
	sc->sc_shared_version = sc->sc_shared_flags;
	if (sc->sc_shared_version > BWFM_SHARED_INFO_MAX_VERSION ||
	    sc->sc_shared_version < BWFM_SHARED_INFO_MIN_VERSION) {
		printf("%s: PCIe version %d unsupported\n",
		    DEVNAME(sc), sc->sc_shared_version);
		return;
	}

	if (sc->sc_shared_flags & BWFM_SHARED_INFO_DMA_INDEX) {
		if (sc->sc_shared_flags & BWFM_SHARED_INFO_DMA_2B_IDX)
			sc->sc_dma_idx_sz = sizeof(uint16_t);
		else
			sc->sc_dma_idx_sz = sizeof(uint32_t);
	}

	/* Maximum RX data buffers in the ring. */
	sc->sc_max_rxbufpost = bus_space_read_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_MAX_RXBUFPOST);
	if (sc->sc_max_rxbufpost == 0)
		sc->sc_max_rxbufpost = BWFM_SHARED_MAX_RXBUFPOST_DEFAULT;

	/* Alternative offset of data in a packet */
	sc->sc_rx_dataoffset = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_RX_DATAOFFSET);

	/* For Power Management */
	sc->sc_htod_mb_data_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_HTOD_MB_DATA_ADDR);
	sc->sc_dtoh_mb_data_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DTOH_MB_DATA_ADDR);

	/* Ring information */
	sc->sc_ring_info_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_RING_INFO_ADDR);

	/* Firmware's "dmesg" */
	sc->sc_console_base_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_CONSOLE_ADDR);
	sc->sc_console_buf_addr = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_console_base_addr + BWFM_CONSOLE_BUFADDR);
	sc->sc_console_buf_size = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_console_base_addr + BWFM_CONSOLE_BUFSIZE);

	/* Read ring information. */
	bus_space_read_region_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_ring_info_addr, (void *)&ringinfo, sizeof(ringinfo));

	if (sc->sc_shared_version >= 6) {
		sc->sc_max_submissionrings = le16toh(ringinfo.max_submissionrings);
		sc->sc_max_flowrings = le16toh(ringinfo.max_flowrings);
		sc->sc_max_completionrings = le16toh(ringinfo.max_completionrings);
	} else {
		sc->sc_max_submissionrings = le16toh(ringinfo.max_flowrings);
		sc->sc_max_flowrings = sc->sc_max_submissionrings -
		    BWFM_NUM_TX_MSGRINGS;
		sc->sc_max_completionrings = BWFM_NUM_RX_MSGRINGS;
	}

	if (sc->sc_dma_idx_sz == 0) {
		d2h_w_idx_ptr = letoh32(ringinfo.d2h_w_idx_ptr);
		d2h_r_idx_ptr = letoh32(ringinfo.d2h_r_idx_ptr);
		h2d_w_idx_ptr = letoh32(ringinfo.h2d_w_idx_ptr);
		h2d_r_idx_ptr = letoh32(ringinfo.h2d_r_idx_ptr);
		idx_offset = sizeof(uint32_t);
	} else {
		uint64_t address;

		/* Each TX/RX Ring has a Read and Write Ptr */
		sc->sc_dma_idx_bufsz = (sc->sc_max_submissionrings +
		    sc->sc_max_completionrings) * sc->sc_dma_idx_sz * 2;
		sc->sc_dma_idx_buf = bwfm_pci_dmamem_alloc(sc,
		    sc->sc_dma_idx_bufsz, 8);
		if (sc->sc_dma_idx_buf == NULL) {
			/* XXX: Fallback to TCM? */
			printf("%s: cannot allocate idx buf\n",
			    DEVNAME(sc));
			return;
		}

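		/*
		 * The index buffer is laid out as four consecutive
		 * arrays: h2d write indices, h2d read indices, d2h
		 * write indices and d2h read indices.  Tell the
		 * firmware the host address of each array.
		 */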
		idx_offset = sc->sc_dma_idx_sz;
		h2d_w_idx_ptr = 0;
		address = BWFM_PCI_DMA_DVA(sc->sc_dma_idx_buf);
		ringinfo.h2d_w_idx_hostaddr_low =
		    htole32(address & 0xffffffff);
		ringinfo.h2d_w_idx_hostaddr_high =
		    htole32(address >> 32);

		h2d_r_idx_ptr = h2d_w_idx_ptr +
		    sc->sc_max_submissionrings * idx_offset;
		address += sc->sc_max_submissionrings * idx_offset;
		ringinfo.h2d_r_idx_hostaddr_low =
		    htole32(address & 0xffffffff);
		ringinfo.h2d_r_idx_hostaddr_high =
		    htole32(address >> 32);

		d2h_w_idx_ptr = h2d_r_idx_ptr +
		    sc->sc_max_submissionrings * idx_offset;
		address += sc->sc_max_submissionrings * idx_offset;
		ringinfo.d2h_w_idx_hostaddr_low =
		    htole32(address & 0xffffffff);
		ringinfo.d2h_w_idx_hostaddr_high =
		    htole32(address >> 32);

		d2h_r_idx_ptr = d2h_w_idx_ptr +
		    sc->sc_max_completionrings * idx_offset;
		address += sc->sc_max_completionrings * idx_offset;
		ringinfo.d2h_r_idx_hostaddr_low =
		    htole32(address & 0xffffffff);
		ringinfo.d2h_r_idx_hostaddr_high =
		    htole32(address >> 32);

		bus_space_write_region_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    sc->sc_ring_info_addr, (void *)&ringinfo, sizeof(ringinfo));
	}

	uint32_t ring_mem_ptr = letoh32(ringinfo.ringmem);
	/* TX ctrl ring: Send ctrl buffers, send IOCTLs */
	if (bwfm_pci_setup_ring(sc, &sc->sc_ctrl_submit, 64, 40,
	    h2d_w_idx_ptr, h2d_r_idx_ptr, 0, idx_offset,
	    &ring_mem_ptr))
		goto cleanup;
	/* TX rxpost ring: Send clean data mbufs for RX */
	if (bwfm_pci_setup_ring(sc, &sc->sc_rxpost_submit, 512, 32,
	    h2d_w_idx_ptr, h2d_r_idx_ptr, 1, idx_offset,
	    &ring_mem_ptr))
		goto cleanup;
	/* RX completion rings: recv our filled buffers back */
	if (bwfm_pci_setup_ring(sc, &sc->sc_ctrl_complete, 64, 24,
	    d2h_w_idx_ptr, d2h_r_idx_ptr, 0, idx_offset,
	    &ring_mem_ptr))
		goto cleanup;
	if (bwfm_pci_setup_ring(sc, &sc->sc_tx_complete, 1024, 16,
	    d2h_w_idx_ptr, d2h_r_idx_ptr, 1, idx_offset,
	    &ring_mem_ptr))
		goto cleanup;
	if (bwfm_pci_setup_ring(sc, &sc->sc_rx_complete, 512, 32,
	    d2h_w_idx_ptr, d2h_r_idx_ptr, 2, idx_offset,
	    &ring_mem_ptr))
		goto cleanup;

	/* Dynamic TX rings for actual data */
	sc->sc_flowrings = kmem_zalloc(sc->sc_max_flowrings *
	    sizeof(struct bwfm_pci_msgring), KM_SLEEP);
	for (i = 0; i < sc->sc_max_flowrings; i++) {
		struct bwfm_pci_msgring *ring = &sc->sc_flowrings[i];
		ring->w_idx_addr = h2d_w_idx_ptr + (i + 2) * idx_offset;
		ring->r_idx_addr = h2d_r_idx_ptr + (i + 2) * idx_offset;
	}

	pool_init(&sc->sc_flowring_pool, sizeof(struct bwfm_cmd_flowring_create),
	    0, 0, 0, "bwfmpl", NULL, IPL_NET);

	/* Scratch and ring update buffers for firmware */
	if ((sc->sc_scratch_buf = bwfm_pci_dmamem_alloc(sc,
	    BWFM_DMA_D2H_SCRATCH_BUF_LEN, 8)) == NULL)
		goto cleanup;
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DMA_SCRATCH_ADDR_LOW,
	    BWFM_PCI_DMA_DVA(sc->sc_scratch_buf) & 0xffffffff);
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DMA_SCRATCH_ADDR_HIGH,
	    BWFM_PCI_DMA_DVA(sc->sc_scratch_buf) >> 32);
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DMA_SCRATCH_LEN,
	    BWFM_DMA_D2H_SCRATCH_BUF_LEN);

	if ((sc->sc_ringupd_buf = bwfm_pci_dmamem_alloc(sc,
	    BWFM_DMA_D2H_RINGUPD_BUF_LEN, 8)) == NULL)
		goto cleanup;
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DMA_RINGUPD_ADDR_LOW,
	    BWFM_PCI_DMA_DVA(sc->sc_ringupd_buf) & 0xffffffff);
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DMA_RINGUPD_ADDR_HIGH,
	    BWFM_PCI_DMA_DVA(sc->sc_ringupd_buf) >> 32);
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    sc->sc_shared_address + BWFM_SHARED_DMA_RINGUPD_LEN,
	    BWFM_DMA_D2H_RINGUPD_BUF_LEN);

	if ((sc->sc_ioctl_buf = bwfm_pci_dmamem_alloc(sc,
	    BWFM_DMA_H2D_IOCTL_BUF_LEN, 8)) == NULL)
		goto cleanup;

	if (workqueue_create(&sc->flowring_wq, "bwfmflow",
	    bwfm_pci_flowring_create_cb, sc, PRI_SOFTNET, IPL_NET, 0))
		goto cleanup;

	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_PCIE2);
	bwfm_pci_intr_enable(sc);

	/* Maps RX mbufs to a packet id and back. */
	sc->sc_rx_pkts.npkt = BWFM_NUM_RX_PKTIDS;
	sc->sc_rx_pkts.pkts = kmem_zalloc(BWFM_NUM_RX_PKTIDS *
	    sizeof(struct bwfm_pci_buf), KM_SLEEP);
	for (i = 0; i < BWFM_NUM_RX_PKTIDS; i++)
		bus_dmamap_create(sc->sc_dmat, MSGBUF_MAX_PKT_SIZE,
		    BWFM_NUM_RX_DESCS, MSGBUF_MAX_PKT_SIZE, 0, BUS_DMA_WAITOK,
		    &sc->sc_rx_pkts.pkts[i].bb_map);

	/* Maps TX mbufs to a packet id and back. */
	sc->sc_tx_pkts.npkt = BWFM_NUM_TX_PKTIDS;
	sc->sc_tx_pkts.pkts = kmem_zalloc(BWFM_NUM_TX_PKTIDS
	    * sizeof(struct bwfm_pci_buf), KM_SLEEP);
	for (i = 0; i < BWFM_NUM_TX_PKTIDS; i++)
		bus_dmamap_create(sc->sc_dmat, MSGBUF_MAX_PKT_SIZE,
		    BWFM_NUM_TX_DESCS, MSGBUF_MAX_PKT_SIZE, 0, BUS_DMA_WAITOK,
		    &sc->sc_tx_pkts.pkts[i].bb_map);

	/*
	 * For whatever reason, could also be a bug somewhere in this
	 * driver, the firmware needs a bunch of RX buffers otherwise
	 * it won't send any RX complete messages.  64 buffers don't
	 * suffice, but 128 buffers are enough.
	 */
	if_rxr_init(&sc->sc_rxbuf_ring, 128, sc->sc_max_rxbufpost);
	if_rxr_init(&sc->sc_ioctl_ring, 8, 8);
	if_rxr_init(&sc->sc_event_ring, 8, 8);
	bwfm_pci_fill_rx_rings(sc);

#ifdef BWFM_DEBUG
	sc->sc_console_readidx = 0;
	bwfm_pci_debug_console(sc);
#endif

	sc->sc_ioctl_poll = 1;
	sc->sc_sc.sc_bus_ops = &bwfm_pci_bus_ops;
	sc->sc_sc.sc_proto_ops = &bwfm_pci_msgbuf_ops;
	bwfm_attach(&sc->sc_sc);
	sc->sc_ioctl_poll = 0;
	return;

cleanup:
	if (sc->flowring_wq != NULL)
		workqueue_destroy(sc->flowring_wq);
	if (sc->sc_ih != NULL) {
		pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
		pci_intr_release(sc->sc_pc, sc->sc_pihp, 1);
	}
	if (sc->sc_ioctl_buf)
		bwfm_pci_dmamem_free(sc, sc->sc_ioctl_buf);
	if (sc->sc_ringupd_buf)
		bwfm_pci_dmamem_free(sc, sc->sc_ringupd_buf);
	if (sc->sc_scratch_buf)
		bwfm_pci_dmamem_free(sc, sc->sc_scratch_buf);
	if (sc->sc_rx_complete.ring)
		bwfm_pci_dmamem_free(sc, sc->sc_rx_complete.ring);
	if (sc->sc_tx_complete.ring)
		bwfm_pci_dmamem_free(sc, sc->sc_tx_complete.ring);
	if (sc->sc_ctrl_complete.ring)
		bwfm_pci_dmamem_free(sc, sc->sc_ctrl_complete.ring);
	if (sc->sc_rxpost_submit.ring)
		bwfm_pci_dmamem_free(sc, sc->sc_rxpost_submit.ring);
	if (sc->sc_ctrl_submit.ring)
		bwfm_pci_dmamem_free(sc, sc->sc_ctrl_submit.ring);
	if (sc->sc_dma_idx_buf)
		bwfm_pci_dmamem_free(sc, sc->sc_dma_idx_buf);
}

int
bwfm_pci_load_microcode(struct bwfm_pci_softc *sc, const u_char *ucode, size_t size)
{
	struct bwfm_softc *bwfm = (void *)sc;
	struct bwfm_core *core;
	uint32_t shared;
	int i;

	if (bwfm->sc_chip.ch_chip == BRCM_CC_43602_CHIP_ID) {
		bwfm_pci_select_core(sc, BWFM_AGENT_CORE_ARM_CR4);
		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
		    BWFM_PCI_ARMCR4REG_BANKIDX, 5);
		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
		    BWFM_PCI_ARMCR4REG_BANKPDA, 0);
		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
		    BWFM_PCI_ARMCR4REG_BANKIDX, 7);
		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
		    BWFM_PCI_ARMCR4REG_BANKPDA, 0);
	}

	for (i = 0; i < size; i++)
		bus_space_write_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    bwfm->sc_chip.ch_rambase + i, ucode[i]);

	/* Firmware replaces this with a pointer once up. */
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    bwfm->sc_chip.ch_rambase + bwfm->sc_chip.ch_ramsize - 4, 0);

	/* TODO: restore NVRAM */

	/* Load reset vector from firmware and kickstart core. */
	if (bwfm->sc_chip.ch_chip == BRCM_CC_43602_CHIP_ID) {
		core = bwfm_chip_get_core(bwfm, BWFM_AGENT_INTERNAL_MEM);
		bwfm->sc_chip.ch_core_reset(bwfm, core, 0, 0, 0);
	}
	bwfm_chip_set_active(bwfm, *(const uint32_t *)ucode);

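	/*
	 * Poll the last word of RAM for the firmware handshake: it was
	 * zeroed above and the firmware overwrites it with the address
	 * of its shared info structure.  Allow up to 40 * 50ms = 2s.
	 */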
	for (i = 0; i < 40; i++) {
		delay(50 * 1000);
		shared = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    bwfm->sc_chip.ch_rambase + bwfm->sc_chip.ch_ramsize - 4);
		if (shared)
			break;
	}
	if (!shared) {
		printf("%s: firmware did not come up\n", DEVNAME(sc));
		return 1;
	}

	sc->sc_shared_address = shared;
	return 0;
}

int
bwfm_pci_detach(device_t self, int flags)
{
	struct bwfm_pci_softc *sc = device_private(self);

	bwfm_detach(&sc->sc_sc, flags);

	/* FIXME: free RX buffers */
	/* FIXME: free TX buffers */
	/* FIXME: free more memory */

	kmem_free(sc->sc_flowrings, sc->sc_max_flowrings
	    * sizeof(struct bwfm_pci_msgring));
	pool_destroy(&sc->sc_flowring_pool);

	workqueue_destroy(sc->flowring_wq);
	pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
	pci_intr_release(sc->sc_pc, sc->sc_pihp, 1);
	bwfm_pci_dmamem_free(sc, sc->sc_ioctl_buf);
	bwfm_pci_dmamem_free(sc, sc->sc_ringupd_buf);
	bwfm_pci_dmamem_free(sc, sc->sc_scratch_buf);
	bwfm_pci_dmamem_free(sc, sc->sc_rx_complete.ring);
	bwfm_pci_dmamem_free(sc, sc->sc_tx_complete.ring);
	bwfm_pci_dmamem_free(sc, sc->sc_ctrl_complete.ring);
	bwfm_pci_dmamem_free(sc, sc->sc_rxpost_submit.ring);
	bwfm_pci_dmamem_free(sc, sc->sc_ctrl_submit.ring);
	bwfm_pci_dmamem_free(sc, sc->sc_dma_idx_buf);
	return 0;
}

/* DMA code */
struct bwfm_pci_dmamem *
bwfm_pci_dmamem_alloc(struct bwfm_pci_softc *sc, bus_size_t size, bus_size_t align)
{
	struct bwfm_pci_dmamem *bdm;
	int nsegs;

	bdm = kmem_zalloc(sizeof(*bdm), KM_SLEEP);
	bdm->bdm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW, &bdm->bdm_map) != 0)
		goto bdmfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, align, 0, &bdm->bdm_seg, 1,
	    &nsegs, BUS_DMA_WAITOK) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &bdm->bdm_seg, nsegs, size,
	    (void **)&bdm->bdm_kva, BUS_DMA_WAITOK | BUS_DMA_COHERENT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, bdm->bdm_map, bdm->bdm_kva, size,
	    NULL, BUS_DMA_WAITOK) != 0)
		goto unmap;

	bzero(bdm->bdm_kva, size);

	return (bdm);

unmap:
	bus_dmamem_unmap(sc->sc_dmat, bdm->bdm_kva, size);
free:
	bus_dmamem_free(sc->sc_dmat, &bdm->bdm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, bdm->bdm_map);
bdmfree:
	kmem_free(bdm, sizeof(*bdm));

	return (NULL);
}

void
bwfm_pci_dmamem_free(struct bwfm_pci_softc *sc, struct bwfm_pci_dmamem *bdm)
{
	bus_dmamem_unmap(sc->sc_dmat, bdm->bdm_kva, bdm->bdm_size);
	bus_dmamem_free(sc->sc_dmat, &bdm->bdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, bdm->bdm_map);
	kmem_free(bdm, sizeof(*bdm));
}

/*
 * We need a simple mapping from a packet ID to mbufs, because when
 * a transfer completes, we only know the ID, so we have to look up
 * the mbuf behind that ID.  This simply looks for an empty slot.
 */
int
bwfm_pci_pktid_avail(struct bwfm_pci_softc *sc, struct bwfm_pci_pkts *pkts)
{
	int i, idx;

	idx = pkts->last + 1;
	for (i = 0; i < pkts->npkt; i++) {
		if (idx == pkts->npkt)
			idx = 0;
		if (pkts->pkts[idx].bb_m == NULL)
			return 0;
		idx++;
	}
	return ENOBUFS;
}

int
bwfm_pci_pktid_new(struct bwfm_pci_softc *sc, struct bwfm_pci_pkts *pkts,
    struct mbuf **mp, uint32_t *pktid, paddr_t *paddr)
{
	int i, idx;

	idx = pkts->last + 1;
	for (i = 0; i < pkts->npkt; i++) {
		if (idx == pkts->npkt)
			idx = 0;
		if (pkts->pkts[idx].bb_m == NULL) {
			if (bus_dmamap_load_mbuf(sc->sc_dmat,
			    pkts->pkts[idx].bb_map, *mp, BUS_DMA_NOWAIT) != 0) {
				/*
				 * Didn't fit.  Maybe it has too many
				 * segments.  If it has only one
				 * segment, fail; otherwise try to
				 * compact it into a single mbuf
				 * segment.
				 */
				if ((*mp)->m_next == NULL)
					return ENOBUFS;
				struct mbuf *m0 = MCLGETI(NULL, M_DONTWAIT,
				    NULL, MSGBUF_MAX_PKT_SIZE);
				if (m0 == NULL)
					return ENOBUFS;
				m_copydata(*mp, 0, (*mp)->m_pkthdr.len,
				    mtod(m0, void *));
				m0->m_pkthdr.len = m0->m_len =
				    (*mp)->m_pkthdr.len;
				m_freem(*mp);
				*mp = m0;
				if (bus_dmamap_load_mbuf(sc->sc_dmat,
				    pkts->pkts[idx].bb_map, *mp, BUS_DMA_NOWAIT) != 0)
					return EFBIG;
			}
			pkts->last = idx;
			pkts->pkts[idx].bb_m = *mp;
			*pktid = idx;
			*paddr = pkts->pkts[idx].bb_map->dm_segs[0].ds_addr;
			return 0;
		}
		idx++;
	}
	return ENOBUFS;
}

struct mbuf *
bwfm_pci_pktid_free(struct bwfm_pci_softc *sc, struct bwfm_pci_pkts *pkts,
    uint32_t pktid)
{
	struct mbuf *m;

	if (pktid >= pkts->npkt || pkts->pkts[pktid].bb_m == NULL)
		return NULL;
	bus_dmamap_unload(sc->sc_dmat, pkts->pkts[pktid].bb_map);
	m = pkts->pkts[pktid].bb_m;
	pkts->pkts[pktid].bb_m = NULL;
	return m;
}

void
bwfm_pci_fill_rx_rings(struct bwfm_pci_softc *sc)
{
	bwfm_pci_fill_rx_buf_ring(sc);
	bwfm_pci_fill_rx_ioctl_ring(sc, &sc->sc_ioctl_ring,
	    MSGBUF_TYPE_IOCTLRESP_BUF_POST);
	bwfm_pci_fill_rx_ioctl_ring(sc, &sc->sc_event_ring,
	    MSGBUF_TYPE_EVENT_BUF_POST);
}

void
bwfm_pci_fill_rx_ioctl_ring(struct bwfm_pci_softc *sc, struct if_rxring *rxring,
    uint32_t msgtype)
{
	struct msgbuf_rx_ioctl_resp_or_event *req;
	struct mbuf *m;
	uint32_t pktid;
	paddr_t paddr;
	int s, slots;

	s = splnet();
	for (slots = if_rxr_get(rxring, 8); slots > 0; slots--) {
		if (bwfm_pci_pktid_avail(sc, &sc->sc_rx_pkts))
			break;
		req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
		if (req == NULL)
			break;
		m = MCLGETI(NULL, M_DONTWAIT, NULL, MSGBUF_MAX_PKT_SIZE);
		if (m == NULL) {
			bwfm_pci_ring_write_cancel(sc, &sc->sc_ctrl_submit, 1);
			break;
		}
		m->m_len = m->m_pkthdr.len = MSGBUF_MAX_PKT_SIZE;
		if (bwfm_pci_pktid_new(sc, &sc->sc_rx_pkts, &m, &pktid, &paddr)) {
			bwfm_pci_ring_write_cancel(sc, &sc->sc_ctrl_submit, 1);
			m_freem(m);
			break;
		}
		memset(req, 0, sizeof(*req));
		req->msg.msgtype = msgtype;
		req->msg.request_id = htole32(pktid);
		req->host_buf_len = htole16(MSGBUF_MAX_PKT_SIZE);
		req->host_buf_addr.high_addr = htole32(paddr >> 32);
		req->host_buf_addr.low_addr = htole32(paddr & 0xffffffff);
		bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);
	}
	if_rxr_put(rxring, slots);
	splx(s);
}

void
bwfm_pci_fill_rx_buf_ring(struct bwfm_pci_softc *sc)
{
	struct msgbuf_rx_bufpost *req;
	struct mbuf *m;
	uint32_t pktid;
	paddr_t paddr;
	int s, slots;

	s = splnet();
	for (slots = if_rxr_get(&sc->sc_rxbuf_ring, sc->sc_max_rxbufpost);
	    slots > 0; slots--) {
		if (bwfm_pci_pktid_avail(sc, &sc->sc_rx_pkts))
			break;
		req = bwfm_pci_ring_write_reserve(sc, &sc->sc_rxpost_submit);
		if (req == NULL)
			break;
		m = MCLGETI(NULL, M_DONTWAIT, NULL, MSGBUF_MAX_PKT_SIZE);
		if (m == NULL) {
			bwfm_pci_ring_write_cancel(sc, &sc->sc_rxpost_submit, 1);
			break;
		}
		m->m_len = m->m_pkthdr.len = MSGBUF_MAX_PKT_SIZE;
		if (bwfm_pci_pktid_new(sc, &sc->sc_rx_pkts, &m, &pktid, &paddr)) {
			bwfm_pci_ring_write_cancel(sc, &sc->sc_rxpost_submit, 1);
			m_freem(m);
			break;
		}
		memset(req, 0, sizeof(*req));
		req->msg.msgtype = MSGBUF_TYPE_RXBUF_POST;
		req->msg.request_id = htole32(pktid);
		req->data_buf_len = htole16(MSGBUF_MAX_PKT_SIZE);
		req->data_buf_addr.high_addr = htole32(paddr >> 32);
		req->data_buf_addr.low_addr = htole32(paddr & 0xffffffff);
		bwfm_pci_ring_write_commit(sc, &sc->sc_rxpost_submit);
	}
	if_rxr_put(&sc->sc_rxbuf_ring, slots);
	splx(s);
}

int
bwfm_pci_setup_ring(struct bwfm_pci_softc *sc, struct bwfm_pci_msgring *ring,
    int nitem, size_t itemsz, uint32_t w_idx, uint32_t r_idx,
    int idx, uint32_t idx_off, uint32_t *ring_mem)
{
	ring->w_idx_addr = w_idx + idx * idx_off;
	ring->r_idx_addr = r_idx + idx * idx_off;
	ring->nitem = nitem;
	ring->itemsz = itemsz;
	bwfm_pci_ring_write_rptr(sc, ring);
	bwfm_pci_ring_write_wptr(sc, ring);

	ring->ring = bwfm_pci_dmamem_alloc(sc, nitem * itemsz, 8);
	if (ring->ring == NULL)
		return ENOMEM;
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    *ring_mem + BWFM_RING_MEM_BASE_ADDR_LOW,
	    BWFM_PCI_DMA_DVA(ring->ring) & 0xffffffff);
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    *ring_mem + BWFM_RING_MEM_BASE_ADDR_HIGH,
	    BWFM_PCI_DMA_DVA(ring->ring) >> 32);
	bus_space_write_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    *ring_mem + BWFM_RING_MAX_ITEM, nitem);
	bus_space_write_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
	    *ring_mem + BWFM_RING_LEN_ITEMS, itemsz);
	*ring_mem = *ring_mem + BWFM_RING_MEM_SZ;
	return 0;
}

int
bwfm_pci_setup_flowring(struct bwfm_pci_softc *sc, struct bwfm_pci_msgring *ring,
    int nitem, size_t itemsz)
{
	ring->w_ptr = 0;
	ring->r_ptr = 0;
	ring->nitem = nitem;
	ring->itemsz = itemsz;
	bwfm_pci_ring_write_rptr(sc, ring);
	bwfm_pci_ring_write_wptr(sc, ring);

	ring->ring = bwfm_pci_dmamem_alloc(sc, nitem * itemsz, 8);
	if (ring->ring == NULL)
		return ENOMEM;
	return 0;
}

/* Ring helpers */
void
bwfm_pci_ring_bell(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_PCI_PCIE2REG_H2D_MAILBOX, 1);
}

void
bwfm_pci_ring_update_rptr(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	if (sc->sc_dma_idx_sz == 0) {
		ring->r_ptr = bus_space_read_2(sc->sc_tcm_iot,
		    sc->sc_tcm_ioh, ring->r_idx_addr);
	} else {
		ring->r_ptr = *(uint16_t *)(BWFM_PCI_DMA_KVA(sc->sc_dma_idx_buf)
		    + ring->r_idx_addr);
	}
}

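/*
 * Minimal stand-ins for OpenBSD's if_rxring accounting: they only
 * track how many RX slots are handed out versus the configured total.
 */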
static u_int
if_rxr_get(struct if_rxring *rxr, unsigned int max)
{
	u_int taken = MIN(max, (rxr->rxr_total - rxr->rxr_inuse));

	KASSERTMSG(rxr->rxr_inuse + taken <= rxr->rxr_total,
	    "rxr->rxr_inuse: %d\n"
	    "taken: %d\n"
	    "rxr->rxr_total: %d\n",
	    rxr->rxr_inuse, taken, rxr->rxr_total);
	rxr->rxr_inuse += taken;

	return taken;
}

static void
if_rxr_put(struct if_rxring *rxr, unsigned int n)
{
	KASSERTMSG(rxr->rxr_inuse >= n,
	    "rxr->rxr_inuse: %d\n"
	    "n: %d\n"
	    "rxr->rxr_total: %d\n",
	    rxr->rxr_inuse, n, rxr->rxr_total);

	rxr->rxr_inuse -= n;
}

static void
if_rxr_init(struct if_rxring *rxr, unsigned int lwm __unused, unsigned int hwm)
{
	(void)lwm;

	rxr->rxr_total = hwm;
	rxr->rxr_inuse = 0;
}

void
bwfm_pci_ring_update_wptr(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	if (sc->sc_dma_idx_sz == 0) {
		ring->w_ptr = bus_space_read_2(sc->sc_tcm_iot,
		    sc->sc_tcm_ioh, ring->w_idx_addr);
	} else {
		ring->w_ptr = *(uint16_t *)(BWFM_PCI_DMA_KVA(sc->sc_dma_idx_buf)
		    + ring->w_idx_addr);
	}
}

void
bwfm_pci_ring_write_rptr(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	if (sc->sc_dma_idx_sz == 0) {
		bus_space_write_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    ring->r_idx_addr, ring->r_ptr);
	} else {
		*(uint16_t *)(BWFM_PCI_DMA_KVA(sc->sc_dma_idx_buf)
		    + ring->r_idx_addr) = ring->r_ptr;
	}
}

void
bwfm_pci_ring_write_wptr(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	if (sc->sc_dma_idx_sz == 0) {
		bus_space_write_2(sc->sc_tcm_iot, sc->sc_tcm_ioh,
		    ring->w_idx_addr, ring->w_ptr);
	} else {
		*(uint16_t *)(BWFM_PCI_DMA_KVA(sc->sc_dma_idx_buf)
		    + ring->w_idx_addr) = ring->w_ptr;
	}
}

/*
 * Retrieve a free descriptor to put new stuff in, but don't commit
 * to it yet so we can roll back later if any error occurs.
 */
void *
bwfm_pci_ring_write_reserve(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	int available;
	char *ret;

	bwfm_pci_ring_update_rptr(sc, ring);

	if (ring->r_ptr > ring->w_ptr)
		available = ring->r_ptr - ring->w_ptr;
	else
		available = ring->r_ptr + (ring->nitem - ring->w_ptr);

	if (available < 1)
		return NULL;

	ret = BWFM_PCI_DMA_KVA(ring->ring) + (ring->w_ptr * ring->itemsz);
	ring->w_ptr += 1;
	if (ring->w_ptr == ring->nitem)
		ring->w_ptr = 0;
	return ret;
}

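/*
 * Note that "available - 1" below presumably keeps one slot free so
 * that the write pointer never catches up with the read pointer,
 * which would be indistinguishable from an empty ring.
 */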
void *
bwfm_pci_ring_write_reserve_multi(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring, int count, int *avail)
{
	int available;
	char *ret;

	bwfm_pci_ring_update_rptr(sc, ring);

	if (ring->r_ptr > ring->w_ptr)
		available = ring->r_ptr - ring->w_ptr;
	else
		available = ring->r_ptr + (ring->nitem - ring->w_ptr);

	if (available < 1)
		return NULL;

	ret = BWFM_PCI_DMA_KVA(ring->ring) + (ring->w_ptr * ring->itemsz);
	*avail = uimin(count, available - 1);
	if (*avail + ring->w_ptr > ring->nitem)
		*avail = ring->nitem - ring->w_ptr;
	ring->w_ptr += *avail;
	if (ring->w_ptr == ring->nitem)
		ring->w_ptr = 0;
	return ret;
}

/*
 * Read number of descriptors available (submitted by the firmware)
 * and retrieve pointer to first descriptor.
 */
void *
bwfm_pci_ring_read_avail(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring, int *avail)
{
	bwfm_pci_ring_update_wptr(sc, ring);

	if (ring->w_ptr >= ring->r_ptr)
		*avail = ring->w_ptr - ring->r_ptr;
	else
		*avail = ring->nitem - ring->r_ptr;

	if (*avail == 0)
		return NULL;

	return BWFM_PCI_DMA_KVA(ring->ring) + (ring->r_ptr * ring->itemsz);
}

/*
 * Let firmware know we read N descriptors.
 */
void
bwfm_pci_ring_read_commit(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring, int nitem)
{
	ring->r_ptr += nitem;
	if (ring->r_ptr == ring->nitem)
		ring->r_ptr = 0;
	bwfm_pci_ring_write_rptr(sc, ring);
}

/*
 * Let firmware know that we submitted some descriptors.
 */
void
bwfm_pci_ring_write_commit(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring)
{
	bwfm_pci_ring_write_wptr(sc, ring);
	bwfm_pci_ring_bell(sc, ring);
}

/*
 * Rollback N descriptors in case we don't actually want
 * to commit to it.
 */
void
bwfm_pci_ring_write_cancel(struct bwfm_pci_softc *sc,
    struct bwfm_pci_msgring *ring, int nitem)
{
	if (ring->w_ptr == 0)
		ring->w_ptr = ring->nitem - nitem;
	else
		ring->w_ptr -= nitem;
}

/*
 * For each written descriptor on the ring, pass the descriptor to
 * a message handler and let the firmware know we handled it.  The
 * read pointer is committed back every 48 descriptors so that the
 * firmware can reuse those slots while we keep processing.
 */
void
bwfm_pci_ring_rx(struct bwfm_pci_softc *sc, struct bwfm_pci_msgring *ring)
{
	char *buf;
	int avail, processed;

again:
	buf = bwfm_pci_ring_read_avail(sc, ring, &avail);
	if (buf == NULL)
		return;

	processed = 0;
	while (avail) {
		bwfm_pci_msg_rx(sc, buf + sc->sc_rx_dataoffset);
		buf += ring->itemsz;
		processed++;
		if (processed == 48) {
			bwfm_pci_ring_read_commit(sc, ring, processed);
			processed = 0;
		}
		avail--;
	}
	if (processed)
		bwfm_pci_ring_read_commit(sc, ring, processed);
	if (ring->r_ptr == 0)
		goto again;
}

void
bwfm_pci_msg_rx(struct bwfm_pci_softc *sc, void *buf)
{
	struct ifnet *ifp = sc->sc_sc.sc_ic.ic_ifp;
	struct msgbuf_ioctl_resp_hdr *resp;
	struct msgbuf_tx_status *tx;
	struct msgbuf_rx_complete *rx;
	struct msgbuf_rx_event *event;
	struct msgbuf_common_hdr *msg;
	struct msgbuf_flowring_create_resp *fcr;
	struct msgbuf_flowring_delete_resp *fdr;
	struct bwfm_pci_msgring *ring;
	struct mbuf *m;
	int flowid;

	msg = (struct msgbuf_common_hdr *)buf;
	switch (msg->msgtype)
	{
	case MSGBUF_TYPE_FLOW_RING_CREATE_CMPLT:
		fcr = (struct msgbuf_flowring_create_resp *)buf;
		flowid = letoh16(fcr->compl_hdr.flow_ring_id);
		if (flowid < 2)
			break;
		flowid -= 2;
		if (flowid >= sc->sc_max_flowrings)
			break;
		ring = &sc->sc_flowrings[flowid];
		if (ring->status != RING_OPENING)
			break;
		if (fcr->compl_hdr.status) {
			printf("%s: failed to open flowring %d\n",
			    DEVNAME(sc), flowid);
			ring->status = RING_CLOSED;
			if (ring->m) {
				m_freem(ring->m);
				ring->m = NULL;
			}
			ifp->if_flags &= ~IFF_OACTIVE;
			ifp->if_start(ifp);
			break;
		}
		ring->status = RING_OPEN;
		if (ring->m != NULL) {
			m = ring->m;
			ring->m = NULL;
			if (bwfm_pci_txdata(&sc->sc_sc, &m))
				m_freem(m);
		}
		ifp->if_flags &= ~IFF_OACTIVE;
		ifp->if_start(ifp);
		break;
	case MSGBUF_TYPE_FLOW_RING_DELETE_CMPLT:
		fdr = (struct msgbuf_flowring_delete_resp *)buf;
		flowid = letoh16(fdr->compl_hdr.flow_ring_id);
		if (flowid < 2)
			break;
		flowid -= 2;
		if (flowid >= sc->sc_max_flowrings)
			break;
		ring = &sc->sc_flowrings[flowid];
		if (ring->status != RING_CLOSING)
			break;
		if (fdr->compl_hdr.status) {
			printf("%s: failed to delete flowring %d\n",
			    DEVNAME(sc), flowid);
			break;
		}
		bwfm_pci_dmamem_free(sc, ring->ring);
		ring->status = RING_CLOSED;
		break;
	case MSGBUF_TYPE_IOCTLPTR_REQ_ACK:
		break;
	case MSGBUF_TYPE_IOCTL_CMPLT:
		resp = (struct msgbuf_ioctl_resp_hdr *)buf;
		sc->sc_ioctl_resp_pktid = letoh32(resp->msg.request_id);
		sc->sc_ioctl_resp_ret_len = letoh16(resp->resp_len);
		sc->sc_ioctl_resp_status = letoh16(resp->compl_hdr.status);
		if_rxr_put(&sc->sc_ioctl_ring, 1);
		bwfm_pci_fill_rx_rings(sc);
		wakeup(&sc->sc_ioctl_buf);
		break;
	case MSGBUF_TYPE_WL_EVENT:
		event = (struct msgbuf_rx_event *)buf;
		m = bwfm_pci_pktid_free(sc, &sc->sc_rx_pkts,
		    letoh32(event->msg.request_id));
		if (m == NULL)
			break;
		m_adj(m, sc->sc_rx_dataoffset);
		m->m_len = m->m_pkthdr.len = letoh16(event->event_data_len);
		bwfm_rx(&sc->sc_sc, m);
		if_rxr_put(&sc->sc_event_ring, 1);
		bwfm_pci_fill_rx_rings(sc);
		break;
	case MSGBUF_TYPE_TX_STATUS:
		tx = (struct msgbuf_tx_status *)buf;
		m = bwfm_pci_pktid_free(sc, &sc->sc_tx_pkts,
		    letoh32(tx->msg.request_id));
		if (m == NULL)
			break;
		m_freem(m);
		if (sc->sc_tx_pkts_full) {
			sc->sc_tx_pkts_full = 0;
			ifp->if_flags &= ~IFF_OACTIVE;
			ifp->if_start(ifp);
		}
		break;
	case MSGBUF_TYPE_RX_CMPLT:
		rx = (struct msgbuf_rx_complete *)buf;
		m = bwfm_pci_pktid_free(sc, &sc->sc_rx_pkts,
		    letoh32(rx->msg.request_id));
		if (m == NULL)
			break;
		if (letoh16(rx->data_offset))
			m_adj(m, letoh16(rx->data_offset));
		else if (sc->sc_rx_dataoffset)
			m_adj(m, sc->sc_rx_dataoffset);
		m->m_len = m->m_pkthdr.len = letoh16(rx->data_len);
		bwfm_rx(&sc->sc_sc, m);
		if_rxr_put(&sc->sc_rxbuf_ring, 1);
		bwfm_pci_fill_rx_rings(sc);
		break;
	default:
		printf("%s: msgtype 0x%08x\n", __func__, msg->msgtype);
		break;
	}
}

/* Bus core helpers */
void
bwfm_pci_select_core(struct bwfm_pci_softc *sc, int id)
{
	struct bwfm_softc *bwfm = (void *)sc;
	struct bwfm_core *core;

	core = bwfm_chip_get_core(bwfm, id);
	if (core == NULL) {
		printf("%s: could not find core to select\n", DEVNAME(sc));
		return;
	}

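	/*
	 * Point the BAR0 window at the core and read it back; some
	 * chips appear to need the write repeated before it sticks.
	 */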
	pci_conf_write(sc->sc_pc, sc->sc_tag,
	    BWFM_PCI_BAR0_WINDOW, core->co_base);
	if (pci_conf_read(sc->sc_pc, sc->sc_tag,
	    BWFM_PCI_BAR0_WINDOW) != core->co_base)
		pci_conf_write(sc->sc_pc, sc->sc_tag,
		    BWFM_PCI_BAR0_WINDOW, core->co_base);
}

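/*
 * Buscore register access works by paging the BAR0 window onto the
 * BWFM_PCI_BAR0_REG_SIZE-aligned region containing the register and
 * then accessing the register's offset within that window.
 */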
uint32_t
bwfm_pci_buscore_read(struct bwfm_softc *bwfm, uint32_t reg)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	uint32_t page, offset;

	page = reg & ~(BWFM_PCI_BAR0_REG_SIZE - 1);
	offset = reg & (BWFM_PCI_BAR0_REG_SIZE - 1);
	pci_conf_write(sc->sc_pc, sc->sc_tag, BWFM_PCI_BAR0_WINDOW, page);
	return bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh, offset);
}

void
bwfm_pci_buscore_write(struct bwfm_softc *bwfm, uint32_t reg, uint32_t val)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	uint32_t page, offset;

	page = reg & ~(BWFM_PCI_BAR0_REG_SIZE - 1);
	offset = reg & (BWFM_PCI_BAR0_REG_SIZE - 1);
	pci_conf_write(sc->sc_pc, sc->sc_tag, BWFM_PCI_BAR0_WINDOW, page);
	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh, offset, val);
}

int
bwfm_pci_buscore_prepare(struct bwfm_softc *bwfm)
{
	return 0;
}

int
bwfm_pci_buscore_reset(struct bwfm_softc *bwfm)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	struct bwfm_core *core;
	uint32_t reg;
	int i;

	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_PCIE2);
	reg = pci_conf_read(sc->sc_pc, sc->sc_tag,
	    BWFM_PCI_CFGREG_LINK_STATUS_CTRL);
	pci_conf_write(sc->sc_pc, sc->sc_tag, BWFM_PCI_CFGREG_LINK_STATUS_CTRL,
	    reg & ~BWFM_PCI_CFGREG_LINK_STATUS_CTRL_ASPM_ENAB);

	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_CHIPCOMMON);
	bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_CHIP_REG_WATCHDOG, 4);
	delay(100 * 1000);

	bwfm_pci_select_core(sc, BWFM_AGENT_CORE_PCIE2);
	pci_conf_write(sc->sc_pc, sc->sc_tag,
	    BWFM_PCI_CFGREG_LINK_STATUS_CTRL, reg);

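	/*
	 * On PCIe core revisions up to 13 the watchdog reset appears
	 * to disturb a set of device config registers; read each one
	 * back through the indirect config space and rewrite it to
	 * restore it.
	 */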
	core = bwfm_chip_get_core(bwfm, BWFM_AGENT_CORE_PCIE2);
	if (core->co_rev <= 13) {
		uint16_t cfg_offset[] = {
		    BWFM_PCI_CFGREG_STATUS_CMD,
		    BWFM_PCI_CFGREG_PM_CSR,
		    BWFM_PCI_CFGREG_MSI_CAP,
		    BWFM_PCI_CFGREG_MSI_ADDR_L,
		    BWFM_PCI_CFGREG_MSI_ADDR_H,
		    BWFM_PCI_CFGREG_MSI_DATA,
		    BWFM_PCI_CFGREG_LINK_STATUS_CTRL2,
		    BWFM_PCI_CFGREG_RBAR_CTRL,
		    BWFM_PCI_CFGREG_PML1_SUB_CTRL1,
		    BWFM_PCI_CFGREG_REG_BAR2_CONFIG,
		    BWFM_PCI_CFGREG_REG_BAR3_CONFIG,
		};

		for (i = 0; i < nitems(cfg_offset); i++) {
			bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
			    BWFM_PCI_PCIE2REG_CONFIGADDR, cfg_offset[i]);
			reg = bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
			    BWFM_PCI_PCIE2REG_CONFIGDATA);
			DPRINTFN(3, ("%s: config offset 0x%04x, value 0x%04x\n",
			    DEVNAME(sc), cfg_offset[i], reg));
			bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
			    BWFM_PCI_PCIE2REG_CONFIGDATA, reg);
		}
	}

	reg = bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
	    BWFM_PCI_PCIE2REG_MAILBOXINT);
	if (reg != 0xffffffff)
		bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
		    BWFM_PCI_PCIE2REG_MAILBOXINT, reg);

	return 0;
}

void
bwfm_pci_buscore_activate(struct bwfm_softc *bwfm, const uint32_t rstvec)
{
	struct bwfm_pci_softc *sc = (void *)bwfm;
	bus_space_write_4(sc->sc_tcm_iot, sc->sc_tcm_ioh, 0, rstvec);
}

static int bwfm_pci_prio2fifo[8] = {
	1, /* best effort */
	0, /* IPTOS_PREC_IMMEDIATE */
	0, /* IPTOS_PREC_PRIORITY */
	1, /* IPTOS_PREC_FLASH */
	2, /* IPTOS_PREC_FLASHOVERRIDE */
	2, /* IPTOS_PREC_CRITIC_ECP */
	3, /* IPTOS_PREC_INTERNETCONTROL */
	3, /* IPTOS_PREC_NETCONTROL */
};

int
bwfm_pci_flowring_lookup(struct bwfm_pci_softc *sc, struct mbuf *m)
{
	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
	uint8_t *da = mtod(m, uint8_t *);
	struct ether_header *eh;
	int flowid, prio, fifo;
	int i, found, ac;

	/* No QoS for EAPOL frames. */
	eh = mtod(m, struct ether_header *);
	ac = (eh->ether_type != htons(ETHERTYPE_PAE)) ?
	    M_WME_GETAC(m) : WME_AC_BE;

	prio = ac;
	fifo = bwfm_pci_prio2fifo[prio];

	switch (ic->ic_opmode)
	{
	case IEEE80211_M_STA:
		flowid = fifo;
		break;
#ifndef IEEE80211_STA_ONLY
	case IEEE80211_M_HOSTAP:
		if (ETHER_IS_MULTICAST(da))
			da = __UNCONST(etherbroadcastaddr);
		flowid = da[5] * 2 + fifo;
		break;
#endif
	default:
		printf("%s: state not supported\n", DEVNAME(sc));
		return ENOBUFS;
	}

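	/*
	 * The flowid computed above is only a hash (in hostap mode it
	 * mixes the last byte of the destination MAC with the fifo);
	 * linearly probe the flowring table starting at that slot for
	 * a ring that is open for this fifo and, in hostap mode, for
	 * this destination.
	 */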
	found = 0;
	flowid = flowid % sc->sc_max_flowrings;
	for (i = 0; i < sc->sc_max_flowrings; i++) {
		if (ic->ic_opmode == IEEE80211_M_STA &&
		    sc->sc_flowrings[flowid].status >= RING_OPEN &&
		    sc->sc_flowrings[flowid].fifo == fifo) {
			found = 1;
			break;
		}
#ifndef IEEE80211_STA_ONLY
		if (ic->ic_opmode == IEEE80211_M_HOSTAP &&
		    sc->sc_flowrings[flowid].status >= RING_OPEN &&
		    sc->sc_flowrings[flowid].fifo == fifo &&
		    !memcmp(sc->sc_flowrings[flowid].mac, da, ETHER_ADDR_LEN)) {
			found = 1;
			break;
		}
#endif
		flowid = (flowid + 1) % sc->sc_max_flowrings;
	}

	if (found)
		return flowid;

	return -1;
}

void
bwfm_pci_flowring_create(struct bwfm_pci_softc *sc, struct mbuf *m)
{
	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
	struct bwfm_cmd_flowring_create *cmd;
	uint8_t *da = mtod(m, uint8_t *);
	struct ether_header *eh;
	struct bwfm_pci_msgring *ring;
	int flowid, prio, fifo;
	int i, found, ac;

	cmd = pool_get(&sc->sc_flowring_pool, PR_NOWAIT);
	if (__predict_false(cmd == NULL))
		return;

	/* No QoS for EAPOL frames. */
	eh = mtod(m, struct ether_header *);
	ac = (eh->ether_type != htons(ETHERTYPE_PAE)) ?
	    M_WME_GETAC(m) : WME_AC_BE;

	prio = ac;
	fifo = bwfm_pci_prio2fifo[prio];
	switch (ic->ic_opmode)
	{
	case IEEE80211_M_STA:
		flowid = fifo;
		break;
#ifndef IEEE80211_STA_ONLY
	case IEEE80211_M_HOSTAP:
		if (ETHER_IS_MULTICAST(da))
			da = __UNCONST(etherbroadcastaddr);
		flowid = da[5] * 2 + fifo;
		break;
#endif
	default:
		printf("%s: state not supported\n", DEVNAME(sc));
		pool_put(&sc->sc_flowring_pool, cmd);
		return;
	}

	found = 0;
	flowid = flowid % sc->sc_max_flowrings;
	for (i = 0; i < sc->sc_max_flowrings; i++) {
		ring = &sc->sc_flowrings[flowid];
		if (ring->status == RING_CLOSED) {
			ring->status = RING_OPENING;
			found = 1;
			break;
		}
		flowid = (flowid + 1) % sc->sc_max_flowrings;
	}

	/*
	 * We cannot recover from that so far.  Only a stop/init
	 * cycle can revive this if it ever happens at all.
	 */
	if (!found) {
		printf("%s: no flowring available\n", DEVNAME(sc));
		pool_put(&sc->sc_flowring_pool, cmd);
		return;
	}

	cmd->sc = sc;
	cmd->m = m;
	cmd->prio = prio;
	cmd->flowid = flowid;
	workqueue_enqueue(sc->flowring_wq, &cmd->wq_cookie, NULL);
}

void
bwfm_pci_flowring_create_cb(struct work *wk, void *arg)
{
	struct bwfm_cmd_flowring_create *cmd =
	    container_of(wk, struct bwfm_cmd_flowring_create, wq_cookie);
	struct bwfm_pci_softc *sc = cmd->sc;
	struct ieee80211com *ic = &sc->sc_sc.sc_ic;
	struct msgbuf_tx_flowring_create_req *req;
	struct bwfm_pci_msgring *ring;
	uint8_t *da, *sa;

	da = mtod(cmd->m, char *) + 0 * ETHER_ADDR_LEN;
	sa = mtod(cmd->m, char *) + 1 * ETHER_ADDR_LEN;

	ring = &sc->sc_flowrings[cmd->flowid];
	if (ring->status != RING_OPENING) {
		printf("%s: flowring not opening\n", DEVNAME(sc));
		pool_put(&sc->sc_flowring_pool, cmd);
		return;
	}

	if (bwfm_pci_setup_flowring(sc, ring, 512, 48)) {
		printf("%s: cannot setup flowring\n", DEVNAME(sc));
		pool_put(&sc->sc_flowring_pool, cmd);
		return;
	}

	req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
	if (req == NULL) {
		printf("%s: cannot reserve for flowring\n", DEVNAME(sc));
		pool_put(&sc->sc_flowring_pool, cmd);
		return;
	}

	ring->status = RING_OPENING;
	ring->fifo = bwfm_pci_prio2fifo[cmd->prio];
	ring->m = cmd->m;
	memcpy(ring->mac, da, ETHER_ADDR_LEN);
#ifndef IEEE80211_STA_ONLY
	if (ic->ic_opmode == IEEE80211_M_HOSTAP && ETHER_IS_MULTICAST(da))
		memcpy(ring->mac, etherbroadcastaddr, ETHER_ADDR_LEN);
#endif

	req->msg.msgtype = MSGBUF_TYPE_FLOW_RING_CREATE;
	req->msg.ifidx = 0;
	req->msg.request_id = 0;
	req->tid = bwfm_pci_prio2fifo[cmd->prio];
	req->flow_ring_id = letoh16(cmd->flowid + 2);
	memcpy(req->da, da, ETHER_ADDR_LEN);
	memcpy(req->sa, sa, ETHER_ADDR_LEN);
	req->flow_ring_addr.high_addr =
	    letoh32(BWFM_PCI_DMA_DVA(ring->ring) >> 32);
	req->flow_ring_addr.low_addr =
	    letoh32(BWFM_PCI_DMA_DVA(ring->ring) & 0xffffffff);
	req->max_items = letoh16(512);
	req->len_item = letoh16(48);

	bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);
	pool_put(&sc->sc_flowring_pool, cmd);
}

void
bwfm_pci_flowring_delete(struct bwfm_pci_softc *sc, int flowid)
{
	struct msgbuf_tx_flowring_delete_req *req;
	struct bwfm_pci_msgring *ring;

	ring = &sc->sc_flowrings[flowid];
	if (ring->status != RING_OPEN) {
		printf("%s: flowring not open\n", DEVNAME(sc));
		return;
	}

	req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
	if (req == NULL) {
		printf("%s: cannot reserve for flowring\n", DEVNAME(sc));
		return;
	}

	ring->status = RING_CLOSING;

	req->msg.msgtype = MSGBUF_TYPE_FLOW_RING_DELETE;
	req->msg.ifidx = 0;
	req->msg.request_id = 0;
	req->flow_ring_id = letoh16(flowid + 2);
	req->reason = 0;

	bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);
}
1833
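/* Stop the device by deleting all open flowrings. */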
1834 void
1835 bwfm_pci_stop(struct bwfm_softc *bwfm)
1836 {
1837 struct bwfm_pci_softc *sc = (void *)bwfm;
1838 struct bwfm_pci_msgring *ring;
1839 int i;
1840
1841 for (i = 0; i < sc->sc_max_flowrings; i++) {
1842 ring = &sc->sc_flowrings[i];
1843 if (ring->status == RING_OPEN)
1844 bwfm_pci_flowring_delete(sc, i);
1845 }
1846 }
1847
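/*
 * Check whether we can currently transmit: no flowring may be in
 * transition and a packet id must be free (bwfm_pci_pktid_avail()
 * returns 0 in that case).
 */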
1848 int
1849 bwfm_pci_txcheck(struct bwfm_softc *bwfm)
1850 {
1851 struct bwfm_pci_softc *sc = (void *)bwfm;
1852 struct bwfm_pci_msgring *ring;
1853 int i;
1854
1855 /* If we are transitioning, we cannot send. */
1856 for (i = 0; i < sc->sc_max_flowrings; i++) {
1857 ring = &sc->sc_flowrings[i];
1858 if (ring->status == RING_OPENING)
1859 return ENOBUFS;
1860 }
1861
1862 if (bwfm_pci_pktid_avail(sc, &sc->sc_tx_pkts)) {
1863 sc->sc_tx_pkts_full = 1;
1864 return ENOBUFS;
1865 }
1866
1867 return 0;
1868 }
1869
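/*
 * Queue an mbuf on its flowring.  The 802.3 header travels inline in
 * the TX post message; only the payload behind it is handed to the
 * dongle as a DMA address.
 */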
1870 int
1871 bwfm_pci_txdata(struct bwfm_softc *bwfm, struct mbuf **mp)
1872 {
1873 struct bwfm_pci_softc *sc = (void *)bwfm;
1874 struct bwfm_pci_msgring *ring;
1875 struct msgbuf_tx_msghdr *tx;
1876 uint32_t pktid;
1877 paddr_t paddr;
1878 struct ether_header *eh;
1879 int flowid, ret, ac;
1880
1881 flowid = bwfm_pci_flowring_lookup(sc, *mp);
1882 if (flowid < 0) {
1883 /*
1884 * We cannot send the packet right now as there is
1885 * no flowring yet. The flowring will be created
1886 * asynchronously. While the ring is transitioning
1887 * the TX check will tell the upper layers that we
1888 * cannot send packets right now. When the flowring
1889 * is created the queue will be restarted and this
1890 * mbuf will be transmitted.
1891 */
1892 bwfm_pci_flowring_create(sc, *mp);
1893 return 0;
1894 }
1895
1896 ring = &sc->sc_flowrings[flowid];
1897 if (ring->status == RING_OPENING ||
1898 ring->status == RING_CLOSING) {
		printf("%s: tried to use flowring %d which is "
		    "transitioning (status %d)\n",
		    DEVNAME(sc), flowid, ring->status);
1902 return ENOBUFS;
1903 }
1904
1905 tx = bwfm_pci_ring_write_reserve(sc, ring);
1906 if (tx == NULL)
1907 return ENOBUFS;
1908
1909 /* No QoS for EAPOL frames. */
1910 eh = mtod(*mp, struct ether_header *);
1911 ac = (eh->ether_type != htons(ETHERTYPE_PAE)) ?
1912 M_WME_GETAC(*mp) : WME_AC_BE;
1913
1914 memset(tx, 0, sizeof(*tx));
1915 tx->msg.msgtype = MSGBUF_TYPE_TX_POST;
1916 tx->msg.ifidx = 0;
1917 tx->flags = BWFM_MSGBUF_PKT_FLAGS_FRAME_802_3;
1918 tx->flags |= ac << BWFM_MSGBUF_PKT_FLAGS_PRIO_SHIFT;
1919 tx->seg_cnt = 1;
1920 memcpy(tx->txhdr, mtod(*mp, char *), ETHER_HDR_LEN);
1921
1922 ret = bwfm_pci_pktid_new(sc, &sc->sc_tx_pkts, mp, &pktid, &paddr);
1923 if (ret) {
1924 if (ret == ENOBUFS) {
1925 printf("%s: no pktid available for TX\n",
1926 DEVNAME(sc));
1927 sc->sc_tx_pkts_full = 1;
1928 }
1929 bwfm_pci_ring_write_cancel(sc, ring, 1);
1930 return ret;
1931 }
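	/* The Ethernet header went into txhdr above; DMA only the payload. */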
1932 paddr += ETHER_HDR_LEN;
1933
1934 tx->msg.request_id = htole32(pktid);
1935 tx->data_len = htole16((*mp)->m_len - ETHER_HDR_LEN);
	tx->data_buf_addr.high_addr = htole32((uint64_t)paddr >> 32);
	tx->data_buf_addr.low_addr = htole32((uint64_t)paddr & 0xffffffff);
1938
1939 bwfm_pci_ring_write_commit(sc, ring);
1940 return 0;
1941 }
1942
1943 #ifdef BWFM_DEBUG
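/*
 * Drain the firmware console ring in shared memory and echo it via
 * DPRINTFN().  The read index wraps at the buffer size; carriage
 * returns are dropped.
 */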
1944 void
1945 bwfm_pci_debug_console(struct bwfm_pci_softc *sc)
1946 {
1947 uint32_t newidx = bus_space_read_4(sc->sc_tcm_iot, sc->sc_tcm_ioh,
1948 sc->sc_console_base_addr + BWFM_CONSOLE_WRITEIDX);
1949
1950 if (newidx != sc->sc_console_readidx)
1951 DPRINTFN(3, ("BWFM CONSOLE: "));
1952 while (newidx != sc->sc_console_readidx) {
1953 uint8_t ch = bus_space_read_1(sc->sc_tcm_iot, sc->sc_tcm_ioh,
1954 sc->sc_console_buf_addr + sc->sc_console_readidx);
1955 sc->sc_console_readidx++;
1956 if (sc->sc_console_readidx == sc->sc_console_buf_size)
1957 sc->sc_console_readidx = 0;
1958 if (ch == '\r')
1959 continue;
1960 DPRINTFN(3, ("%c", ch));
1961 }
1962 }
1963 #endif
1964
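/*
 * Interrupt handler: acknowledge and mask the mailbox interrupt,
 * drain all device-to-host completion rings, then unmask again.
 * Returns 1 if the interrupt was ours.
 */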
1965 int
1966 bwfm_pci_intr(void *v)
1967 {
	struct bwfm_pci_softc *sc = v;
1969 uint32_t status;
1970
1971 if ((status = bus_space_read_4(sc->sc_reg_iot, sc->sc_reg_ioh,
1972 BWFM_PCI_PCIE2REG_MAILBOXINT)) == 0)
1973 return 0;
1974
1975 bwfm_pci_intr_disable(sc);
1976 bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
1977 BWFM_PCI_PCIE2REG_MAILBOXINT, status);
1978
1979 if (status & (BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_FN0_0 |
1980 BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_FN0_1))
1981 printf("%s: handle MB data\n", __func__);
1982
1983 if (status & BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_D2H_DB) {
1984 bwfm_pci_ring_rx(sc, &sc->sc_rx_complete);
1985 bwfm_pci_ring_rx(sc, &sc->sc_tx_complete);
1986 bwfm_pci_ring_rx(sc, &sc->sc_ctrl_complete);
1987 }
1988
1989 #ifdef BWFM_DEBUG
1990 bwfm_pci_debug_console(sc);
1991 #endif
1992
1993 bwfm_pci_intr_enable(sc);
1994 return 1;
1995 }
1996
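/* Unmask the mailbox interrupts we are interested in. */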
1997 void
1998 bwfm_pci_intr_enable(struct bwfm_pci_softc *sc)
1999 {
2000 bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2001 BWFM_PCI_PCIE2REG_MAILBOXMASK,
2002 BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_FN0_0 |
2003 BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_FN0_1 |
2004 BWFM_PCI_PCIE2REG_MAILBOXMASK_INT_D2H_DB);
2005 }
2006
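/* Mask all mailbox interrupts. */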
2007 void
2008 bwfm_pci_intr_disable(struct bwfm_pci_softc *sc)
2009 {
2010 bus_space_write_4(sc->sc_reg_iot, sc->sc_reg_ioh,
2011 BWFM_PCI_PCIE2REG_MAILBOXMASK, 0);
2012 }
2013
2014 /* Msgbuf protocol implementation */
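/*
 * Send an ioctl/dcmd to the dongle through the control submit ring and
 * sleep until the matching completion arrives.  On success the response
 * is copied back into buf and *len is set to the amount of data copied.
 */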
2015 int
2016 bwfm_pci_msgbuf_query_dcmd(struct bwfm_softc *bwfm, int ifidx,
2017 int cmd, char *buf, size_t *len)
2018 {
2019 struct bwfm_pci_softc *sc = (void *)bwfm;
2020 struct msgbuf_ioctl_req_hdr *req;
2021 struct mbuf *m;
2022 size_t buflen;
2023 int s;
2024
2025 s = splnet();
2026 sc->sc_ioctl_resp_pktid = -1;
2027 req = bwfm_pci_ring_write_reserve(sc, &sc->sc_ctrl_submit);
2028 if (req == NULL) {
2029 printf("%s: cannot reserve for write\n", DEVNAME(sc));
2030 splx(s);
2031 return 1;
2032 }
2033 req->msg.msgtype = MSGBUF_TYPE_IOCTLPTR_REQ;
2034 req->msg.ifidx = 0;
2035 req->msg.flags = 0;
2036 req->msg.request_id = htole32(MSGBUF_IOCTL_REQ_PKTID);
2037 req->cmd = htole32(cmd);
2038 req->output_buf_len = htole16(*len);
2039 req->trans_id = htole16(sc->sc_ioctl_reqid++);
2040
2041 buflen = uimin(*len, BWFM_DMA_H2D_IOCTL_BUF_LEN);
2042 req->input_buf_len = htole16(buflen);
2043 req->req_buf_addr.high_addr =
2044 htole32((uint64_t)BWFM_PCI_DMA_DVA(sc->sc_ioctl_buf) >> 32);
2045 req->req_buf_addr.low_addr =
2046 htole32((uint64_t)BWFM_PCI_DMA_DVA(sc->sc_ioctl_buf) & 0xffffffff);
2047 if (buf)
2048 memcpy(BWFM_PCI_DMA_KVA(sc->sc_ioctl_buf), buf, buflen);
2049 else
2050 memset(BWFM_PCI_DMA_KVA(sc->sc_ioctl_buf), 0, buflen);
2051
	bwfm_pci_ring_write_commit(sc, &sc->sc_ctrl_submit);

	/*
	 * Hold splnet() until we sleep, or the response interrupt
	 * could fire before the tsleep() and its wakeup be lost.
	 */
	if (tsleep(&sc->sc_ioctl_buf, PCATCH, "bwfm", hz)) {
		printf("%s: timeout waiting for ioctl response\n",
		    DEVNAME(sc));
		splx(s);
		return 1;
	}

	m = bwfm_pci_pktid_free(sc, &sc->sc_rx_pkts, sc->sc_ioctl_resp_pktid);
	if (m == NULL) {
		splx(s);
		return 1;
	}

	*len = uimin(buflen, sc->sc_ioctl_resp_ret_len);
	if (buf)
		memcpy(buf, mtod(m, char *), *len);
	m_freem(m);
	splx(s);
2070
2071 return 0;
2072 }
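
/*
 * Example (sketch only, not driver code): callers normally reach this
 * through the msgbuf proto ops, roughly like
 *
 *	char buf[64];
 *	size_t len = sizeof(buf);
 *
 *	if (bwfm_pci_msgbuf_query_dcmd(&sc->sc_sc, 0, BWFM_C_GET_VERSION,
 *	    buf, &len) == 0)
 *		printf("firmware version: %.*s\n", (int)len, buf);
 *
 * The dcmd number is illustrative; see bwfmreg.h for the actual list.
 */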
2073
2074 int
2075 bwfm_pci_msgbuf_set_dcmd(struct bwfm_softc *bwfm, int ifidx,
2076 int cmd, char *buf, size_t len)
2077 {
2078 return bwfm_pci_msgbuf_query_dcmd(bwfm, ifidx, cmd, buf, &len);
2079 }
2080