/* $NetBSD: ixgbe_netbsd.c,v 1.15.6.1 2021/05/13 00:47:31 thorpej Exp $ */
/*
 * Copyright (c) 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Coyote Point Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ixgbe_netbsd.c,v 1.15.6.1 2021/05/13 00:47:31 thorpej Exp $");

#include <sys/param.h>

#include <sys/atomic.h>
#include <sys/bus.h>
#include <sys/condvar.h>
#include <sys/cpu.h>
#include <sys/kmem.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/workqueue.h>
#include <dev/pci/pcivar.h>

#include "ixgbe.h"

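/*
 * ixgbe_dma_tag_t is a shim that records the parameters a FreeBSD-style
 * bus_dma_tag_create() call would carry, layered on top of NetBSD's
 * bus_dma(9).  ixgbe_dma_tag_destroy() releases a tag obtained from
 * ixgbe_dma_tag_create().
 */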
void
ixgbe_dma_tag_destroy(ixgbe_dma_tag_t *dt)
{
        kmem_free(dt, sizeof(*dt));
}

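/*
 * Allocate an ixgbe_dma_tag_t and stash the constraints (alignment,
 * boundary, maximum size, segment count and segment size) for later use
 * by ixgbe_dmamap_create() and ixgbe_dmamap_sync().
 */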
int
ixgbe_dma_tag_create(bus_dma_tag_t dmat, bus_size_t alignment,
    bus_size_t boundary, bus_size_t maxsize, int nsegments,
    bus_size_t maxsegsize, int flags, ixgbe_dma_tag_t **dtp)
{
        ixgbe_dma_tag_t *dt;

        *dtp = NULL;

        dt = kmem_zalloc(sizeof(*dt), KM_SLEEP);
        dt->dt_dmat = dmat;
        dt->dt_alignment = alignment;
        dt->dt_boundary = boundary;
        dt->dt_maxsize = maxsize;
        dt->dt_nsegments = nsegments;
        dt->dt_maxsegsize = maxsegsize;
        dt->dt_flags = flags;
        *dtp = dt;

        return 0;
}

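/*
 * Thin wrappers around bus_dmamap_destroy/sync/unload(9) that pull the
 * underlying bus_dma tag out of the ixgbe DMA tag.  Note that the sync
 * wrapper always syncs the full dt_maxsize window, not the loaded length.
 */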
void
ixgbe_dmamap_destroy(ixgbe_dma_tag_t *dt, bus_dmamap_t dmam)
{
        bus_dmamap_destroy(dt->dt_dmat, dmam);
}

void
ixgbe_dmamap_sync(ixgbe_dma_tag_t *dt, bus_dmamap_t dmam, int ops)
{
        bus_dmamap_sync(dt->dt_dmat, dmam, 0, dt->dt_maxsize, ops);
}

void
ixgbe_dmamap_unload(ixgbe_dma_tag_t *dt, bus_dmamap_t dmam)
{
        bus_dmamap_unload(dt->dt_dmat, dmam);
}

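/*
 * Create a DMA map sized by the constraints recorded in the tag, so
 * callers written against the FreeBSD API don't have to repeat them.
 */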
int
ixgbe_dmamap_create(ixgbe_dma_tag_t *dt, int flags, bus_dmamap_t *dmamp)
{
        return bus_dmamap_create(dt->dt_dmat, dt->dt_maxsize, dt->dt_nsegments,
            dt->dt_maxsegsize, dt->dt_boundary, flags, dmamp);
}

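/* Return a jumbo cluster buffer to the head of its owner's freelist. */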
static void
ixgbe_putext(ixgbe_extmem_t *em)
{
        ixgbe_extmem_head_t *eh = em->em_head;

        mutex_enter(&eh->eh_mtx);

        TAILQ_INSERT_HEAD(&eh->eh_freelist, em, em_link);

        mutex_exit(&eh->eh_mtx);

        return;
}

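/*
 * Take the first buffer of at least `size' bytes off the freelist, or
 * return NULL if none fits.  A size of 0 therefore matches any buffer,
 * which ixgbe_jcl_freeall() relies on to drain the list.
 */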
static ixgbe_extmem_t *
ixgbe_getext(ixgbe_extmem_head_t *eh, size_t size)
{
        ixgbe_extmem_t *em;

        mutex_enter(&eh->eh_mtx);

        TAILQ_FOREACH(em, &eh->eh_freelist, em_link) {
                if (em->em_size >= size)
                        break;
        }

        if (em != NULL)
                TAILQ_REMOVE(&eh->eh_freelist, em, em_link);

        mutex_exit(&eh->eh_mtx);

        return em;
}

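/*
 * Allocate and map one page-aligned, DMA-safe buffer of `size' bytes and
 * wrap it in an ixgbe_extmem_t.  Returns NULL on failure.
 */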
static ixgbe_extmem_t *
ixgbe_newext(ixgbe_extmem_head_t *eh, bus_dma_tag_t dmat, size_t size)
{
        ixgbe_extmem_t *em;
        int nseg, rc;

        em = kmem_zalloc(sizeof(*em), KM_SLEEP);

        rc = bus_dmamem_alloc(dmat, size, PAGE_SIZE, 0, &em->em_seg, 1, &nseg,
            BUS_DMA_WAITOK);

        if (rc != 0)
                goto post_zalloc_err;

        rc = bus_dmamem_map(dmat, &em->em_seg, 1, size, &em->em_vaddr,
            BUS_DMA_WAITOK);

        if (rc != 0)
                goto post_dmamem_err;

        em->em_dmat = dmat;
        em->em_size = size;
        em->em_head = eh;

        return em;
post_dmamem_err:
        bus_dmamem_free(dmat, &em->em_seg, 1);
post_zalloc_err:
        kmem_free(em, sizeof(*em));
        return NULL;
}

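/* Unmap and free every buffer currently sitting on the rx ring's freelist. */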
static void
ixgbe_jcl_freeall(struct adapter *adapter, struct rx_ring *rxr)
{
        ixgbe_extmem_head_t *eh = &rxr->jcl_head;
        ixgbe_extmem_t *em;
        bus_dma_tag_t dmat = rxr->ptag->dt_dmat;

        while ((em = ixgbe_getext(eh, 0)) != NULL) {
                KASSERT(em->em_vaddr != NULL);
                bus_dmamem_unmap(dmat, em->em_vaddr, em->em_size);
                bus_dmamem_free(dmat, &em->em_seg, 1);
                memset(em, 0, sizeof(*em));
                kmem_free(em, sizeof(*em));
        }
}

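/*
 * (Re)populate the per-ring jumbo cluster freelist with `nbuf' buffers of
 * `size' bytes each.  The list is left untouched when the mbuf size and
 * descriptor count have not changed since the last call.
 */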
void
ixgbe_jcl_reinit(struct adapter *adapter, bus_dma_tag_t dmat,
    struct rx_ring *rxr, int nbuf, size_t size)
{
        ixgbe_extmem_head_t *eh = &rxr->jcl_head;
        ixgbe_extmem_t *em;
        int i;

        if (!eh->eh_initialized) {
                TAILQ_INIT(&eh->eh_freelist);
                mutex_init(&eh->eh_mtx, MUTEX_DEFAULT, IPL_NET);
                eh->eh_initialized = true;
        }

        /*
         * Check the previous parameters. If a reinit isn't required, just
         * return.
         *
         * Note that num_rx_desc is currently a fixed value; it's never
         * changed after the device is attached.
         */
        if ((rxr->last_rx_mbuf_sz == rxr->mbuf_sz)
            && (rxr->last_num_rx_desc == adapter->num_rx_desc))
                return;

        /* Free all dmamem */
        ixgbe_jcl_freeall(adapter, rxr);

        for (i = 0; i < nbuf; i++) {
                if ((em = ixgbe_newext(eh, dmat, size)) == NULL) {
                        device_printf(adapter->dev,
                            "%s: only %d of %d jumbo buffers allocated\n",
                            __func__, i, nbuf);
                        break;
                }
                ixgbe_putext(em);
        }

        /* Keep current parameters */
        rxr->last_rx_mbuf_sz = adapter->rx_mbuf_sz;
        rxr->last_num_rx_desc = adapter->num_rx_desc;
}

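/* Tear down the jumbo cluster freelist and its mutex, e.g. at detach time. */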
void
ixgbe_jcl_destroy(struct adapter *adapter, struct rx_ring *rxr)
{
        ixgbe_extmem_head_t *eh = &rxr->jcl_head;

        if (eh->eh_initialized) {
                /* Free all dmamem */
                ixgbe_jcl_freeall(adapter, rxr);

                mutex_destroy(&eh->eh_mtx);
                eh->eh_initialized = false;
        }
}

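/*
 * External-storage free callback installed by MEXTADD() below: return the
 * jumbo buffer to its freelist and, when an mbuf is passed in, return the
 * mbuf itself to the mbuf cache as well.
 */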
static void
ixgbe_jcl_free(struct mbuf *m, void *buf, size_t size, void *arg)
{
        ixgbe_extmem_t *em = arg;

        KASSERT(em->em_size == size);

        ixgbe_putext(em);
        /* this is an abstraction violation, but it does not lead to a
         * double-free
         */
        if (__predict_true(m != NULL)) {
                KASSERT(m->m_type != MT_FREE);
                m->m_type = MT_FREE;
                pool_cache_put(mb_cache, m);
        }
}

/* XXX need to wait for the system to finish with each jumbo mbuf and
 * free it before detaching the driver from the device.
 */
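/*
 * Allocate an mbuf (with a packet header if M_PKTHDR is requested) and
 * attach a pre-allocated jumbo cluster from `eh' as its external storage.
 * Returns NULL if no mbuf or no suitably sized cluster is available.
 */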
struct mbuf *
ixgbe_getjcl(ixgbe_extmem_head_t *eh, int nowait /* M_DONTWAIT */,
    int type /* MT_DATA */, int flags /* M_PKTHDR */, size_t size)
{
        ixgbe_extmem_t *em;
        struct mbuf *m;

        if ((flags & M_PKTHDR) != 0)
                m = m_gethdr(nowait, type);
        else
                m = m_get(nowait, type);

        if (m == NULL)
                return NULL;

        em = ixgbe_getext(eh, size);
        if (em == NULL) {
                m_freem(m);
                return NULL;
        }

        MEXTADD(m, em->em_vaddr, em->em_size, M_DEVBUF, &ixgbe_jcl_free, em);

        if ((m->m_flags & M_EXT) == 0) {
                ixgbe_putext(em);
                m_freem(m);
                return NULL;
        }

        return m;
}

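/* Set the PCI bus-master bit in the command register if it isn't already. */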
void
ixgbe_pci_enable_busmaster(pci_chipset_tag_t pc, pcitag_t tag)
{
        pcireg_t pci_cmd_word;

        pci_cmd_word = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
        if (!(pci_cmd_word & PCI_COMMAND_MASTER_ENABLE)) {
                pci_cmd_word |= PCI_COMMAND_MASTER_ENABLE;
                pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, pci_cmd_word);
        }
}

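/*
 * FreeBSD compatibility: an acquire-ordered load of an unsigned int,
 * implemented with NetBSD's atomic_load_acquire().
 */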
u_int
atomic_load_acq_uint(volatile u_int *p)
{
        return atomic_load_acquire(p);
}

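/*
 * Delay for `us' microseconds.  Busy-wait while cold or for short waits;
 * otherwise sleep with kpause() so we don't spin for multiple ticks.
 */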
void
ixgbe_delay(unsigned int us)
{

        if (__predict_false(cold))
                delay(us);
        else if ((us / 1000) >= hztoms(1)) {
                /*
                 * Wait at least two clock ticks so we know the time has
                 * passed.
                 */
                kpause("ixgdly", false, mstohz(us / 1000) + 1, NULL);
        } else
                delay(us);
}