/* $NetBSD: nouveau_dma.c,v 1.1.1.2 2014/08/06 12:36:23 riastradh Exp $ */

/*
 * Copyright (C) 2007 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nouveau_dma.c,v 1.1.1.2 2014/08/06 12:36:23 riastradh Exp $");

#include <core/client.h>

#include "nouveau_drm.h"
#include "nouveau_dma.h"

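/* Copy "nr_dwords" 32-bit words from "data" into the push buffer at the
 * current emit position (chan->dma.cur) and advance it.  The buffer may
 * be mapped from I/O memory (VRAM) or system RAM, hence the is_iomem
 * probe.  Callers must have reserved the space beforehand, e.g. via
 * nouveau_dma_wait() below.
 */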
void
OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&chan->push.buffer->kmap, &is_iomem);
	mem = &mem[chan->dma.cur];
	if (is_iomem)
		memcpy_toio((void __force __iomem *)mem, data, nr_dwords * 4);
	else
		memcpy(mem, data, nr_dwords * 4);
	chan->dma.cur += nr_dwords;
}

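/*
 * A minimal usage sketch (for illustration only; RING_SPACE(), BEGIN_NV04()
 * and FIRE_RING() are the helpers from nouveau_dma.h, and the
 * subchannel/method numbers below are placeholders):
 *
 *	u32 data[2] = { 0, 0 };
 *	int ret;
 *
 *	ret = RING_SPACE(chan, 3);
 *	if (ret == 0) {
 *		BEGIN_NV04(chan, 0, 0x0100, 2);
 *		OUT_RINGp (chan, data, 2);
 *		FIRE_RING (chan);
 *	}
 */
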
/* Fetch and adjust GPU GET pointer
 *
 * Returns:
 *  value >= 0, the adjusted GET pointer
 *  -EINVAL if GET pointer currently outside main push buffer
 *  -EBUSY if timeout exceeded
 */
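/* For example (made-up numbers): with chan->push.vma.offset == 0x20100000
 * and a hardware GET of 0x20100100, the adjusted value returned is
 * (0x20100100 - 0x20100000) >> 2 == 0x40, i.e. a dword index into the
 * push buffer.
 */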
static inline int
READ_GET(struct nouveau_channel *chan, uint64_t *prev_get, int *timeout)
{
	uint64_t val;

	val = nv_ro32(chan->object, chan->user_get);
	if (chan->user_get_hi)
		val |= (uint64_t)nv_ro32(chan->object, chan->user_get_hi) << 32;

	/* reset counter as long as GET is still advancing; this avoids
	 * misdetecting a GPU lockup when the GPU merely happens to be
	 * processing an operation that takes a long time
	 */
	if (val != *prev_get) {
		*prev_get = val;
		*timeout = 0;
	}

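	/* poll budget: pause 1us every 256th read, and give up once more
	 * than 100000 reads have gone by without GET advancing
	 */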
	if ((++*timeout & 0xff) == 0) {
		udelay(1);
		if (*timeout > 100000)
			return -EBUSY;
	}

	if (val < chan->push.vma.offset ||
	    val > chan->push.vma.offset + (chan->dma.max << 2))
		return -EINVAL;

	return (val - chan->push.vma.offset) >> 2;
}

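/* Queue one indirect-buffer (IB) entry on NV50+ hardware: each IB slot
 * holds two dwords packing the GPU virtual address and byte length of a
 * push buffer segment.  For example (made-up numbers), offset 0x123456000
 * with length 0x40 is stored as 0x23456000 and (0x1 | 0x40 << 8) ==
 * 0x00004001.  The dummy nouveau_bo_rd32() below flushes the entry out
 * before the new PUT value is written, so the GPU never fetches a stale
 * slot.
 */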
void
nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo,
	      int delta, int length)
{
	struct nouveau_bo *pb = chan->push.buffer;
	struct nouveau_vma *vma;
	int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base;
	u64 offset;

	vma = nouveau_bo_vma_find(bo, nv_client(chan->cli)->vm);
	BUG_ON(!vma);
	offset = vma->offset + delta;

	BUG_ON(chan->dma.ib_free < 1);

	nouveau_bo_wr32(pb, ip++, lower_32_bits(offset));
	nouveau_bo_wr32(pb, ip++, upper_32_bits(offset) | length << 8);

	chan->dma.ib_put = (chan->dma.ib_put + 1) & chan->dma.ib_max;

	mb();
	/* Flush writes. */
	nouveau_bo_rd32(pb, 0);

	nv_wo32(chan->object, 0x8c, chan->dma.ib_put);
	chan->dma.ib_free--;
}

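/* Spin until at least "count" IB slots are free.  Offset 0x88 of the
 * channel object holds the hardware's IB GET index; free slots are
 * counted from there to our software ib_put, wrapped at dma.ib_max,
 * using the same progress-based timeout scheme as READ_GET() above.
 */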
static int
nv50_dma_push_wait(struct nouveau_channel *chan, int count)
{
	uint32_t cnt = 0, prev_get = 0;

	while (chan->dma.ib_free < count) {
		uint32_t get = nv_ro32(chan->object, 0x88);
		if (get != prev_get) {
			prev_get = get;
			cnt = 0;
		}

		if ((++cnt & 0xff) == 0) {
			DRM_UDELAY(1);
			if (cnt > 100000)
				return -EBUSY;
		}

		chan->dma.ib_free = get - chan->dma.ib_put;
		if (chan->dma.ib_free <= 0)
			chan->dma.ib_free += chan->dma.ib_max;
	}

	return 0;
}

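/* NV50+ (IB mode) variant of nouveau_dma_wait() below: first reserve the
 * IB slots ("slots", plus one for the segment the caller is about to
 * queue), then wait for "count" dwords of space in the main push buffer,
 * wrapping cur/put back to index 0 when the tail of the buffer is too
 * short.
 */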
static int
nv50_dma_wait(struct nouveau_channel *chan, int slots, int count)
{
	uint64_t prev_get = 0;
	int ret, cnt = 0;

	ret = nv50_dma_push_wait(chan, slots + 1);
	if (unlikely(ret))
		return ret;

	while (chan->dma.free < count) {
		int get = READ_GET(chan, &prev_get, &cnt);
		if (unlikely(get < 0)) {
			if (get == -EINVAL)
				continue;

			return get;
		}

		if (get <= chan->dma.cur) {
			chan->dma.free = chan->dma.max - chan->dma.cur;
			if (chan->dma.free >= count)
				break;

			FIRE_RING(chan);
			do {
				get = READ_GET(chan, &prev_get, &cnt);
				if (unlikely(get < 0)) {
					if (get == -EINVAL)
						continue;
					return get;
				}
			} while (get <= 0); /* retry while GET == 0 or -EINVAL */
			chan->dma.cur = 0;
			chan->dma.put = 0;
		}

		chan->dma.free = get - chan->dma.cur - 1;
	}

	return 0;
}

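/* Wait for "size" dwords of free space in the channel's push buffer.  On
 * hardware with an indirect buffer this dispatches to nv50_dma_wait()
 * above, which also reserves "slots" IB entries.  Free space is counted
 * up to, but never including, GET; see the "- 1" comment at the bottom
 * of the loop.
 */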
int
nouveau_dma_wait(struct nouveau_channel *chan, int slots, int size)
{
	uint64_t prev_get = 0;
	int cnt = 0, get;

	if (chan->dma.ib_max)
		return nv50_dma_wait(chan, slots, size);

	while (chan->dma.free < size) {
		get = READ_GET(chan, &prev_get, &cnt);
		if (unlikely(get == -EBUSY))
			return -EBUSY;

		/* loop until we have a usable GET pointer. the value
		 * we read from the GPU may be outside the main ring if
		 * PFIFO is processing a buffer called from the main ring;
		 * discard these values until something sensible is seen.
		 *
		 * the other case where we discard GET is while the GPU
		 * is fetching from the SKIPS area, so the code below
		 * doesn't have to deal with some fun corner cases.
		 */
		if (unlikely(get == -EINVAL) || get < NOUVEAU_DMA_SKIPS)
			continue;

		if (get <= chan->dma.cur) {
			/* engine is fetching behind us, or is completely
			 * idle (GET == PUT), so we have free space up to
			 * the end of the push buffer
			 *
			 * we can only hit this path once per call: after
			 * looping back to the beginning of the push buffer
			 * we'll take the fetching-ahead-of-us path from
			 * that point on.
			 *
			 * the *one* exception to that rule is if we read
			 * GET == PUT, in which case the conditional below
			 * will always succeed and break us out of the wait
			 * loop.
			 */
			chan->dma.free = chan->dma.max - chan->dma.cur;
			if (chan->dma.free >= size)
				break;

			/* not enough space left at the end of the push
			 * buffer; instruct the GPU to jump back to the start
			 * right after processing the currently pending
			 * commands.
			 */
			OUT_RING(chan, chan->push.vma.offset | 0x20000000);

			/* wait for GET to leave the skips area. this
			 * prevents us from writing PUT == GET, a race
			 * that would make us think the GPU is idle when
			 * it's not.
			 */
			do {
				get = READ_GET(chan, &prev_get, &cnt);
				if (unlikely(get == -EBUSY))
					return -EBUSY;
				if (unlikely(get == -EINVAL))
					continue;
			} while (get <= NOUVEAU_DMA_SKIPS);
			WRITE_PUT(NOUVEAU_DMA_SKIPS);

			/* we're now submitting commands at the start of
			 * the push buffer.
			 */
			chan->dma.cur =
				chan->dma.put = NOUVEAU_DMA_SKIPS;
		}

		/* the engine is fetching ahead of us, so we have space up
		 * to the current GET pointer. the "- 1" ensures there's
		 * space left to emit a jump back to the beginning of the
		 * push buffer if we require it. we can never get GET == PUT
		 * here, so this is safe.
		 */
		chan->dma.free = get - chan->dma.cur - 1;
	}

	return 0;
}
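
/*
 * Worked example of the wrap path above (made-up numbers): with
 * dma.max == 0x1000, dma.cur == 0xff0 and an adjusted GET of 0x200, only
 * 0x10 dwords remain before the end of the buffer.  For size > 0x10 we
 * emit the jump command (push.vma.offset | 0x20000000), wait for GET to
 * move past NOUVEAU_DMA_SKIPS, and restart with cur == put ==
 * NOUVEAU_DMA_SKIPS; assuming GET still reads 0x200 afterwards, the loop
 * then sees free == 0x200 - NOUVEAU_DMA_SKIPS - 1.
 */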