/* $NetBSD: savage_bci.c,v 1.1.1.3 2018/08/27 01:34:59 riastradh Exp $ */

/* savage_bci.c -- BCI support for Savage
 *
 * Copyright 2004 Felix Kuehling
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
 * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: savage_bci.c,v 1.1.1.3 2018/08/27 01:34:59 riastradh Exp $");

#include <drm/drmP.h>
#include <drm/savage_drm.h>
#include "savage_drv.h"

/* Need a long timeout because shadow status updates can take a while
 * and so can waiting for events when the queue is full. */
#define SAVAGE_DEFAULT_USEC_TIMEOUT	1000000	/* 1s */
#define SAVAGE_EVENT_USEC_TIMEOUT	5000000	/* 5s */
#define SAVAGE_FREELIST_DEBUG		0

static int savage_do_cleanup_bci(struct drm_device *dev);

static int
savage_bci_wait_fifo_shadow(drm_savage_private_t * dev_priv, unsigned int n)
{
	uint32_t mask = dev_priv->status_used_mask;
	uint32_t threshold = dev_priv->bci_threshold_hi;
	uint32_t status;
	int i;

#if SAVAGE_BCI_DEBUG
	if (n > dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - threshold)
		DRM_ERROR("Trying to emit %d words "
			  "(more than guaranteed space in COB)\n", n);
#endif

	for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) {
		mb();
		status = dev_priv->status_ptr[0];
		if ((status & mask) < threshold)
			return 0;
		DRM_UDELAY(1);
	}

#if SAVAGE_BCI_DEBUG
	DRM_ERROR("failed!\n");
	DRM_INFO(" status=0x%08x, threshold=0x%08x\n", status, threshold);
#endif
	return -EBUSY;
}

static int
savage_bci_wait_fifo_s3d(drm_savage_private_t * dev_priv, unsigned int n)
{
	uint32_t maxUsed = dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - n;
	uint32_t status;
	int i;

	for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) {
		status = SAVAGE_READ(SAVAGE_STATUS_WORD0);
		if ((status & SAVAGE_FIFO_USED_MASK_S3D) <= maxUsed)
			return 0;
		DRM_UDELAY(1);
	}

#if SAVAGE_BCI_DEBUG
	DRM_ERROR("failed!\n");
	DRM_INFO(" status=0x%08x\n", status);
#endif
	return -EBUSY;
}

static int
savage_bci_wait_fifo_s4(drm_savage_private_t * dev_priv, unsigned int n)
{
	uint32_t maxUsed = dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - n;
	uint32_t status;
	int i;

	for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) {
		status = SAVAGE_READ(SAVAGE_ALT_STATUS_WORD0);
		if ((status & SAVAGE_FIFO_USED_MASK_S4) <= maxUsed)
			return 0;
		DRM_UDELAY(1);
	}

#if SAVAGE_BCI_DEBUG
	DRM_ERROR("failed!\n");
	DRM_INFO(" status=0x%08x\n", status);
#endif
	return -EBUSY;
}

/*
 * Waiting for events.
 *
 * The BIOS resets the event tag to 0 on mode changes. Therefore we
 * never emit 0 as an event tag. If we find a 0 event tag we know the
 * BIOS stomped on it and return success, assuming that the BIOS waited
 * for engine idle.
 *
 * Note: if the Xserver uses the event tag it has to follow the same
 * rule. Otherwise there may be glitches every 2^16 events.
 */
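/*
 * A minimal sketch (illustrative only, not driver code) of the tag
 * comparison used by the wait functions below: event tags live in a
 * circular 16-bit space, so "event e has been reached" means the
 * hardware tag is no more than 0x7fff ahead of e modulo 2^16.
 *
 *	uint16_t hw = status & 0xffff;
 *	if (hw == 0 || (uint16_t)(hw - e) <= 0x7fff)
 *		... e has been reached (or the BIOS reset the tag) ...
 */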
static int
savage_bci_wait_event_shadow(drm_savage_private_t * dev_priv, uint16_t e)
{
	uint32_t status;
	int i;

	for (i = 0; i < SAVAGE_EVENT_USEC_TIMEOUT; i++) {
		mb();
		status = dev_priv->status_ptr[1];
		if ((((status & 0xffff) - e) & 0xffff) <= 0x7fff ||
		    (status & 0xffff) == 0)
			return 0;
		DRM_UDELAY(1);
	}

#if SAVAGE_BCI_DEBUG
	DRM_ERROR("failed!\n");
	DRM_INFO(" status=0x%08x, e=0x%04x\n", status, e);
#endif

	return -EBUSY;
}

static int
savage_bci_wait_event_reg(drm_savage_private_t * dev_priv, uint16_t e)
{
	uint32_t status;
	int i;

	for (i = 0; i < SAVAGE_EVENT_USEC_TIMEOUT; i++) {
		status = SAVAGE_READ(SAVAGE_STATUS_WORD1);
		if ((((status & 0xffff) - e) & 0xffff) <= 0x7fff ||
		    (status & 0xffff) == 0)
			return 0;
		DRM_UDELAY(1);
	}

#if SAVAGE_BCI_DEBUG
	DRM_ERROR("failed!\n");
	DRM_INFO(" status=0x%08x, e=0x%04x\n", status, e);
#endif

	return -EBUSY;
}

uint16_t savage_bci_emit_event(drm_savage_private_t * dev_priv,
			       unsigned int flags)
{
	uint16_t count;
	BCI_LOCALS;

	if (dev_priv->status_ptr) {
		/* coordinate with Xserver */
		count = dev_priv->status_ptr[1023];
		if (count < dev_priv->event_counter)
			dev_priv->event_wrap++;
	} else {
		count = dev_priv->event_counter;
	}
	count = (count + 1) & 0xffff;
	if (count == 0) {
		count++;	/* See the comment above savage_wait_event_*. */
		dev_priv->event_wrap++;
	}
	dev_priv->event_counter = count;
	if (dev_priv->status_ptr)
		dev_priv->status_ptr[1023] = (uint32_t) count;

	if ((flags & (SAVAGE_WAIT_2D | SAVAGE_WAIT_3D))) {
		unsigned int wait_cmd = BCI_CMD_WAIT;
		if ((flags & SAVAGE_WAIT_2D))
			wait_cmd |= BCI_CMD_WAIT_2D;
		if ((flags & SAVAGE_WAIT_3D))
			wait_cmd |= BCI_CMD_WAIT_3D;
		BEGIN_BCI(2);
		BCI_WRITE(wait_cmd);
	} else {
		BEGIN_BCI(1);
	}
	BCI_WRITE(BCI_CMD_UPDATE_EVENT_TAG | (uint32_t) count);

	return count;
}
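
/*
 * Illustrative usage sketch (not driver code): a caller fences 3D
 * rendering by emitting an event and stalling until the engine
 * reaches it.
 *
 *	uint16_t e = savage_bci_emit_event(dev_priv, SAVAGE_WAIT_3D);
 *	if (dev_priv->wait_evnt(dev_priv, e) < 0)
 *		DRM_ERROR("timed out waiting for event 0x%04x\n", e);
 */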

/*
 * Freelist management
 */
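/*
 * The freelist is a doubly-linked list bracketed by two buffer-less
 * sentinel entries, head and tail:
 *
 *	head <-> newest ... oldest <-> tail
 *
 * Buffers are inserted after head and reclaimed at tail->prev, so the
 * tail end always holds the buffer most likely to have aged past the
 * last event the hardware has processed.
 */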
static int savage_freelist_init(struct drm_device * dev)
{
	drm_savage_private_t *dev_priv = dev->dev_private;
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf *buf;
	drm_savage_buf_priv_t *entry;
	int i;
	DRM_DEBUG("count=%d\n", dma->buf_count);

	dev_priv->head.next = &dev_priv->tail;
	dev_priv->head.prev = NULL;
	dev_priv->head.buf = NULL;

	dev_priv->tail.next = NULL;
	dev_priv->tail.prev = &dev_priv->head;
	dev_priv->tail.buf = NULL;

	for (i = 0; i < dma->buf_count; i++) {
		buf = dma->buflist[i];
		entry = buf->dev_private;

		SET_AGE(&entry->age, 0, 0);
		entry->buf = buf;

		entry->next = dev_priv->head.next;
		entry->prev = &dev_priv->head;
		dev_priv->head.next->prev = entry;
		dev_priv->head.next = entry;
	}

	return 0;
}

static struct drm_buf *savage_freelist_get(struct drm_device * dev)
{
	drm_savage_private_t *dev_priv = dev->dev_private;
	drm_savage_buf_priv_t *tail = dev_priv->tail.prev;
	uint16_t event;
	unsigned int wrap;
	DRM_DEBUG("\n");

	UPDATE_EVENT_COUNTER();
	if (dev_priv->status_ptr)
		event = dev_priv->status_ptr[1] & 0xffff;
	else
		event = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff;
	wrap = dev_priv->event_wrap;
	if (event > dev_priv->event_counter)
		wrap--;	/* hardware hasn't passed the last wrap yet */

	DRM_DEBUG(" tail=0x%04x %d\n", tail->age.event, tail->age.wrap);
	DRM_DEBUG(" head=0x%04x %d\n", event, wrap);

	if (tail->buf && (TEST_AGE(&tail->age, event, wrap) || event == 0)) {
		drm_savage_buf_priv_t *next = tail->next;
		drm_savage_buf_priv_t *prev = tail->prev;
		prev->next = next;
		next->prev = prev;
		tail->next = tail->prev = NULL;
		return tail->buf;
	}

	DRM_DEBUG("returning NULL, tail->buf=%p!\n", tail->buf);
	return NULL;
}

void savage_freelist_put(struct drm_device * dev, struct drm_buf * buf)
{
	drm_savage_private_t *dev_priv = dev->dev_private;
	drm_savage_buf_priv_t *entry = buf->dev_private, *prev, *next;

	DRM_DEBUG("age=0x%04x wrap=%d\n", entry->age.event, entry->age.wrap);

	if (entry->next != NULL || entry->prev != NULL) {
		DRM_ERROR("entry already on freelist.\n");
		return;
	}

	prev = &dev_priv->head;
	next = prev->next;
	prev->next = entry;
	next->prev = entry;
	entry->prev = prev;
	entry->next = next;
}

/*
 * Command DMA
 */
static int savage_dma_init(drm_savage_private_t * dev_priv)
{
	unsigned int i;

	dev_priv->nr_dma_pages = dev_priv->cmd_dma->size /
	    (SAVAGE_DMA_PAGE_SIZE * 4);
	dev_priv->dma_pages = kmalloc(sizeof(drm_savage_dma_page_t) *
				      dev_priv->nr_dma_pages, GFP_KERNEL);
	if (dev_priv->dma_pages == NULL)
		return -ENOMEM;

	for (i = 0; i < dev_priv->nr_dma_pages; ++i) {
		SET_AGE(&dev_priv->dma_pages[i].age, 0, 0);
		dev_priv->dma_pages[i].used = 0;
		dev_priv->dma_pages[i].flushed = 0;
	}
	SET_AGE(&dev_priv->last_dma_age, 0, 0);

	dev_priv->first_dma_page = 0;
	dev_priv->current_dma_page = 0;

	return 0;
}
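
/*
 * For scale, a worked example (illustrative; SAVAGE_DMA_PAGE_SIZE is
 * defined in savage_drv.h in 32-bit words, and the map size is in
 * bytes): a 64 KiB cmd_dma map with 1024-word pages yields
 * 65536 / (1024 * 4) = 16 DMA pages.
 */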

void savage_dma_reset(drm_savage_private_t * dev_priv)
{
	uint16_t event;
	unsigned int wrap, i;
	event = savage_bci_emit_event(dev_priv, 0);
	wrap = dev_priv->event_wrap;
	for (i = 0; i < dev_priv->nr_dma_pages; ++i) {
		SET_AGE(&dev_priv->dma_pages[i].age, event, wrap);
		dev_priv->dma_pages[i].used = 0;
		dev_priv->dma_pages[i].flushed = 0;
	}
	SET_AGE(&dev_priv->last_dma_age, event, wrap);
	dev_priv->first_dma_page = dev_priv->current_dma_page = 0;
}

void savage_dma_wait(drm_savage_private_t * dev_priv, unsigned int page)
{
	uint16_t event;
	unsigned int wrap;

	/* Faked DMA buffer pages don't age. */
	if (dev_priv->cmd_dma == &dev_priv->fake_dma)
		return;

	UPDATE_EVENT_COUNTER();
	if (dev_priv->status_ptr)
		event = dev_priv->status_ptr[1] & 0xffff;
	else
		event = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff;
	wrap = dev_priv->event_wrap;
	if (event > dev_priv->event_counter)
		wrap--;	/* hardware hasn't passed the last wrap yet */

	if (dev_priv->dma_pages[page].age.wrap > wrap ||
	    (dev_priv->dma_pages[page].age.wrap == wrap &&
	     dev_priv->dma_pages[page].age.event > event)) {
		if (dev_priv->wait_evnt(dev_priv,
					dev_priv->dma_pages[page].age.event)
		    < 0)
			DRM_ERROR("wait_evnt failed!\n");
	}
}

uint32_t *savage_dma_alloc(drm_savage_private_t * dev_priv, unsigned int n)
{
	unsigned int cur = dev_priv->current_dma_page;
	unsigned int rest = SAVAGE_DMA_PAGE_SIZE -
	    dev_priv->dma_pages[cur].used;
	unsigned int nr_pages = (n - rest + SAVAGE_DMA_PAGE_SIZE - 1) /
	    SAVAGE_DMA_PAGE_SIZE;
	uint32_t *dma_ptr;
	unsigned int i;

	DRM_DEBUG("cur=%u, cur->used=%u, n=%u, rest=%u, nr_pages=%u\n",
		  cur, dev_priv->dma_pages[cur].used, n, rest, nr_pages);

	if (cur + nr_pages < dev_priv->nr_dma_pages) {
		dma_ptr = (uint32_t *) dev_priv->cmd_dma->handle +
		    cur * SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[cur].used;
		if (n < rest)
			rest = n;
		dev_priv->dma_pages[cur].used += rest;
		n -= rest;
		cur++;
	} else {
		dev_priv->dma_flush(dev_priv);
		nr_pages =
		    (n + SAVAGE_DMA_PAGE_SIZE - 1) / SAVAGE_DMA_PAGE_SIZE;
		for (i = cur; i < dev_priv->nr_dma_pages; ++i) {
			dev_priv->dma_pages[i].age = dev_priv->last_dma_age;
			dev_priv->dma_pages[i].used = 0;
			dev_priv->dma_pages[i].flushed = 0;
		}
		dma_ptr = (uint32_t *) dev_priv->cmd_dma->handle;
		dev_priv->first_dma_page = cur = 0;
	}
	for (i = cur; nr_pages > 0; ++i, --nr_pages) {
#if SAVAGE_DMA_DEBUG
		if (dev_priv->dma_pages[i].used) {
			DRM_ERROR("unflushed page %u: used=%u\n",
				  i, dev_priv->dma_pages[i].used);
		}
#endif
		if (n > SAVAGE_DMA_PAGE_SIZE)
			dev_priv->dma_pages[i].used = SAVAGE_DMA_PAGE_SIZE;
		else
			dev_priv->dma_pages[i].used = n;
		n -= SAVAGE_DMA_PAGE_SIZE;
	}
	dev_priv->current_dma_page = --i;

	DRM_DEBUG("cur=%u, cur->used=%u, n=%u\n",
		  i, dev_priv->dma_pages[i].used, n);

	savage_dma_wait(dev_priv, dev_priv->current_dma_page);

	return dma_ptr;
}
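
/*
 * Illustrative usage sketch (not driver code): reserve two words in
 * the command DMA stream, fill them (here with BCI_CMD_WAIT noops),
 * and let the flush hook hand them to the hardware.
 *
 *	uint32_t *dma_ptr = savage_dma_alloc(dev_priv, 2);
 *	dma_ptr[0] = BCI_CMD_WAIT;
 *	dma_ptr[1] = BCI_CMD_WAIT;
 *	dev_priv->dma_flush(dev_priv);
 */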

static void savage_dma_flush(drm_savage_private_t * dev_priv)
{
	unsigned int first = dev_priv->first_dma_page;
	unsigned int cur = dev_priv->current_dma_page;
	uint16_t event;
	unsigned int wrap, pad, align, len, i;
	unsigned long phys_addr;
	BCI_LOCALS;

	if (first == cur &&
	    dev_priv->dma_pages[cur].used == dev_priv->dma_pages[cur].flushed)
		return;

	/* pad length to multiples of 2 entries
	 * align start of next DMA block to multiples of 8 entries */
	pad = -dev_priv->dma_pages[cur].used & 1;
	align = -(dev_priv->dma_pages[cur].used + pad) & 7;
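	/* Worked example (illustrative): used = 13 gives pad = 1,
	 * rounding the block up to 14 entries; align = 2 then pushes
	 * the start of the next block to 16, a multiple of 8. */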

	DRM_DEBUG("first=%u, cur=%u, first->flushed=%u, cur->used=%u, "
		  "pad=%u, align=%u\n",
		  first, cur, dev_priv->dma_pages[first].flushed,
		  dev_priv->dma_pages[cur].used, pad, align);

	/* pad with noops */
	if (pad) {
		uint32_t *dma_ptr = (uint32_t *) dev_priv->cmd_dma->handle +
		    cur * SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[cur].used;
		dev_priv->dma_pages[cur].used += pad;
		while (pad != 0) {
			*dma_ptr++ = BCI_CMD_WAIT;
			pad--;
		}
	}

	mb();

	/* do flush ... */
	phys_addr = dev_priv->cmd_dma->offset +
	    (first * SAVAGE_DMA_PAGE_SIZE +
	     dev_priv->dma_pages[first].flushed) * 4;
	len = (cur - first) * SAVAGE_DMA_PAGE_SIZE +
	    dev_priv->dma_pages[cur].used - dev_priv->dma_pages[first].flushed;

	DRM_DEBUG("phys_addr=%lx, len=%u\n",
		  phys_addr | dev_priv->dma_type, len);

	BEGIN_BCI(3);
	BCI_SET_REGISTERS(SAVAGE_DMABUFADDR, 1);
	BCI_WRITE(phys_addr | dev_priv->dma_type);
	BCI_DMA(len);

	/* fix alignment of the start of the next block */
	dev_priv->dma_pages[cur].used += align;

	/* age DMA pages */
	event = savage_bci_emit_event(dev_priv, 0);
	wrap = dev_priv->event_wrap;
	for (i = first; i < cur; ++i) {
		SET_AGE(&dev_priv->dma_pages[i].age, event, wrap);
		dev_priv->dma_pages[i].used = 0;
		dev_priv->dma_pages[i].flushed = 0;
	}
	/* age the current page only when it's full */
	if (dev_priv->dma_pages[cur].used == SAVAGE_DMA_PAGE_SIZE) {
		SET_AGE(&dev_priv->dma_pages[cur].age, event, wrap);
		dev_priv->dma_pages[cur].used = 0;
		dev_priv->dma_pages[cur].flushed = 0;
		/* advance to next page */
		cur++;
		if (cur == dev_priv->nr_dma_pages)
			cur = 0;
		dev_priv->first_dma_page = dev_priv->current_dma_page = cur;
	} else {
		dev_priv->first_dma_page = cur;
		/* i == cur here, after the aging loop above */
		dev_priv->dma_pages[cur].flushed = dev_priv->dma_pages[i].used;
	}
	SET_AGE(&dev_priv->last_dma_age, event, wrap);

	DRM_DEBUG("first=cur=%u, cur->used=%u, cur->flushed=%u\n", cur,
		  dev_priv->dma_pages[cur].used,
		  dev_priv->dma_pages[cur].flushed);
}

static void savage_fake_dma_flush(drm_savage_private_t * dev_priv)
{
	unsigned int i, j;
	BCI_LOCALS;

	if (dev_priv->first_dma_page == dev_priv->current_dma_page &&
	    dev_priv->dma_pages[dev_priv->current_dma_page].used == 0)
		return;

	DRM_DEBUG("first=%u, cur=%u, cur->used=%u\n",
		  dev_priv->first_dma_page, dev_priv->current_dma_page,
		  dev_priv->dma_pages[dev_priv->current_dma_page].used);

	for (i = dev_priv->first_dma_page;
	     i <= dev_priv->current_dma_page && dev_priv->dma_pages[i].used;
	     ++i) {
		uint32_t *dma_ptr = (uint32_t *) dev_priv->cmd_dma->handle +
		    i * SAVAGE_DMA_PAGE_SIZE;
#if SAVAGE_DMA_DEBUG
		/* Sanity check: all pages except the last one must be full. */
		if (i < dev_priv->current_dma_page &&
		    dev_priv->dma_pages[i].used != SAVAGE_DMA_PAGE_SIZE) {
			DRM_ERROR("partial DMA page %u: used=%u",
				  i, dev_priv->dma_pages[i].used);
		}
#endif
		BEGIN_BCI(dev_priv->dma_pages[i].used);
		for (j = 0; j < dev_priv->dma_pages[i].used; ++j) {
			BCI_WRITE(dma_ptr[j]);
		}
		dev_priv->dma_pages[i].used = 0;
	}

	/* reset to first page */
	dev_priv->first_dma_page = dev_priv->current_dma_page = 0;
}

int savage_driver_load(struct drm_device *dev, unsigned long chipset)
{
	drm_savage_private_t *dev_priv;

	dev_priv = kzalloc(sizeof(drm_savage_private_t), GFP_KERNEL);
	if (dev_priv == NULL)
		return -ENOMEM;

	dev->dev_private = (void *)dev_priv;

	dev_priv->chipset = (enum savage_family)chipset;

	pci_set_master(dev->pdev);

	return 0;
}

/*
 * Initialize mappings. On Savage4 and SavageIX the alignment
 * and size of the aperture are not suitable for automatic MTRR setup
 * in drm_legacy_addmap. Therefore we add them manually before the maps are
 * initialized, and tear them down on last close.
 */
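/*
 * Rough address-space sketch for the Savage3D-series branch below (an
 * illustration derived from the constants used in this file; exact
 * values live in savage_drv.h): the single 128 MB BAR 0 holds the
 * frame buffer at offset 0, the MMIO registers right behind it at
 * fb_base + SAVAGE_FB_SIZE_S3, and the aperture at
 * fb_base + SAVAGE_APERTURE_OFFSET. The three MTRRs added below make
 * the frame buffer and aperture write-combining while leaving the
 * MMIO range in between uncached.
 */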
int savage_driver_firstopen(struct drm_device *dev)
{
	drm_savage_private_t *dev_priv = dev->dev_private;
	unsigned long mmio_base, fb_base, fb_size, aperture_base;
	/* fb_rsrc and aper_rsrc aren't really used currently, but still exist
	 * in case we decide we need information on the BAR for BSD in the
	 * future.
	 */
	unsigned int fb_rsrc, aper_rsrc;
	int ret = 0;

	if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
		fb_rsrc = 0;
		fb_base = pci_resource_start(dev->pdev, 0);
		fb_size = SAVAGE_FB_SIZE_S3;
		mmio_base = fb_base + SAVAGE_FB_SIZE_S3;
		aper_rsrc = 0;
		aperture_base = fb_base + SAVAGE_APERTURE_OFFSET;
		/* this should always be true */
		if (pci_resource_len(dev->pdev, 0) == 0x08000000) {
			/* Don't make MMIO write-combining! We need 3
			 * MTRRs. */
			dev_priv->mtrr_handles[0] =
			    arch_phys_wc_add(fb_base, 0x01000000);
			dev_priv->mtrr_handles[1] =
			    arch_phys_wc_add(fb_base + 0x02000000,
					     0x02000000);
			dev_priv->mtrr_handles[2] =
			    arch_phys_wc_add(fb_base + 0x04000000,
					     0x04000000);
		} else {
			DRM_ERROR("strange pci_resource_len %08llx\n",
				  (unsigned long long)
				  pci_resource_len(dev->pdev, 0));
		}
	} else if (dev_priv->chipset != S3_SUPERSAVAGE &&
		   dev_priv->chipset != S3_SAVAGE2000) {
		mmio_base = pci_resource_start(dev->pdev, 0);
		fb_rsrc = 1;
		fb_base = pci_resource_start(dev->pdev, 1);
		fb_size = SAVAGE_FB_SIZE_S4;
		aper_rsrc = 1;
		aperture_base = fb_base + SAVAGE_APERTURE_OFFSET;
		/* this should always be true */
		if (pci_resource_len(dev->pdev, 1) == 0x08000000) {
			/* Can use one MTRR to cover both fb and
			 * aperture. */
			dev_priv->mtrr_handles[0] =
			    arch_phys_wc_add(fb_base,
					     0x08000000);
		} else {
			DRM_ERROR("strange pci_resource_len %08llx\n",
				  (unsigned long long)
				  pci_resource_len(dev->pdev, 1));
		}
	} else {
		mmio_base = pci_resource_start(dev->pdev, 0);
		fb_rsrc = 1;
		fb_base = pci_resource_start(dev->pdev, 1);
		fb_size = pci_resource_len(dev->pdev, 1);
		aper_rsrc = 2;
		aperture_base = pci_resource_start(dev->pdev, 2);
		/* Automatic MTRR setup will do the right thing. */
	}

	ret = drm_legacy_addmap(dev, mmio_base, SAVAGE_MMIO_SIZE,
				_DRM_REGISTERS, _DRM_READ_ONLY,
				&dev_priv->mmio);
	if (ret)
		return ret;

	ret = drm_legacy_addmap(dev, fb_base, fb_size, _DRM_FRAME_BUFFER,
				_DRM_WRITE_COMBINING, &dev_priv->fb);
	if (ret)
		return ret;

	ret = drm_legacy_addmap(dev, aperture_base, SAVAGE_APERTURE_SIZE,
				_DRM_FRAME_BUFFER, _DRM_WRITE_COMBINING,
				&dev_priv->aperture);
	return ret;
}

/*
 * Delete MTRRs and free device-private data.
 */
void savage_driver_lastclose(struct drm_device *dev)
{
	drm_savage_private_t *dev_priv = dev->dev_private;
	int i;

	for (i = 0; i < 3; ++i) {
		arch_phys_wc_del(dev_priv->mtrr_handles[i]);
		dev_priv->mtrr_handles[i] = 0;
	}
}

int savage_driver_unload(struct drm_device *dev)
{
	drm_savage_private_t *dev_priv = dev->dev_private;

	kfree(dev_priv);

	return 0;
}

static int savage_do_init_bci(struct drm_device * dev, drm_savage_init_t * init)
{
	drm_savage_private_t *dev_priv = dev->dev_private;

	if (init->fb_bpp != 16 && init->fb_bpp != 32) {
		DRM_ERROR("invalid frame buffer bpp %d!\n", init->fb_bpp);
		return -EINVAL;
	}
	if (init->depth_bpp != 16 && init->depth_bpp != 32) {
		DRM_ERROR("invalid depth buffer bpp %d!\n", init->depth_bpp);
		return -EINVAL;
	}
	if (init->dma_type != SAVAGE_DMA_AGP &&
	    init->dma_type != SAVAGE_DMA_PCI) {
		DRM_ERROR("invalid dma memory type %d!\n", init->dma_type);
		return -EINVAL;
	}

	dev_priv->cob_size = init->cob_size;
	dev_priv->bci_threshold_lo = init->bci_threshold_lo;
	dev_priv->bci_threshold_hi = init->bci_threshold_hi;
	dev_priv->dma_type = init->dma_type;

	dev_priv->fb_bpp = init->fb_bpp;
	dev_priv->front_offset = init->front_offset;
	dev_priv->front_pitch = init->front_pitch;
	dev_priv->back_offset = init->back_offset;
	dev_priv->back_pitch = init->back_pitch;
	dev_priv->depth_bpp = init->depth_bpp;
	dev_priv->depth_offset = init->depth_offset;
	dev_priv->depth_pitch = init->depth_pitch;

	dev_priv->texture_offset = init->texture_offset;
	dev_priv->texture_size = init->texture_size;

	dev_priv->sarea = drm_legacy_getsarea(dev);
	if (!dev_priv->sarea) {
		DRM_ERROR("could not find sarea!\n");
		savage_do_cleanup_bci(dev);
		return -EINVAL;
	}
	if (init->status_offset != 0) {
		dev_priv->status = drm_legacy_findmap(dev, init->status_offset);
		if (!dev_priv->status) {
			DRM_ERROR("could not find shadow status region!\n");
			savage_do_cleanup_bci(dev);
			return -EINVAL;
		}
	} else {
		dev_priv->status = NULL;
	}
	if (dev_priv->dma_type == SAVAGE_DMA_AGP && init->buffers_offset) {
		dev->agp_buffer_token = init->buffers_offset;
		dev->agp_buffer_map = drm_legacy_findmap(dev,
							 init->buffers_offset);
		if (!dev->agp_buffer_map) {
			DRM_ERROR("could not find DMA buffer region!\n");
			savage_do_cleanup_bci(dev);
			return -EINVAL;
		}
		drm_legacy_ioremap(dev->agp_buffer_map, dev);
		if (!dev->agp_buffer_map->handle) {
			DRM_ERROR("failed to ioremap DMA buffer region!\n");
			savage_do_cleanup_bci(dev);
			return -ENOMEM;
		}
	}
	if (init->agp_textures_offset) {
		dev_priv->agp_textures =
		    drm_legacy_findmap(dev, init->agp_textures_offset);
		if (!dev_priv->agp_textures) {
			DRM_ERROR("could not find agp texture region!\n");
			savage_do_cleanup_bci(dev);
			return -EINVAL;
		}
	} else {
		dev_priv->agp_textures = NULL;
	}

	if (init->cmd_dma_offset) {
		if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
			DRM_ERROR("command DMA not supported on "
				  "Savage3D/MX/IX.\n");
			savage_do_cleanup_bci(dev);
			return -EINVAL;
		}
		if (dev->dma && dev->dma->buflist) {
			DRM_ERROR("command and vertex DMA not supported "
				  "at the same time.\n");
			savage_do_cleanup_bci(dev);
			return -EINVAL;
		}
		dev_priv->cmd_dma = drm_legacy_findmap(dev, init->cmd_dma_offset);
		if (!dev_priv->cmd_dma) {
			DRM_ERROR("could not find command DMA region!\n");
			savage_do_cleanup_bci(dev);
			return -EINVAL;
		}
		if (dev_priv->dma_type == SAVAGE_DMA_AGP) {
			if (dev_priv->cmd_dma->type != _DRM_AGP) {
				DRM_ERROR("AGP command DMA region is not a "
					  "_DRM_AGP map!\n");
				savage_do_cleanup_bci(dev);
				return -EINVAL;
			}
			drm_legacy_ioremap(dev_priv->cmd_dma, dev);
			if (!dev_priv->cmd_dma->handle) {
				DRM_ERROR("failed to ioremap command "
					  "DMA region!\n");
				savage_do_cleanup_bci(dev);
				return -ENOMEM;
			}
		} else if (dev_priv->cmd_dma->type != _DRM_CONSISTENT) {
			DRM_ERROR("PCI command DMA region is not a "
				  "_DRM_CONSISTENT map!\n");
			savage_do_cleanup_bci(dev);
			return -EINVAL;
		}
	} else {
		dev_priv->cmd_dma = NULL;
	}

	dev_priv->dma_flush = savage_dma_flush;
	if (!dev_priv->cmd_dma) {
		DRM_DEBUG("falling back to faked command DMA.\n");
		dev_priv->fake_dma.offset = 0;
		dev_priv->fake_dma.size = SAVAGE_FAKE_DMA_SIZE;
		dev_priv->fake_dma.type = _DRM_SHM;
		dev_priv->fake_dma.handle = kmalloc(SAVAGE_FAKE_DMA_SIZE,
						    GFP_KERNEL);
		if (!dev_priv->fake_dma.handle) {
			DRM_ERROR("could not allocate faked DMA buffer!\n");
			savage_do_cleanup_bci(dev);
			return -ENOMEM;
		}
		dev_priv->cmd_dma = &dev_priv->fake_dma;
		dev_priv->dma_flush = savage_fake_dma_flush;
	}

	dev_priv->sarea_priv =
	    (drm_savage_sarea_t *) ((uint8_t *) dev_priv->sarea->handle +
				    init->sarea_priv_offset);

	/* setup bitmap descriptors */
	{
		unsigned int color_tile_format;
		unsigned int depth_tile_format;
		unsigned int front_stride, back_stride, depth_stride;
		if (dev_priv->chipset <= S3_SAVAGE4) {
			color_tile_format = dev_priv->fb_bpp == 16 ?
			    SAVAGE_BD_TILE_16BPP : SAVAGE_BD_TILE_32BPP;
			depth_tile_format = dev_priv->depth_bpp == 16 ?
			    SAVAGE_BD_TILE_16BPP : SAVAGE_BD_TILE_32BPP;
		} else {
			color_tile_format = SAVAGE_BD_TILE_DEST;
			depth_tile_format = SAVAGE_BD_TILE_DEST;
		}
		front_stride = dev_priv->front_pitch / (dev_priv->fb_bpp / 8);
		back_stride = dev_priv->back_pitch / (dev_priv->fb_bpp / 8);
		depth_stride =
		    dev_priv->depth_pitch / (dev_priv->depth_bpp / 8);

		dev_priv->front_bd = front_stride | SAVAGE_BD_BW_DISABLE |
		    (dev_priv->fb_bpp << SAVAGE_BD_BPP_SHIFT) |
		    (color_tile_format << SAVAGE_BD_TILE_SHIFT);

		dev_priv->back_bd = back_stride | SAVAGE_BD_BW_DISABLE |
		    (dev_priv->fb_bpp << SAVAGE_BD_BPP_SHIFT) |
		    (color_tile_format << SAVAGE_BD_TILE_SHIFT);

		dev_priv->depth_bd = depth_stride | SAVAGE_BD_BW_DISABLE |
		    (dev_priv->depth_bpp << SAVAGE_BD_BPP_SHIFT) |
		    (depth_tile_format << SAVAGE_BD_TILE_SHIFT);
	}
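	/* Worked example (illustrative): a 32 bpp front buffer with a
	 * 4096-byte pitch gives front_stride = 4096 / (32 / 8) = 1024
	 * pixels per scanline in the bitmap descriptor. */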

	/* setup status and bci ptr */
	dev_priv->event_counter = 0;
	dev_priv->event_wrap = 0;
	dev_priv->bci_ptr = (volatile uint32_t *)
	    ((uint8_t *) dev_priv->mmio->handle + SAVAGE_BCI_OFFSET);
	if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
		dev_priv->status_used_mask = SAVAGE_FIFO_USED_MASK_S3D;
	} else {
		dev_priv->status_used_mask = SAVAGE_FIFO_USED_MASK_S4;
	}
	if (dev_priv->status != NULL) {
		dev_priv->status_ptr =
		    (volatile uint32_t *)dev_priv->status->handle;
		dev_priv->wait_fifo = savage_bci_wait_fifo_shadow;
		dev_priv->wait_evnt = savage_bci_wait_event_shadow;
		dev_priv->status_ptr[1023] = dev_priv->event_counter;
	} else {
		dev_priv->status_ptr = NULL;
		if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
			dev_priv->wait_fifo = savage_bci_wait_fifo_s3d;
		} else {
			dev_priv->wait_fifo = savage_bci_wait_fifo_s4;
		}
		dev_priv->wait_evnt = savage_bci_wait_event_reg;
	}

	/* cliprect functions */
	if (S3_SAVAGE3D_SERIES(dev_priv->chipset))
		dev_priv->emit_clip_rect = savage_emit_clip_rect_s3d;
	else
		dev_priv->emit_clip_rect = savage_emit_clip_rect_s4;

	if (savage_freelist_init(dev) < 0) {
		DRM_ERROR("could not initialize freelist\n");
		savage_do_cleanup_bci(dev);
		return -ENOMEM;
	}

	if (savage_dma_init(dev_priv) < 0) {
		DRM_ERROR("could not initialize command DMA\n");
		savage_do_cleanup_bci(dev);
		return -ENOMEM;
	}

	return 0;
}

static int savage_do_cleanup_bci(struct drm_device * dev)
{
	drm_savage_private_t *dev_priv = dev->dev_private;

	if (dev_priv->cmd_dma == &dev_priv->fake_dma) {
		kfree(dev_priv->fake_dma.handle);
	} else if (dev_priv->cmd_dma && dev_priv->cmd_dma->handle &&
		   dev_priv->cmd_dma->type == _DRM_AGP &&
		   dev_priv->dma_type == SAVAGE_DMA_AGP)
		drm_legacy_ioremapfree(dev_priv->cmd_dma, dev);

	if (dev_priv->dma_type == SAVAGE_DMA_AGP &&
	    dev->agp_buffer_map && dev->agp_buffer_map->handle) {
		drm_legacy_ioremapfree(dev->agp_buffer_map, dev);
		/* make sure the next instance (which may be running
		 * in PCI mode) doesn't try to use an old
		 * agp_buffer_map. */
		dev->agp_buffer_map = NULL;
	}

	kfree(dev_priv->dma_pages);

	return 0;
}

static int savage_bci_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_savage_init_t *init = data;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	switch (init->func) {
	case SAVAGE_INIT_BCI:
		return savage_do_init_bci(dev, init);
	case SAVAGE_CLEANUP_BCI:
		return savage_do_cleanup_bci(dev);
	}

	return -EINVAL;
}

static int savage_bci_event_emit(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_savage_private_t *dev_priv = dev->dev_private;
	drm_savage_event_emit_t *event = data;

	DRM_DEBUG("\n");

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	event->count = savage_bci_emit_event(dev_priv, event->flags);
	event->count |= dev_priv->event_wrap << 16;

	return 0;
}

static int savage_bci_event_wait(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_savage_private_t *dev_priv = dev->dev_private;
	drm_savage_event_wait_t *event = data;
	unsigned int event_e, hw_e;
	unsigned int event_w, hw_w;

	DRM_DEBUG("\n");

	UPDATE_EVENT_COUNTER();
	if (dev_priv->status_ptr)
		hw_e = dev_priv->status_ptr[1] & 0xffff;
	else
		hw_e = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff;
	hw_w = dev_priv->event_wrap;
	if (hw_e > dev_priv->event_counter)
		hw_w--;	/* hardware hasn't passed the last wrap yet */

	event_e = event->count & 0xffff;
	event_w = event->count >> 16;

	/* Don't need to wait if
	 * - event counter wrapped since the event was emitted or
	 * - the hardware has advanced up to or over the event to wait for.
	 */
	if (event_w < hw_w || (event_w == hw_w && event_e <= hw_e))
		return 0;
	else
		return dev_priv->wait_evnt(dev_priv, event_e);
}
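
/*
 * Worked example (illustrative): suppose event_wrap = 2 and
 * event_counter = 0x0005, but the hardware tag still reads
 * hw_e = 0xfffe. Since 0xfffe > 0x0005 the tag must predate the last
 * wrap, so hw_w becomes 1. Waiting on event->count = 0x00020003
 * (wrap 2, event 3) then compares (2, 3) against (1, 0xfffe) and
 * correctly decides it still has to wait.
 */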

/*
 * DMA buffer management
 */

static int savage_bci_get_buffers(struct drm_device *dev,
				  struct drm_file *file_priv,
				  struct drm_dma *d)
{
	struct drm_buf *buf;
	int i;

	for (i = d->granted_count; i < d->request_count; i++) {
		buf = savage_freelist_get(dev);
		if (!buf)
			return -EAGAIN;

		buf->file_priv = file_priv;

		if (copy_to_user(&d->request_indices[i],
				 &buf->idx, sizeof(buf->idx)))
			return -EFAULT;
		if (copy_to_user(&d->request_sizes[i],
				 &buf->total, sizeof(buf->total)))
			return -EFAULT;

		d->granted_count++;
	}
	return 0;
}

int savage_bci_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_dma *d = data;
	int ret = 0;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	/* Please don't send us buffers.
	 */
	if (d->send_count != 0) {
		DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
			  DRM_CURRENTPID, d->send_count);
		return -EINVAL;
	}

	/* We'll send you buffers.
	 */
	if (d->request_count < 0 || d->request_count > dma->buf_count) {
		DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
			  DRM_CURRENTPID, d->request_count, dma->buf_count);
		return -EINVAL;
	}

	d->granted_count = 0;

	if (d->request_count) {
		ret = savage_bci_get_buffers(dev, file_priv, d);
	}

	return ret;
}

void savage_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	drm_savage_private_t *dev_priv = dev->dev_private;
	int release_idlelock = 0;
	int i;

	if (!dma)
		return;
	if (!dev_priv)
		return;
	if (!dma->buflist)
		return;

	if (file_priv->master && file_priv->master->lock.hw_lock) {
		drm_legacy_idlelock_take(&file_priv->master->lock);
		release_idlelock = 1;
	}

	for (i = 0; i < dma->buf_count; i++) {
		struct drm_buf *buf = dma->buflist[i];
		drm_savage_buf_priv_t *buf_priv = buf->dev_private;

		if (buf->file_priv == file_priv && buf_priv &&
		    buf_priv->next == NULL && buf_priv->prev == NULL) {
			uint16_t event;
			DRM_DEBUG("reclaimed from client\n");
			event = savage_bci_emit_event(dev_priv, SAVAGE_WAIT_3D);
			SET_AGE(&buf_priv->age, event, dev_priv->event_wrap);
			savage_freelist_put(dev, buf);
		}
	}

	if (release_idlelock)
		drm_legacy_idlelock_release(&file_priv->master->lock);
}

const struct drm_ioctl_desc savage_ioctls[] = {
	DRM_IOCTL_DEF_DRV(SAVAGE_BCI_INIT, savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(SAVAGE_BCI_CMDBUF, savage_bci_cmdbuf, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(SAVAGE_BCI_EVENT_EMIT, savage_bci_event_emit, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(SAVAGE_BCI_EVENT_WAIT, savage_bci_event_wait, DRM_AUTH),
};

int savage_max_ioctl = ARRAY_SIZE(savage_ioctls);