1/* -*- c-basic-offset: 4 -*- */
2/*
3 * Copyright © 2006 Intel Corporation
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 * Authors:
25 *    Eric Anholt <eric@anholt.net>
26 *
27 */
28
29#ifdef HAVE_CONFIG_H
30#include "config.h"
31#endif
32
33#include <assert.h>
34#include <stdlib.h>
35#include <errno.h>
36
37#include "xf86.h"
38#include "i830.h"
39#include "i830_ring.h"
40#include "i915_drm.h"
41
/**
 * Fake-bufmgr exec callback: submits a batchbuffer via the legacy LP ring
 * when GEM is unavailable (registered from intel_batch_init for non-i830/845
 * chips).
 *
 * Emits an MI_BATCH_BUFFER_START pointing at the buffer's fixed GTT offset.
 * The two trailing MI_NOOPs keep the ring emission quad-word sized.
 *
 * @param bo   batchbuffer object; bo->offset is its address as seen by the GPU
 * @param used bytes of batch filled in (unused here — the batch itself is
 *             terminated by MI_BATCH_BUFFER_END in intel_batch_flush)
 * @param priv the ScrnInfoPtr registered alongside this callback
 * @return always 0; this path has no failure reporting
 */
static int
intel_nondrm_exec(dri_bo *bo, unsigned int used, void *priv)
{
    ScrnInfoPtr pScrn = priv;
    I830Ptr pI830 = I830PTR(pScrn);

    BEGIN_LP_RING(4);
    /* NOTE(review): (2 << 6) is a chip-specific flags/length field in the
     * MI_BATCH_BUFFER_START dword — confirm exact meaning against the PRM. */
    OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
    OUT_RING(bo->offset);
    OUT_RING(MI_NOOP);
    OUT_RING(MI_NOOP);
    ADVANCE_LP_RING();

    return 0;
}
57
/**
 * Fake-bufmgr exec callback for i830/845: submits a batchbuffer via the
 * legacy LP ring (registered from intel_batch_init when GEM is unavailable).
 *
 * These chips use the older MI_BATCH_BUFFER command, which takes explicit
 * start and end addresses rather than a start plus in-band terminator.
 *
 * @param bo   batchbuffer object; bo->offset is its address as seen by the GPU
 * @param used bytes of batch filled in (unused — pI830->batch_used is read
 *             instead; NOTE(review): presumably these are equal at this point,
 *             verify against the flush path)
 * @param priv the ScrnInfoPtr registered alongside this callback
 * @return always 0; this path has no failure reporting
 */
static int
intel_nondrm_exec_i830(dri_bo *bo, unsigned int used, void *priv)
{
    ScrnInfoPtr pScrn = priv;
    I830Ptr pI830 = I830PTR(pScrn);

    BEGIN_LP_RING(4);
    OUT_RING(MI_BATCH_BUFFER);
    OUT_RING(bo->offset);
    /* End address: offset of the last dword in the batch (end appears to be
     * inclusive on this command — NOTE(review): confirm against the PRM). */
    OUT_RING(bo->offset + pI830->batch_used - 4);
    OUT_RING(MI_NOOP);
    ADVANCE_LP_RING();

    return 0;
}
73
/**
 * Fake-bufmgr fence-emit callback: hands out a monotonically increasing
 * fence cookie for each submitted request.
 *
 * Stub implementation that should be avoided when DRM functions are
 * available.  Cookies cycle through 1..0x7ffffff and never reach
 * 0x8000000; the fake bufmgr relies on the upper part of the range
 * staying unused (matching DRM's behavior).
 *
 * @param priv unused (ScrnInfoPtr registered with the callback)
 * @return the next fence cookie, never 0
 */
static unsigned int
intel_nondrm_emit(void *priv)
{
    static unsigned int fence = 0;

    /* Advance and wrap within [1, 0x7ffffff]: equivalent to incrementing
     * and resetting to 1 upon reaching 0x8000000. */
    fence = fence % 0x7ffffff + 1;

    return fence;
}
90
91/**
92 * Waits on a fence representing a request to be passed.
93 *
94 * Stub implementation that should be avoided when DRM functions are available.
95 */
96static void
97intel_nondrm_wait(unsigned int fence, void *priv)
98{
99    ScrnInfoPtr pScrn = priv;
100
101    i830_wait_ring_idle(pScrn);
102}
103
104static void
105intel_next_batch(ScrnInfoPtr pScrn)
106{
107    I830Ptr pI830 = I830PTR(pScrn);
108
109    /* The 865 has issues with larger-than-page-sized batch buffers. */
110    if (IS_I865G(pI830))
111	pI830->batch_bo = dri_bo_alloc(pI830->bufmgr, "batch", 4096, 4096);
112    else
113	pI830->batch_bo = dri_bo_alloc(pI830->bufmgr, "batch", 4096 * 4, 4096);
114
115    if (dri_bo_map(pI830->batch_bo, 1) != 0)
116	FatalError("Failed to map batchbuffer: %s\n", strerror(errno));
117
118    pI830->batch_used = 0;
119    pI830->batch_ptr = pI830->batch_bo->virtual;
120
121    /* If we are using DRI2, we don't know when another client has executed,
122     * so we have to reinitialize our 3D state per batch.
123     */
124    if (pI830->directRenderingType == DRI_DRI2)
125	pI830->last_3d = LAST_3D_OTHER;
126}
127
128void
129intel_batch_init(ScrnInfoPtr pScrn)
130{
131    I830Ptr pI830 = I830PTR(pScrn);
132
133    pI830->batch_emit_start = 0;
134    pI830->batch_emitting = 0;
135
136    intel_next_batch(pScrn);
137
138    if (!pI830->have_gem) {
139	if (IS_I830(pI830) || IS_845G(pI830)) {
140	    intel_bufmgr_fake_set_exec_callback(pI830->bufmgr,
141						intel_nondrm_exec_i830,
142						pScrn);
143	} else {
144	    intel_bufmgr_fake_set_exec_callback(pI830->bufmgr,
145						intel_nondrm_exec,
146						pScrn);
147	}
148	intel_bufmgr_fake_set_fence_callback(pI830->bufmgr,
149					     intel_nondrm_emit,
150					     intel_nondrm_wait,
151					     pScrn);
152    }
153}
154
155void
156intel_batch_teardown(ScrnInfoPtr pScrn)
157{
158    I830Ptr pI830 = I830PTR(pScrn);
159
160    if (pI830->batch_ptr != NULL) {
161	dri_bo_unmap(pI830->batch_bo);
162	pI830->batch_ptr = NULL;
163
164	dri_bo_unreference(pI830->batch_bo);
165	pI830->batch_bo = NULL;
166
167	dri_bo_unreference(pI830->last_batch_bo);
168	pI830->last_batch_bo = NULL;
169    }
170}
171
/**
 * Closes out and submits the current batchbuffer, then starts a fresh one.
 *
 * Appends an MI_FLUSH on non-GEM paths (unless the caller already flushed),
 * pads so the terminating MI_BATCH_BUFFER_END leaves the batch quad-word
 * sized, unmaps the buffer, and hands it to dri_bo_exec.
 *
 * @param flushed TRUE if the caller already emitted a flush for this batch,
 *                so the automatic non-GEM MI_FLUSH can be skipped
 */
void
intel_batch_flush(ScrnInfoPtr pScrn, Bool flushed)
{
    I830Ptr pI830 = I830PTR(pScrn);
    int ret;

    /* Empty batch: nothing to submit. */
    if (pI830->batch_used == 0)
	return;

    /* If we're not using GEM, then emit a flush after each batch buffer */
    if (!pI830->have_gem && !flushed) {
	int flags = MI_WRITE_DIRTY_STATE | MI_INVALIDATE_MAP_CACHE;

	/* NOTE(review): 965 gets a plain MI_FLUSH with no flags —
	 * presumably these flag bits are invalid there; confirm in the PRM. */
	if (IS_I965G(pI830))
	    flags = 0;

	*(uint32_t *)(pI830->batch_ptr + pI830->batch_used) = MI_FLUSH | flags;
	pI830->batch_used += 4;
    }

    /* Emit a padding dword if we aren't going to be quad-word aligned.
     * The test looks inverted but isn't: the MI_BATCH_BUFFER_END dword is
     * still to be appended below, so if batch_used is 8-byte aligned *now*
     * ((batch_used & 4) == 0), END alone would leave the batch 4 bytes past
     * a quad-word; one MI_NOOP first makes the final size 8-byte aligned. */
    if ((pI830->batch_used & 4) == 0) {
	*(uint32_t *)(pI830->batch_ptr + pI830->batch_used) = MI_NOOP;
	pI830->batch_used += 4;
    }

    /* Mark the end of the batchbuffer. */
    *(uint32_t *)(pI830->batch_ptr + pI830->batch_used) = MI_BATCH_BUFFER_END;
    pI830->batch_used += 4;

    /* Unmap before execution; batch_ptr is invalid until intel_next_batch()
     * maps the replacement buffer. */
    dri_bo_unmap(pI830->batch_bo);
    pI830->batch_ptr = NULL;

    ret = dri_bo_exec(pI830->batch_bo, pI830->batch_used, NULL, 0, 0xffffffff);
    if (ret != 0)
	FatalError("Failed to submit batchbuffer: %s\n", strerror(-ret));

    /* Save a ref to the last batch emitted, which we use for syncing
     * in debug code.
     */
    dri_bo_unreference(pI830->last_batch_bo);
    pI830->last_batch_bo = pI830->batch_bo;
    pI830->batch_bo = NULL;

    intel_next_batch(pScrn);

    /* Mark that we need to flush whatever potential rendering we've done in the
     * blockhandler.  We could set this less often, but it's probably not worth
     * the work.
     */
    if (pI830->have_gem)
	pI830->need_mi_flush = TRUE;

    if (pI830->batch_flush_notify)
	pI830->batch_flush_notify (pScrn);
}
228
229/** Waits on the last emitted batchbuffer to be completed. */
230void
231intel_batch_wait_last(ScrnInfoPtr scrn)
232{
233    I830Ptr pI830 = I830PTR(scrn);
234
235    /* Map it CPU write, which guarantees it's done.  This is a completely
236     * non performance path, so we don't need anything better.
237     */
238    drm_intel_bo_map(pI830->last_batch_bo, TRUE);
239    drm_intel_bo_unmap(pI830->last_batch_bo);
240}
241
242