/*
 * Copyright © 2009 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Author:
 *    Zou Nan hai <nanhai.zou@intel.com>
 */
#include "xvmc_vld.h"
#include "i965_hwmc.h"
#include "i810_reg.h"
#include "brw_defines.h"
#include "brw_structs.h"

#ifndef ALIGN
#define ALIGN(m,n) (((m) + (n) - 1) & ~((n) - 1))
#endif

#define BATCH_STRUCT(x) intelBatchbufferData(&x, sizeof(x), 0)
#define VLD_MAX_SLICE_SIZE (32 * 1024)
#define CS_SIZE		30
#define URB_SIZE	384
/*
 * IDCT coefficient table: Ck = cos(k*pi/16) scaled by 16384*sqrt(2)
 * (C4 is clamped to 16383 to stay within a signed 15-bit value).  Each
 * row of the 8x8 cosine matrix is stored twice; the g5/g20 comments
 * below appear to mark the GRF registers the table occupies.
 */
#define C0 23170
#define C1 22725
#define C2 21407
#define C3 19266
#define C4 16383
#define C5 12873
#define C6 8867
#define C7 4520
const uint32_t idct_table[] = {
    C4, C1, C2, C3, C4, C5, C6, C7,		//g5
    C4, C1, C2, C3, C4, C5, C6, C7,
    C4, C3, C6,-C7,-C4,-C1,-C2,-C5,
    C4, C3, C6,-C7,-C4,-C1,-C2,-C5,
    C4, C5,-C6,-C1,-C4, C7, C2, C3,
    C4, C5,-C6,-C1,-C4, C7, C2, C3,
    C4, C7,-C2,-C5, C4, C3,-C6,-C1,
    C4, C7,-C2,-C5, C4, C3,-C6,-C1,
    C4,-C7,-C2, C5, C4,-C3,-C6, C1,
    C4,-C7,-C2, C5, C4,-C3,-C6, C1,
    C4,-C5,-C6, C1,-C4,-C7, C2,-C3,
    C4,-C5,-C6, C1,-C4,-C7, C2,-C3,
    C4,-C3, C6, C7,-C4, C1,-C2, C5,
    C4,-C3, C6, C7,-C4, C1,-C2, C5,
    C4,-C1, C2,-C3, C4,-C5, C6,-C7,
    C4,-C1, C2,-C3, C4,-C5, C6,-C7		//g20
};
#undef C0
#undef C1
#undef C2
#undef C3
#undef C4
#undef C5
#undef C6
#undef C7

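/*
 * Kernel interface indices.  The order must match the layout of
 * media_kernels[] / media_gen5_kernels[] below: interfaces 0..7 are the
 * VLD-mode kernels, and the same kernels compiled for IDCT/MC mode sit
 * INTERFACE_NUM slots higher (see the "+ INTERFACE_NUM" dispatch in
 * render_surface()).
 */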
#define INTERFACE_NUM	8
enum interface {
    FRAME_INTRA = 0,
    FRAME_FRAME_PRED_FORWARD,
    FRAME_FRAME_PRED_BACKWARD,
    FRAME_FRAME_PRED_BIDIRECT,
    FRAME_FIELD_PRED_FORWARD,
    FRAME_FIELD_PRED_BACKWARD,
    FRAME_FIELD_PRED_BIDIRECT,
    LIB_INTERFACE
};
87
88/*kernels for vld mode*/
89static uint32_t lib_kernel[][4] = {
90   #include "shader/vld/lib.g4b"
91};
92static uint32_t ipicture_kernel[][4] = {
93   #include "shader/vld/ipicture.g4b"
94};
95static uint32_t frame_forward_kernel[][4] = {
96   #include "shader/vld/frame_forward.g4b"
97};
98static uint32_t frame_backward_kernel[][4] = {
99   #include "shader/vld/frame_backward.g4b"
100};
101static uint32_t frame_f_b_kernel[][4] = {
102   #include "shader/vld/frame_f_b.g4b"
103};
104static uint32_t field_forward_kernel[][4] = {
105   #include "shader/vld/field_forward.g4b"
106};
107static uint32_t field_backward_kernel[][4] = {
108   #include "shader/vld/field_backward.g4b"
109};
110static uint32_t field_f_b_kernel[][4] = {
111   #include "shader/vld/field_f_b.g4b"
112};
113
/* on IGDNG (Ironlake) */
static uint32_t lib_kernel_gen5[][4] = {
   #include "shader/vld/lib.g4b.gen5"
};
static uint32_t ipicture_kernel_gen5[][4] = {
   #include "shader/vld/ipicture.g4b.gen5"
};
static uint32_t frame_forward_kernel_gen5[][4] = {
   #include "shader/vld/frame_forward.g4b.gen5"
};
static uint32_t frame_backward_kernel_gen5[][4] = {
   #include "shader/vld/frame_backward.g4b.gen5"
};
static uint32_t frame_f_b_kernel_gen5[][4] = {
   #include "shader/vld/frame_f_b.g4b.gen5"
};
static uint32_t field_forward_kernel_gen5[][4] = {
   #include "shader/vld/field_forward.g4b.gen5"
};
static uint32_t field_backward_kernel_gen5[][4] = {
   #include "shader/vld/field_backward.g4b.gen5"
};
static uint32_t field_f_b_kernel_gen5[][4] = {
   #include "shader/vld/field_f_b.g4b.gen5"
};

/* kernels for MC (IDCT) mode */
static uint32_t lib_kernel_idct[][4] = {
   #include "shader/mc/lib_igd.g4b"
};
static uint32_t ipicture_kernel_idct[][4] = {
   #include "shader/mc/ipicture_igd.g4b"
};
static uint32_t frame_forward_kernel_idct[][4] = {
   #include "shader/mc/frame_forward_igd.g4b"
};
static uint32_t frame_backward_kernel_idct[][4] = {
   #include "shader/mc/frame_backward_igd.g4b"
};
static uint32_t frame_f_b_kernel_idct[][4] = {
   #include "shader/mc/frame_f_b_igd.g4b"
};
static uint32_t field_forward_kernel_idct[][4] = {
   #include "shader/mc/field_forward_igd.g4b"
};
static uint32_t field_backward_kernel_idct[][4] = {
   #include "shader/mc/field_backward_igd.g4b"
};
static uint32_t field_f_b_kernel_idct[][4] = {
   #include "shader/mc/field_f_b_igd.g4b"
};

/* on IGDNG (Ironlake) */
static uint32_t lib_kernel_idct_gen5[][4] = {
   #include "shader/mc/lib_igd.g4b.gen5"
};
static uint32_t ipicture_kernel_idct_gen5[][4] = {
   #include "shader/mc/ipicture_igd.g4b.gen5"
};
static uint32_t frame_forward_kernel_idct_gen5[][4] = {
   #include "shader/mc/frame_forward_igd.g4b.gen5"
};
static uint32_t frame_backward_kernel_idct_gen5[][4] = {
   #include "shader/mc/frame_backward_igd.g4b.gen5"
};
static uint32_t frame_f_b_kernel_idct_gen5[][4] = {
   #include "shader/mc/frame_f_b_igd.g4b.gen5"
};
static uint32_t field_forward_kernel_idct_gen5[][4] = {
   #include "shader/mc/field_forward_igd.g4b.gen5"
};
static uint32_t field_backward_kernel_idct_gen5[][4] = {
   #include "shader/mc/field_backward_igd.g4b.gen5"
};
static uint32_t field_f_b_kernel_idct_gen5[][4] = {
   #include "shader/mc/field_f_b_igd.g4b.gen5"
};

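/*
 * Table of kernel binaries and their sizes, indexed by enum interface
 * (the VLD entries first, then the IDCT/MC entries INTERFACE_NUM slots
 * later).  setup_media_kernels() picks the gen5 table on IGDNG.
 */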
struct media_kernel {
   uint32_t (*bin)[4];
   int size;
};
static struct media_kernel media_kernels[] = {
    /* kernels for VLD mode */
    {ipicture_kernel, sizeof(ipicture_kernel)},
    {frame_forward_kernel, sizeof(frame_forward_kernel)},
    {frame_backward_kernel, sizeof(frame_backward_kernel)},
    {frame_f_b_kernel, sizeof(frame_f_b_kernel)},
    {field_forward_kernel, sizeof(field_forward_kernel)},
    {field_backward_kernel, sizeof(field_backward_kernel)},
    {field_f_b_kernel, sizeof(field_f_b_kernel)},
    {lib_kernel, sizeof(lib_kernel)},
    /* kernels for MC (IDCT) mode */
    {ipicture_kernel_idct, sizeof(ipicture_kernel_idct)},
    {frame_forward_kernel_idct, sizeof(frame_forward_kernel_idct)},
    {frame_backward_kernel_idct, sizeof(frame_backward_kernel_idct)},
    {frame_f_b_kernel_idct, sizeof(frame_f_b_kernel_idct)},
    {field_forward_kernel_idct, sizeof(field_forward_kernel_idct)},
    {field_backward_kernel_idct, sizeof(field_backward_kernel_idct)},
    {field_f_b_kernel_idct, sizeof(field_f_b_kernel_idct)},
    {lib_kernel_idct, sizeof(lib_kernel_idct)}
};

static struct media_kernel media_gen5_kernels[] = {
    /* kernels for VLD mode */
    {ipicture_kernel_gen5, sizeof(ipicture_kernel_gen5)},
    {frame_forward_kernel_gen5, sizeof(frame_forward_kernel_gen5)},
    {frame_backward_kernel_gen5, sizeof(frame_backward_kernel_gen5)},
    {frame_f_b_kernel_gen5, sizeof(frame_f_b_kernel_gen5)},
    {field_forward_kernel_gen5, sizeof(field_forward_kernel_gen5)},
    {field_backward_kernel_gen5, sizeof(field_backward_kernel_gen5)},
    {field_f_b_kernel_gen5, sizeof(field_f_b_kernel_gen5)},
    {lib_kernel_gen5, sizeof(lib_kernel_gen5)},
    /* kernels for MC (IDCT) mode */
    {ipicture_kernel_idct_gen5, sizeof(ipicture_kernel_idct_gen5)},
    {frame_forward_kernel_idct_gen5, sizeof(frame_forward_kernel_idct_gen5)},
    {frame_backward_kernel_idct_gen5, sizeof(frame_backward_kernel_idct_gen5)},
    {frame_f_b_kernel_idct_gen5, sizeof(frame_f_b_kernel_idct_gen5)},
    {field_forward_kernel_idct_gen5, sizeof(field_forward_kernel_idct_gen5)},
    {field_backward_kernel_idct_gen5, sizeof(field_backward_kernel_idct_gen5)},
    {field_f_b_kernel_idct_gen5, sizeof(field_f_b_kernel_idct_gen5)},
    {lib_kernel_idct_gen5, sizeof(lib_kernel_idct_gen5)}
};

#define MEDIA_KERNEL_NUM (sizeof(media_kernels)/sizeof(media_kernels[0]))

struct media_kernel_obj {
    dri_bo *bo;
};

struct interface_descriptor_obj {
    dri_bo *bo;
    struct media_kernel_obj kernels[MEDIA_KERNEL_NUM];
};

struct vfe_state_obj {
    dri_bo *bo;
    struct interface_descriptor_obj interface;
};

struct vld_state_obj {
    dri_bo *bo;
};

struct surface_obj {
    dri_bo *bo;
};

struct surface_state_obj {
    struct surface_obj surface;
    dri_bo *bo;
};

struct binding_table_obj {
    dri_bo *bo;
    struct surface_state_obj surface_states[I965_MAX_SURFACES];
};

struct slice_data_obj {
    dri_bo *bo;
};

struct mb_data_obj {
    dri_bo *bo;
};

struct cs_state_obj {
    dri_bo *bo;
};

static struct media_state {
    struct vfe_state_obj vfe_state;
    struct vld_state_obj vld_state;
    struct binding_table_obj binding_table;
    struct cs_state_obj cs_object;
    struct slice_data_obj slice_data;
    struct mb_data_obj mb_data;
} media_state;

/*
 * Constant (CURBE) buffer layout:
 *   bytes   0..127   two XvMCQMatrix tables (filled in by load_qmatrix)
 *   bytes 128..639   idct_table
 *   bytes 640..671   eight copies of the lib kernel's start address
 */
#define CS_OBJECT_SIZE (32*20 + sizeof(unsigned int) * 8)
static void free_object(struct media_state *s)
{
    int i;
#define FREE_ONE_BO(bo) \
    do { \
        if (bo) \
            drm_intel_bo_unreference(bo); \
    } while (0)
    FREE_ONE_BO(s->vfe_state.bo);
    FREE_ONE_BO(s->vfe_state.interface.bo);
    for (i = 0; i < MEDIA_KERNEL_NUM; i++)
        FREE_ONE_BO(s->vfe_state.interface.kernels[i].bo);
    FREE_ONE_BO(s->binding_table.bo);
    for (i = 0; i < I965_MAX_SURFACES; i++)
        FREE_ONE_BO(s->binding_table.surface_states[i].bo);
    FREE_ONE_BO(s->slice_data.bo);
    FREE_ONE_BO(s->mb_data.bo);
    FREE_ONE_BO(s->cs_object.bo);
    FREE_ONE_BO(s->vld_state.bo);
}

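/* Pre-allocate one surface-state BO per binding-table slot. */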
static int alloc_object(struct media_state *s)
{
    int i;

    for (i = 0; i < I965_MAX_SURFACES; i++) {
        s->binding_table.surface_states[i].bo =
            drm_intel_bo_alloc(xvmc_driver->bufmgr, "surface_state",
                               sizeof(struct brw_surface_state), 0x1000);
        if (!s->binding_table.surface_states[i].bo)
            goto out;
    }
    return 0;
out:
    free_object(s);
    return BadAlloc;
}

static void flush(void)
{
#define FLUSH_STATE_CACHE	1
    struct brw_mi_flush f;
    memset(&f, 0, sizeof(f));
    f.opcode = CMD_MI_FLUSH;
    f.flags = (1<<FLUSH_STATE_CACHE);
    BATCH_STRUCT(f);
}

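/*
 * Upload the VFE (video front end) fixed-function state.  The extended
 * (VLD) state block is only present in VLD mode; the interface
 * descriptor base is patched with a relocation since the descriptor
 * BO's offset is not final until execution.
 */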
static Status vfe_state(int vfe_mode)
{
    struct brw_vfe_state tmp, *vfe_state = &tmp;
    memset(vfe_state, 0, sizeof(*vfe_state));
    if (vfe_mode == VFE_VLD_MODE) {
        vfe_state->vfe0.extend_vfe_state_present = 1;
    } else {
        vfe_state->vfe0.extend_vfe_state_present = 0;
    }
    vfe_state->vfe1.vfe_mode = vfe_mode;
    vfe_state->vfe1.num_urb_entries = 1;
    vfe_state->vfe1.children_present = 0;
    vfe_state->vfe1.urb_entry_alloc_size = 2;
    vfe_state->vfe1.max_threads = 31;
    vfe_state->vfe2.interface_descriptor_base =
        media_state.vfe_state.interface.bo->offset >> 4;

    if (media_state.vfe_state.bo)
        drm_intel_bo_unreference(media_state.vfe_state.bo);

    media_state.vfe_state.bo = drm_intel_bo_alloc(xvmc_driver->bufmgr,
        "vfe state", sizeof(struct brw_vfe_state), 0x1000);
    if (!media_state.vfe_state.bo)
        return BadAlloc;

    drm_intel_bo_subdata(media_state.vfe_state.bo, 0, sizeof(tmp), &tmp);

    drm_intel_bo_emit_reloc(media_state.vfe_state.bo,
        offsetof(struct brw_vfe_state, vfe2),
        media_state.vfe_state.interface.bo, 0,
        I915_GEM_DOMAIN_INSTRUCTION, 0);
    return Success;
}

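/*
 * Build one interface descriptor per kernel.  grf_reg_blocks and
 * binding_table_entry_count live in the low bits of the same dwords as
 * the kernel and binding-table pointers, so they are passed as the
 * relocation delta to survive the relocation rewrite.
 */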
static Status interface_descriptor(void)
{
    int i;
    struct brw_interface_descriptor tmp, *desc = &tmp;

    if (media_state.vfe_state.interface.bo)
        drm_intel_bo_unreference(media_state.vfe_state.interface.bo);

    media_state.vfe_state.interface.bo = drm_intel_bo_alloc(xvmc_driver->bufmgr,
        "interfaces", MEDIA_KERNEL_NUM*sizeof(struct brw_interface_descriptor),
        0x1000);
    if (!media_state.vfe_state.interface.bo)
        return BadAlloc;

    for (i = 0; i < MEDIA_KERNEL_NUM; i++) {
        memset(desc, 0, sizeof(*desc));
        desc->desc0.grf_reg_blocks = 15;
        desc->desc0.kernel_start_pointer =
            media_state.vfe_state.interface.kernels[i].bo->offset >> 6;

        desc->desc1.const_urb_entry_read_offset = 0;
        desc->desc1.const_urb_entry_read_len = 30;

        desc->desc3.binding_table_entry_count = I965_MAX_SURFACES - 1;
        desc->desc3.binding_table_pointer =
            media_state.binding_table.bo->offset >> 5;

        drm_intel_bo_subdata(media_state.vfe_state.interface.bo,
            i*sizeof(tmp), sizeof(tmp), desc);

        drm_intel_bo_emit_reloc(
            media_state.vfe_state.interface.bo,
            i * sizeof(*desc) +
            offsetof(struct brw_interface_descriptor, desc0),
            media_state.vfe_state.interface.kernels[i].bo,
            desc->desc0.grf_reg_blocks,
            I915_GEM_DOMAIN_INSTRUCTION, 0);

        drm_intel_bo_emit_reloc(
            media_state.vfe_state.interface.bo,
            i * sizeof(*desc) +
            offsetof(struct brw_interface_descriptor, desc3),
            media_state.binding_table.bo,
            desc->desc3.binding_table_entry_count,
            I915_GEM_DOMAIN_INSTRUCTION, 0);
    }
    return Success;
}

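/*
 * Allocate a BO for every kernel and upload the matching binary,
 * choosing the gen5 builds when running on IGDNG (Ironlake).
 */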
static int setup_media_kernels(struct i965_xvmc_context *i965_ctx)
{
    int i;

    assert(MEDIA_KERNEL_NUM == sizeof(media_gen5_kernels) / sizeof(media_gen5_kernels[0]));

    for (i = 0; i < MEDIA_KERNEL_NUM; i++) {
        if (i965_ctx->is_igdng)
            media_state.vfe_state.interface.kernels[i].bo =
                drm_intel_bo_alloc(xvmc_driver->bufmgr, "kernel",
                                   media_gen5_kernels[i].size, 0x1000);
        else
            media_state.vfe_state.interface.kernels[i].bo =
                drm_intel_bo_alloc(xvmc_driver->bufmgr, "kernel",
                                   media_kernels[i].size, 0x1000);

        if (!media_state.vfe_state.interface.kernels[i].bo)
            goto out;
    }

    for (i = 0; i < MEDIA_KERNEL_NUM; i++) {
        dri_bo *bo = media_state.vfe_state.interface.kernels[i].bo;

        if (i965_ctx->is_igdng)
            drm_intel_bo_subdata(bo, 0, media_gen5_kernels[i].size, media_gen5_kernels[i].bin);
        else
            drm_intel_bo_subdata(bo, 0, media_kernels[i].size, media_kernels[i].bin);
    }
    return 0;
out:
    free_object(&media_state);
    return BadAlloc;
}

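/*
 * The binding table is an array of dwords holding the graphics address
 * of each surface state; the values written here are placeholders that
 * the relocations rewrite at exec time.
 */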
static Status binding_tables(void)
{
   unsigned int table[I965_MAX_SURFACES];
   int i;

   if (media_state.binding_table.bo)
       drm_intel_bo_unreference(media_state.binding_table.bo);
   media_state.binding_table.bo =
       drm_intel_bo_alloc(xvmc_driver->bufmgr, "binding_table",
           I965_MAX_SURFACES*4, 0x1000);
   if (!media_state.binding_table.bo)
       return BadAlloc;

   for (i = 0; i < I965_MAX_SURFACES; i++) {
       table[i] = media_state.binding_table.surface_states[i].bo->offset;
       drm_intel_bo_emit_reloc(media_state.binding_table.bo,
           i * sizeof(unsigned int),
           media_state.binding_table.surface_states[i].bo, 0,
           I915_GEM_DOMAIN_INSTRUCTION, 0);
   }

   drm_intel_bo_subdata(media_state.binding_table.bo, 0, sizeof(table), table);
   return Success;
}

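/*
 * Initialize the constant buffer: copy in the idct table and point the
 * eight lib-kernel slots at either the VLD (interface_offset == 0) or
 * the IDCT/MC (interface_offset == INTERFACE_NUM) copy of the lib
 * kernel.  The qmatrix bytes at offset 0 are uploaded separately by
 * load_qmatrix().
 */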
static Status cs_init(int interface_offset)
{
   char buf[CS_OBJECT_SIZE];
   unsigned int *lib_reloc;
   int i;

   if (media_state.cs_object.bo)
       drm_intel_bo_unreference(media_state.cs_object.bo);

   media_state.cs_object.bo = drm_intel_bo_alloc(xvmc_driver->bufmgr, "cs object", CS_OBJECT_SIZE, 64);
   if (!media_state.cs_object.bo)
       return BadAlloc;

   memcpy(buf + 32*4, idct_table, sizeof(idct_table));
   /* idct lib relocation */
   lib_reloc = (unsigned int *)(buf + 32*20);
   for (i = 0; i < 8; i++)
       lib_reloc[i] =
           media_state.vfe_state.interface.kernels[LIB_INTERFACE + interface_offset].bo->offset;
   drm_intel_bo_subdata(media_state.cs_object.bo, 32*4, 32*16 + 8*sizeof(unsigned int), buf + 32*4);

   for (i = 0; i < 8; i++)
       drm_intel_bo_emit_reloc(media_state.cs_object.bo,
           32*20 + sizeof(unsigned int) * i,
           media_state.vfe_state.interface.kernels[LIB_INTERFACE + interface_offset].bo, 0,
           I915_GEM_DOMAIN_INSTRUCTION, 0);

   return Success;
}

static Status create_context(Display *display, XvMCContext *context,
        int priv_count, CARD32 *priv_data)
{
    struct i965_xvmc_context *i965_ctx;
    i965_ctx = (struct i965_xvmc_context *)priv_data;
    context->privData = priv_data;

    if (alloc_object(&media_state))
        return BadAlloc;

    if (setup_media_kernels(i965_ctx))
        return BadAlloc;
    return Success;
}

static Status destroy_context(Display *display, XvMCContext *context)
{
    struct i965_xvmc_context *i965_ctx;
    i965_ctx = context->privData;
    Xfree(i965_ctx);
    return Success;
}

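/*
 * Surfaces are planar YUV 4:2:0: a w*h Y plane followed by (w/2)*(h/2)
 * U and V planes, 1.5 bytes per pixel in total.
 */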
#define STRIDE(w)               (w)
#define SIZE_YUV420(w, h)       ((h) * (STRIDE(w) + STRIDE((w) >> 1)))
static Status create_surface(Display *display,
        XvMCContext *context, XvMCSurface *surface, int priv_count,
        CARD32 *priv_data)
{
    struct i965_xvmc_surface *priv_surface =
        (struct i965_xvmc_surface *)priv_data;
    size_t size = SIZE_YUV420(priv_surface->w, priv_surface->h);
    surface->privData = priv_data;
    priv_surface->bo = drm_intel_bo_alloc(xvmc_driver->bufmgr, "surface",
            size, 0x1000);
    if (!priv_surface->bo)
        return BadAlloc;

    return Success;
}

static Status destroy_surface(Display *display,
        XvMCSurface *surface)
{
    struct i965_xvmc_surface *priv_surface =
        surface->privData;
    XSync(display, False);
    drm_intel_bo_unreference(priv_surface->bo);
    return Success;
}

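/*
 * Store the intra and non-intra quantiser matrices in the first 128
 * bytes of the constant buffer, where the kernels expect them.
 */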
static Status load_qmatrix(Display *display, XvMCContext *context,
        const XvMCQMatrix *qmx)
{
    Status ret;
    ret = cs_init(0);
    if (ret != Success)
        return ret;
    drm_intel_bo_subdata(media_state.cs_object.bo, 0, 64, qmx->intra_quantiser_matrix);
    drm_intel_bo_subdata(media_state.cs_object.bo, 64, 64, qmx->non_intra_quantiser_matrix);

    return Success;
}

static Status get_surface_status(Display *display, XvMCSurface *surface,
        int *status)
{
    *status = 0;
    return Success;
}

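/*
 * Translate the per-picture XvMCMpegControl into the hardware VLD
 * state.  The hardware expects f_codes biased by one, and the
 * descriptor remap tables route each macroblock type to one of the
 * interfaces above (dual prime is sent to the field-bidirect kernel).
 */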
static Status vld_state(const XvMCMpegControl *control)
{
    struct brw_vld_state tmp, *vld = &tmp;

    if (media_state.vld_state.bo)
        drm_intel_bo_unreference(media_state.vld_state.bo);
    media_state.vld_state.bo = drm_intel_bo_alloc(xvmc_driver->bufmgr,
            "vld state", sizeof(struct brw_vld_state), 64);
    if (!media_state.vld_state.bo)
        return BadAlloc;

    memset(vld, 0, sizeof(*vld));
    vld->vld0.f_code_0_0 = control->FHMV_range + 1;
    vld->vld0.f_code_0_1 = control->FVMV_range + 1;
    vld->vld0.f_code_1_0 = control->BHMV_range + 1;
    vld->vld0.f_code_1_1 = control->BVMV_range + 1;
    vld->vld0.intra_dc_precision = control->intra_dc_precision;
    vld->vld0.picture_structure = control->picture_structure;
    vld->vld0.top_field_first = !!(control->flags & XVMC_TOP_FIELD_FIRST);
    vld->vld0.frame_predict_frame_dct =
        !!(control->flags & XVMC_PRED_DCT_FRAME);
    vld->vld0.concealment_motion_vector =
        !!(control->flags & XVMC_CONCEALMENT_MOTION_VECTORS);
    vld->vld0.quantizer_scale_type = !!(control->flags & XVMC_Q_SCALE_TYPE);
    vld->vld0.intra_vlc_format = !!(control->flags & XVMC_INTRA_VLC_FORMAT);
    vld->vld0.scan_order = !!(control->flags & XVMC_ALTERNATE_SCAN);

    vld->vld1.picture_coding_type = control->picture_coding_type;

    vld->desc_remap_table0.index_0 = FRAME_INTRA;
    vld->desc_remap_table0.index_1 = FRAME_FRAME_PRED_FORWARD;
    vld->desc_remap_table0.index_2 = FRAME_FIELD_PRED_FORWARD;
    vld->desc_remap_table0.index_3 = FRAME_FIELD_PRED_BIDIRECT; /* dual prime */
    vld->desc_remap_table0.index_4 = FRAME_FRAME_PRED_BACKWARD;
    vld->desc_remap_table0.index_5 = FRAME_FIELD_PRED_BACKWARD;
    vld->desc_remap_table0.index_6 = FRAME_FRAME_PRED_BIDIRECT;
    vld->desc_remap_table0.index_7 = FRAME_FIELD_PRED_BIDIRECT;

    vld->desc_remap_table1.index_8 = FRAME_INTRA;
    vld->desc_remap_table1.index_9 = FRAME_FRAME_PRED_FORWARD;
    vld->desc_remap_table1.index_10 = FRAME_FIELD_PRED_FORWARD;
    vld->desc_remap_table1.index_11 = FRAME_FIELD_PRED_BIDIRECT;
    vld->desc_remap_table1.index_12 = FRAME_FRAME_PRED_BACKWARD;
    vld->desc_remap_table1.index_13 = FRAME_FIELD_PRED_BACKWARD;
    vld->desc_remap_table1.index_14 = FRAME_FRAME_PRED_BIDIRECT;
    vld->desc_remap_table1.index_15 = FRAME_FIELD_PRED_BIDIRECT;

    drm_intel_bo_subdata(media_state.vld_state.bo, 0, sizeof(tmp), vld);
    return Success;
}

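/*
 * Bind one surface plane as a 2D R8 surface.  Width, height and pitch
 * are programmed minus one, per hardware convention; the base address
 * gets a relocation so the plane offset follows the BO.
 */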
static Status setup_media_surface(int index, dri_bo *bo,
        unsigned long offset, int w, int h, Bool write)
{
    struct brw_surface_state tmp, *ss = &tmp;
    memset(ss, 0, sizeof(*ss));
    ss->ss0.surface_type = BRW_SURFACE_2D;
    ss->ss0.surface_format = BRW_SURFACEFORMAT_R8_SINT;
    ss->ss1.base_addr = offset + bo->offset;
    ss->ss2.width = w - 1;
    ss->ss2.height = h - 1;
    ss->ss3.pitch = w - 1;

    if (media_state.binding_table.surface_states[index].bo)
        drm_intel_bo_unreference(media_state.binding_table.surface_states[index].bo);

    media_state.binding_table.surface_states[index].bo =
        drm_intel_bo_alloc(xvmc_driver->bufmgr, "surface_state",
                           sizeof(struct brw_surface_state), 0x1000);
    if (!media_state.binding_table.surface_states[index].bo)
        return BadAlloc;

    drm_intel_bo_subdata(
        media_state.binding_table.surface_states[index].bo,
        0, sizeof(*ss), ss);
    drm_intel_bo_emit_reloc(media_state.binding_table.surface_states[index].bo,
        offsetof(struct brw_surface_state, ss1),
        bo, offset,
        I915_GEM_DOMAIN_RENDER, write ? I915_GEM_DOMAIN_RENDER : 0);
    return Success;
}

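/*
 * Bind the Y/U/V planes of the target (slots 0-2), past (slots 4-6)
 * and future (slots 7-9) surfaces; slot 3 is left unused.
 */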
static Status setup_surface(struct i965_xvmc_surface *target,
        struct i965_xvmc_surface *past,
        struct i965_xvmc_surface *future,
        int w, int h)
{
    Status ret;
    ret = setup_media_surface(0, target->bo, 0, w, h, TRUE);
    if (ret != Success)
        return ret;
    ret = setup_media_surface(1, target->bo, w*h, w/2, h/2, TRUE);
    if (ret != Success)
        return ret;
    ret = setup_media_surface(2, target->bo, w*h + w*h/4, w/2, h/2, TRUE);
    if (ret != Success)
        return ret;
    if (past) {
        ret = setup_media_surface(4, past->bo, 0, w, h, FALSE);
        if (ret != Success)
            return ret;
        ret = setup_media_surface(5, past->bo, w*h, w/2, h/2, FALSE);
        if (ret != Success)
            return ret;
        ret = setup_media_surface(6, past->bo, w*h + w*h/4, w/2, h/2, FALSE);
        if (ret != Success)
            return ret;
    }
    if (future) {
        ret = setup_media_surface(7, future->bo, 0, w, h, FALSE);
        if (ret != Success)
            return ret;
        ret = setup_media_surface(8, future->bo, w*h, w/2, h/2, FALSE);
        if (ret != Success)
            return ret;
        ret = setup_media_surface(9, future->bo, w*h + w*h/4, w/2, h/2, FALSE);
        if (ret != Success)
            return ret;
    }
    return Success;
}

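/*
 * Per-picture setup for the VLD path: program the VLD state, bind the
 * target and reference surfaces, then rebuild the binding table,
 * interface descriptors and VFE state before any slices are submitted.
 */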
static Status begin_surface(Display *display, XvMCContext *context,
        XvMCSurface *target,
        XvMCSurface *past,
        XvMCSurface *future,
        const XvMCMpegControl *control)
{
    struct i965_xvmc_surface *priv_target, *priv_past, *priv_future;
    intel_xvmc_context_ptr intel_ctx;
    Status ret;

    intel_ctx = intel_xvmc_find_context(context->context_id);
    if (!intel_ctx) {
        XVMC_ERR("Can't find intel xvmc context\n");
        return BadValue;
    }
    priv_target = target->privData;
    priv_past = past ? past->privData : NULL;
    priv_future = future ? future->privData : NULL;

    ret = vld_state(control);
    if (ret != Success)
        return ret;
    ret = setup_surface(priv_target, priv_past, priv_future,
            context->width, context->height);
    if (ret != Success)
        return ret;
    ret = binding_tables();
    if (ret != Success)
        return ret;
    ret = interface_descriptor();
    if (ret != Success)
        return ret;
    ret = vfe_state(VFE_VLD_MODE);
    if (ret != Success)
        return ret;

    LOCK_HARDWARE(intel_ctx->hw_context);
    flush();
    UNLOCK_HARDWARE(intel_ctx->hw_context);
    return Success;
}

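/*
 * put_slice is a no-op here: the driver advertises XVMC_I965_MPEG2_VLD,
 * so slice data arrives through put_slice2() below, which carries the
 * extra slice header information.
 */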
static Status put_slice(Display *display, XvMCContext *context,
        unsigned char *slice, int nbytes)
{
    return Success;
}

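/*
 * Program STATE_BASE_ADDRESS with all bases zero (plus the modify
 * bits), so relocated graphics addresses are used as-is.  IGDNG takes
 * an 8-dword version of the command; earlier gens take 6.
 */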
static void state_base_address(struct i965_xvmc_context *i965_ctx)
{
    BATCH_LOCALS;

    if (i965_ctx->is_igdng) {
        BEGIN_BATCH(8);
        OUT_BATCH(BRW_STATE_BASE_ADDRESS|6);
        OUT_BATCH(0 | BASE_ADDRESS_MODIFY);
        OUT_BATCH(0 | BASE_ADDRESS_MODIFY);
        OUT_BATCH(0 | BASE_ADDRESS_MODIFY);
        OUT_BATCH(0 | BASE_ADDRESS_MODIFY);
        OUT_BATCH(0 | BASE_ADDRESS_MODIFY);
        OUT_BATCH(0 | BASE_ADDRESS_MODIFY);
        OUT_BATCH(0 | BASE_ADDRESS_MODIFY);
        ADVANCE_BATCH();
    } else {
        BEGIN_BATCH(6);
        OUT_BATCH(BRW_STATE_BASE_ADDRESS|4);
        OUT_BATCH(0 | BASE_ADDRESS_MODIFY);
        OUT_BATCH(0 | BASE_ADDRESS_MODIFY);
        OUT_BATCH(0 | BASE_ADDRESS_MODIFY);
        OUT_BATCH(0 | BASE_ADDRESS_MODIFY);
        OUT_BATCH(0 | BASE_ADDRESS_MODIFY);
        ADVANCE_BATCH();
    }
}

static void pipeline_select(void)
{
    BATCH_LOCALS;
    BEGIN_BATCH(1);
    OUT_BATCH(NEW_PIPELINE_SELECT|PIPELINE_SELECT_MEDIA);
    ADVANCE_BATCH();
}

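/*
 * MEDIA_STATE_POINTERS: dword 1 points at the VLD state (the reloc
 * delta of 1 appears to flag it as present), dword 2 at the VFE state.
 */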
static void media_state_pointers(int vfe_mode)
{
    BATCH_LOCALS;
    BEGIN_BATCH(3);
    OUT_BATCH(BRW_MEDIA_STATE_POINTERS|1);
    if (vfe_mode == VFE_VLD_MODE)
        OUT_RELOC(media_state.vld_state.bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 1);
    else
        OUT_BATCH(0);
    OUT_RELOC(media_state.vfe_state.bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
    ADVANCE_BATCH();
}

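/*
 * The 3-dword URB_FENCE command must not straddle a 64-byte cacheline,
 * so pad the batch with zero (MI_NOOP) dwords whenever 12 or fewer
 * bytes remain before the next boundary.
 */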
static void align_urb_fence(void)
{
    BATCH_LOCALS;
    int i, offset_to_next_cacheline;
    unsigned long batch_offset;
    BEGIN_BATCH(3);
    batch_offset = (void *)batch_ptr - xvmc_driver->alloc.ptr;
    offset_to_next_cacheline = ALIGN(batch_offset, 64) - batch_offset;
    if (offset_to_next_cacheline <= 12 && offset_to_next_cacheline != 0) {
        for (i = 0; i < offset_to_next_cacheline/4; i++)
            OUT_BATCH(0);
        ADVANCE_BATCH();
    }
}

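/*
 * Partition the URB: everything except the CS_SIZE rows reserved for
 * the constant buffer goes to the VFE, while the classic 3D fences
 * (VS/GS/CLIP/SF) are all set to zero.
 */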
static void urb_layout(void)
{
    BATCH_LOCALS;
    align_urb_fence();
    BEGIN_BATCH(3);
    OUT_BATCH(BRW_URB_FENCE |
            UF0_VFE_REALLOC |
            UF0_CS_REALLOC |
            UF0_SF_REALLOC |
            UF0_CLIP_REALLOC |
            UF0_GS_REALLOC |
            UF0_VS_REALLOC |
            1);

    OUT_BATCH((0 << UF1_CLIP_FENCE_SHIFT) |
            (0 << UF1_GS_FENCE_SHIFT) |
            (0 << UF1_VS_FENCE_SHIFT));

    OUT_BATCH((0 << UF2_CS_FENCE_SHIFT) |
            (0 << UF2_SF_FENCE_SHIFT) |
            ((URB_SIZE - CS_SIZE - 1) << UF2_VFE_FENCE_SHIFT) |	/* VFE_SIZE */
            ((URB_SIZE) << UF2_CS_FENCE_SHIFT));		/* CS_SIZE */
    ADVANCE_BATCH();
}

static void cs_urb_layout(void)
{
    BATCH_LOCALS;
    BEGIN_BATCH(2);
    OUT_BATCH(BRW_CS_URB_STATE | 0);
    OUT_BATCH((CS_SIZE << 4) |    /* URB Entry Allocation Size */
            (1 << 0));            /* Number of URB Entries */
    ADVANCE_BATCH();
}

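/*
 * Point the constant buffer at the CS object.  The CS_SIZE relocation
 * delta encodes the buffer length in the command's low bits, and bit 8
 * appears to mark the buffer as valid.
 */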
static void cs_buffer(void)
{
    BATCH_LOCALS;
    BEGIN_BATCH(2);
    OUT_BATCH(BRW_CONSTANT_BUFFER|0|(1<<8));
    OUT_RELOC(media_state.cs_object.bo, I915_GEM_DOMAIN_INSTRUCTION, 0, CS_SIZE);
    ADVANCE_BATCH();
}

/* Kick one macroblock to the GPU as a MEDIA_OBJECT (IDCT/MC mode);
 * the inline payload carries the macroblock parameters for the kernel. */
static void send_media_object(XvMCMacroBlock *mb, dri_bo *bo,
                              uint32_t offset,
                              enum interface interface)
{
    BATCH_LOCALS;
    BEGIN_BATCH(13);
    OUT_BATCH(BRW_MEDIA_OBJECT|11);
    OUT_BATCH(interface);
    OUT_BATCH(6*128);
    OUT_RELOC(bo, I915_GEM_DOMAIN_INSTRUCTION, 0, offset);

    OUT_BATCH(mb->x<<4);
    OUT_BATCH(mb->y<<4);
    OUT_RELOC(bo, I915_GEM_DOMAIN_INSTRUCTION, 0, offset);
    OUT_BATCH_SHORT(mb->coded_block_pattern);
    OUT_BATCH_SHORT(mb->PMV[0][0][0]);
    OUT_BATCH_SHORT(mb->PMV[0][0][1]);
    OUT_BATCH_SHORT(mb->PMV[0][1][0]);
    OUT_BATCH_SHORT(mb->PMV[0][1][1]);

    OUT_BATCH_SHORT(mb->PMV[1][0][0]);
    OUT_BATCH_SHORT(mb->PMV[1][0][1]);
    OUT_BATCH_SHORT(mb->PMV[1][1][0]);
    OUT_BATCH_SHORT(mb->PMV[1][1][1]);
    OUT_BATCH_CHAR(mb->dct_type);
    OUT_BATCH_CHAR(mb->motion_vertical_field_select);

    OUT_BATCH(0xffffffff);
    ADVANCE_BATCH();
}

/* Kick one slice to the GPU as a MEDIA_OBJECT in VLD mode. */
static void vld_send_media_object(dri_bo *bo,
        int slice_len, int mb_h_pos, int mb_v_pos, int mb_bit_offset,
        int mb_count, int q_scale_code)
{
    BATCH_LOCALS;
    BEGIN_BATCH(6);
    OUT_BATCH(BRW_MEDIA_OBJECT|4);
    OUT_BATCH(0);
    OUT_BATCH(slice_len);
    OUT_RELOC(bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
    OUT_BATCH((mb_h_pos<<24)|(mb_v_pos<<16)|(mb_count<<8)|(mb_bit_offset));
    OUT_BATCH(q_scale_code<<24);
    ADVANCE_BATCH();
}

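/*
 * Submit one slice in VLD mode.  The byte before the slice data is the
 * last byte of the slice start code, i.e. the 1-based vertical
 * position, from which the macroblock row is recovered; the quantiser
 * scale code is the top five bits of the slice header.  The slice is
 * copied into a BO and dispatched to the hardware variable-length
 * decoder.
 */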
static Status put_slice2(Display *display, XvMCContext *context,
        unsigned char *slice, int nbytes, int sliceCode)
{
    unsigned int bit_buf;
    intel_xvmc_context_ptr intel_ctx;
    struct i965_xvmc_context *i965_ctx;
    int q_scale_code, mb_row;

    i965_ctx = (struct i965_xvmc_context *)context->privData;
    mb_row = *(slice - 1) - 1;
    bit_buf = (slice[0]<<24) | (slice[1]<<16) | (slice[2]<<8) | (slice[3]);

    q_scale_code = bit_buf>>27;

    if (media_state.slice_data.bo) {
        if (xvmc_driver->kernel_exec_fencing)
            drm_intel_gem_bo_unmap_gtt(media_state.slice_data.bo);
        else
            drm_intel_bo_unmap(media_state.slice_data.bo);

        drm_intel_bo_unreference(media_state.slice_data.bo);
    }
    media_state.slice_data.bo = drm_intel_bo_alloc(xvmc_driver->bufmgr,
            "slice data", VLD_MAX_SLICE_SIZE, 64);
    if (!media_state.slice_data.bo)
        return BadAlloc;
    if (xvmc_driver->kernel_exec_fencing)
        drm_intel_gem_bo_map_gtt(media_state.slice_data.bo);
    else
        drm_intel_bo_map(media_state.slice_data.bo, 1);

    memcpy(media_state.slice_data.bo->virtual, slice, nbytes);

    intel_ctx = intel_xvmc_find_context(context->context_id);
    LOCK_HARDWARE(intel_ctx->hw_context);
    state_base_address(i965_ctx);
    pipeline_select();
    media_state_pointers(VFE_VLD_MODE);
    urb_layout();
    cs_urb_layout();
    cs_buffer();
    vld_send_media_object(media_state.slice_data.bo,
            nbytes,
            0, mb_row, 6, 127, q_scale_code);
    intelFlushBatch(TRUE);
    UNLOCK_HARDWARE(intel_ctx->hw_context);

    return Success;
}

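/*
 * Hand the decoded surface to the server for display: flink the BO and
 * return the global name in the command block.
 */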
static Status put_surface(Display *display, XvMCSurface *surface,
        Drawable draw, short srcx, short srcy,
        unsigned short srcw, unsigned short srch,
        short destx, short desty,
        unsigned short destw, unsigned short desth,
        int flags, struct intel_xvmc_command *data)
{
    struct i965_xvmc_surface *private_surface =
        surface->privData;
    uint32_t handle;

    drm_intel_bo_flink(private_surface->bo, &handle);
    data->handle = handle;
    return Success;
}

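/*
 * IDCT/MC path: copy the coded blocks into the layout the kernels
 * expect (each pair of luma blocks interleaved 8 shorts left / 8
 * shorts right per row, chroma blocks packed contiguously), then
 * dispatch one MEDIA_OBJECT per macroblock, picking the kernel
 * interface from the macroblock's prediction type.
 */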
static Status render_surface(Display *display,
        XvMCContext *context,
        unsigned int picture_structure,
        XvMCSurface *target_surface,
        XvMCSurface *past_surface,
        XvMCSurface *future_surface,
        unsigned int flags,
        unsigned int num_macroblocks,
        unsigned int first_macroblock,
        XvMCMacroBlockArray *macroblock_array,
        XvMCBlockArray *blocks)
{
    struct i965_xvmc_surface *priv_target, *priv_past, *priv_future;
    intel_xvmc_context_ptr intel_ctx;
    XvMCMacroBlock *mb;
    Status ret;
    unsigned short *block_ptr;
    int i, j;
    int block_offset = 0;
    struct i965_xvmc_context *i965_ctx;

    intel_ctx = intel_xvmc_find_context(context->context_id);
    if (!intel_ctx) {
        XVMC_ERR("Can't find intel xvmc context\n");
        return BadValue;
    }

    i965_ctx = (struct i965_xvmc_context *)context->privData;
    priv_target = target_surface->privData;
    priv_past = past_surface ? past_surface->privData : NULL;
    priv_future = future_surface ? future_surface->privData : NULL;

    ret = setup_surface(priv_target, priv_past, priv_future,
            context->width, context->height);
    if (ret != Success)
        return ret;
    ret = binding_tables();
    if (ret != Success)
        return ret;
    ret = interface_descriptor();
    if (ret != Success)
        return ret;
    ret = cs_init(INTERFACE_NUM);
    if (ret != Success)
        return ret;
    ret = vfe_state(VFE_GENERIC_MODE);
    if (ret != Success)
        return ret;

    if (media_state.mb_data.bo) {
        if (xvmc_driver->kernel_exec_fencing)
            drm_intel_gem_bo_unmap_gtt(media_state.mb_data.bo);
        else
            drm_intel_bo_unmap(media_state.mb_data.bo);

        drm_intel_bo_unreference(media_state.mb_data.bo);
    }
    unsigned int block_num =
        (((context->width + 15) >> 4) * ((context->height + 15) >> 4));
    unsigned int surface_size = (64 * sizeof(short) * 6 * block_num);
    media_state.mb_data.bo = drm_intel_bo_alloc(xvmc_driver->bufmgr,
            "macroblock data", surface_size, 64);
    if (!media_state.mb_data.bo)
        return BadAlloc;
    if (xvmc_driver->kernel_exec_fencing)
        drm_intel_gem_bo_map_gtt(media_state.mb_data.bo);
    else
        drm_intel_bo_map(media_state.mb_data.bo, 1);

    block_ptr = media_state.mb_data.bo->virtual;
    unsigned short *mb_block_ptr;
    for (i = first_macroblock;
            i < num_macroblocks + first_macroblock; i++) {
        mb = &macroblock_array->macro_blocks[i];
        mb_block_ptr = &blocks->blocks[(mb->index<<6)];

        if (mb->coded_block_pattern & 0x20) {
            for (j = 0; j < 8; j++)
                memcpy(block_ptr + 16*j, mb_block_ptr + 8*j, 16);
            mb_block_ptr += 64;
        }
        if (mb->coded_block_pattern & 0x10) {
            for (j = 0; j < 8; j++)
                memcpy(block_ptr + 16*j + 8, mb_block_ptr + 8*j, 16);
            mb_block_ptr += 64;
        }

        block_ptr += 2*64;
        if (mb->coded_block_pattern & 0x08) {
            for (j = 0; j < 8; j++)
                memcpy(block_ptr + 16*j, mb_block_ptr + 8*j, 16);
            mb_block_ptr += 64;
        }
        if (mb->coded_block_pattern & 0x04) {
            for (j = 0; j < 8; j++)
                memcpy(block_ptr + 16*j + 8, mb_block_ptr + 8*j, 16);
            mb_block_ptr += 64;
        }

        block_ptr += 2*64;
        if (mb->coded_block_pattern & 0x2) {
            memcpy(block_ptr, mb_block_ptr, 128);
            mb_block_ptr += 64;
        }

        block_ptr += 64;
        if (mb->coded_block_pattern & 0x1)
            memcpy(block_ptr, mb_block_ptr, 128);
        block_ptr += 64;
    }

    LOCK_HARDWARE(intel_ctx->hw_context);
    state_base_address(i965_ctx);
    flush();
    pipeline_select();
    urb_layout();
    media_state_pointers(VFE_GENERIC_MODE);
    cs_urb_layout();
    cs_buffer();
    for (i = first_macroblock;
            i < num_macroblocks + first_macroblock;
            i++, block_offset += 128*6) {
        mb = &macroblock_array->macro_blocks[i];

        if (mb->macroblock_type & XVMC_MB_TYPE_INTRA) {
            send_media_object(mb, media_state.mb_data.bo, block_offset,
                    FRAME_INTRA + INTERFACE_NUM);
        } else {
            if ((mb->motion_type & 3) == XVMC_PREDICTION_FRAME) {
                if (mb->macroblock_type & XVMC_MB_TYPE_MOTION_FORWARD) {
                    if (mb->macroblock_type & XVMC_MB_TYPE_MOTION_BACKWARD) {
                        send_media_object(mb, media_state.mb_data.bo, block_offset,
                                FRAME_FRAME_PRED_BIDIRECT + INTERFACE_NUM);
                    } else {
                        send_media_object(mb, media_state.mb_data.bo, block_offset,
                                FRAME_FRAME_PRED_FORWARD + INTERFACE_NUM);
                    }
                } else if (mb->macroblock_type & XVMC_MB_TYPE_MOTION_BACKWARD) {
                    send_media_object(mb, media_state.mb_data.bo, block_offset,
                            FRAME_FRAME_PRED_BACKWARD + INTERFACE_NUM);
                }
            } else if ((mb->motion_type & 3) == XVMC_PREDICTION_FIELD) {
                if (mb->macroblock_type & XVMC_MB_TYPE_MOTION_FORWARD) {
                    if (mb->macroblock_type & XVMC_MB_TYPE_MOTION_BACKWARD) {
                        send_media_object(mb, media_state.mb_data.bo, block_offset,
                                FRAME_FIELD_PRED_BIDIRECT + INTERFACE_NUM);
                    } else {
                        send_media_object(mb, media_state.mb_data.bo, block_offset,
                                FRAME_FIELD_PRED_FORWARD + INTERFACE_NUM);
                    }
                } else if (mb->macroblock_type & XVMC_MB_TYPE_MOTION_BACKWARD) {
                    send_media_object(mb, media_state.mb_data.bo, block_offset,
                            FRAME_FIELD_PRED_BACKWARD + INTERFACE_NUM);
                }
            } else {
                send_media_object(mb, media_state.mb_data.bo, block_offset,
                        FRAME_FIELD_PRED_BIDIRECT + INTERFACE_NUM); /* dual prime */
            }
        }
    }
    intelFlushBatch(TRUE);
    UNLOCK_HARDWARE(intel_ctx->hw_context);
    return Success;
}

struct _intel_xvmc_driver xvmc_vld_driver = {
    .type = XVMC_I965_MPEG2_VLD,
    .create_context = create_context,
    .destroy_context = destroy_context,
    .create_surface = create_surface,
    .destroy_surface = destroy_surface,
    .load_qmatrix = load_qmatrix,
    .get_surface_status = get_surface_status,
    .begin_surface = begin_surface,
    .render_surface = render_surface,
    .put_surface = put_surface,
    .put_slice = put_slice,
    .put_slice2 = put_slice2
};