/*
 * Copyright © 2014-2018 NVIDIA Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <inttypes.h>
#include <stdlib.h>

#include "util/u_debug.h"
#include "util/u_inlines.h"
#include "util/u_upload_mgr.h"

#include "tegra_context.h"
#include "tegra_resource.h"
#include "tegra_screen.h"

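/*
 * The Tegra driver is a thin wrapper around a GPU context provided by the
 * nouveau driver. Every pipe_context callback below forwards to the
 * corresponding callback of the wrapped context, unwrapping any
 * tegra_resource, tegra_surface, tegra_sampler_view or tegra_transfer
 * arguments so that the GPU driver only ever sees its own objects.
 */
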
static void
tegra_destroy(struct pipe_context *pcontext)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   if (context->base.stream_uploader)
      u_upload_destroy(context->base.stream_uploader);

   context->gpu->destroy(context->gpu);
   free(context);
}

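/*
 * Draws can reference wrapped resources (the indirect draw buffer and the
 * index buffer), so forward a copy of the draw info in which those have
 * been replaced by the underlying GPU resources.
 */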
static void
tegra_draw_vbo(struct pipe_context *pcontext,
               const struct pipe_draw_info *pinfo)
{
   struct tegra_context *context = to_tegra_context(pcontext);
   struct pipe_draw_indirect_info indirect;
   struct pipe_draw_info info;

   if (pinfo && (pinfo->indirect || pinfo->index_size)) {
      memcpy(&info, pinfo, sizeof(info));

      if (pinfo->indirect) {
         memcpy(&indirect, pinfo->indirect, sizeof(indirect));
         indirect.buffer = tegra_resource_unwrap(info.indirect->buffer);
         info.indirect = &indirect;
      }

      if (pinfo->index_size && !pinfo->has_user_indices)
         info.index.resource = tegra_resource_unwrap(info.index.resource);

      pinfo = &info;
   }

   context->gpu->draw_vbo(context->gpu, pinfo);
}

static void
tegra_render_condition(struct pipe_context *pcontext,
                       struct pipe_query *query,
                       boolean condition,
                       unsigned int mode)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->render_condition(context->gpu, query, condition, mode);
}

static struct pipe_query *
tegra_create_query(struct pipe_context *pcontext, unsigned int query_type,
                   unsigned int index)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   return context->gpu->create_query(context->gpu, query_type, index);
}

static struct pipe_query *
tegra_create_batch_query(struct pipe_context *pcontext,
                         unsigned int num_queries,
                         unsigned int *queries)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   return context->gpu->create_batch_query(context->gpu, num_queries,
                                           queries);
}

static void
tegra_destroy_query(struct pipe_context *pcontext, struct pipe_query *query)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->destroy_query(context->gpu, query);
}

static boolean
tegra_begin_query(struct pipe_context *pcontext, struct pipe_query *query)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   return context->gpu->begin_query(context->gpu, query);
}

static bool
tegra_end_query(struct pipe_context *pcontext, struct pipe_query *query)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   return context->gpu->end_query(context->gpu, query);
}

static boolean
tegra_get_query_result(struct pipe_context *pcontext,
                       struct pipe_query *query,
                       boolean wait,
                       union pipe_query_result *result)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   return context->gpu->get_query_result(context->gpu, query, wait,
                                         result);
}

static void
tegra_get_query_result_resource(struct pipe_context *pcontext,
                                struct pipe_query *query,
                                boolean wait,
                                enum pipe_query_value_type result_type,
                                int index,
                                struct pipe_resource *resource,
                                unsigned int offset)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->get_query_result_resource(context->gpu, query, wait,
                                           result_type, index, resource,
                                           offset);
}

static void
tegra_set_active_query_state(struct pipe_context *pcontext, boolean enable)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->set_active_query_state(context->gpu, enable);
}

static void *
tegra_create_blend_state(struct pipe_context *pcontext,
                         const struct pipe_blend_state *cso)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   return context->gpu->create_blend_state(context->gpu, cso);
}

static void
tegra_bind_blend_state(struct pipe_context *pcontext, void *so)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->bind_blend_state(context->gpu, so);
}

static void
tegra_delete_blend_state(struct pipe_context *pcontext, void *so)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->delete_blend_state(context->gpu, so);
}

static void *
tegra_create_sampler_state(struct pipe_context *pcontext,
                           const struct pipe_sampler_state *cso)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   return context->gpu->create_sampler_state(context->gpu, cso);
}

static void
tegra_bind_sampler_states(struct pipe_context *pcontext, unsigned shader,
                          unsigned start_slot, unsigned num_samplers,
                          void **samplers)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->bind_sampler_states(context->gpu, shader, start_slot,
                                     num_samplers, samplers);
}

static void
tegra_delete_sampler_state(struct pipe_context *pcontext, void *so)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->delete_sampler_state(context->gpu, so);
}

static void *
tegra_create_rasterizer_state(struct pipe_context *pcontext,
                              const struct pipe_rasterizer_state *cso)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   return context->gpu->create_rasterizer_state(context->gpu, cso);
}

static void
tegra_bind_rasterizer_state(struct pipe_context *pcontext, void *so)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->bind_rasterizer_state(context->gpu, so);
}

static void
tegra_delete_rasterizer_state(struct pipe_context *pcontext, void *so)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->delete_rasterizer_state(context->gpu, so);
}

static void *
tegra_create_depth_stencil_alpha_state(struct pipe_context *pcontext,
                                       const struct pipe_depth_stencil_alpha_state *cso)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   return context->gpu->create_depth_stencil_alpha_state(context->gpu, cso);
}

static void
tegra_bind_depth_stencil_alpha_state(struct pipe_context *pcontext, void *so)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->bind_depth_stencil_alpha_state(context->gpu, so);
}

static void
tegra_delete_depth_stencil_alpha_state(struct pipe_context *pcontext, void *so)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->delete_depth_stencil_alpha_state(context->gpu, so);
}

static void *
tegra_create_fs_state(struct pipe_context *pcontext,
                      const struct pipe_shader_state *cso)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   return context->gpu->create_fs_state(context->gpu, cso);
}

static void
tegra_bind_fs_state(struct pipe_context *pcontext, void *so)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->bind_fs_state(context->gpu, so);
}

static void
tegra_delete_fs_state(struct pipe_context *pcontext, void *so)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->delete_fs_state(context->gpu, so);
}

static void *
tegra_create_vs_state(struct pipe_context *pcontext,
                      const struct pipe_shader_state *cso)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   return context->gpu->create_vs_state(context->gpu, cso);
}

static void
tegra_bind_vs_state(struct pipe_context *pcontext, void *so)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->bind_vs_state(context->gpu, so);
}

static void
tegra_delete_vs_state(struct pipe_context *pcontext, void *so)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->delete_vs_state(context->gpu, so);
}

static void *
tegra_create_gs_state(struct pipe_context *pcontext,
                      const struct pipe_shader_state *cso)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   return context->gpu->create_gs_state(context->gpu, cso);
}

static void
tegra_bind_gs_state(struct pipe_context *pcontext, void *so)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->bind_gs_state(context->gpu, so);
}

static void
tegra_delete_gs_state(struct pipe_context *pcontext, void *so)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->delete_gs_state(context->gpu, so);
}

static void *
tegra_create_tcs_state(struct pipe_context *pcontext,
                       const struct pipe_shader_state *cso)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   return context->gpu->create_tcs_state(context->gpu, cso);
}

static void
tegra_bind_tcs_state(struct pipe_context *pcontext, void *so)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->bind_tcs_state(context->gpu, so);
}

static void
tegra_delete_tcs_state(struct pipe_context *pcontext, void *so)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->delete_tcs_state(context->gpu, so);
}

static void *
tegra_create_tes_state(struct pipe_context *pcontext,
                       const struct pipe_shader_state *cso)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   return context->gpu->create_tes_state(context->gpu, cso);
}

static void
tegra_bind_tes_state(struct pipe_context *pcontext, void *so)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->bind_tes_state(context->gpu, so);
}

static void
tegra_delete_tes_state(struct pipe_context *pcontext, void *so)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->delete_tes_state(context->gpu, so);
}

static void *
tegra_create_vertex_elements_state(struct pipe_context *pcontext,
                                   unsigned num_elements,
                                   const struct pipe_vertex_element *elements)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   return context->gpu->create_vertex_elements_state(context->gpu,
                                                     num_elements,
                                                     elements);
}

static void
tegra_bind_vertex_elements_state(struct pipe_context *pcontext, void *so)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->bind_vertex_elements_state(context->gpu, so);
}

static void
tegra_delete_vertex_elements_state(struct pipe_context *pcontext, void *so)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->delete_vertex_elements_state(context->gpu, so);
}

static void
tegra_set_blend_color(struct pipe_context *pcontext,
                      const struct pipe_blend_color *color)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->set_blend_color(context->gpu, color);
}

static void
tegra_set_stencil_ref(struct pipe_context *pcontext,
                      const struct pipe_stencil_ref *ref)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->set_stencil_ref(context->gpu, ref);
}

static void
tegra_set_sample_mask(struct pipe_context *pcontext, unsigned int mask)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->set_sample_mask(context->gpu, mask);
}

static void
tegra_set_min_samples(struct pipe_context *pcontext, unsigned int samples)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->set_min_samples(context->gpu, samples);
}

static void
tegra_set_clip_state(struct pipe_context *pcontext,
                     const struct pipe_clip_state *state)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->set_clip_state(context->gpu, state);
}

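/* Unwrap the constant buffer resource (if any) before forwarding. */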
static void
tegra_set_constant_buffer(struct pipe_context *pcontext, unsigned int shader,
                          unsigned int index,
                          const struct pipe_constant_buffer *buf)
{
   struct tegra_context *context = to_tegra_context(pcontext);
   struct pipe_constant_buffer buffer;

   if (buf && buf->buffer) {
      memcpy(&buffer, buf, sizeof(buffer));
      buffer.buffer = tegra_resource_unwrap(buffer.buffer);
      buf = &buffer;
   }

   context->gpu->set_constant_buffer(context->gpu, shader, index, buf);
}

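/*
 * Framebuffer state references wrapped surfaces, so forward a copy in
 * which the color and depth/stencil attachments have been unwrapped.
 */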
static void
tegra_set_framebuffer_state(struct pipe_context *pcontext,
                            const struct pipe_framebuffer_state *fb)
{
   struct tegra_context *context = to_tegra_context(pcontext);
   struct pipe_framebuffer_state state;
   unsigned i;

   if (fb) {
      memcpy(&state, fb, sizeof(state));

      for (i = 0; i < fb->nr_cbufs; i++)
         state.cbufs[i] = tegra_surface_unwrap(fb->cbufs[i]);

      while (i < PIPE_MAX_COLOR_BUFS)
         state.cbufs[i++] = NULL;

      state.zsbuf = tegra_surface_unwrap(fb->zsbuf);

      fb = &state;
   }

   context->gpu->set_framebuffer_state(context->gpu, fb);
}

static void
tegra_set_polygon_stipple(struct pipe_context *pcontext,
                          const struct pipe_poly_stipple *stipple)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->set_polygon_stipple(context->gpu, stipple);
}

static void
tegra_set_scissor_states(struct pipe_context *pcontext, unsigned start_slot,
                         unsigned num_scissors,
                         const struct pipe_scissor_state *scissors)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->set_scissor_states(context->gpu, start_slot, num_scissors,
                                    scissors);
}

static void
tegra_set_window_rectangles(struct pipe_context *pcontext, boolean include,
                            unsigned int num_rectangles,
                            const struct pipe_scissor_state *rectangles)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->set_window_rectangles(context->gpu, include, num_rectangles,
                                       rectangles);
}

static void
tegra_set_viewport_states(struct pipe_context *pcontext, unsigned start_slot,
                          unsigned num_viewports,
                          const struct pipe_viewport_state *viewports)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->set_viewport_states(context->gpu, start_slot, num_viewports,
                                     viewports);
}

static void
tegra_set_sampler_views(struct pipe_context *pcontext, unsigned shader,
                        unsigned start_slot, unsigned num_views,
                        struct pipe_sampler_view **pviews)
{
   struct pipe_sampler_view *views[PIPE_MAX_SHADER_SAMPLER_VIEWS];
   struct tegra_context *context = to_tegra_context(pcontext);
   unsigned i;

   for (i = 0; i < num_views; i++)
      views[i] = tegra_sampler_view_unwrap(pviews[i]);

   context->gpu->set_sampler_views(context->gpu, shader, start_slot,
                                   num_views, views);
}

static void
tegra_set_tess_state(struct pipe_context *pcontext,
                     const float default_outer_level[4],
                     const float default_inner_level[2])
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->set_tess_state(context->gpu, default_outer_level,
                                default_inner_level);
}

static void
tegra_set_debug_callback(struct pipe_context *pcontext,
                         const struct pipe_debug_callback *callback)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->set_debug_callback(context->gpu, callback);
}

static void
tegra_set_shader_buffers(struct pipe_context *pcontext, unsigned int shader,
                         unsigned start, unsigned count,
                         const struct pipe_shader_buffer *buffers,
                         unsigned writable_bitmask)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->set_shader_buffers(context->gpu, shader, start, count,
                                    buffers, writable_bitmask);
}

static void
tegra_set_shader_images(struct pipe_context *pcontext, unsigned int shader,
                        unsigned start, unsigned count,
                        const struct pipe_image_view *images)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->set_shader_images(context->gpu, shader, start, count,
                                   images);
}

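/*
 * Vertex buffers can reference wrapped resources, so unwrap them in a
 * local copy before forwarding; user buffers are passed through as-is.
 */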
static void
tegra_set_vertex_buffers(struct pipe_context *pcontext, unsigned start_slot,
                         unsigned num_buffers,
                         const struct pipe_vertex_buffer *buffers)
{
   struct tegra_context *context = to_tegra_context(pcontext);
   struct pipe_vertex_buffer buf[PIPE_MAX_SHADER_INPUTS];
   unsigned i;

   if (num_buffers && buffers) {
      memcpy(buf, buffers, num_buffers * sizeof(struct pipe_vertex_buffer));

      for (i = 0; i < num_buffers; i++) {
         if (!buf[i].is_user_buffer)
            buf[i].buffer.resource = tegra_resource_unwrap(buf[i].buffer.resource);
      }

      buffers = buf;
   }

   context->gpu->set_vertex_buffers(context->gpu, start_slot, num_buffers,
                                    buffers);
}

static struct pipe_stream_output_target *
tegra_create_stream_output_target(struct pipe_context *pcontext,
                                  struct pipe_resource *presource,
                                  unsigned buffer_offset,
                                  unsigned buffer_size)
{
   struct tegra_resource *resource = to_tegra_resource(presource);
   struct tegra_context *context = to_tegra_context(pcontext);

   return context->gpu->create_stream_output_target(context->gpu,
                                                    resource->gpu,
                                                    buffer_offset,
                                                    buffer_size);
}

static void
tegra_stream_output_target_destroy(struct pipe_context *pcontext,
                                   struct pipe_stream_output_target *target)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->stream_output_target_destroy(context->gpu, target);
}

static void
tegra_set_stream_output_targets(struct pipe_context *pcontext,
                                unsigned num_targets,
                                struct pipe_stream_output_target **targets,
                                const unsigned *offsets)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->set_stream_output_targets(context->gpu, num_targets,
                                           targets, offsets);
}

static void
tegra_resource_copy_region(struct pipe_context *pcontext,
                           struct pipe_resource *pdst,
                           unsigned int dst_level,
                           unsigned int dstx,
                           unsigned int dsty,
                           unsigned int dstz,
                           struct pipe_resource *psrc,
                           unsigned int src_level,
                           const struct pipe_box *src_box)
{
   struct tegra_context *context = to_tegra_context(pcontext);
   struct tegra_resource *dst = to_tegra_resource(pdst);
   struct tegra_resource *src = to_tegra_resource(psrc);

   context->gpu->resource_copy_region(context->gpu, dst->gpu, dst_level, dstx,
                                      dsty, dstz, src->gpu, src_level,
                                      src_box);
}

static void
tegra_blit(struct pipe_context *pcontext, const struct pipe_blit_info *pinfo)
{
   struct tegra_context *context = to_tegra_context(pcontext);
   struct pipe_blit_info info;

   if (pinfo) {
      memcpy(&info, pinfo, sizeof(info));
      info.dst.resource = tegra_resource_unwrap(info.dst.resource);
      info.src.resource = tegra_resource_unwrap(info.src.resource);
      pinfo = &info;
   }

   context->gpu->blit(context->gpu, pinfo);
}

static void
tegra_clear(struct pipe_context *pcontext, unsigned buffers,
            const union pipe_color_union *color, double depth,
            unsigned stencil)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->clear(context->gpu, buffers, color, depth, stencil);
}

static void
tegra_clear_render_target(struct pipe_context *pcontext,
                          struct pipe_surface *pdst,
                          const union pipe_color_union *color,
                          unsigned int dstx,
                          unsigned int dsty,
                          unsigned int width,
                          unsigned int height,
                          bool render_condition)
{
   struct tegra_context *context = to_tegra_context(pcontext);
   struct tegra_surface *dst = to_tegra_surface(pdst);

   context->gpu->clear_render_target(context->gpu, dst->gpu, color, dstx,
                                     dsty, width, height, render_condition);
}

static void
tegra_clear_depth_stencil(struct pipe_context *pcontext,
                          struct pipe_surface *pdst,
                          unsigned int flags,
                          double depth,
                          unsigned int stencil,
                          unsigned int dstx,
                          unsigned int dsty,
                          unsigned int width,
                          unsigned int height,
                          bool render_condition)
{
   struct tegra_context *context = to_tegra_context(pcontext);
   struct tegra_surface *dst = to_tegra_surface(pdst);

   context->gpu->clear_depth_stencil(context->gpu, dst->gpu, flags, depth,
                                     stencil, dstx, dsty, width, height,
                                     render_condition);
}

static void
tegra_clear_texture(struct pipe_context *pcontext,
                    struct pipe_resource *presource,
                    unsigned int level,
                    const struct pipe_box *box,
                    const void *data)
{
   struct tegra_resource *resource = to_tegra_resource(presource);
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->clear_texture(context->gpu, resource->gpu, level, box, data);
}

static void
tegra_clear_buffer(struct pipe_context *pcontext,
                   struct pipe_resource *presource,
                   unsigned int offset,
                   unsigned int size,
                   const void *value,
                   int value_size)
{
   struct tegra_resource *resource = to_tegra_resource(presource);
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->clear_buffer(context->gpu, resource->gpu, offset, size,
                              value, value_size);
}

static void
tegra_flush(struct pipe_context *pcontext, struct pipe_fence_handle **fence,
            unsigned flags)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->flush(context->gpu, fence, flags);
}

static void
tegra_create_fence_fd(struct pipe_context *pcontext,
                      struct pipe_fence_handle **fence,
                      int fd, enum pipe_fd_type type)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   assert(type == PIPE_FD_TYPE_NATIVE_SYNC);
   context->gpu->create_fence_fd(context->gpu, fence, fd, type);
}

static void
tegra_fence_server_sync(struct pipe_context *pcontext,
                        struct pipe_fence_handle *fence)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->fence_server_sync(context->gpu, fence);
}

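/*
 * Sampler views are wrapped: the view handed back to the caller references
 * the Tegra resource, while the GPU driver's view is kept in view->gpu.
 */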
static struct pipe_sampler_view *
tegra_create_sampler_view(struct pipe_context *pcontext,
                          struct pipe_resource *presource,
                          const struct pipe_sampler_view *template)
{
   struct tegra_resource *resource = to_tegra_resource(presource);
   struct tegra_context *context = to_tegra_context(pcontext);
   struct tegra_sampler_view *view;

   view = calloc(1, sizeof(*view));
   if (!view)
      return NULL;

   view->gpu = context->gpu->create_sampler_view(context->gpu, resource->gpu,
                                                 template);
   memcpy(&view->base, view->gpu, sizeof(*view->gpu));
   /* overwrite to prevent reference from being released */
   view->base.texture = NULL;

   pipe_reference_init(&view->base.reference, 1);
   pipe_resource_reference(&view->base.texture, presource);
   view->base.context = pcontext;

   return &view->base;
}

static void
tegra_sampler_view_destroy(struct pipe_context *pcontext,
                           struct pipe_sampler_view *pview)
{
   struct tegra_sampler_view *view = to_tegra_sampler_view(pview);

   pipe_resource_reference(&view->base.texture, NULL);
   pipe_sampler_view_reference(&view->gpu, NULL);
   free(view);
}

static struct pipe_surface *
tegra_create_surface(struct pipe_context *pcontext,
                     struct pipe_resource *presource,
                     const struct pipe_surface *template)
{
   struct tegra_resource *resource = to_tegra_resource(presource);
   struct tegra_context *context = to_tegra_context(pcontext);
   struct tegra_surface *surface;

   surface = calloc(1, sizeof(*surface));
   if (!surface)
      return NULL;

   surface->gpu = context->gpu->create_surface(context->gpu, resource->gpu,
                                               template);
   if (!surface->gpu) {
      free(surface);
      return NULL;
   }

   memcpy(&surface->base, surface->gpu, sizeof(*surface->gpu));
   /* overwrite to prevent reference from being released */
   surface->base.texture = NULL;

   pipe_reference_init(&surface->base.reference, 1);
   pipe_resource_reference(&surface->base.texture, presource);
   surface->base.context = &context->base;

   return &surface->base;
}

static void
tegra_surface_destroy(struct pipe_context *pcontext,
                      struct pipe_surface *psurface)
{
   struct tegra_surface *surface = to_tegra_surface(psurface);

   pipe_resource_reference(&surface->base.texture, NULL);
   pipe_surface_reference(&surface->gpu, NULL);
   free(surface);
}

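/*
 * Transfers are wrapped as well: the GPU driver's transfer is kept in
 * transfer->gpu, while the copy in transfer->base references the Tegra
 * resource passed in by the caller.
 */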
static void *
tegra_transfer_map(struct pipe_context *pcontext,
                   struct pipe_resource *presource,
                   unsigned level, unsigned usage,
                   const struct pipe_box *box,
                   struct pipe_transfer **ptransfer)
{
   struct tegra_resource *resource = to_tegra_resource(presource);
   struct tegra_context *context = to_tegra_context(pcontext);
   struct tegra_transfer *transfer;

   transfer = calloc(1, sizeof(*transfer));
   if (!transfer)
      return NULL;

   transfer->map = context->gpu->transfer_map(context->gpu, resource->gpu,
                                              level, usage, box,
                                              &transfer->gpu);
   memcpy(&transfer->base, transfer->gpu, sizeof(*transfer->gpu));
   transfer->base.resource = NULL;
   pipe_resource_reference(&transfer->base.resource, presource);

   *ptransfer = &transfer->base;

   return transfer->map;
}

static void
tegra_transfer_flush_region(struct pipe_context *pcontext,
                            struct pipe_transfer *ptransfer,
                            const struct pipe_box *box)
{
   struct tegra_transfer *transfer = to_tegra_transfer(ptransfer);
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->transfer_flush_region(context->gpu, transfer->gpu, box);
}

static void
tegra_transfer_unmap(struct pipe_context *pcontext,
                     struct pipe_transfer *ptransfer)
{
   struct tegra_transfer *transfer = to_tegra_transfer(ptransfer);
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->transfer_unmap(context->gpu, transfer->gpu);
   pipe_resource_reference(&transfer->base.resource, NULL);
   free(transfer);
}

static void
tegra_buffer_subdata(struct pipe_context *pcontext,
                     struct pipe_resource *presource,
                     unsigned usage, unsigned offset,
                     unsigned size, const void *data)
{
   struct tegra_resource *resource = to_tegra_resource(presource);
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->buffer_subdata(context->gpu, resource->gpu, usage, offset,
                                size, data);
}

static void
tegra_texture_subdata(struct pipe_context *pcontext,
                      struct pipe_resource *presource,
                      unsigned level,
                      unsigned usage,
                      const struct pipe_box *box,
                      const void *data,
                      unsigned stride,
                      unsigned layer_stride)
{
   struct tegra_resource *resource = to_tegra_resource(presource);
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->texture_subdata(context->gpu, resource->gpu, level, usage,
                                 box, data, stride, layer_stride);
}

static void
tegra_texture_barrier(struct pipe_context *pcontext, unsigned int flags)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->texture_barrier(context->gpu, flags);
}

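/*
 * Barriers that consist only of PIPE_BARRIER_UPDATE bits are not forwarded
 * to the GPU driver.
 */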
static void
tegra_memory_barrier(struct pipe_context *pcontext, unsigned int flags)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   if (!(flags & ~PIPE_BARRIER_UPDATE))
      return;

   context->gpu->memory_barrier(context->gpu, flags);
}

static struct pipe_video_codec *
tegra_create_video_codec(struct pipe_context *pcontext,
                         const struct pipe_video_codec *template)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   return context->gpu->create_video_codec(context->gpu, template);
}

static struct pipe_video_buffer *
tegra_create_video_buffer(struct pipe_context *pcontext,
                          const struct pipe_video_buffer *template)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   return context->gpu->create_video_buffer(context->gpu, template);
}

static void *
tegra_create_compute_state(struct pipe_context *pcontext,
                           const struct pipe_compute_state *template)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   return context->gpu->create_compute_state(context->gpu, template);
}

static void
tegra_bind_compute_state(struct pipe_context *pcontext, void *so)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->bind_compute_state(context->gpu, so);
}

static void
tegra_delete_compute_state(struct pipe_context *pcontext, void *so)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->delete_compute_state(context->gpu, so);
}

static void
tegra_set_compute_resources(struct pipe_context *pcontext,
                            unsigned int start, unsigned int count,
                            struct pipe_surface **resources)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   /* XXX unwrap resources */

   context->gpu->set_compute_resources(context->gpu, start, count, resources);
}

static void
tegra_set_global_binding(struct pipe_context *pcontext, unsigned int first,
                         unsigned int count, struct pipe_resource **resources,
                         uint32_t **handles)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   /* XXX unwrap resources */

   context->gpu->set_global_binding(context->gpu, first, count, resources,
                                    handles);
}

static void
tegra_launch_grid(struct pipe_context *pcontext,
                  const struct pipe_grid_info *info)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   /* XXX unwrap info->indirect? */

   context->gpu->launch_grid(context->gpu, info);
}

static void
tegra_get_sample_position(struct pipe_context *pcontext, unsigned int count,
                          unsigned int index, float *value)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->get_sample_position(context->gpu, count, index, value);
}

static uint64_t
tegra_get_timestamp(struct pipe_context *pcontext)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   return context->gpu->get_timestamp(context->gpu);
}

static void
tegra_flush_resource(struct pipe_context *pcontext,
                     struct pipe_resource *presource)
{
   struct tegra_resource *resource = to_tegra_resource(presource);
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->flush_resource(context->gpu, resource->gpu);
}

static void
tegra_invalidate_resource(struct pipe_context *pcontext,
                          struct pipe_resource *presource)
{
   struct tegra_resource *resource = to_tegra_resource(presource);
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->invalidate_resource(context->gpu, resource->gpu);
}

static enum pipe_reset_status
tegra_get_device_reset_status(struct pipe_context *pcontext)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   return context->gpu->get_device_reset_status(context->gpu);
}

static void
tegra_set_device_reset_callback(struct pipe_context *pcontext,
                                const struct pipe_device_reset_callback *cb)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->set_device_reset_callback(context->gpu, cb);
}

static void
tegra_dump_debug_state(struct pipe_context *pcontext, FILE *stream,
                       unsigned int flags)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->dump_debug_state(context->gpu, stream, flags);
}

static void
tegra_emit_string_marker(struct pipe_context *pcontext, const char *string,
                         int length)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->emit_string_marker(context->gpu, string, length);
}

static boolean
tegra_generate_mipmap(struct pipe_context *pcontext,
                      struct pipe_resource *presource,
                      enum pipe_format format,
                      unsigned int base_level,
                      unsigned int last_level,
                      unsigned int first_layer,
                      unsigned int last_layer)
{
   struct tegra_resource *resource = to_tegra_resource(presource);
   struct tegra_context *context = to_tegra_context(pcontext);

   return context->gpu->generate_mipmap(context->gpu, resource->gpu, format,
                                        base_level, last_level, first_layer,
                                        last_layer);
}

static uint64_t
tegra_create_texture_handle(struct pipe_context *pcontext,
                            struct pipe_sampler_view *view,
                            const struct pipe_sampler_state *state)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   return context->gpu->create_texture_handle(context->gpu, view, state);
}

static void tegra_delete_texture_handle(struct pipe_context *pcontext,
                                        uint64_t handle)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->delete_texture_handle(context->gpu, handle);
}

static void tegra_make_texture_handle_resident(struct pipe_context *pcontext,
                                               uint64_t handle, bool resident)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->make_texture_handle_resident(context->gpu, handle, resident);
}

static uint64_t tegra_create_image_handle(struct pipe_context *pcontext,
                                          const struct pipe_image_view *image)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   return context->gpu->create_image_handle(context->gpu, image);
}

static void tegra_delete_image_handle(struct pipe_context *pcontext,
                                      uint64_t handle)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->delete_image_handle(context->gpu, handle);
}

static void tegra_make_image_handle_resident(struct pipe_context *pcontext,
                                             uint64_t handle, unsigned access,
                                             bool resident)
{
   struct tegra_context *context = to_tegra_context(pcontext);

   context->gpu->make_image_handle_resident(context->gpu, handle, access,
                                            resident);
}

struct pipe_context *
tegra_screen_context_create(struct pipe_screen *pscreen, void *priv,
                            unsigned int flags)
{
   struct tegra_screen *screen = to_tegra_screen(pscreen);
   struct tegra_context *context;

   context = calloc(1, sizeof(*context));
   if (!context)
      return NULL;

   context->gpu = screen->gpu->context_create(screen->gpu, priv, flags);
   if (!context->gpu) {
      debug_error("failed to create GPU context\n");
      goto free;
   }

   context->base.screen = &screen->base;
   context->base.priv = priv;

   /*
    * Create custom stream and const uploaders. Technically, nouveau already
    * creates uploaders that could be reused, but that would make the
    * resource unwrapping rather complicated. The reason is that both
    * uploaders create resources based on the context that they were created
    * from, which means that nouveau's uploaders would use the nouveau
    * context and their resources must therefore not be unwrapped. Before
    * unwrapping a resource, the code would then need to check that it does
    * not correspond to one of the uploaders' buffers.
    *
    * However, duplicating the uploaders here sounds worse than it is. The
    * default implementation that nouveau uses allocates buffers lazily, and
    * since the nouveau uploader is never used, no buffers will ever be
    * allocated and the only memory wasted is that occupied by the uploader
    * itself.
    */
   context->base.stream_uploader = u_upload_create_default(&context->base);
   if (!context->base.stream_uploader)
      goto destroy;

   context->base.const_uploader = context->base.stream_uploader;

   context->base.destroy = tegra_destroy;

   context->base.draw_vbo = tegra_draw_vbo;

   context->base.render_condition = tegra_render_condition;

   context->base.create_query = tegra_create_query;
   context->base.create_batch_query = tegra_create_batch_query;
   context->base.destroy_query = tegra_destroy_query;
   context->base.begin_query = tegra_begin_query;
   context->base.end_query = tegra_end_query;
   context->base.get_query_result = tegra_get_query_result;
   context->base.get_query_result_resource = tegra_get_query_result_resource;
   context->base.set_active_query_state = tegra_set_active_query_state;

   context->base.create_blend_state = tegra_create_blend_state;
   context->base.bind_blend_state = tegra_bind_blend_state;
   context->base.delete_blend_state = tegra_delete_blend_state;

   context->base.create_sampler_state = tegra_create_sampler_state;
   context->base.bind_sampler_states = tegra_bind_sampler_states;
   context->base.delete_sampler_state = tegra_delete_sampler_state;

   context->base.create_rasterizer_state = tegra_create_rasterizer_state;
   context->base.bind_rasterizer_state = tegra_bind_rasterizer_state;
   context->base.delete_rasterizer_state = tegra_delete_rasterizer_state;

   context->base.create_depth_stencil_alpha_state = tegra_create_depth_stencil_alpha_state;
   context->base.bind_depth_stencil_alpha_state = tegra_bind_depth_stencil_alpha_state;
   context->base.delete_depth_stencil_alpha_state = tegra_delete_depth_stencil_alpha_state;

   context->base.create_fs_state = tegra_create_fs_state;
   context->base.bind_fs_state = tegra_bind_fs_state;
   context->base.delete_fs_state = tegra_delete_fs_state;

   context->base.create_vs_state = tegra_create_vs_state;
   context->base.bind_vs_state = tegra_bind_vs_state;
   context->base.delete_vs_state = tegra_delete_vs_state;

   context->base.create_gs_state = tegra_create_gs_state;
   context->base.bind_gs_state = tegra_bind_gs_state;
   context->base.delete_gs_state = tegra_delete_gs_state;

   context->base.create_tcs_state = tegra_create_tcs_state;
   context->base.bind_tcs_state = tegra_bind_tcs_state;
   context->base.delete_tcs_state = tegra_delete_tcs_state;

   context->base.create_tes_state = tegra_create_tes_state;
   context->base.bind_tes_state = tegra_bind_tes_state;
   context->base.delete_tes_state = tegra_delete_tes_state;

   context->base.create_vertex_elements_state = tegra_create_vertex_elements_state;
   context->base.bind_vertex_elements_state = tegra_bind_vertex_elements_state;
   context->base.delete_vertex_elements_state = tegra_delete_vertex_elements_state;

   context->base.set_blend_color = tegra_set_blend_color;
   context->base.set_stencil_ref = tegra_set_stencil_ref;
   context->base.set_sample_mask = tegra_set_sample_mask;
   context->base.set_min_samples = tegra_set_min_samples;
   context->base.set_clip_state = tegra_set_clip_state;

   context->base.set_constant_buffer = tegra_set_constant_buffer;
   context->base.set_framebuffer_state = tegra_set_framebuffer_state;
   context->base.set_polygon_stipple = tegra_set_polygon_stipple;
   context->base.set_scissor_states = tegra_set_scissor_states;
   context->base.set_window_rectangles = tegra_set_window_rectangles;
   context->base.set_viewport_states = tegra_set_viewport_states;
   context->base.set_sampler_views = tegra_set_sampler_views;
   context->base.set_tess_state = tegra_set_tess_state;

   context->base.set_debug_callback = tegra_set_debug_callback;

   context->base.set_shader_buffers = tegra_set_shader_buffers;
   context->base.set_shader_images = tegra_set_shader_images;
   context->base.set_vertex_buffers = tegra_set_vertex_buffers;

   context->base.create_stream_output_target = tegra_create_stream_output_target;
   context->base.stream_output_target_destroy = tegra_stream_output_target_destroy;
   context->base.set_stream_output_targets = tegra_set_stream_output_targets;

   context->base.resource_copy_region = tegra_resource_copy_region;
   context->base.blit = tegra_blit;
   context->base.clear = tegra_clear;
   context->base.clear_render_target = tegra_clear_render_target;
   context->base.clear_depth_stencil = tegra_clear_depth_stencil;
   context->base.clear_texture = tegra_clear_texture;
   context->base.clear_buffer = tegra_clear_buffer;
   context->base.flush = tegra_flush;

   context->base.create_fence_fd = tegra_create_fence_fd;
   context->base.fence_server_sync = tegra_fence_server_sync;

   context->base.create_sampler_view = tegra_create_sampler_view;
   context->base.sampler_view_destroy = tegra_sampler_view_destroy;

   context->base.create_surface = tegra_create_surface;
   context->base.surface_destroy = tegra_surface_destroy;

   context->base.transfer_map = tegra_transfer_map;
   context->base.transfer_flush_region = tegra_transfer_flush_region;
   context->base.transfer_unmap = tegra_transfer_unmap;
   context->base.buffer_subdata = tegra_buffer_subdata;
   context->base.texture_subdata = tegra_texture_subdata;

   context->base.texture_barrier = tegra_texture_barrier;
   context->base.memory_barrier = tegra_memory_barrier;

   context->base.create_video_codec = tegra_create_video_codec;
   context->base.create_video_buffer = tegra_create_video_buffer;

   context->base.create_compute_state = tegra_create_compute_state;
   context->base.bind_compute_state = tegra_bind_compute_state;
   context->base.delete_compute_state = tegra_delete_compute_state;
   context->base.set_compute_resources = tegra_set_compute_resources;
   context->base.set_global_binding = tegra_set_global_binding;
   context->base.launch_grid = tegra_launch_grid;
   context->base.get_sample_position = tegra_get_sample_position;
   context->base.get_timestamp = tegra_get_timestamp;

   context->base.flush_resource = tegra_flush_resource;
   context->base.invalidate_resource = tegra_invalidate_resource;

   context->base.get_device_reset_status = tegra_get_device_reset_status;
   context->base.set_device_reset_callback = tegra_set_device_reset_callback;
   context->base.dump_debug_state = tegra_dump_debug_state;
   context->base.emit_string_marker = tegra_emit_string_marker;

   context->base.generate_mipmap = tegra_generate_mipmap;

   context->base.create_texture_handle = tegra_create_texture_handle;
   context->base.delete_texture_handle = tegra_delete_texture_handle;
   context->base.make_texture_handle_resident = tegra_make_texture_handle_resident;
   context->base.create_image_handle = tegra_create_image_handle;
   context->base.delete_image_handle = tegra_delete_image_handle;
   context->base.make_image_handle_resident = tegra_make_image_handle_resident;

   return &context->base;

destroy:
   context->gpu->destroy(context->gpu);
free:
   free(context);
   return NULL;
}