msm_ringbuffer.c revision 7ec681f3
1/*
2 * Copyright (C) 2012-2018 Rob Clark <robclark@freedesktop.org>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 *
23 * Authors:
24 *    Rob Clark <robclark@freedesktop.org>
25 */
26
27#include <assert.h>
28#include <inttypes.h>
29
30#include "util/hash_table.h"
31#include "util/set.h"
32#include "util/slab.h"
33
34#include "drm/freedreno_ringbuffer.h"
35#include "msm_priv.h"
36
37/* The legacy implementation of submit/ringbuffer, which still does the
38 * traditional reloc and cmd tracking
39 */
40
41#define INIT_SIZE 0x1000
42
/* Per-submit state for the legacy reloc-tracking path.  Accumulates the
 * bo table and cmd streams that msm_submit_flush() hands to the
 * DRM_MSM_GEM_SUBMIT ioctl.
 */
struct msm_submit {
   struct fd_submit base;

   /* Parallel arrays: kernel-visible bo descriptors, plus the fd_bo refs
    * backing them.  DECLARE_ARRAY also provides nr_/max_ counters used
    * by APPEND() (see append_bo()).
    */
   DECLARE_ARRAY(struct drm_msm_gem_submit_bo, submit_bos);
   DECLARE_ARRAY(struct fd_bo *, bos);

   /* maps fd_bo to idx in bos table: */
   struct hash_table *bo_table;

   /* pool that per-submit msm_ringbuffer objects are allocated from
    * (accessed via slab_alloc_st/slab_free_st):
    */
   struct slab_mempool ring_pool;

   /* hash-set of associated rings: */
   struct set *ring_set;

   /* Allow for sub-allocation of stateobj ring buffers (ie. sharing
    * the same underlying bo)..
    *
    * We also rely on previous stateobj having been fully constructed
    * so we can reclaim extra space at it's end.
    */
   struct fd_ringbuffer *suballoc_ring;
};
FD_DEFINE_CAST(fd_submit, msm_submit);
66
/* for FD_RINGBUFFER_GROWABLE rb's, tracks the 'finalized' cmdstream buffers
 * and sizes.  Ie. a finalized buffer can have no more commands appended to
 * it.
 */
struct msm_cmd {
   struct fd_bo *ring_bo; /* referenced; released in cmd_free() */
   unsigned size;         /* bytes of cmdstream, set when finalized */
   /* relocs emitted against this cmd buffer (see msm_ringbuffer_emit_reloc): */
   DECLARE_ARRAY(struct drm_msm_gem_submit_reloc, relocs);
};
76
77static struct msm_cmd *
78cmd_new(struct fd_bo *ring_bo)
79{
80   struct msm_cmd *cmd = malloc(sizeof(*cmd));
81   cmd->ring_bo = fd_bo_ref(ring_bo);
82   cmd->size = 0;
83   cmd->nr_relocs = cmd->max_relocs = 0;
84   cmd->relocs = NULL;
85   return cmd;
86}
87
88static void
89cmd_free(struct msm_cmd *cmd)
90{
91   fd_bo_del(cmd->ring_bo);
92   free(cmd->relocs);
93   free(cmd);
94}
95
/* Ringbuffer state for the legacy submit path.  The union discriminant
 * is ring->flags & _FD_RINGBUFFER_OBJECT.
 */
struct msm_ringbuffer {
   struct fd_ringbuffer base;

   /* for FD_RINGBUFFER_STREAMING rb's which are sub-allocated */
   unsigned offset;

   union {
      /* for _FD_RINGBUFFER_OBJECT case: */
      struct {
         struct fd_pipe *pipe;
         /* bos referenced by relocs; fixed up to per-submit indices
          * at flush time (see handle_stateobj_relocs):
          */
         DECLARE_ARRAY(struct fd_bo *, reloc_bos);
         struct set *ring_set;
      };
      /* for other cases: */
      struct {
         struct fd_submit *submit;
         /* finalized cmd buffers (growable rings accumulate several): */
         DECLARE_ARRAY(struct msm_cmd *, cmds);
      };
   } u;

   struct msm_cmd *cmd; /* current cmd */
   struct fd_bo *ring_bo;
};
FD_DEFINE_CAST(fd_ringbuffer, msm_ringbuffer);
120
121static void finalize_current_cmd(struct fd_ringbuffer *ring);
122static struct fd_ringbuffer *
123msm_ringbuffer_init(struct msm_ringbuffer *msm_ring, uint32_t size,
124                    enum fd_ringbuffer_flags flags);
125
/* Add (if needed) bo to submit and return its index in the bos table.
 *
 * Fast path: each msm_bo caches the idx it had in the last submit it was
 * added to; if that idx still points at this bo's handle in our table we
 * are done.  Otherwise fall back to the hash table (and possibly append
 * a new entry).
 */
static uint32_t
append_bo(struct msm_submit *submit, struct fd_bo *bo)
{
   struct msm_bo *msm_bo = to_msm_bo(bo);
   uint32_t idx;

   /* NOTE: it is legal to use the same bo on different threads for
    * different submits.  But it is not legal to use the same submit
    * from given threads.
    */
   idx = READ_ONCE(msm_bo->idx);

   /* The cached idx may be stale (from another submit) or out of range;
    * validate it against our own table before trusting it:
    */
   if (unlikely((idx >= submit->nr_submit_bos) ||
                (submit->submit_bos[idx].handle != bo->handle))) {
      uint32_t hash = _mesa_hash_pointer(bo);
      struct hash_entry *entry;

      entry = _mesa_hash_table_search_pre_hashed(submit->bo_table, hash, bo);
      if (entry) {
         /* found */
         idx = (uint32_t)(uintptr_t)entry->data;
      } else {
         /* first time this bo is seen by this submit; append both the
          * kernel-visible descriptor and a reference on the bo:
          */
         idx = APPEND(
            submit, submit_bos,
            (struct drm_msm_gem_submit_bo){
               .flags = bo->reloc_flags & (MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE),
               .handle = bo->handle,
               .presumed = 0,
            });
         APPEND(submit, bos, fd_bo_ref(bo));

         _mesa_hash_table_insert_pre_hashed(submit->bo_table, hash, bo,
                                            (void *)(uintptr_t)idx);
      }
      /* refresh the per-bo cache for the next lookup: */
      msm_bo->idx = idx;
   }

   return idx;
}
166
167static void
168append_ring(struct set *set, struct fd_ringbuffer *ring)
169{
170   uint32_t hash = _mesa_hash_pointer(ring);
171
172   if (!_mesa_set_search_pre_hashed(set, hash, ring)) {
173      fd_ringbuffer_ref(ring);
174      _mesa_set_add_pre_hashed(set, hash, ring);
175   }
176}
177
178static void
179msm_submit_suballoc_ring_bo(struct fd_submit *submit,
180                            struct msm_ringbuffer *msm_ring, uint32_t size)
181{
182   struct msm_submit *msm_submit = to_msm_submit(submit);
183   unsigned suballoc_offset = 0;
184   struct fd_bo *suballoc_bo = NULL;
185
186   if (msm_submit->suballoc_ring) {
187      struct msm_ringbuffer *suballoc_ring =
188         to_msm_ringbuffer(msm_submit->suballoc_ring);
189
190      suballoc_bo = suballoc_ring->ring_bo;
191      suballoc_offset =
192         fd_ringbuffer_size(msm_submit->suballoc_ring) + suballoc_ring->offset;
193
194      suballoc_offset = align(suballoc_offset, 0x10);
195
196      if ((size + suballoc_offset) > suballoc_bo->size) {
197         suballoc_bo = NULL;
198      }
199   }
200
201   if (!suballoc_bo) {
202      // TODO possibly larger size for streaming bo?
203      msm_ring->ring_bo = fd_bo_new_ring(submit->pipe->dev, 0x8000);
204      msm_ring->offset = 0;
205   } else {
206      msm_ring->ring_bo = fd_bo_ref(suballoc_bo);
207      msm_ring->offset = suballoc_offset;
208   }
209
210   struct fd_ringbuffer *old_suballoc_ring = msm_submit->suballoc_ring;
211
212   msm_submit->suballoc_ring = fd_ringbuffer_ref(&msm_ring->base);
213
214   if (old_suballoc_ring)
215      fd_ringbuffer_del(old_suballoc_ring);
216}
217
218static struct fd_ringbuffer *
219msm_submit_new_ringbuffer(struct fd_submit *submit, uint32_t size,
220                          enum fd_ringbuffer_flags flags)
221{
222   struct msm_submit *msm_submit = to_msm_submit(submit);
223   struct msm_ringbuffer *msm_ring;
224
225   msm_ring = slab_alloc_st(&msm_submit->ring_pool);
226
227   msm_ring->u.submit = submit;
228
229   /* NOTE: needs to be before _suballoc_ring_bo() since it could
230    * increment the refcnt of the current ring
231    */
232   msm_ring->base.refcnt = 1;
233
234   if (flags & FD_RINGBUFFER_STREAMING) {
235      msm_submit_suballoc_ring_bo(submit, msm_ring, size);
236   } else {
237      if (flags & FD_RINGBUFFER_GROWABLE)
238         size = INIT_SIZE;
239
240      msm_ring->offset = 0;
241      msm_ring->ring_bo = fd_bo_new_ring(submit->pipe->dev, size);
242   }
243
244   if (!msm_ringbuffer_init(msm_ring, size, flags))
245      return NULL;
246
247   return &msm_ring->base;
248}
249
250static struct drm_msm_gem_submit_reloc *
251handle_stateobj_relocs(struct msm_submit *submit, struct msm_ringbuffer *ring)
252{
253   struct msm_cmd *cmd = ring->cmd;
254   struct drm_msm_gem_submit_reloc *relocs;
255
256   relocs = malloc(cmd->nr_relocs * sizeof(*relocs));
257
258   for (unsigned i = 0; i < cmd->nr_relocs; i++) {
259      unsigned idx = cmd->relocs[i].reloc_idx;
260      struct fd_bo *bo = ring->u.reloc_bos[idx];
261
262      relocs[i] = cmd->relocs[i];
263      relocs[i].reloc_idx = append_bo(submit, bo);
264   }
265
266   return relocs;
267}
268
/* Flush the submit: gather all associated rings, build the kernel's
 * cmd/bo tables, attach fences, and invoke DRM_MSM_GEM_SUBMIT.
 *
 * in_fence_fd: optional (-1 == none) fence fd the kernel must wait on.
 * out_fence:   optional; filled with the resulting kernel/user fences
 *              (and fence fd, if use_fence_fd) on success.
 * Returns 0 on success, or the (negative) ioctl error.
 */
static int
msm_submit_flush(struct fd_submit *submit, int in_fence_fd,
                 struct fd_submit_fence *out_fence)
{
   struct msm_submit *msm_submit = to_msm_submit(submit);
   struct msm_pipe *msm_pipe = to_msm_pipe(submit->pipe);
   struct drm_msm_gem_submit req = {
      .flags = msm_pipe->pipe,
      .queueid = msm_pipe->queue_id,
   };
   int ret;

   /* make sure the primary ring's current cmd is accounted for: */
   finalize_current_cmd(submit->primary);
   append_ring(msm_submit->ring_set, submit->primary);

   unsigned nr_cmds = 0;
   unsigned nr_objs = 0;

   /* First pass: finalize remaining rings and count how many cmd table
    * entries (and stateobj reloc arrays) we will need:
    */
   set_foreach (msm_submit->ring_set, entry) {
      struct fd_ringbuffer *ring = (void *)entry->key;
      if (ring->flags & _FD_RINGBUFFER_OBJECT) {
         /* a stateobj contributes exactly one IB-target cmd: */
         nr_cmds += 1;
         nr_objs += 1;
      } else {
         if (ring != submit->primary)
            finalize_current_cmd(ring);
         nr_cmds += to_msm_ringbuffer(ring)->u.nr_cmds;
      }
   }

   /* VLAs sized by the counts from the pass above: */
   void *obj_relocs[nr_objs];
   struct drm_msm_gem_submit_cmd cmds[nr_cmds];
   unsigned i = 0, o = 0;

   /* Second pass: fill in the cmd table: */
   set_foreach (msm_submit->ring_set, entry) {
      struct fd_ringbuffer *ring = (void *)entry->key;
      struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);

      debug_assert(i < nr_cmds);

      // TODO handle relocs:
      if (ring->flags & _FD_RINGBUFFER_OBJECT) {

         debug_assert(o < nr_objs);

         /* rewrite the stateobj's relocs to this submit's bo indices;
          * the array is freed after the ioctl:
          */
         void *relocs = handle_stateobj_relocs(msm_submit, msm_ring);
         obj_relocs[o++] = relocs;

         cmds[i].type = MSM_SUBMIT_CMD_IB_TARGET_BUF;
         cmds[i].submit_idx = append_bo(msm_submit, msm_ring->ring_bo);
         cmds[i].submit_offset = msm_ring->offset;
         cmds[i].size = offset_bytes(ring->cur, ring->start);
         cmds[i].pad = 0;
         cmds[i].nr_relocs = msm_ring->cmd->nr_relocs;
         cmds[i].relocs = VOID2U64(relocs);

         i++;
      } else {
         /* one cmd entry per finalized buffer of this ring: */
         for (unsigned j = 0; j < msm_ring->u.nr_cmds; j++) {
            if (ring->flags & FD_RINGBUFFER_PRIMARY) {
               cmds[i].type = MSM_SUBMIT_CMD_BUF;
            } else {
               cmds[i].type = MSM_SUBMIT_CMD_IB_TARGET_BUF;
            }
            cmds[i].submit_idx =
               append_bo(msm_submit, msm_ring->u.cmds[j]->ring_bo);
            cmds[i].submit_offset = msm_ring->offset;
            cmds[i].size = msm_ring->u.cmds[j]->size;
            cmds[i].pad = 0;
            cmds[i].nr_relocs = msm_ring->u.cmds[j]->nr_relocs;
            cmds[i].relocs = VOID2U64(msm_ring->u.cmds[j]->relocs);

            i++;
         }
      }
   }

   /* attach the submit's fence to every referenced bo: */
   simple_mtx_lock(&table_lock);
   for (unsigned j = 0; j < msm_submit->nr_bos; j++) {
      fd_bo_add_fence(msm_submit->bos[j], submit->pipe, submit->fence);
   }
   simple_mtx_unlock(&table_lock);

   if (in_fence_fd != -1) {
      req.flags |= MSM_SUBMIT_FENCE_FD_IN | MSM_SUBMIT_NO_IMPLICIT;
      req.fence_fd = in_fence_fd;
   }

   if (out_fence && out_fence->use_fence_fd) {
      req.flags |= MSM_SUBMIT_FENCE_FD_OUT;
   }

   /* needs to be after get_cmd() as that could create bos/cmds table: */
   req.bos = VOID2U64(msm_submit->submit_bos),
   req.nr_bos = msm_submit->nr_submit_bos;
   req.cmds = VOID2U64(cmds), req.nr_cmds = nr_cmds;

   DEBUG_MSG("nr_cmds=%u, nr_bos=%u", req.nr_cmds, req.nr_bos);

   ret = drmCommandWriteRead(submit->pipe->dev->fd, DRM_MSM_GEM_SUBMIT, &req,
                             sizeof(req));
   if (ret) {
      ERROR_MSG("submit failed: %d (%s)", ret, strerror(errno));
      msm_dump_submit(&req);
   } else if (!ret && out_fence) {
      /* NOTE(review): '!ret' is redundant here (else of 'if (ret)') */
      out_fence->fence.kfence = req.fence;
      out_fence->fence.ufence = submit->fence;
      out_fence->fence_fd = req.fence_fd;
   }

   /* free the temporary per-stateobj reloc arrays: */
   for (unsigned o = 0; o < nr_objs; o++)
      free(obj_relocs[o]);

   return ret;
}
384
385static void
386unref_rings(struct set_entry *entry)
387{
388   struct fd_ringbuffer *ring = (void *)entry->key;
389   fd_ringbuffer_del(ring);
390}
391
392static void
393msm_submit_destroy(struct fd_submit *submit)
394{
395   struct msm_submit *msm_submit = to_msm_submit(submit);
396
397   if (msm_submit->suballoc_ring)
398      fd_ringbuffer_del(msm_submit->suballoc_ring);
399
400   _mesa_hash_table_destroy(msm_submit->bo_table, NULL);
401   _mesa_set_destroy(msm_submit->ring_set, unref_rings);
402
403   // TODO it would be nice to have a way to debug_assert() if all
404   // rb's haven't been free'd back to the slab, because that is
405   // an indication that we are leaking bo's
406   slab_destroy(&msm_submit->ring_pool);
407
408   for (unsigned i = 0; i < msm_submit->nr_bos; i++)
409      fd_bo_del(msm_submit->bos[i]);
410
411   free(msm_submit->submit_bos);
412   free(msm_submit->bos);
413   free(msm_submit);
414}
415
/* vtable for submits created by msm_submit_new(): */
static const struct fd_submit_funcs submit_funcs = {
   .new_ringbuffer = msm_submit_new_ringbuffer,
   .flush = msm_submit_flush,
   .destroy = msm_submit_destroy,
};
421
422struct fd_submit *
423msm_submit_new(struct fd_pipe *pipe)
424{
425   struct msm_submit *msm_submit = calloc(1, sizeof(*msm_submit));
426   struct fd_submit *submit;
427
428   msm_submit->bo_table = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
429                                                  _mesa_key_pointer_equal);
430   msm_submit->ring_set =
431      _mesa_set_create(NULL, _mesa_hash_pointer, _mesa_key_pointer_equal);
432   // TODO tune size:
433   slab_create(&msm_submit->ring_pool, sizeof(struct msm_ringbuffer), 16);
434
435   submit = &msm_submit->base;
436   submit->funcs = &submit_funcs;
437
438   return submit;
439}
440
441static void
442finalize_current_cmd(struct fd_ringbuffer *ring)
443{
444   struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
445
446   debug_assert(!(ring->flags & _FD_RINGBUFFER_OBJECT));
447
448   if (!msm_ring->cmd)
449      return;
450
451   debug_assert(msm_ring->cmd->ring_bo == msm_ring->ring_bo);
452
453   msm_ring->cmd->size = offset_bytes(ring->cur, ring->start);
454   APPEND(&msm_ring->u, cmds, msm_ring->cmd);
455   msm_ring->cmd = NULL;
456}
457
458static void
459msm_ringbuffer_grow(struct fd_ringbuffer *ring, uint32_t size)
460{
461   struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
462   struct fd_pipe *pipe = msm_ring->u.submit->pipe;
463
464   debug_assert(ring->flags & FD_RINGBUFFER_GROWABLE);
465
466   finalize_current_cmd(ring);
467
468   fd_bo_del(msm_ring->ring_bo);
469   msm_ring->ring_bo = fd_bo_new_ring(pipe->dev, size);
470   msm_ring->cmd = cmd_new(msm_ring->ring_bo);
471
472   ring->start = fd_bo_map(msm_ring->ring_bo);
473   ring->end = &(ring->start[size / 4]);
474   ring->cur = ring->start;
475   ring->size = size;
476}
477
/* Emit a reloc into the ring at the current position, recording it in
 * the current cmd's reloc table, and advance ring->cur.  On 64b gpus a
 * second reloc (high dword) is emitted, so two dwords are consumed.
 */
static void
msm_ringbuffer_emit_reloc(struct fd_ringbuffer *ring,
                          const struct fd_reloc *reloc)
{
   struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
   struct fd_pipe *pipe;
   unsigned reloc_idx;

   if (ring->flags & _FD_RINGBUFFER_OBJECT) {
      /* stateobjs track bos privately; record the index into reloc_bos: */
      unsigned idx = APPEND(&msm_ring->u, reloc_bos, fd_bo_ref(reloc->bo));

      /* this gets fixed up at submit->flush() time, since this state-
       * object rb can be used with many different submits
       */
      reloc_idx = idx;

      pipe = msm_ring->u.pipe;
   } else {
      /* ordinary rings reloc directly against the submit's bo table: */
      struct msm_submit *msm_submit = to_msm_submit(msm_ring->u.submit);

      reloc_idx = append_bo(msm_submit, reloc->bo);

      pipe = msm_ring->u.submit->pipe;
   }

   /* submit_offset is computed from the current write position, so the
    * APPEND must happen before ring->cur is advanced:
    */
   APPEND(msm_ring->cmd, relocs,
          (struct drm_msm_gem_submit_reloc){
             .reloc_idx = reloc_idx,
             .reloc_offset = reloc->offset,
             .or = reloc->orlo,
             .shift = reloc->shift,
             .submit_offset =
                offset_bytes(ring->cur, ring->start) + msm_ring->offset,
          });

   ring->cur++;

   if (fd_dev_64b(&pipe->dev_id)) {
      /* second dword: the high 32 bits of the address: */
      APPEND(msm_ring->cmd, relocs,
             (struct drm_msm_gem_submit_reloc){
                .reloc_idx = reloc_idx,
                .reloc_offset = reloc->offset,
                .or = reloc->orhi,
                .shift = reloc->shift - 32,
                .submit_offset =
                   offset_bytes(ring->cur, ring->start) + msm_ring->offset,
             });

      ring->cur++;
   }
}
529
530static void
531append_stateobj_rings(struct msm_submit *submit, struct fd_ringbuffer *target)
532{
533   struct msm_ringbuffer *msm_target = to_msm_ringbuffer(target);
534
535   debug_assert(target->flags & _FD_RINGBUFFER_OBJECT);
536
537   set_foreach (msm_target->u.ring_set, entry) {
538      struct fd_ringbuffer *ring = (void *)entry->key;
539
540      append_ring(submit->ring_set, ring);
541
542      if (ring->flags & _FD_RINGBUFFER_OBJECT) {
543         append_stateobj_rings(submit, ring);
544      }
545   }
546}
547
/* Emit a reloc in 'ring' pointing at cmd buffer 'cmd_idx' of 'target',
 * and make sure target (and, for stateobjs, everything target itself
 * references) is tracked for the eventual submit.  Returns the size in
 * bytes of the referenced cmd buffer (0 if it is empty).
 */
static uint32_t
msm_ringbuffer_emit_reloc_ring(struct fd_ringbuffer *ring,
                               struct fd_ringbuffer *target, uint32_t cmd_idx)
{
   struct msm_ringbuffer *msm_target = to_msm_ringbuffer(target);
   struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
   struct fd_bo *bo;
   uint32_t size;

   /* for growable rings, cmd_idx may name an already-finalized buffer;
    * otherwise reference the target's current buffer/position:
    */
   if ((target->flags & FD_RINGBUFFER_GROWABLE) &&
       (cmd_idx < msm_target->u.nr_cmds)) {
      bo = msm_target->u.cmds[cmd_idx]->ring_bo;
      size = msm_target->u.cmds[cmd_idx]->size;
   } else {
      bo = msm_target->ring_bo;
      size = offset_bytes(target->cur, target->start);
   }

   msm_ringbuffer_emit_reloc(ring, &(struct fd_reloc){
                                      .bo = bo,
                                      .iova = bo->iova + msm_target->offset,
                                      .offset = msm_target->offset,
                                   });

   if (!size)
      return 0;

   /* a stateobj referenced from a non-stateobj ring pulls its whole
    * dependency closure into the submit:
    */
   if ((target->flags & _FD_RINGBUFFER_OBJECT) &&
       !(ring->flags & _FD_RINGBUFFER_OBJECT)) {
      struct msm_submit *msm_submit = to_msm_submit(msm_ring->u.submit);

      append_stateobj_rings(msm_submit, target);
   }

   /* track target on the appropriate ring set (the stateobj's own set,
    * or the submit's):
    */
   if (ring->flags & _FD_RINGBUFFER_OBJECT) {
      append_ring(msm_ring->u.ring_set, target);
   } else {
      struct msm_submit *msm_submit = to_msm_submit(msm_ring->u.submit);
      append_ring(msm_submit->ring_set, target);
   }

   return size;
}
591
592static uint32_t
593msm_ringbuffer_cmd_count(struct fd_ringbuffer *ring)
594{
595   if (ring->flags & FD_RINGBUFFER_GROWABLE)
596      return to_msm_ringbuffer(ring)->u.nr_cmds + 1;
597   return 1;
598}
599
600static bool
601msm_ringbuffer_check_size(struct fd_ringbuffer *ring)
602{
603   assert(!(ring->flags & _FD_RINGBUFFER_OBJECT));
604   struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
605   struct fd_submit *submit = msm_ring->u.submit;
606   struct fd_pipe *pipe = submit->pipe;
607
608   if ((fd_device_version(pipe->dev) < FD_VERSION_UNLIMITED_CMDS) &&
609       ((ring->cur - ring->start) > (ring->size / 4 - 0x1000))) {
610      return false;
611   }
612
613   if (to_msm_submit(submit)->nr_bos > MAX_ARRAY_SIZE/2) {
614      return false;
615   }
616
617   return true;
618}
619
620static void
621msm_ringbuffer_destroy(struct fd_ringbuffer *ring)
622{
623   struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
624
625   fd_bo_del(msm_ring->ring_bo);
626   if (msm_ring->cmd)
627      cmd_free(msm_ring->cmd);
628
629   if (ring->flags & _FD_RINGBUFFER_OBJECT) {
630      for (unsigned i = 0; i < msm_ring->u.nr_reloc_bos; i++) {
631         fd_bo_del(msm_ring->u.reloc_bos[i]);
632      }
633
634      _mesa_set_destroy(msm_ring->u.ring_set, unref_rings);
635
636      free(msm_ring->u.reloc_bos);
637      free(msm_ring);
638   } else {
639      struct fd_submit *submit = msm_ring->u.submit;
640
641      for (unsigned i = 0; i < msm_ring->u.nr_cmds; i++) {
642         cmd_free(msm_ring->u.cmds[i]);
643      }
644
645      free(msm_ring->u.cmds);
646      slab_free_st(&to_msm_submit(submit)->ring_pool, msm_ring);
647   }
648}
649
/* vtable installed by msm_ringbuffer_init(): */
static const struct fd_ringbuffer_funcs ring_funcs = {
   .grow = msm_ringbuffer_grow,
   .emit_reloc = msm_ringbuffer_emit_reloc,
   .emit_reloc_ring = msm_ringbuffer_emit_reloc_ring,
   .cmd_count = msm_ringbuffer_cmd_count,
   .check_size = msm_ringbuffer_check_size,
   .destroy = msm_ringbuffer_destroy,
};
658
659static inline struct fd_ringbuffer *
660msm_ringbuffer_init(struct msm_ringbuffer *msm_ring, uint32_t size,
661                    enum fd_ringbuffer_flags flags)
662{
663   struct fd_ringbuffer *ring = &msm_ring->base;
664
665   debug_assert(msm_ring->ring_bo);
666
667   uint8_t *base = fd_bo_map(msm_ring->ring_bo);
668   ring->start = (void *)(base + msm_ring->offset);
669   ring->end = &(ring->start[size / 4]);
670   ring->cur = ring->start;
671
672   ring->size = size;
673   ring->flags = flags;
674
675   ring->funcs = &ring_funcs;
676
677   msm_ring->u.cmds = NULL;
678   msm_ring->u.nr_cmds = msm_ring->u.max_cmds = 0;
679
680   msm_ring->cmd = cmd_new(msm_ring->ring_bo);
681
682   return ring;
683}
684
685struct fd_ringbuffer *
686msm_ringbuffer_new_object(struct fd_pipe *pipe, uint32_t size)
687{
688   struct msm_ringbuffer *msm_ring = malloc(sizeof(*msm_ring));
689
690   msm_ring->u.pipe = pipe;
691   msm_ring->offset = 0;
692   msm_ring->ring_bo = fd_bo_new_ring(pipe->dev, size);
693   msm_ring->base.refcnt = 1;
694
695   msm_ring->u.reloc_bos = NULL;
696   msm_ring->u.nr_reloc_bos = msm_ring->u.max_reloc_bos = 0;
697
698   msm_ring->u.ring_set =
699      _mesa_set_create(NULL, _mesa_hash_pointer, _mesa_key_pointer_equal);
700
701   return msm_ringbuffer_init(msm_ring, size, _FD_RINGBUFFER_OBJECT);
702}
703