freedreno_pipe.c revision 7ec681f3
1/*
2 * Copyright (C) 2012-2018 Rob Clark <robclark@freedesktop.org>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 *
23 * Authors:
24 *    Rob Clark <robclark@freedesktop.org>
25 */
26
27#include "freedreno_drmif.h"
28#include "freedreno_priv.h"
29
/**
 * Create a new pipe on @dev for ring @id at the requested priority.
 *
 * A priority of zero is highest priority, and higher numeric values are
 * lower priorities.  Any priority other than the default (1) requires
 * kernel submit-queue support.
 *
 * Returns NULL on invalid id, unsupported priority, or allocation
 * failure.  On success the returned pipe holds its own reference on
 * @dev and starts with a refcount of one.
 */
struct fd_pipe *
fd_pipe_new2(struct fd_device *dev, enum fd_pipe_id id, uint32_t prio)
{
   struct fd_pipe *pipe;
   uint64_t val;

   /* NOTE(review): if FD_PIPE_MAX is the count (one past the last valid
    * id), this check looks off-by-one (id == FD_PIPE_MAX slips through) —
    * confirm against the enum definition in freedreno_drmif.h.
    */
   if (id > FD_PIPE_MAX) {
      ERROR_MSG("invalid pipe id: %d", id);
      return NULL;
   }

   /* Non-default priorities need kernel submit-queue support: */
   if ((prio != 1) && (fd_device_version(dev) < FD_VERSION_SUBMIT_QUEUES)) {
      ERROR_MSG("invalid priority!");
      return NULL;
   }

   /* Backend (kgsl vs msm, gen-specific) constructs the actual pipe: */
   pipe = dev->funcs->pipe_new(dev, id, prio);
   if (!pipe) {
      ERROR_MSG("allocation failed");
      return NULL;
   }

   pipe->dev = fd_device_ref(dev);
   pipe->id = id;
   p_atomic_set(&pipe->refcnt, 1);

   /* Cache the GPU identification so later queries don't need to round-
    * trip through get_param.  NOTE(review): the get_param return values
    * are ignored here; on failure `val` would be used uninitialized —
    * presumably these cannot fail for a successfully created pipe, but
    * confirm against the backends.
    */
   fd_pipe_get_param(pipe, FD_GPU_ID, &val);
   pipe->dev_id.gpu_id = val;

   fd_pipe_get_param(pipe, FD_CHIP_ID, &val);
   pipe->dev_id.chip_id = val;

   /* Shared control buffer, used (among other things) to track the last
    * completed userspace fence value.  NOTE(review): fd_bo_new/fd_bo_map
    * results are not NULL-checked before the ->fence write below —
    * confirm whether allocation failure is possible/handled elsewhere.
    */
   pipe->control_mem = fd_bo_new(dev, sizeof(*pipe->control),
                                 FD_BO_CACHED_COHERENT,
                                 "pipe-control");
   pipe->control = fd_bo_map(pipe->control_mem);

   /* We could be getting a bo from the bo-cache, make sure the fence value
    * is not garbage:
    */
   pipe->control->fence = 0;

   /* We don't want the control_mem bo to hold a reference to ourselves,
    * so disable userspace fencing.  This also means that we won't be able
    * to determine if the buffer is idle which is needed by bo-cache.  But
    * pipe creation/destroy is not a high frequency event so just disable
    * the bo-cache as well:
    */
   pipe->control_mem->nosync = true;
   pipe->control_mem->bo_reuse = NO_CACHE;

   return pipe;
}
87
88struct fd_pipe *
89fd_pipe_new(struct fd_device *dev, enum fd_pipe_id id)
90{
91   return fd_pipe_new2(dev, id, 1);
92}
93
94struct fd_pipe *
95fd_pipe_ref(struct fd_pipe *pipe)
96{
97   simple_mtx_lock(&table_lock);
98   fd_pipe_ref_locked(pipe);
99   simple_mtx_unlock(&table_lock);
100   return pipe;
101}
102
103struct fd_pipe *
104fd_pipe_ref_locked(struct fd_pipe *pipe)
105{
106   simple_mtx_assert_locked(&table_lock);
107   pipe->refcnt++;
108   return pipe;
109}
110
111void
112fd_pipe_del(struct fd_pipe *pipe)
113{
114   simple_mtx_lock(&table_lock);
115   fd_pipe_del_locked(pipe);
116   simple_mtx_unlock(&table_lock);
117}
118
/**
 * Drop a reference on @pipe; on the last reference, release the control
 * buffer, the device reference, and the backend pipe state.  Caller
 * must hold table_lock.
 */
void
fd_pipe_del_locked(struct fd_pipe *pipe)
{
   simple_mtx_assert_locked(&table_lock);
   if (!p_atomic_dec_zero(&pipe->refcnt))
      return;
   /* Last reference gone: tear down.  NOTE(review): the device ref is
    * dropped before funcs->destroy(); this assumes the backend destroy
    * callback does not dereference pipe->dev after the ref is released
    * (or that other refs keep the device alive) — confirm against the
    * backend implementations.
    */
   fd_bo_del_locked(pipe->control_mem);
   fd_device_del_locked(pipe->dev);
   pipe->funcs->destroy(pipe);
}
129
130/**
131 * Discard any unflushed deferred submits.  This is called at context-
132 * destroy to make sure we don't leak unflushed submits.
133 */
134void
135fd_pipe_purge(struct fd_pipe *pipe)
136{
137   struct fd_device *dev = pipe->dev;
138   struct list_head deferred_submits;
139
140   list_inithead(&deferred_submits);
141
142   simple_mtx_lock(&dev->submit_lock);
143
144   foreach_submit_safe (deferred_submit, &dev->deferred_submits) {
145      if (deferred_submit->pipe != pipe)
146         continue;
147
148      list_del(&deferred_submit->node);
149      list_addtail(&deferred_submit->node, &deferred_submits);
150      dev->deferred_cmds -= fd_ringbuffer_cmd_count(deferred_submit->primary);
151   }
152
153   simple_mtx_unlock(&dev->submit_lock);
154
155   foreach_submit_safe (deferred_submit, &deferred_submits) {
156      list_del(&deferred_submit->node);
157      fd_submit_del(deferred_submit);
158   }
159}
160
161int
162fd_pipe_get_param(struct fd_pipe *pipe, enum fd_param_id param, uint64_t *value)
163{
164   return pipe->funcs->get_param(pipe, param, value);
165}
166
167const struct fd_dev_id *
168fd_pipe_dev_id(struct fd_pipe *pipe)
169{
170   return &pipe->dev_id;
171}
172
/**
 * Wait for @fence to be signaled, with an effectively infinite timeout.
 */
int
fd_pipe_wait(struct fd_pipe *pipe, const struct fd_fence *fence)
{
   /* All-ones timeout == wait "forever": */
   return fd_pipe_wait_timeout(pipe, fence, ~(uint64_t)0);
}
178
179int
180fd_pipe_wait_timeout(struct fd_pipe *pipe, const struct fd_fence *fence,
181                     uint64_t timeout)
182{
183   if (!fd_fence_after(fence->ufence, pipe->control->fence))
184      return 0;
185
186   fd_pipe_flush(pipe, fence->ufence);
187
188   return pipe->funcs->wait(pipe, fence, timeout);
189}
190
/**
 * Emit a CACHE_FLUSH_TS event into @ring that writes the next fence
 * value into the pipe's control buffer, and return that fence value.
 *
 * NOTE(review): presumably fd_dev_64b() distinguishes GPUs with 64-bit
 * iovas (PKT7 encoding, split ADDR_LO/HI) from older 32-bit-address
 * GPUs (PKT3 encoding) — confirm against fd_dev_64b()'s definition.
 */
uint32_t
fd_pipe_emit_fence(struct fd_pipe *pipe, struct fd_ringbuffer *ring)
{
   /* Fence values are a simple monotonically increasing per-pipe
    * counter (no wrap handling visible here):
    */
   uint32_t fence = ++pipe->last_fence;

   if (fd_dev_64b(&pipe->dev_id)) {
      OUT_PKT7(ring, CP_EVENT_WRITE, 4);
      OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(CACHE_FLUSH_TS));
      OUT_RELOC(ring, control_ptr(pipe, fence));   /* ADDR_LO/HI */
      OUT_RING(ring, fence);
   } else {
      OUT_PKT3(ring, CP_EVENT_WRITE, 3);
      OUT_RING(ring, CP_EVENT_WRITE_0_EVENT(CACHE_FLUSH_TS));
      OUT_RELOC(ring, control_ptr(pipe, fence));   /* ADDR */
      OUT_RING(ring, fence);
   }

   return fence;
}
210