/*
 * Copyright 2018 Chromium.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
23
24#include "util/u_box.h"
25#include "util/u_inlines.h"
26
27#include "virgl_protocol.h"
28#include "virgl_screen.h"
29#include "virgl_encode.h"
30#include "virgl_transfer_queue.h"
31
/* Arguments handed to a list_action_t callback for one list entry. */
struct list_action_args
{
   void *data;                      /* action-specific payload (bool *, transfer **, cmd buf, ...) */
   struct virgl_transfer *queued;   /* the list entry being acted on */
   struct virgl_transfer *current;  /* the incoming transfer being compared against, if any */
};
38
/* Predicate deciding whether a queued transfer matches the current one. */
typedef bool (*compare_transfers_t)(struct virgl_transfer *queued,
                                    struct virgl_transfer *current);

/* Operation applied to selected entries while walking a queue list. */
typedef void (*list_action_t)(struct virgl_transfer_queue *queue,
                              struct list_action_args *args);
44
/* Parameters for a single walk over one of the queue's lists. */
struct list_iteration_args
{
   void *data;                            /* forwarded to the action via list_action_args */
   list_action_t action;                  /* callback run on selected entries */
   compare_transfers_t compare;           /* entry filter; not used by perform_action */
   struct virgl_transfer *current;        /* transfer to compare entries against */
   enum virgl_transfer_queue_lists type;  /* which list to walk */
};
53
54static bool transfers_intersect(struct virgl_transfer *queued,
55                                struct virgl_transfer *current)
56{
57   boolean tmp;
58   struct pipe_resource *queued_res = queued->base.resource;
59   struct pipe_resource *current_res = current->base.resource;
60
61   if (queued_res != current_res)
62      return false;
63
64   tmp = u_box_test_intersection_2d(&queued->base.box, &current->base.box);
65   return (tmp == TRUE);
66}
67
68static bool transfers_overlap(struct virgl_transfer *queued,
69                              struct virgl_transfer *current)
70{
71   boolean tmp;
72   struct pipe_resource *queued_res = queued->base.resource;
73   struct pipe_resource *current_res = current->base.resource;
74
75   if (queued_res != current_res)
76      return false;
77
78   if (queued->base.level != current->base.level)
79      return false;
80
81   if (queued->base.box.z != current->base.box.z)
82      return true;
83
84   if (queued->base.box.depth != 1 || current->base.box.depth != 1)
85      return true;
86
87   /*
88    * Special case for boxes with [x: 0, width: 1] and [x: 1, width: 1].
89    */
90   if (queued_res->target == PIPE_BUFFER) {
91      if (queued->base.box.x + queued->base.box.width == current->base.box.x)
92         return false;
93
94      if (current->base.box.x + current->base.box.width == queued->base.box.x)
95         return false;
96   }
97
98   tmp = u_box_test_intersection_2d(&queued->base.box, &current->base.box);
99   return (tmp == TRUE);
100}
101
102static void set_true(UNUSED struct virgl_transfer_queue *queue,
103                     struct list_action_args *args)
104{
105   bool *val = args->data;
106   *val = true;
107}
108
109static void set_queued(UNUSED struct virgl_transfer_queue *queue,
110                       struct list_action_args *args)
111{
112   struct virgl_transfer *queued = args->queued;
113   struct virgl_transfer **val = args->data;
114   *val = queued;
115}
116
117static void remove_transfer(struct virgl_transfer_queue *queue,
118                            struct list_action_args *args)
119{
120   struct virgl_transfer *queued = args->queued;
121   struct pipe_resource *pres = queued->base.resource;
122   list_del(&queued->queue_link);
123   pipe_resource_reference(&pres, NULL);
124   virgl_resource_destroy_transfer(queue->pool, queued);
125}
126
/*
 * List action: fold an already-queued transfer into the current one and
 * drop the queued entry.  Only used for buffer transfers (see
 * virgl_transfer_queue_unmap), so a 2D box union suffices.
 */
static void replace_unmapped_transfer(struct virgl_transfer_queue *queue,
                                      struct list_action_args *args)
{
   struct virgl_transfer *current = args->current;
   struct virgl_transfer *queued = args->queued;

   /* Grow the current transfer's box to also cover the queued range. */
   u_box_union_2d(&current->base.box, &current->base.box, &queued->base.box);
   current->offset = current->base.box.x;

   /* The queued transfer is now redundant; also give back the command
    * dwords that had been reserved for it in add_internal. */
   remove_transfer(queue, args);
   queue->num_dwords -= (VIRGL_TRANSFER3D_SIZE + 1);
}
139
140static void transfer_put(struct virgl_transfer_queue *queue,
141                         struct list_action_args *args)
142{
143   struct virgl_transfer *queued = args->queued;
144   struct virgl_resource *res = virgl_resource(queued->base.resource);
145
146   queue->vs->vws->transfer_put(queue->vs->vws, res->hw_res, &queued->base.box,
147                                queued->base.stride, queued->l_stride,
148                                queued->offset, queued->base.level);
149
150   remove_transfer(queue, args);
151}
152
153static void transfer_write(struct virgl_transfer_queue *queue,
154                           struct list_action_args *args)
155{
156   struct virgl_transfer *queued = args->queued;
157   struct virgl_cmd_buf *buf = args->data;
158
159   // Takes a reference on the HW resource, which is released after
160   // the exec buffer command.
161   virgl_encode_transfer(queue->vs, buf, queued, VIRGL_TRANSFER_TO_HOST);
162
163   list_delinit(&queued->queue_link);
164   list_addtail(&queued->queue_link, &queue->lists[COMPLETED_LIST]);
165}
166
167static void compare_and_perform_action(struct virgl_transfer_queue *queue,
168                                       struct list_iteration_args *iter)
169{
170   struct list_action_args args;
171   struct virgl_transfer *queued, *tmp;
172   enum virgl_transfer_queue_lists type = iter->type;
173
174   memset(&args, 0, sizeof(args));
175   args.current = iter->current;
176   args.data = iter->data;
177
178   LIST_FOR_EACH_ENTRY_SAFE(queued, tmp, &queue->lists[type], queue_link) {
179      if (iter->compare(queued, iter->current)) {
180         args.queued = queued;
181         iter->action(queue, &args);
182      }
183   }
184}
185
186static void intersect_and_set_queued_once(struct virgl_transfer_queue *queue,
187                                          struct list_iteration_args *iter)
188{
189   struct list_action_args args;
190   struct virgl_transfer *queued, *tmp;
191   enum virgl_transfer_queue_lists type = iter->type;
192
193   memset(&args, 0, sizeof(args));
194   args.current = iter->current;
195   args.data = iter->data;
196
197   LIST_FOR_EACH_ENTRY_SAFE(queued, tmp, &queue->lists[type], queue_link) {
198      if (transfers_intersect(queued, iter->current)) {
199         args.queued = queued;
200         set_queued(queue, &args);
201         return;
202      }
203   }
204}
205
206static void perform_action(struct virgl_transfer_queue *queue,
207                           struct list_iteration_args *iter)
208{
209   struct list_action_args args;
210   struct virgl_transfer *queued, *tmp;
211   enum virgl_transfer_queue_lists type = iter->type;
212
213   memset(&args, 0, sizeof(args));
214   args.data = iter->data;
215
216   LIST_FOR_EACH_ENTRY_SAFE(queued, tmp, &queue->lists[type], queue_link) {
217      args.queued = queued;
218      iter->action(queue, &args);
219   }
220}
221
/*
 * Append a transfer to the pending list, accounting for the command
 * dwords it will need.  When using an encoded-transfer buffer and the
 * new transfer would overflow it, all pending transfers are encoded and
 * submitted first to make room.
 */
static void add_internal(struct virgl_transfer_queue *queue,
                         struct virgl_transfer *transfer)
{
   uint32_t dwords = VIRGL_TRANSFER3D_SIZE + 1;
   if (queue->tbuf) {
      if (queue->num_dwords + dwords >= VIRGL_MAX_TBUF_DWORDS) {
         struct list_iteration_args iter;
         struct virgl_winsys *vws = queue->vs->vws;

         /* Encode every pending transfer into tbuf... */
         memset(&iter, 0, sizeof(iter));
         iter.type = PENDING_LIST;
         iter.action = transfer_write;
         iter.data = queue->tbuf;
         perform_action(queue, &iter);

         /* ...and submit it, resetting the dword budget. */
         vws->submit_cmd(vws, queue->tbuf, NULL);
         queue->num_dwords = 0;
      }
   }

   list_addtail(&transfer->queue_link, &queue->lists[PENDING_LIST]);
   queue->num_dwords += dwords;
}
245
246
247void virgl_transfer_queue_init(struct virgl_transfer_queue *queue,
248                               struct virgl_screen *vs,
249                               struct slab_child_pool *pool)
250{
251   queue->vs = vs;
252   queue->pool = pool;
253   queue->num_dwords = 0;
254
255   for (uint32_t i = 0; i < MAX_LISTS; i++)
256      list_inithead(&queue->lists[i]);
257
258   if ((vs->caps.caps.v2.capability_bits & VIRGL_CAP_TRANSFER) &&
259        vs->vws->supports_encoded_transfers)
260      queue->tbuf = vs->vws->cmd_buf_create(vs->vws, VIRGL_MAX_TBUF_DWORDS);
261   else
262      queue->tbuf = NULL;
263}
264
265void virgl_transfer_queue_fini(struct virgl_transfer_queue *queue)
266{
267   struct virgl_winsys *vws = queue->vs->vws;
268   struct list_iteration_args iter;
269
270   memset(&iter, 0, sizeof(iter));
271
272   iter.action = transfer_put;
273   iter.type = PENDING_LIST;
274   perform_action(queue, &iter);
275
276   iter.action = remove_transfer;
277   iter.type = COMPLETED_LIST;
278   perform_action(queue, &iter);
279
280   if (queue->tbuf)
281      vws->cmd_buf_destroy(queue->tbuf);
282
283   queue->vs = NULL;
284   queue->pool = NULL;
285   queue->tbuf = NULL;
286   queue->num_dwords = 0;
287}
288
/*
 * Queue a transfer at unmap time.  For buffers, any already-pending
 * transfer that intersects this one is folded into it first so a single
 * upload covers both ranges.  Always returns 0.
 *
 * Takes a reference on the transfer's resource; it is released when the
 * transfer leaves the queue (see remove_transfer).
 */
int virgl_transfer_queue_unmap(struct virgl_transfer_queue *queue,
                               struct virgl_transfer *transfer)
{
   struct pipe_resource *res, *pres;
   struct list_iteration_args iter;

   /* pres is only scratch: pipe_resource_reference(&pres, res) bumps
    * the refcount held on behalf of the queued transfer. */
   pres = NULL;
   res = transfer->base.resource;
   pipe_resource_reference(&pres, res);

   if (res->target == PIPE_BUFFER) {
      memset(&iter, 0, sizeof(iter));
      iter.current = transfer;
      iter.compare = transfers_intersect;
      iter.action = replace_unmapped_transfer;
      iter.type = PENDING_LIST;
      compare_and_perform_action(queue, &iter);
   }

   add_internal(queue, transfer);
   return 0;
}
311
/*
 * Flush every pending transfer.  With an encoded-transfer buffer the
 * transfers are encoded into cbuf with cbuf->cdw temporarily rewound to
 * 0, so the transfer commands occupy the reserved space at the start of
 * the buffer ahead of the already-recorded commands; otherwise each
 * transfer goes through the winsys transfer_put path.  Completed
 * entries are then released.  Always returns 0.
 */
int virgl_transfer_queue_clear(struct virgl_transfer_queue *queue,
                               struct virgl_cmd_buf *cbuf)
{
   struct list_iteration_args iter;

   memset(&iter, 0, sizeof(iter));
   iter.type = PENDING_LIST;
   if (queue->tbuf) {
      /* Encode at the head of cbuf, then restore the write position so
       * the main command stream is untouched. */
      uint32_t prior_num_dwords = cbuf->cdw;
      cbuf->cdw = 0;

      iter.action = transfer_write;
      iter.data = cbuf;
      perform_action(queue, &iter);

      virgl_encode_end_transfers(cbuf);
      cbuf->cdw = prior_num_dwords;
   } else {
      iter.action = transfer_put;
      perform_action(queue, &iter);
   }

   /* Drop the entries transfer_write parked on the completed list
    * (a no-op on the transfer_put path, which removes them itself). */
   iter.action = remove_transfer;
   iter.type = COMPLETED_LIST;
   perform_action(queue, &iter);
   queue->num_dwords = 0;

   return 0;
}
341
342bool virgl_transfer_queue_is_queued(struct virgl_transfer_queue *queue,
343                                    struct virgl_transfer *transfer)
344{
345   bool queued = false;
346   struct list_iteration_args iter;
347
348   memset(&iter, 0, sizeof(iter));
349   iter.current = transfer;
350   iter.compare = transfers_overlap;
351   iter.action = set_true;
352   iter.data = &queued;
353
354   iter.type = PENDING_LIST;
355   compare_and_perform_action(queue, &iter);
356
357   iter.type = COMPLETED_LIST;
358   compare_and_perform_action(queue, &iter);
359
360   return queued;
361}
362
/*
 * Try to extend an already-pending buffer transfer so it also covers
 * the given transfer's range.  Returns the extended pending transfer on
 * success, or NULL when nothing suitable was found (non-buffer
 * resources are never extended).
 */
struct virgl_transfer *
virgl_transfer_queue_extend(struct virgl_transfer_queue *queue,
                            struct virgl_transfer *transfer)
{
   struct virgl_transfer *queued = NULL;
   struct list_iteration_args iter;

   if (transfer->base.resource->target == PIPE_BUFFER) {
      memset(&iter, 0, sizeof(iter));
      iter.current = transfer;
      iter.data = &queued;
      iter.type = PENDING_LIST;
      intersect_and_set_queued_once(queue, &iter);
   }

   if (queued) {
      /* Grow the pending transfer's box to cover the new range. */
      u_box_union_2d(&queued->base.box, &queued->base.box, &transfer->base.box);
      queued->offset = queued->base.box.x;
   }

   return queued;
}
385