nouveau_buffer.c revision 01e04c3f

#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_math.h"
#include "util/u_surface.h"

#include "nouveau_screen.h"
#include "nouveau_context.h"
#include "nouveau_winsys.h"
#include "nouveau_fence.h"
#include "nouveau_buffer.h"
#include "nouveau_mm.h"

struct nouveau_transfer {
   struct pipe_transfer base;

   uint8_t *map;
   struct nouveau_bo *bo;
   struct nouveau_mm_allocation *mm;
   uint32_t offset;
};

static inline struct nouveau_transfer *
nouveau_transfer(struct pipe_transfer *transfer)
{
   return (struct nouveau_transfer *)transfer;
}

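/* Lazily allocate an aligned system-memory copy of the buffer contents. */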
static inline bool
nouveau_buffer_malloc(struct nv04_resource *buf)
{
   if (!buf->data)
      buf->data = align_malloc(buf->base.width0, NOUVEAU_MIN_BUFFER_MAP_ALIGN);
   return !!buf->data;
}

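/* Allocate backing storage in the requested domain: a suballocation from the
 * screen's VRAM or GART allocator, or plain system memory if domain is 0.
 * Falls back to GART if the VRAM allocation fails.
 */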
static inline bool
nouveau_buffer_allocate(struct nouveau_screen *screen,
                        struct nv04_resource *buf, unsigned domain)
{
   uint32_t size = align(buf->base.width0, 0x100);

   if (domain == NOUVEAU_BO_VRAM) {
      buf->mm = nouveau_mm_allocate(screen->mm_VRAM, size,
                                    &buf->bo, &buf->offset);
      if (!buf->bo)
         return nouveau_buffer_allocate(screen, buf, NOUVEAU_BO_GART);
      NOUVEAU_DRV_STAT(screen, buf_obj_current_bytes_vid, buf->base.width0);
   } else
   if (domain == NOUVEAU_BO_GART) {
      buf->mm = nouveau_mm_allocate(screen->mm_GART, size,
                                    &buf->bo, &buf->offset);
      if (!buf->bo)
         return false;
      NOUVEAU_DRV_STAT(screen, buf_obj_current_bytes_sys, buf->base.width0);
   } else {
      assert(domain == 0);
      if (!nouveau_buffer_malloc(buf))
         return false;
   }
   buf->domain = domain;
   if (buf->bo)
      buf->address = buf->bo->offset + buf->offset;

   util_range_set_empty(&buf->valid_buffer_range);

   return true;
}

static inline void
release_allocation(struct nouveau_mm_allocation **mm,
                   struct nouveau_fence *fence)
{
   nouveau_fence_work(fence, nouveau_mm_free_work, *mm);
   (*mm) = NULL;
}

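/* Release the buffer's GPU storage (bo and suballocation). If the buffer's
 * fence has not been flushed yet, the bo is unreferenced from a fence
 * callback instead of immediately.
 */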
inline void
nouveau_buffer_release_gpu_storage(struct nv04_resource *buf)
{
   if (buf->fence && buf->fence->state < NOUVEAU_FENCE_STATE_FLUSHED) {
      nouveau_fence_work(buf->fence, nouveau_fence_unref_bo, buf->bo);
      buf->bo = NULL;
   } else {
      nouveau_bo_ref(NULL, &buf->bo);
   }

   if (buf->mm)
      release_allocation(&buf->mm, buf->fence);

   if (buf->domain == NOUVEAU_BO_VRAM)
      NOUVEAU_DRV_STAT_RES(buf, buf_obj_current_bytes_vid, -(uint64_t)buf->base.width0);
   if (buf->domain == NOUVEAU_BO_GART)
      NOUVEAU_DRV_STAT_RES(buf, buf_obj_current_bytes_sys, -(uint64_t)buf->base.width0);

   buf->domain = 0;
}

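/* Drop the current backing storage and fences and allocate fresh storage in
 * the given domain. Only the status bits in REALLOC_MASK survive.
 */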
static inline bool
nouveau_buffer_reallocate(struct nouveau_screen *screen,
                          struct nv04_resource *buf, unsigned domain)
{
   nouveau_buffer_release_gpu_storage(buf);

   nouveau_fence_ref(NULL, &buf->fence);
   nouveau_fence_ref(NULL, &buf->fence_wr);

   buf->status &= NOUVEAU_BUFFER_STATUS_REALLOC_MASK;

   return nouveau_buffer_allocate(screen, buf, domain);
}

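/* pipe_screen::resource_destroy for buffers. */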
static void
nouveau_buffer_destroy(struct pipe_screen *pscreen,
                       struct pipe_resource *presource)
{
   struct nv04_resource *res = nv04_resource(presource);

   nouveau_buffer_release_gpu_storage(res);

   if (res->data && !(res->status & NOUVEAU_BUFFER_STATUS_USER_MEMORY))
      align_free(res->data);

   nouveau_fence_ref(NULL, &res->fence);
   nouveau_fence_ref(NULL, &res->fence_wr);

   util_range_destroy(&res->valid_buffer_range);

   FREE(res);

   NOUVEAU_DRV_STAT(nouveau_screen(pscreen), buf_obj_current_count, -1);
}

/* Set up a staging area for the transfer. This is either done in "regular"
 * system memory if the driver supports push_data (nv50+) and the data is
 * small enough (and permit_pb == true), or in GART memory.
 */
static uint8_t *
nouveau_transfer_staging(struct nouveau_context *nv,
                         struct nouveau_transfer *tx, bool permit_pb)
{
   const unsigned adj = tx->base.box.x & NOUVEAU_MIN_BUFFER_MAP_ALIGN_MASK;
   const unsigned size = align(tx->base.box.width, 4) + adj;

   if (!nv->push_data)
      permit_pb = false;

   if ((size <= nv->screen->transfer_pushbuf_threshold) && permit_pb) {
      tx->map = align_malloc(size, NOUVEAU_MIN_BUFFER_MAP_ALIGN);
      if (tx->map)
         tx->map += adj;
   } else {
      tx->mm =
         nouveau_mm_allocate(nv->screen->mm_GART, size, &tx->bo, &tx->offset);
      if (tx->bo) {
         tx->offset += adj;
         if (!nouveau_bo_map(tx->bo, 0, NULL))
            tx->map = (uint8_t *)tx->bo->map + tx->offset;
      }
   }
   return tx->map;
}

/* Copies data from the resource into the transfer's temporary GART
 * buffer. Also updates buf->data if present.
 *
 * Maybe just migrate to GART right away if we actually need to do this. */
static bool
nouveau_transfer_read(struct nouveau_context *nv, struct nouveau_transfer *tx)
{
   struct nv04_resource *buf = nv04_resource(tx->base.resource);
   const unsigned base = tx->base.box.x;
   const unsigned size = tx->base.box.width;

   NOUVEAU_DRV_STAT(nv->screen, buf_read_bytes_staging_vid, size);

   nv->copy_data(nv, tx->bo, tx->offset, NOUVEAU_BO_GART,
                 buf->bo, buf->offset + base, buf->domain, size);

   if (nouveau_bo_wait(tx->bo, NOUVEAU_BO_RD, nv->client))
      return false;

   if (buf->data)
      memcpy(buf->data + base, tx->map, size);

   return true;
}

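/* Upload [offset, offset + size) of the transfer back into the resource.
 * If a system-memory shadow (buf->data) exists it holds the user's writes and
 * is staged from there; otherwise the buffer is marked dirty. The upload is
 * done with a GPU copy from the staging bo, or through the pushbuf (push_cb
 * for dword-aligned updates, push_data otherwise). Fences are refreshed.
 */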
static void
nouveau_transfer_write(struct nouveau_context *nv, struct nouveau_transfer *tx,
                       unsigned offset, unsigned size)
{
   struct nv04_resource *buf = nv04_resource(tx->base.resource);
   uint8_t *data = tx->map + offset;
   const unsigned base = tx->base.box.x + offset;
   const bool can_cb = !((base | size) & 3);

   if (buf->data)
      memcpy(data, buf->data + base, size);
   else
      buf->status |= NOUVEAU_BUFFER_STATUS_DIRTY;

   if (buf->domain == NOUVEAU_BO_VRAM)
      NOUVEAU_DRV_STAT(nv->screen, buf_write_bytes_staging_vid, size);
   if (buf->domain == NOUVEAU_BO_GART)
      NOUVEAU_DRV_STAT(nv->screen, buf_write_bytes_staging_sys, size);

   if (tx->bo)
      nv->copy_data(nv, buf->bo, buf->offset + base, buf->domain,
                    tx->bo, tx->offset + offset, NOUVEAU_BO_GART, size);
   else
   if (nv->push_cb && can_cb)
      nv->push_cb(nv, buf,
                  base, size / 4, (const uint32_t *)data);
   else
      nv->push_data(nv, buf->bo, buf->offset + base, buf->domain, size, data);

   nouveau_fence_ref(nv->screen->fence.current, &buf->fence);
   nouveau_fence_ref(nv->screen->fence.current, &buf->fence_wr);
}

/* Does a CPU wait for the buffer's backing data to become reliably accessible
 * for write/read by waiting on the buffer's relevant fences.
 */
static inline bool
nouveau_buffer_sync(struct nouveau_context *nv,
                    struct nv04_resource *buf, unsigned rw)
{
   if (rw == PIPE_TRANSFER_READ) {
      if (!buf->fence_wr)
         return true;
      NOUVEAU_DRV_STAT_RES(buf, buf_non_kernel_fence_sync_count,
                           !nouveau_fence_signalled(buf->fence_wr));
      if (!nouveau_fence_wait(buf->fence_wr, &nv->debug))
         return false;
   } else {
      if (!buf->fence)
         return true;
      NOUVEAU_DRV_STAT_RES(buf, buf_non_kernel_fence_sync_count,
                           !nouveau_fence_signalled(buf->fence));
      if (!nouveau_fence_wait(buf->fence, &nv->debug))
         return false;

      nouveau_fence_ref(NULL, &buf->fence);
   }
   nouveau_fence_ref(NULL, &buf->fence_wr);

   return true;
}

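/* Check whether the GPU may still be accessing the buffer: for READ we only
 * care about pending writes, for WRITE about any pending access.
 */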
static inline bool
nouveau_buffer_busy(struct nv04_resource *buf, unsigned rw)
{
   if (rw == PIPE_TRANSFER_READ)
      return (buf->fence_wr && !nouveau_fence_signalled(buf->fence_wr));
   else
      return (buf->fence && !nouveau_fence_signalled(buf->fence));
}

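/* Fill in the pipe_transfer fields for a 1D buffer transfer. */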
static inline void
nouveau_buffer_transfer_init(struct nouveau_transfer *tx,
                             struct pipe_resource *resource,
                             const struct pipe_box *box,
                             unsigned usage)
{
   tx->base.resource = resource;
   tx->base.level = 0;
   tx->base.usage = usage;
   tx->base.box.x = box->x;
   tx->base.box.y = 0;
   tx->base.box.z = 0;
   tx->base.box.width = box->width;
   tx->base.box.height = 1;
   tx->base.box.depth = 1;
   tx->base.stride = 0;
   tx->base.layer_stride = 0;

   tx->bo = NULL;
   tx->map = NULL;
}

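/* Release the transfer's staging resources. A staging bo is unreferenced from
 * a fence callback (the GPU may still be using it); a malloc'd staging area
 * is freed immediately.
 */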
static inline void
nouveau_buffer_transfer_del(struct nouveau_context *nv,
                            struct nouveau_transfer *tx)
{
   if (tx->map) {
      if (likely(tx->bo)) {
         nouveau_fence_work(nv->screen->fence.current,
                            nouveau_fence_unref_bo, tx->bo);
         if (tx->mm)
            release_allocation(&tx->mm, nv->screen->fence.current);
      } else {
         align_free(tx->map -
                    (tx->base.box.x & NOUVEAU_MIN_BUFFER_MAP_ALIGN_MASK));
      }
   }
}

/* Creates a cache in system memory of the buffer data. */
static bool
nouveau_buffer_cache(struct nouveau_context *nv, struct nv04_resource *buf)
{
   struct nouveau_transfer tx;
   bool ret;
   tx.base.resource = &buf->base;
   tx.base.box.x = 0;
   tx.base.box.width = buf->base.width0;
   tx.bo = NULL;
   tx.map = NULL;

   if (!buf->data)
      if (!nouveau_buffer_malloc(buf))
         return false;
   if (!(buf->status & NOUVEAU_BUFFER_STATUS_DIRTY))
      return true;
   nv->stats.buf_cache_count++;

   if (!nouveau_transfer_staging(nv, &tx, false))
      return false;

   ret = nouveau_transfer_read(nv, &tx);
   if (ret) {
      buf->status &= ~NOUVEAU_BUFFER_STATUS_DIRTY;
      memcpy(buf->data, tx.map, buf->base.width0);
   }
   nouveau_buffer_transfer_del(nv, &tx);
   return ret;
}


#define NOUVEAU_TRANSFER_DISCARD \
   (PIPE_TRANSFER_DISCARD_RANGE | PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE)

/* Checks whether it is possible to completely discard the memory backing this
 * resource. This can be useful if we would otherwise have to wait for a read
 * operation to complete on this data.
 */
static inline bool
nouveau_buffer_should_discard(struct nv04_resource *buf, unsigned usage)
{
   if (!(usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE))
      return false;
   if (unlikely(buf->base.bind & PIPE_BIND_SHARED))
      return false;
   if (unlikely(usage & PIPE_TRANSFER_PERSISTENT))
      return false;
   return buf->mm && nouveau_buffer_busy(buf, PIPE_TRANSFER_WRITE);
}

/* Returns a pointer to a memory area representing a window into the
 * resource's data.
 *
 * This may or may not be the _actual_ memory area of the resource. However
 * when calling nouveau_buffer_transfer_unmap, if it wasn't the actual memory
 * area, the contents of the returned map are copied over to the resource.
 *
 * The usage indicates what the caller plans to do with the map:
 *
 *   WRITE means that the user plans to write to it
 *
 *   READ means that the user plans on reading from it
 *
 *   DISCARD_WHOLE_RESOURCE means that the whole resource may be overwritten,
 *   and even if it isn't, the parts that are not overwritten need not be
 *   preserved.
 *
 *   DISCARD_RANGE means that all the data in the specified range is going to
 *   be overwritten.
 *
 * The strategy for determining what kind of memory area to return is complex,
 * see comments inside of the function.
 */
static void *
nouveau_buffer_transfer_map(struct pipe_context *pipe,
                            struct pipe_resource *resource,
                            unsigned level, unsigned usage,
                            const struct pipe_box *box,
                            struct pipe_transfer **ptransfer)
{
   struct nouveau_context *nv = nouveau_context(pipe);
   struct nv04_resource *buf = nv04_resource(resource);
   struct nouveau_transfer *tx = MALLOC_STRUCT(nouveau_transfer);
   uint8_t *map;
   int ret;

   if (!tx)
      return NULL;
   nouveau_buffer_transfer_init(tx, resource, box, usage);
   *ptransfer = &tx->base;

   if (usage & PIPE_TRANSFER_READ)
      NOUVEAU_DRV_STAT(nv->screen, buf_transfers_rd, 1);
   if (usage & PIPE_TRANSFER_WRITE)
      NOUVEAU_DRV_STAT(nv->screen, buf_transfers_wr, 1);

   /* If we are trying to write to an uninitialized range, the user shouldn't
    * care what was there before. So we can treat the write as if the target
    * range were being discarded. Furthermore, even if this buffer is busy due
    * to GPU activity, the GPU cannot care about uninitialized contents, so we
    * can also treat the write as unsynchronized.
    */
   if ((usage & PIPE_TRANSFER_WRITE) &&
       !util_ranges_intersect(&buf->valid_buffer_range, box->x, box->x + box->width))
      usage |= PIPE_TRANSFER_DISCARD_RANGE | PIPE_TRANSFER_UNSYNCHRONIZED;

   if (buf->domain == NOUVEAU_BO_VRAM) {
      if (usage & NOUVEAU_TRANSFER_DISCARD) {
         /* Set up a staging area for the user to write to. It will be copied
          * back into VRAM on unmap. */
         if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE)
            buf->status &= NOUVEAU_BUFFER_STATUS_REALLOC_MASK;
         nouveau_transfer_staging(nv, tx, true);
      } else {
         if (buf->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING) {
            /* The GPU is currently writing to this buffer. Copy its current
             * contents to a staging area in the GART. This is necessary since
             * not the whole area being mapped is being discarded.
             */
            if (buf->data) {
               align_free(buf->data);
               buf->data = NULL;
            }
            nouveau_transfer_staging(nv, tx, false);
            nouveau_transfer_read(nv, tx);
         } else {
            /* The buffer is currently idle. Create a staging area for writes,
             * and make sure that the cached data is up-to-date. */
            if (usage & PIPE_TRANSFER_WRITE)
               nouveau_transfer_staging(nv, tx, true);
            if (!buf->data)
               nouveau_buffer_cache(nv, buf);
         }
      }
      return buf->data ? (buf->data + box->x) : tx->map;
   } else
   if (unlikely(buf->domain == 0)) {
      return buf->data + box->x;
   }

   /* At this point, buf->domain == GART */

   if (nouveau_buffer_should_discard(buf, usage)) {
      int ref = buf->base.reference.count - 1;
      nouveau_buffer_reallocate(nv->screen, buf, buf->domain);
      if (ref > 0) /* any references inside context possible ? */
         nv->invalidate_resource_storage(nv, &buf->base, ref);
   }

   /* Note that nouveau_bo_map ends up doing a nouveau_bo_wait with the
    * relevant flags. If buf->mm is set, that means this resource is part of a
    * larger slab bo that holds multiple resources. So in that case, don't
    * wait on the whole slab and instead use the logic below to return a
    * reasonable buffer for that case.
    */
   ret = nouveau_bo_map(buf->bo,
                        buf->mm ? 0 : nouveau_screen_transfer_flags(usage),
                        nv->client);
   if (ret) {
      FREE(tx);
      return NULL;
   }
   map = (uint8_t *)buf->bo->map + buf->offset + box->x;

   /* using kernel fences only if !buf->mm */
   if ((usage & PIPE_TRANSFER_UNSYNCHRONIZED) || !buf->mm)
      return map;

   /* If the GPU is currently reading/writing this buffer, we shouldn't
    * interfere with its progress. So instead we either wait for the GPU to
    * complete its operation, or set up a staging area to perform our work in.
    */
   if (nouveau_buffer_busy(buf, usage & PIPE_TRANSFER_READ_WRITE)) {
      if (unlikely(usage & (PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE |
                            PIPE_TRANSFER_PERSISTENT))) {
         /* Discarding was not possible, must sync because
          * subsequent transfers might use UNSYNCHRONIZED. */
         nouveau_buffer_sync(nv, buf, usage & PIPE_TRANSFER_READ_WRITE);
      } else
      if (usage & PIPE_TRANSFER_DISCARD_RANGE) {
         /* The whole range is being discarded, so it doesn't matter what was
          * there before. No need to copy anything over. */
         nouveau_transfer_staging(nv, tx, true);
         map = tx->map;
      } else
      if (nouveau_buffer_busy(buf, PIPE_TRANSFER_READ)) {
         if (usage & PIPE_TRANSFER_DONTBLOCK)
            map = NULL;
         else
            nouveau_buffer_sync(nv, buf, usage & PIPE_TRANSFER_READ_WRITE);
      } else {
         /* It is expected that the returned buffer be a representation of the
          * data in question, so we must copy it over from the buffer. */
         nouveau_transfer_staging(nv, tx, true);
         if (tx->map)
            memcpy(tx->map, map, box->width);
         map = tx->map;
      }
   }
   if (!map)
      FREE(tx);
   return map;
}



static void
nouveau_buffer_transfer_flush_region(struct pipe_context *pipe,
                                     struct pipe_transfer *transfer,
                                     const struct pipe_box *box)
{
   struct nouveau_transfer *tx = nouveau_transfer(transfer);
   struct nv04_resource *buf = nv04_resource(transfer->resource);

   if (tx->map)
      nouveau_transfer_write(nouveau_context(pipe), tx, box->x, box->width);

   util_range_add(&buf->valid_buffer_range,
                  tx->base.box.x + box->x,
                  tx->base.box.x + box->x + box->width);
}

/* Unmap stage of the transfer. If it was a WRITE transfer and the map that
 * was returned was not the real resource's data, this needs to transfer the
 * data back to the resource.
 *
 * Also marks vbo dirty based on the buffer's binding
 */
static void
nouveau_buffer_transfer_unmap(struct pipe_context *pipe,
                              struct pipe_transfer *transfer)
{
   struct nouveau_context *nv = nouveau_context(pipe);
   struct nouveau_transfer *tx = nouveau_transfer(transfer);
   struct nv04_resource *buf = nv04_resource(transfer->resource);

   if (tx->base.usage & PIPE_TRANSFER_WRITE) {
      if (!(tx->base.usage & PIPE_TRANSFER_FLUSH_EXPLICIT)) {
         if (tx->map)
            nouveau_transfer_write(nv, tx, 0, tx->base.box.width);

         util_range_add(&buf->valid_buffer_range,
                        tx->base.box.x, tx->base.box.x + tx->base.box.width);
      }

      if (likely(buf->domain)) {
         const uint8_t bind = buf->base.bind;
         /* make sure we invalidate dedicated caches */
         if (bind & (PIPE_BIND_VERTEX_BUFFER | PIPE_BIND_INDEX_BUFFER))
            nv->vbo_dirty = true;
      }
   }

   if (!tx->bo && (tx->base.usage & PIPE_TRANSFER_WRITE))
      NOUVEAU_DRV_STAT(nv->screen, buf_write_bytes_direct, tx->base.box.width);

   nouveau_buffer_transfer_del(nv, tx);
   FREE(tx);
}


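/* Copy size bytes from src at srcx to dst at dstx. Uses the GPU copy path
 * when both buffers have GPU storage, otherwise falls back to a CPU copy via
 * util_resource_copy_region.
 */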
void
nouveau_copy_buffer(struct nouveau_context *nv,
                    struct nv04_resource *dst, unsigned dstx,
                    struct nv04_resource *src, unsigned srcx, unsigned size)
{
   assert(dst->base.target == PIPE_BUFFER && src->base.target == PIPE_BUFFER);

   if (likely(dst->domain) && likely(src->domain)) {
      nv->copy_data(nv,
                    dst->bo, dst->offset + dstx, dst->domain,
                    src->bo, src->offset + srcx, src->domain, size);

      dst->status |= NOUVEAU_BUFFER_STATUS_GPU_WRITING;
      nouveau_fence_ref(nv->screen->fence.current, &dst->fence);
      nouveau_fence_ref(nv->screen->fence.current, &dst->fence_wr);

      src->status |= NOUVEAU_BUFFER_STATUS_GPU_READING;
      nouveau_fence_ref(nv->screen->fence.current, &src->fence);
   } else {
      struct pipe_box src_box;
      src_box.x = srcx;
      src_box.y = 0;
      src_box.z = 0;
      src_box.width = size;
      src_box.height = 1;
      src_box.depth = 1;
      util_resource_copy_region(&nv->pipe,
                                &dst->base, 0, dstx, 0, 0,
                                &src->base, 0, &src_box);
   }

   util_range_add(&dst->valid_buffer_range, dstx, dstx + size);
}


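/* Return a CPU pointer to the resource's data at the given offset, refreshing
 * the system-memory cache for VRAM resources and mapping the bo for GART
 * resources.
 */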
void *
nouveau_resource_map_offset(struct nouveau_context *nv,
                            struct nv04_resource *res, uint32_t offset,
                            uint32_t flags)
{
   if (unlikely(res->status & NOUVEAU_BUFFER_STATUS_USER_MEMORY))
      return res->data + offset;

   if (res->domain == NOUVEAU_BO_VRAM) {
      if (!res->data || (res->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING))
         nouveau_buffer_cache(nv, res);
   }
   if (res->domain != NOUVEAU_BO_GART)
      return res->data + offset;

   if (res->mm) {
      unsigned rw;
      rw = (flags & NOUVEAU_BO_WR) ? PIPE_TRANSFER_WRITE : PIPE_TRANSFER_READ;
      nouveau_buffer_sync(nv, res, rw);
      if (nouveau_bo_map(res->bo, 0, NULL))
         return NULL;
   } else {
      if (nouveau_bo_map(res->bo, flags, nv->client))
         return NULL;
   }
   return (uint8_t *)res->bo->map + res->offset + offset;
}


const struct u_resource_vtbl nouveau_buffer_vtbl =
{
   u_default_resource_get_handle,        /* get_handle */
   nouveau_buffer_destroy,               /* resource_destroy */
   nouveau_buffer_transfer_map,          /* transfer_map */
   nouveau_buffer_transfer_flush_region, /* transfer_flush_region */
   nouveau_buffer_transfer_unmap,        /* transfer_unmap */
};

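/* pipe_screen::resource_create for buffers. Chooses VRAM or GART placement
 * based on the resource's flags, bind and usage, then allocates the storage.
 */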
struct pipe_resource *
nouveau_buffer_create(struct pipe_screen *pscreen,
                      const struct pipe_resource *templ)
{
   struct nouveau_screen *screen = nouveau_screen(pscreen);
   struct nv04_resource *buffer;
   bool ret;

   buffer = CALLOC_STRUCT(nv04_resource);
   if (!buffer)
      return NULL;

   buffer->base = *templ;
   buffer->vtbl = &nouveau_buffer_vtbl;
   pipe_reference_init(&buffer->base.reference, 1);
   buffer->base.screen = pscreen;

   if (buffer->base.flags & (PIPE_RESOURCE_FLAG_MAP_PERSISTENT |
                             PIPE_RESOURCE_FLAG_MAP_COHERENT)) {
      buffer->domain = NOUVEAU_BO_GART;
   } else if (buffer->base.bind == 0 || (buffer->base.bind &
              (screen->vidmem_bindings & screen->sysmem_bindings))) {
      switch (buffer->base.usage) {
      case PIPE_USAGE_DEFAULT:
      case PIPE_USAGE_IMMUTABLE:
         buffer->domain = NV_VRAM_DOMAIN(screen);
         break;
      case PIPE_USAGE_DYNAMIC:
         /* For most apps, we'd have to do staging transfers to avoid sync
          * with this usage, and GART -> GART copies would be suboptimal.
          */
         buffer->domain = NV_VRAM_DOMAIN(screen);
         break;
      case PIPE_USAGE_STAGING:
      case PIPE_USAGE_STREAM:
         buffer->domain = NOUVEAU_BO_GART;
         break;
      default:
         assert(0);
         break;
      }
   } else {
      if (buffer->base.bind & screen->vidmem_bindings)
         buffer->domain = NV_VRAM_DOMAIN(screen);
      else
      if (buffer->base.bind & screen->sysmem_bindings)
         buffer->domain = NOUVEAU_BO_GART;
   }

   ret = nouveau_buffer_allocate(screen, buffer, buffer->domain);

   if (ret == false)
      goto fail;

   if (buffer->domain == NOUVEAU_BO_VRAM && screen->hint_buf_keep_sysmem_copy)
      nouveau_buffer_cache(NULL, buffer);

   NOUVEAU_DRV_STAT(screen, buf_obj_current_count, 1);

   util_range_init(&buffer->valid_buffer_range);

   return &buffer->base;

fail:
   FREE(buffer);
   return NULL;
}


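/* Wrap an existing user memory pointer in a pipe_resource. No GPU storage is
 * allocated; the data stays in user memory until it is migrated or uploaded.
 */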
struct pipe_resource *
nouveau_user_buffer_create(struct pipe_screen *pscreen, void *ptr,
                           unsigned bytes, unsigned bind)
{
   struct nv04_resource *buffer;

   buffer = CALLOC_STRUCT(nv04_resource);
   if (!buffer)
      return NULL;

   pipe_reference_init(&buffer->base.reference, 1);
   buffer->vtbl = &nouveau_buffer_vtbl;
   buffer->base.screen = pscreen;
   buffer->base.format = PIPE_FORMAT_R8_UNORM;
   buffer->base.usage = PIPE_USAGE_IMMUTABLE;
   buffer->base.bind = bind;
   buffer->base.width0 = bytes;
   buffer->base.height0 = 1;
   buffer->base.depth0 = 1;

   buffer->data = ptr;
   buffer->status = NOUVEAU_BUFFER_STATUS_USER_MEMORY;

   util_range_init(&buffer->valid_buffer_range);
   util_range_add(&buffer->valid_buffer_range, 0, bytes);

   return &buffer->base;
}

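/* Read size bytes from the given bo into the buffer's system-memory copy,
 * allocating that copy if necessary.
 */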
static inline bool
nouveau_buffer_data_fetch(struct nouveau_context *nv, struct nv04_resource *buf,
                          struct nouveau_bo *bo, unsigned offset, unsigned size)
{
   if (!nouveau_buffer_malloc(buf))
      return false;
   if (nouveau_bo_map(bo, NOUVEAU_BO_RD, nv->client))
      return false;
   memcpy(buf->data, (uint8_t *)bo->map + offset, size);
   return true;
}

/* Migrate a linear buffer (vertex, index, constants) USER -> GART -> VRAM. */
bool
nouveau_buffer_migrate(struct nouveau_context *nv,
                       struct nv04_resource *buf, const unsigned new_domain)
{
   struct nouveau_screen *screen = nv->screen;
   struct nouveau_bo *bo;
   const unsigned old_domain = buf->domain;
   unsigned size = buf->base.width0;
   unsigned offset;
   int ret;

   assert(new_domain != old_domain);

   if (new_domain == NOUVEAU_BO_GART && old_domain == 0) {
      if (!nouveau_buffer_allocate(screen, buf, new_domain))
         return false;
      ret = nouveau_bo_map(buf->bo, 0, nv->client);
      if (ret)
         return false;
      memcpy((uint8_t *)buf->bo->map + buf->offset, buf->data, size);
      align_free(buf->data);
   } else
   if (old_domain != 0 && new_domain != 0) {
      struct nouveau_mm_allocation *mm = buf->mm;

      if (new_domain == NOUVEAU_BO_VRAM) {
         /* keep a system memory copy of our data in case we hit a fallback */
         if (!nouveau_buffer_data_fetch(nv, buf, buf->bo, buf->offset, size))
            return false;
         if (nouveau_mesa_debug)
            debug_printf("migrating %u KiB to VRAM\n", size / 1024);
      }

      offset = buf->offset;
      bo = buf->bo;
      buf->bo = NULL;
      buf->mm = NULL;
      nouveau_buffer_allocate(screen, buf, new_domain);

      nv->copy_data(nv, buf->bo, buf->offset, new_domain,
                    bo, offset, old_domain, buf->base.width0);

      nouveau_fence_work(screen->fence.current, nouveau_fence_unref_bo, bo);
      if (mm)
         release_allocation(&mm, screen->fence.current);
   } else
   if (new_domain == NOUVEAU_BO_VRAM && old_domain == 0) {
      struct nouveau_transfer tx;
      if (!nouveau_buffer_allocate(screen, buf, NOUVEAU_BO_VRAM))
         return false;
      tx.base.resource = &buf->base;
      tx.base.box.x = 0;
      tx.base.box.width = buf->base.width0;
      tx.bo = NULL;
      tx.map = NULL;
      if (!nouveau_transfer_staging(nv, &tx, false))
         return false;
      nouveau_transfer_write(nv, &tx, 0, tx.base.box.width);
      nouveau_buffer_transfer_del(nv, &tx);
   } else
      return false;

   assert(buf->domain == new_domain);
   return true;
}

/* Migrate data from glVertexAttribPointer(non-VBO) user buffers to GART.
 * We'd like to only allocate @size bytes here, but then we'd have to rebase
 * the vertex indices ...
 */
bool
nouveau_user_buffer_upload(struct nouveau_context *nv,
                           struct nv04_resource *buf,
                           unsigned base, unsigned size)
{
   struct nouveau_screen *screen = nouveau_screen(buf->base.screen);
   int ret;

   assert(buf->status & NOUVEAU_BUFFER_STATUS_USER_MEMORY);

   buf->base.width0 = base + size;
   if (!nouveau_buffer_reallocate(screen, buf, NOUVEAU_BO_GART))
      return false;

   ret = nouveau_bo_map(buf->bo, 0, nv->client);
   if (ret)
      return false;
   memcpy((uint8_t *)buf->bo->map + buf->offset + base, buf->data + base, size);

   return true;
}

/* Invalidate underlying buffer storage, reset fences, reallocate to non-busy
 * buffer.
 */
void
nouveau_buffer_invalidate(struct pipe_context *pipe,
                          struct pipe_resource *resource)
{
   struct nouveau_context *nv = nouveau_context(pipe);
   struct nv04_resource *buf = nv04_resource(resource);
   int ref = buf->base.reference.count - 1;

   /* Shared buffers shouldn't get reallocated */
   if (unlikely(buf->base.bind & PIPE_BIND_SHARED))
      return;

   /* We can't touch persistent/coherent buffers */
   if (buf->base.flags & (PIPE_RESOURCE_FLAG_MAP_PERSISTENT |
                          PIPE_RESOURCE_FLAG_MAP_COHERENT))
      return;

   /* If the buffer is sub-allocated and not currently being written, just
    * wipe the valid buffer range. Otherwise we have to create fresh
    * storage. (We don't keep track of fences for non-sub-allocated BO's.)
    */
   if (buf->mm && !nouveau_buffer_busy(buf, PIPE_TRANSFER_WRITE)) {
      util_range_set_empty(&buf->valid_buffer_range);
   } else {
      nouveau_buffer_reallocate(nv->screen, buf, buf->domain);
      if (ref > 0) /* any references inside context possible ? */
         nv->invalidate_resource_storage(nv, &buf->base, ref);
   }
}


/* Scratch data allocation. */

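/* Allocate a new mappable GART bo for use as scratch space. */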
static inline int
nouveau_scratch_bo_alloc(struct nouveau_context *nv, struct nouveau_bo **pbo,
                         unsigned size)
{
   return nouveau_bo_new(nv->screen->device, NOUVEAU_BO_GART | NOUVEAU_BO_MAP,
                         4096, size, NULL, pbo);
}

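/* Fence work callback: drop the references held by a scratch runout list. */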
static void
nouveau_scratch_unref_bos(void *d)
{
   struct runout *b = d;
   int i;

   for (i = 0; i < b->nr; ++i)
      nouveau_bo_ref(NULL, &b->bo[i]);

   FREE(b);
}

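/* Schedule release of the runout bos once the current fence has completed. */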
void
nouveau_scratch_runout_release(struct nouveau_context *nv)
{
   if (!nv->scratch.runout)
      return;

   if (!nouveau_fence_work(nv->screen->fence.current, nouveau_scratch_unref_bos,
         nv->scratch.runout))
      return;

   nv->scratch.end = 0;
   nv->scratch.runout = NULL;
}

/* Allocate an extra bo if we can't fit everything we need simultaneously.
 * (Could happen for very large user arrays.)
 */
static inline bool
nouveau_scratch_runout(struct nouveau_context *nv, unsigned size)
{
   int ret;
   unsigned n;

   if (nv->scratch.runout)
      n = nv->scratch.runout->nr;
   else
      n = 0;
   nv->scratch.runout = REALLOC(nv->scratch.runout, n == 0 ? 0 :
                                (sizeof(*nv->scratch.runout) + (n + 0) * sizeof(void *)),
                                 sizeof(*nv->scratch.runout) + (n + 1) * sizeof(void *));
   nv->scratch.runout->nr = n + 1;
   nv->scratch.runout->bo[n] = NULL;

   ret = nouveau_scratch_bo_alloc(nv, &nv->scratch.runout->bo[n], size);
   if (!ret) {
      ret = nouveau_bo_map(nv->scratch.runout->bo[n], 0, NULL);
      if (ret)
         nouveau_bo_ref(NULL, &nv->scratch.runout->bo[--nv->scratch.runout->nr]);
   }
   if (!ret) {
      nv->scratch.current = nv->scratch.runout->bo[n];
      nv->scratch.offset = 0;
      nv->scratch.end = size;
      nv->scratch.map = nv->scratch.current->map;
   }
   return !ret;
}

/* Continue to next scratch buffer, if available (no wrapping, large enough).
 * Allocate it if it has not yet been created.
 */
static inline bool
nouveau_scratch_next(struct nouveau_context *nv, unsigned size)
{
   struct nouveau_bo *bo;
   int ret;
   const unsigned i = (nv->scratch.id + 1) % NOUVEAU_MAX_SCRATCH_BUFS;

   if ((size > nv->scratch.bo_size) || (i == nv->scratch.wrap))
      return false;
   nv->scratch.id = i;

   bo = nv->scratch.bo[i];
   if (!bo) {
      ret = nouveau_scratch_bo_alloc(nv, &bo, nv->scratch.bo_size);
      if (ret)
         return false;
      nv->scratch.bo[i] = bo;
   }
   nv->scratch.current = bo;
   nv->scratch.offset = 0;
   nv->scratch.end = nv->scratch.bo_size;

   ret = nouveau_bo_map(bo, NOUVEAU_BO_WR, nv->client);
   if (!ret)
      nv->scratch.map = bo->map;
   return !ret;
}

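/* Get more scratch space: try the next pre-allocated scratch buffer first,
 * and fall back to allocating a dedicated runout bo.
 */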
static bool
nouveau_scratch_more(struct nouveau_context *nv, unsigned min_size)
{
   bool ret;

   ret = nouveau_scratch_next(nv, min_size);
   if (!ret)
      ret = nouveau_scratch_runout(nv, min_size);
   return ret;
}


/* Copy data to a scratch buffer and return address & bo the data resides in. */
uint64_t
nouveau_scratch_data(struct nouveau_context *nv,
                     const void *data, unsigned base, unsigned size,
                     struct nouveau_bo **bo)
{
   unsigned bgn = MAX2(base, nv->scratch.offset);
   unsigned end = bgn + size;

   if (end >= nv->scratch.end) {
      end = base + size;
      if (!nouveau_scratch_more(nv, end))
         return 0;
      bgn = base;
   }
   nv->scratch.offset = align(end, 4);

   memcpy(nv->scratch.map + bgn, (const uint8_t *)data + base, size);

   *bo = nv->scratch.current;
   return (*bo)->offset + (bgn - base);
}

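/* Reserve size bytes of scratch space without copying anything into it.
 * Returns a CPU pointer and reports the GPU address and bo to the caller.
 */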
void *
nouveau_scratch_get(struct nouveau_context *nv,
                    unsigned size, uint64_t *gpu_addr, struct nouveau_bo **pbo)
{
   unsigned bgn = nv->scratch.offset;
   unsigned end = nv->scratch.offset + size;

   if (end >= nv->scratch.end) {
      end = size;
      if (!nouveau_scratch_more(nv, end))
         return NULL;
      bgn = 0;
   }
   nv->scratch.offset = align(end, 4);

   *pbo = nv->scratch.current;
   *gpu_addr = nv->scratch.current->offset + bgn;
   return nv->scratch.map + bgn;
}
