#ifndef __NOUVEAU_BUFFER_H__
#define __NOUVEAU_BUFFER_H__

#include "util/u_range.h"
#include "util/u_transfer.h"
#include "util/list.h"

struct pipe_resource;
struct nouveau_context;
struct nouveau_bo;

/* DIRTY: buffer was (or will be after the next flush) written to by GPU and
 *  resource->data has not been updated to reflect modified VRAM contents
 *
 * USER_MEMORY: resource->data is a pointer to client memory and may change
 *  between GL calls
 */
#define NOUVEAU_BUFFER_STATUS_GPU_READING (1 << 0)
#define NOUVEAU_BUFFER_STATUS_GPU_WRITING (1 << 1)
#define NOUVEAU_BUFFER_STATUS_DIRTY       (1 << 2)
#define NOUVEAU_BUFFER_STATUS_USER_MEMORY (1 << 7)

#define NOUVEAU_BUFFER_STATUS_REALLOC_MASK NOUVEAU_BUFFER_STATUS_USER_MEMORY
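
/* Illustrative sketch (not code from the driver's actual paths): how the
 * status bits might be tested and updated around a GPU write. "res" is
 * assumed to be a valid struct nv04_resource.
 *
 *    if (res->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING) {
 *       // a previously submitted GPU write may still be pending;
 *       // wait on res->fence_wr before touching the data on the CPU
 *    }
 *    res->status |= NOUVEAU_BUFFER_STATUS_GPU_WRITING |
 *                   NOUVEAU_BUFFER_STATUS_DIRTY;
 */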

/* Resources, if mapped into the GPU's address space, are guaranteed to
 * have constant virtual addresses (nv50+).
 *
 * The address of a resource will lie within the nouveau_bo referenced,
 * and this bo should be added to the memory manager's validation list.
 */
struct nv04_resource {
   struct pipe_resource base;
   const struct u_resource_vtbl *vtbl;

   uint64_t address; /* virtual address (nv50+) */

   uint8_t *data; /* resource's contents, if domain == 0, or cached */
   struct nouveau_bo *bo;
   uint32_t offset; /* offset into the data/bo */

   uint8_t status;
   uint8_t domain;

   uint16_t cb_bindings[6]; /* per-shader per-slot bindings */

   struct nouveau_fence *fence;
   struct nouveau_fence *fence_wr;

   struct nouveau_mm_allocation *mm;

   /* buffer range that has been initialized */
   struct util_range valid_buffer_range;
};
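
/* Illustrative sketch (an assumption, not code from this driver): before
 * emitting commands that reference res->address, the backing bo is added to
 * the pushbuf's validation list. nouveau_pushbuf_refn() and the NOUVEAU_BO_*
 * flags come from libdrm's nouveau.h; "push" is a hypothetical
 * struct nouveau_pushbuf.
 *
 *    struct nouveau_pushbuf_refn ref = {
 *       res->bo, res->domain | NOUVEAU_BO_RD
 *    };
 *    nouveau_pushbuf_refn(push, &ref, 1);
 *    // res->address + res->offset may now be used in the command stream
 */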

void
nouveau_buffer_release_gpu_storage(struct nv04_resource *);

void
nouveau_copy_buffer(struct nouveau_context *,
                    struct nv04_resource *dst, unsigned dst_pos,
                    struct nv04_resource *src, unsigned src_pos, unsigned size);

bool
nouveau_buffer_migrate(struct nouveau_context *,
                       struct nv04_resource *, unsigned domain);
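
/* Illustrative sketch (an assumption about caller policy, not this driver's):
 * moving a frequently GPU-read buffer from GART into VRAM. NOUVEAU_BO_VRAM
 * and NOUVEAU_BO_GART are libdrm domain flags; "nv" is a hypothetical
 * nouveau_context.
 *
 *    if (res->domain == NOUVEAU_BO_GART &&
 *        !nouveau_buffer_migrate(nv, res, NOUVEAU_BO_VRAM)) {
 *       // migration failed; the buffer stays usable from its GART copy
 *    }
 */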

void *
nouveau_resource_map_offset(struct nouveau_context *, struct nv04_resource *,
                            uint32_t offset, uint32_t flags);

static inline void
nouveau_resource_unmap(struct nv04_resource *res)
{
   /* no-op */
}
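
/* Illustrative sketch (not part of this header): writing into a resource
 * through a temporary CPU mapping. NOUVEAU_BO_WR is a libdrm access flag and
 * memcpy needs <string.h>; both are assumptions about the call site.
 *
 *    uint8_t *map = nouveau_resource_map_offset(nv, res, byte_offset,
 *                                               NOUVEAU_BO_WR);
 *    if (map) {
 *       memcpy(map, data, size);
 *       nouveau_resource_unmap(res); // currently a no-op, kept for symmetry
 *    }
 */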

static inline struct nv04_resource *
nv04_resource(struct pipe_resource *resource)
{
   return (struct nv04_resource *)resource;
}

/* Is the resource mapped into the GPU's address space (i.e. VRAM or GART)? */
static inline bool
nouveau_resource_mapped_by_gpu(struct pipe_resource *resource)
{
   return nv04_resource(resource)->domain != 0;
}
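
/* Illustrative sketch (an assumption about caller logic): choosing between a
 * GPU-side copy and a CPU-side path depending on where the source buffer
 * currently lives.
 *
 *    if (nouveau_resource_mapped_by_gpu(&src->base)) {
 *       nouveau_copy_buffer(nv, dst, dst_pos, src, src_pos, size);
 *    } else {
 *       // src only has a CPU copy in src->data; read/upload it directly
 *    }
 */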

struct pipe_resource *
nouveau_buffer_create(struct pipe_screen *pscreen,
                      const struct pipe_resource *templ);

struct pipe_resource *
nouveau_user_buffer_create(struct pipe_screen *screen, void *ptr,
                           unsigned bytes, unsigned usage);
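
/* Illustrative sketch (not part of this header): wrapping client vertex data
 * in a user buffer. "verts" and "pscreen" are hypothetical, and
 * PIPE_BIND_VERTEX_BUFFER is a Gallium bind flag from p_defines.h.
 *
 *    struct pipe_resource *buf =
 *       nouveau_user_buffer_create(pscreen, verts, sizeof(verts),
 *                                  PIPE_BIND_VERTEX_BUFFER);
 */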

bool
nouveau_user_buffer_upload(struct nouveau_context *, struct nv04_resource *,
                           unsigned base, unsigned size);

void
nouveau_buffer_invalidate(struct pipe_context *pipe,
                          struct pipe_resource *resource);

/* Copy data to a scratch buffer and return the address and bo the data
 * resides in. Returns 0 on failure.
 */
uint64_t
nouveau_scratch_data(struct nouveau_context *,
                     const void *data, unsigned base, unsigned size,
                     struct nouveau_bo **);
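
/* Illustrative sketch (an assumption; exact ownership and lifetime rules live
 * in the .c file): staging immediate data through the scratch area.
 *
 *    struct nouveau_bo *bo = NULL;
 *    uint64_t addr = nouveau_scratch_data(nv, data, base, size, &bo);
 *    if (!addr)
 *       return false; // copy to scratch failed
 *    // emit commands that read "size" bytes starting at GPU address "addr",
 *    // keeping "bo" referenced on the validation list while they execute
 */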

#endif