nouveau_buffer.h revision af69d88d
#ifndef __NOUVEAU_BUFFER_H__
#define __NOUVEAU_BUFFER_H__

#include "util/u_range.h"
#include "util/u_transfer.h"
#include "util/u_double_list.h"

struct pipe_resource;
struct nouveau_context;
struct nouveau_bo;

/* DIRTY: buffer was (or will be after the next flush) written to by GPU and
 *  resource->data has not been updated to reflect modified VRAM contents
 *
 * USER_MEMORY: resource->data is a pointer to client memory and may change
 *  between GL calls
 */
#define NOUVEAU_BUFFER_STATUS_GPU_READING (1 << 0)
#define NOUVEAU_BUFFER_STATUS_GPU_WRITING (1 << 1)
#define NOUVEAU_BUFFER_STATUS_DIRTY       (1 << 2)
#define NOUVEAU_BUFFER_STATUS_USER_MEMORY (1 << 7)

#define NOUVEAU_BUFFER_STATUS_REALLOC_MASK NOUVEAU_BUFFER_STATUS_USER_MEMORY

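/* Illustrative sketch (not part of the original header): callers typically
 * test these status bits before touching a buffer's contents, e.g. a CPU
 * read may need to wait on the write fence while GPU_WRITING is set:
 *
 *    if (res->status & NOUVEAU_BUFFER_STATUS_GPU_WRITING)
 *       nouveau_fence_wait(res->fence_wr);
 *
 * nouveau_fence_wait() is assumed here from nouveau_fence.h; the exact
 * synchronization policy is up to the driver code using this header.
 */
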
/* Resources, if mapped into the GPU's address space, are guaranteed to
 * have constant virtual addresses (nv50+).
 *
 * The address of a resource will lie within the nouveau_bo referenced,
 * and this bo should be added to the memory manager's validation list.
 */
struct nv04_resource {
   struct pipe_resource base;
   const struct u_resource_vtbl *vtbl;

   uint64_t address; /* virtual address (nv50+) */

   uint8_t *data; /* resource's contents, if domain == 0, or cached */
   struct nouveau_bo *bo;
   uint32_t offset; /* offset into the data/bo */

   uint8_t status;
   uint8_t domain;

   struct nouveau_fence *fence;
   struct nouveau_fence *fence_wr;

   struct nouveau_mm_allocation *mm;

   /* buffer range that has been initialized */
   struct util_range valid_buffer_range;
};

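/* Illustrative sketch (not part of the original header): with the fields
 * above, byte i of a GPU-resident buffer is reached at the virtual address
 *
 *    uint64_t gpu_addr = res->address + i;   // nv50+, when domain != 0
 *
 * while a CPU-side copy, when present, lives at res->data + i. Command
 * submission code is expected to reference res->bo so the backing storage
 * stays resident (validated) while the GPU uses it.
 */
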
void
nouveau_buffer_release_gpu_storage(struct nv04_resource *);

void
nouveau_copy_buffer(struct nouveau_context *,
                    struct nv04_resource *dst, unsigned dst_pos,
                    struct nv04_resource *src, unsigned src_pos, unsigned size);

boolean
nouveau_buffer_migrate(struct nouveau_context *,
                       struct nv04_resource *, unsigned domain);

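/* Illustrative sketch (not part of the original header): the domain argument
 * is a placement flag, e.g. moving a buffer into GART-mapped system memory
 * might look like
 *
 *    if (!nouveau_buffer_migrate(nv, res, NOUVEAU_BO_GART))
 *       return FALSE;
 *
 * NOUVEAU_BO_GART / NOUVEAU_BO_VRAM are assumed from libdrm's nouveau.h;
 * domain == 0 means the contents exist only in res->data.
 */
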
void *
nouveau_resource_map_offset(struct nouveau_context *, struct nv04_resource *,
                            uint32_t offset, uint32_t flags);

static INLINE void
nouveau_resource_unmap(struct nv04_resource *res)
{
   /* no-op */
}

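/* Illustrative sketch (not part of the original header): a typical access
 * pattern maps at a byte offset, uses the pointer, then "unmaps" (a no-op,
 * kept for symmetry):
 *
 *    uint32_t *p = nouveau_resource_map_offset(nv, res, 0, NOUVEAU_BO_RD);
 *    if (p) {
 *       ... read from p ...
 *       nouveau_resource_unmap(res);
 *    }
 *
 * The NOUVEAU_BO_RD / NOUVEAU_BO_WR access flags are assumed from libdrm's
 * nouveau.h.
 */
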
static INLINE struct nv04_resource *
nv04_resource(struct pipe_resource *resource)
{
   return (struct nv04_resource *)resource;
}

/* is resource mapped into the GPU's address space (i.e. VRAM or GART) ? */
static INLINE boolean
nouveau_resource_mapped_by_gpu(struct pipe_resource *resource)
{
   return nv04_resource(resource)->domain != 0;
}

struct pipe_resource *
nouveau_buffer_create(struct pipe_screen *pscreen,
                      const struct pipe_resource *templ);

struct pipe_resource *
nouveau_user_buffer_create(struct pipe_screen *screen, void *ptr,
                           unsigned bytes, unsigned usage);

boolean
nouveau_user_buffer_upload(struct nouveau_context *, struct nv04_resource *,
                           unsigned base, unsigned size);

/* Copy data to a scratch buffer and return address & bo the data resides in.
 * Returns 0 on failure.
 */
uint64_t
nouveau_scratch_data(struct nouveau_context *,
                     const void *data, unsigned base, unsigned size,
                     struct nouveau_bo **);

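/* Illustrative sketch (not part of the original header): uploading transient
 * data (e.g. from a user pointer) through the scratch path might look like
 *
 *    struct nouveau_bo *bo;
 *    uint64_t addr = nouveau_scratch_data(nv, data, base, size, &bo);
 *    if (!addr)
 *       ... fall back or fail ...
 *
 * addr is the GPU virtual address the copied data resides at, and bo is the
 * buffer object backing it, to be referenced during command submission.
 */
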
#endif