radeon_winsys.h revision 9f464c52
/*
 * Copyright 2008 Corbin Simpson <MostAwesomeDude@gmail.com>
 * Copyright 2010 Marek Olšák <maraeo@gmail.com>
 * Copyright 2018 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE. */

#ifndef RADEON_WINSYS_H
#define RADEON_WINSYS_H

/* The public winsys interface header for the radeon driver. */

/* Whether the next IB can start immediately and not wait for draws and
 * dispatches from the current IB to finish. */
#define RADEON_FLUSH_START_NEXT_GFX_IB_NOW (1u << 31)

#define RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW \
        (PIPE_FLUSH_ASYNC | RADEON_FLUSH_START_NEXT_GFX_IB_NOW)

#include "pipebuffer/pb_buffer.h"

#include "amd/common/ac_gpu_info.h"
#include "amd/common/ac_surface.h"

/* Tiling flags. */
enum radeon_bo_layout {
    RADEON_LAYOUT_LINEAR = 0,
    RADEON_LAYOUT_TILED,
    RADEON_LAYOUT_SQUARETILED,

    RADEON_LAYOUT_UNKNOWN
};

enum radeon_bo_domain { /* bitfield */
    RADEON_DOMAIN_GTT  = 2,
    RADEON_DOMAIN_VRAM = 4,
    RADEON_DOMAIN_VRAM_GTT = RADEON_DOMAIN_VRAM | RADEON_DOMAIN_GTT,
    RADEON_DOMAIN_GDS = 8,
    RADEON_DOMAIN_OA = 16,
};

enum radeon_bo_flag { /* bitfield */
    RADEON_FLAG_GTT_WC =        (1 << 0),
    RADEON_FLAG_NO_CPU_ACCESS = (1 << 1),
    RADEON_FLAG_NO_SUBALLOC =   (1 << 2),
    RADEON_FLAG_SPARSE =        (1 << 3),
    RADEON_FLAG_NO_INTERPROCESS_SHARING = (1 << 4),
    RADEON_FLAG_READ_ONLY =     (1 << 5),
    RADEON_FLAG_32BIT =    (1 << 6),
};

enum radeon_bo_usage { /* bitfield */
    RADEON_USAGE_READ = 2,
    RADEON_USAGE_WRITE = 4,
    RADEON_USAGE_READWRITE = RADEON_USAGE_READ | RADEON_USAGE_WRITE,

    /* The winsys ensures that the CS submission will be scheduled after
     * previously flushed CSs referencing this BO in a conflicting way.
     */
    RADEON_USAGE_SYNCHRONIZED = 8
};

enum radeon_transfer_flags {
   /* Indicates that the caller will unmap the buffer.
    *
    * Not unmapping buffers is an important performance optimization for
    * OpenGL (avoids kernel overhead for frequently mapped buffers).
    */
   RADEON_TRANSFER_TEMPORARY = (PIPE_TRANSFER_DRV_PRV << 0),
};

#define RADEON_SPARSE_PAGE_SIZE (64 * 1024)

enum ring_type {
    RING_GFX = 0,
    RING_COMPUTE,
    RING_DMA,
    RING_UVD,
    RING_VCE,
    RING_UVD_ENC,
    RING_VCN_DEC,
    RING_VCN_ENC,
    RING_VCN_JPEG,
    RING_LAST,
};

enum radeon_value_id {
    RADEON_REQUESTED_VRAM_MEMORY,
    RADEON_REQUESTED_GTT_MEMORY,
    RADEON_MAPPED_VRAM,
    RADEON_MAPPED_GTT,
    RADEON_BUFFER_WAIT_TIME_NS,
    RADEON_NUM_MAPPED_BUFFERS,
    RADEON_TIMESTAMP,
    RADEON_NUM_GFX_IBS,
    RADEON_NUM_SDMA_IBS,
    RADEON_GFX_BO_LIST_COUNTER, /* number of BOs submitted in gfx IBs */
    RADEON_GFX_IB_SIZE_COUNTER,
    RADEON_NUM_BYTES_MOVED,
    RADEON_NUM_EVICTIONS,
    RADEON_NUM_VRAM_CPU_PAGE_FAULTS,
    RADEON_VRAM_USAGE,
    RADEON_VRAM_VIS_USAGE,
    RADEON_GTT_USAGE,
    RADEON_GPU_TEMPERATURE, /* DRM 2.42.0 */
    RADEON_CURRENT_SCLK,
    RADEON_CURRENT_MCLK,
    RADEON_GPU_RESET_COUNTER, /* DRM 2.43.0 */
    RADEON_CS_THREAD_TIME,
};

enum radeon_bo_priority {
    /* Each group of two has the same priority. */
    RADEON_PRIO_FENCE = 0,
    RADEON_PRIO_TRACE,

    RADEON_PRIO_SO_FILLED_SIZE = 2,
    RADEON_PRIO_QUERY,

    RADEON_PRIO_IB1 = 4, /* main IB submitted to the kernel */
    RADEON_PRIO_IB2, /* IB executed with INDIRECT_BUFFER */

    RADEON_PRIO_DRAW_INDIRECT = 6,
    RADEON_PRIO_INDEX_BUFFER,

    RADEON_PRIO_CP_DMA = 8,
    RADEON_PRIO_BORDER_COLORS,

    RADEON_PRIO_CONST_BUFFER = 10,
    RADEON_PRIO_DESCRIPTORS,

    RADEON_PRIO_SAMPLER_BUFFER = 12,
    RADEON_PRIO_VERTEX_BUFFER,

    RADEON_PRIO_SHADER_RW_BUFFER = 14,
    RADEON_PRIO_COMPUTE_GLOBAL,

    RADEON_PRIO_SAMPLER_TEXTURE = 16,
    RADEON_PRIO_SHADER_RW_IMAGE,

    RADEON_PRIO_SAMPLER_TEXTURE_MSAA = 18,
    RADEON_PRIO_COLOR_BUFFER,

    RADEON_PRIO_DEPTH_BUFFER = 20,

    RADEON_PRIO_COLOR_BUFFER_MSAA = 22,

    RADEON_PRIO_DEPTH_BUFFER_MSAA = 24,

    RADEON_PRIO_SEPARATE_META = 26,
    RADEON_PRIO_SHADER_BINARY, /* the hw can't hide instruction cache misses */

    RADEON_PRIO_SHADER_RINGS = 28,

    RADEON_PRIO_SCRATCH_BUFFER = 30,
    /* 31 is the maximum value */
};

struct winsys_handle;
struct radeon_winsys_ctx;

struct radeon_cmdbuf_chunk {
    unsigned cdw;  /* Number of used dwords. */
    unsigned max_dw; /* Maximum number of dwords. */
    uint32_t *buf; /* The base pointer of the chunk. */
};

struct radeon_cmdbuf {
    struct radeon_cmdbuf_chunk current;
    struct radeon_cmdbuf_chunk *prev;
    unsigned                      num_prev; /* Number of previous chunks. */
    unsigned                      max_prev; /* Space in array pointed to by prev. */
    unsigned                      prev_dw; /* Total number of dwords in previous chunks. */

    /* Memory usage of the buffer list. These are always 0 for preamble IBs. */
    uint64_t                      used_vram;
    uint64_t                      used_gart;
    uint64_t                      gpu_address;
};

/* Tiling info for display code, DRI sharing, and other data. */
struct radeon_bo_metadata {
    /* Tiling flags describing the texture layout for display code
     * and DRI sharing.
     */
    union {
        struct {
            enum radeon_bo_layout   microtile;
            enum radeon_bo_layout   macrotile;
            unsigned                pipe_config;
            unsigned                bankw;
            unsigned                bankh;
            unsigned                tile_split;
            unsigned                mtilea;
            unsigned                num_banks;
            unsigned                stride;
            bool                    scanout;
        } legacy;

        struct {
            /* surface flags */
            unsigned swizzle_mode:5;

            /* DCC flags */
            /* [31:8]: max offset = 4GB - 256; 0 = DCC disabled */
            unsigned dcc_offset_256B:24;
            unsigned dcc_pitch_max:14;   /* (mip chain pitch - 1) for DCN */
            unsigned dcc_independent_64B:1;
        } gfx9;
    } u;

    /* Additional metadata associated with the buffer, in bytes.
     * The maximum size is 64 * 4. This is opaque for the winsys & kernel.
     * Supported by amdgpu only.
     */
    uint32_t                size_metadata;
    uint32_t                metadata[64];
};

enum radeon_feature_id {
    RADEON_FID_R300_HYPERZ_ACCESS,     /* ZMask + HiZ */
    RADEON_FID_R300_CMASK_ACCESS,
};

struct radeon_bo_list_item {
    uint64_t bo_size;
    uint64_t vm_address;
    uint32_t priority_usage; /* mask of (1 << RADEON_PRIO_*) */
};

struct radeon_winsys {
    /**
     * The screen object this winsys was created for
     */
    struct pipe_screen *screen;

    /**
     * Decrement the winsys reference count.
     *
     * \param ws  The winsys this function is called for.
     * \return    True if the winsys and screen should be destroyed.
     */
    bool (*unref)(struct radeon_winsys *ws);

    /**
     * Destroy this winsys.
     *
     * \param ws        The winsys this function is called from.
     */
    void (*destroy)(struct radeon_winsys *ws);

    /**
     * Query an info structure from winsys.
     *
     * \param ws        The winsys this function is called from.
     * \param info      Return structure
     */
    void (*query_info)(struct radeon_winsys *ws,
                       struct radeon_info *info);

    /**
     * A hint for the winsys that it should pin its execution threads to
     * a group of cores sharing a specific L3 cache if the CPU has multiple
     * L3 caches. This is needed for good multithreading performance on
     * AMD Zen CPUs.
     */
    void (*pin_threads_to_L3_cache)(struct radeon_winsys *ws, unsigned cache);

    /**************************************************************************
     * Buffer management. Buffer attributes are mostly fixed over a buffer's lifetime.
     *
     * Remember that gallium gets to choose the interface it needs, and the
     * window systems must then implement that interface (rather than the
     * other way around...).
     *************************************************************************/

    /**
     * Create a buffer object.
     *
     * \param ws        The winsys this function is called from.
     * \param size      The size to allocate.
     * \param alignment An alignment of the buffer in memory.
     * \param domain    A bitmask of the RADEON_DOMAIN_* flags.
     * \param flags     A bitmask of the RADEON_FLAG_* flags.
     * \return          The created buffer object.
     */
    struct pb_buffer *(*buffer_create)(struct radeon_winsys *ws,
                                       uint64_t size,
                                       unsigned alignment,
                                       enum radeon_bo_domain domain,
                                       enum radeon_bo_flag flags);
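
    /* Usage sketch (illustrative, not part of the interface): allocating a
     * page-aligned VRAM buffer that is never shared with other processes.
     * `ws` is assumed to be a valid winsys pointer; the size, alignment and
     * flags are arbitrary choices for the example.
     *
     *    struct pb_buffer *buf =
     *       ws->buffer_create(ws, 64 * 1024, 4096, RADEON_DOMAIN_VRAM,
     *                         RADEON_FLAG_GTT_WC |
     *                         RADEON_FLAG_NO_INTERPROCESS_SHARING);
     *    if (!buf)
     *       return;   // allocation failed
     */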

    /**
     * Map the entire data store of a buffer object into the client's address
     * space.
     *
     * Callers are expected to unmap buffers again if and only if the
     * RADEON_TRANSFER_TEMPORARY flag is set in \p usage.
     *
     * \param buf       A winsys buffer object to map.
     * \param cs        A command stream to flush if the buffer is referenced by it.
     * \param usage     A bitmask of the PIPE_TRANSFER_* and RADEON_TRANSFER_* flags.
     * \return          The pointer at the beginning of the buffer.
     */
    void *(*buffer_map)(struct pb_buffer *buf,
                        struct radeon_cmdbuf *cs,
                        enum pipe_transfer_usage usage);
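
    /* Usage sketch (illustrative): a short-lived CPU upload. Because the
     * mapping is released immediately, RADEON_TRANSFER_TEMPORARY must be set;
     * without that flag the winsys keeps the mapping and buffer_unmap must
     * not be called. `data` and `size` are assumed to be provided by the
     * caller.
     *
     *    void *ptr = ws->buffer_map(buf, cs, PIPE_TRANSFER_WRITE |
     *                                        RADEON_TRANSFER_TEMPORARY);
     *    if (ptr) {
     *       memcpy(ptr, data, size);
     *       ws->buffer_unmap(buf);
     *    }
     */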

    /**
     * Unmap a buffer object from the client's address space.
     *
     * \param buf       A winsys buffer object to unmap.
     */
    void (*buffer_unmap)(struct pb_buffer *buf);

    /**
     * Wait for the buffer and return true if the buffer is not used
     * by the device.
     *
     * The timeout of 0 will only return the status.
     * The timeout of PIPE_TIMEOUT_INFINITE will always wait until the buffer
     * is idle.
     */
    bool (*buffer_wait)(struct pb_buffer *buf, uint64_t timeout,
                        enum radeon_bo_usage usage);

    /**
     * Return buffer metadata.
     * (tiling info for display code, DRI sharing, and other data)
     *
     * \param buf       A winsys buffer object to get the flags from.
     * \param md        Metadata
     */
    void (*buffer_get_metadata)(struct pb_buffer *buf,
                                struct radeon_bo_metadata *md);

    /**
     * Set buffer metadata.
     * (tiling info for display code, DRI sharing, and other data)
     *
     * \param buf       A winsys buffer object to set the flags for.
     * \param md        Metadata
     */
    void (*buffer_set_metadata)(struct pb_buffer *buf,
                                struct radeon_bo_metadata *md);

    /**
     * Get a winsys buffer from a winsys handle. The internal structure
     * of the handle is platform-specific and only a winsys should access it.
     *
     * \param ws        The winsys this function is called from.
     * \param whandle   A winsys handle pointer as was received from a state
     *                  tracker.
     * \param stride    The returned buffer stride in bytes.
     */
    struct pb_buffer *(*buffer_from_handle)(struct radeon_winsys *ws,
                                            struct winsys_handle *whandle,
                                            unsigned vm_alignment,
                                            unsigned *stride, unsigned *offset);

    /**
     * Get a winsys buffer from a user pointer. The resulting buffer can't
     * be exported. Both pointer and size must be page aligned.
     *
     * \param ws        The winsys this function is called from.
     * \param pointer   User pointer to turn into a buffer object.
     * \param size      Size in bytes for the new buffer.
     */
    struct pb_buffer *(*buffer_from_ptr)(struct radeon_winsys *ws,
                                         void *pointer, uint64_t size);

    /**
     * Whether the buffer was created from a user pointer.
     *
     * \param buf       A winsys buffer object
     * \return          whether \p buf was created via buffer_from_ptr
     */
    bool (*buffer_is_user_ptr)(struct pb_buffer *buf);

    /** Whether the buffer was suballocated. */
    bool (*buffer_is_suballocated)(struct pb_buffer *buf);

    /**
     * Get a winsys handle from a winsys buffer. The internal structure
     * of the handle is platform-specific and only a winsys should access it.
     *
     * \param buf       A winsys buffer object to get the handle from.
     * \param whandle   A winsys handle pointer.
     * \param stride    A stride of the buffer in bytes, for texturing.
     * \return          true on success.
     */
    bool (*buffer_get_handle)(struct pb_buffer *buf,
                              unsigned stride, unsigned offset,
                              unsigned slice_size,
                              struct winsys_handle *whandle);

    /**
     * Change the commitment of a (64KB-page aligned) region of the given
     * sparse buffer.
     *
     * \warning There is no automatic synchronization with command submission.
     *
     * \note Only implemented by the amdgpu winsys.
     *
     * \return false on out of memory or other failure, true on success.
     */
    bool (*buffer_commit)(struct pb_buffer *buf,
                          uint64_t offset, uint64_t size,
                          bool commit);

    /**
     * Return the virtual address of a buffer.
     *
     * When virtual memory is not in use, this is the offset relative to the
     * relocation base (non-zero for sub-allocated buffers).
     *
     * \param buf       A winsys buffer object
     * \return          virtual address
     */
    uint64_t (*buffer_get_virtual_address)(struct pb_buffer *buf);

    /**
     * Return the offset of this buffer relative to the relocation base.
     * This is only non-zero for sub-allocated buffers.
     *
     * This is only supported in the radeon winsys, since amdgpu uses virtual
     * addresses in submissions even for the video engines.
     *
     * \param buf      A winsys buffer object
     * \return         the offset for relocations
     */
    unsigned (*buffer_get_reloc_offset)(struct pb_buffer *buf);

    /**
     * Query the initial placement of the buffer from the kernel driver.
     */
    enum radeon_bo_domain (*buffer_get_initial_domain)(struct pb_buffer *buf);

    /**************************************************************************
     * Command submission.
     *
     * Each pipe context should create its own command stream and submit
     * commands independently of other contexts.
     *************************************************************************/

    /**
     * Create a command submission context.
     * Various command streams can be submitted to the same context.
     */
    struct radeon_winsys_ctx *(*ctx_create)(struct radeon_winsys *ws);

    /**
     * Destroy a context.
     */
    void (*ctx_destroy)(struct radeon_winsys_ctx *ctx);

    /**
     * Query a GPU reset status.
     */
    enum pipe_reset_status (*ctx_query_reset_status)(struct radeon_winsys_ctx *ctx);

    /**
     * Create a command stream.
     *
     * \param ctx       The submission context
     * \param ring_type The ring type (GFX, DMA, UVD)
     * \param flush     Flush callback function associated with the command stream.
     * \param flush_ctx User pointer that will be passed to the flush callback.
     */
    struct radeon_cmdbuf *(*cs_create)(struct radeon_winsys_ctx *ctx,
                                       enum ring_type ring_type,
                                       void (*flush)(void *ctx, unsigned flags,
                                                     struct pipe_fence_handle **fence),
                                       void *flush_ctx,
                                       bool stop_exec_on_failure);

    /**
     * Destroy a command stream.
     *
     * \param cs        A command stream to destroy.
     */
    void (*cs_destroy)(struct radeon_cmdbuf *cs);

    /**
     * Add a buffer. Each buffer used by a CS must be added using this function.
     *
     * \param cs      Command stream
     * \param buf     Buffer
     * \param usage   Whether the buffer is used for read and/or write.
     * \param domain  Bitmask of the RADEON_DOMAIN_* flags.
     * \param priority  A higher number means a greater chance of being
     *                  placed in the requested domain. 15 is the maximum.
     * \return Buffer index.
     */
    unsigned (*cs_add_buffer)(struct radeon_cmdbuf *cs,
                              struct pb_buffer *buf,
                              enum radeon_bo_usage usage,
                              enum radeon_bo_domain domain,
                              enum radeon_bo_priority priority);
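
    /* Usage sketch (illustrative): registering a vertex buffer before packets
     * in the IB reference it. The usage, domain and priority shown here are
     * arbitrary choices for the example; the returned index only matters for
     * the older radeon kernel driver (see cs_lookup_buffer below).
     *
     *    unsigned idx = ws->cs_add_buffer(cs, buf,
     *                                     RADEON_USAGE_READ |
     *                                     RADEON_USAGE_SYNCHRONIZED,
     *                                     RADEON_DOMAIN_VRAM,
     *                                     RADEON_PRIO_VERTEX_BUFFER);
     */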

    /**
     * Return the index of an already-added buffer.
     *
     * Not supported on amdgpu. Drivers with GPUVM should not care about
     * buffer indices.
     *
     * \param cs        Command stream
     * \param buf       Buffer
     * \return          The buffer index, or -1 if the buffer has not been added.
     */
    int (*cs_lookup_buffer)(struct radeon_cmdbuf *cs,
                            struct pb_buffer *buf);

    /**
     * Return true if there is enough memory in VRAM and GTT for the buffers
     * added so far. If the validation fails, all buffers which have
     * been added since the last call of cs_validate will be removed and
     * the CS will be flushed (provided there are still any buffers).
     *
     * \param cs        A command stream to validate.
     */
    bool (*cs_validate)(struct radeon_cmdbuf *cs);

    /**
     * Check whether the given number of dwords is available in the IB.
     * Optionally chain a new chunk of the IB if necessary and supported.
     *
     * \param cs        A command stream.
     * \param dw        Number of CS dwords requested by the caller.
     */
    bool (*cs_check_space)(struct radeon_cmdbuf *cs, unsigned dw);
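
    /* Usage sketch (illustrative): reserving IB space before emitting a
     * fixed-size packet. The dword count and payload names are arbitrary for
     * the example; radeon_emit() is the inline helper defined at the end of
     * this header.
     *
     *    if (!ws->cs_check_space(cs, 4))
     *       return;   // could not grow or chain the IB
     *    radeon_emit(cs, header);
     *    radeon_emit(cs, payload0);
     *    radeon_emit(cs, payload1);
     *    radeon_emit(cs, payload2);
     */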

    /**
     * Return the buffer list.
     *
     * This is the buffer list as passed to the kernel, i.e. it only contains
     * the parent buffers of sub-allocated buffers.
     *
     * \param cs    Command stream
     * \param list  Returned buffer list. Set to NULL to query the count only.
     * \return      The buffer count.
     */
    unsigned (*cs_get_buffer_list)(struct radeon_cmdbuf *cs,
                                   struct radeon_bo_list_item *list);

    /**
     * Flush a command stream.
     *
     * \param cs          A command stream to flush.
     * \param flags       PIPE_FLUSH_* flags.
     * \param fence       Pointer to a fence. If non-NULL, a fence is inserted
     *                    after the CS and is returned through this parameter.
     * \return Negative POSIX error code or 0 for success.
     *         Asynchronous submissions never return an error.
     */
    int (*cs_flush)(struct radeon_cmdbuf *cs,
                    unsigned flags,
                    struct pipe_fence_handle **fence);
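
    /* Usage sketch (illustrative): an asynchronous flush that lets the next
     * gfx IB start immediately, followed by a CPU wait on the returned fence.
     *
     *    struct pipe_fence_handle *fence = NULL;
     *    ws->cs_flush(cs, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, &fence);
     *    if (fence) {
     *       ws->fence_wait(ws, fence, PIPE_TIMEOUT_INFINITE);
     *       ws->fence_reference(&fence, NULL);
     *    }
     */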

    /**
     * Create a fence before the CS is flushed.
     * The user must flush manually to complete the initialization of the fence.
     *
     * The fence must not be used for anything except \ref cs_add_fence_dependency
     * before the flush.
     */
    struct pipe_fence_handle *(*cs_get_next_fence)(struct radeon_cmdbuf *cs);

    /**
     * Return true if a buffer is referenced by a command stream.
     *
     * \param cs        A command stream.
     * \param buf       A winsys buffer.
     */
    bool (*cs_is_buffer_referenced)(struct radeon_cmdbuf *cs,
                                    struct pb_buffer *buf,
                                    enum radeon_bo_usage usage);

    /**
     * Request access to a feature for a command stream.
     *
     * \param cs        A command stream.
     * \param fid       Feature ID, one of RADEON_FID_*
     * \param enable    Whether to enable or disable the feature.
     */
    bool (*cs_request_feature)(struct radeon_cmdbuf *cs,
                               enum radeon_feature_id fid,
                               bool enable);

    /**
     * Make sure all asynchronous flushes of the CS have completed.
     *
     * \param cs        A command stream.
     */
    void (*cs_sync_flush)(struct radeon_cmdbuf *cs);

    /**
     * Add a fence dependency to the CS, so that the CS will wait for
     * the fence before execution.
     */
    void (*cs_add_fence_dependency)(struct radeon_cmdbuf *cs,
                                    struct pipe_fence_handle *fence);

    /**
     * Signal a syncobj when the CS finishes execution.
     */
    void (*cs_add_syncobj_signal)(struct radeon_cmdbuf *cs,
                                  struct pipe_fence_handle *fence);

    /**
     * Wait for the fence and return true if the fence has been signalled.
     * The timeout of 0 will only return the status.
     * The timeout of PIPE_TIMEOUT_INFINITE will always wait until the fence
     * is signalled.
     */
    bool (*fence_wait)(struct radeon_winsys *ws,
                       struct pipe_fence_handle *fence,
                       uint64_t timeout);

    /**
     * Reference counting for fences.
     */
    void (*fence_reference)(struct pipe_fence_handle **dst,
                            struct pipe_fence_handle *src);

    /**
     * Create a new fence object corresponding to the given syncobj fd.
     */
    struct pipe_fence_handle *(*fence_import_syncobj)(struct radeon_winsys *ws,
                                                      int fd);

    /**
     * Create a new fence object corresponding to the given sync_file.
     */
    struct pipe_fence_handle *(*fence_import_sync_file)(struct radeon_winsys *ws,
                                                        int fd);

    /**
     * Return a sync_file FD corresponding to the given fence object.
     */
    int (*fence_export_sync_file)(struct radeon_winsys *ws,
                                  struct pipe_fence_handle *fence);

    /**
     * Return a sync file FD that is already signalled.
     */
    int (*export_signalled_sync_file)(struct radeon_winsys *ws);

    /**
     * Initialize surface
     *
     * \param ws        The winsys this function is called from.
     * \param tex       Input texture description
     * \param flags     Bitmask of RADEON_SURF_* flags
     * \param bpe       Bytes per pixel, it can be different for Z buffers.
     * \param mode      Preferred tile mode. (linear, 1D, or 2D)
     * \param surf      Output structure
     */
    int (*surface_init)(struct radeon_winsys *ws,
                        const struct pipe_resource *tex,
                        unsigned flags, unsigned bpe,
                        enum radeon_surf_mode mode,
                        struct radeon_surf *surf);

    uint64_t (*query_value)(struct radeon_winsys *ws,
                            enum radeon_value_id value);

    bool (*read_registers)(struct radeon_winsys *ws, unsigned reg_offset,
                           unsigned num_registers, uint32_t *out);
};

static inline bool radeon_emitted(struct radeon_cmdbuf *cs, unsigned num_dw)
{
    return cs && (cs->prev_dw + cs->current.cdw > num_dw);
}

static inline void radeon_emit(struct radeon_cmdbuf *cs, uint32_t value)
{
    cs->current.buf[cs->current.cdw++] = value;
}

static inline void radeon_emit_array(struct radeon_cmdbuf *cs,
                                     const uint32_t *values, unsigned count)
{
    memcpy(cs->current.buf + cs->current.cdw, values, count * 4);
    cs->current.cdw += count;
}
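
/* Usage sketch (illustrative): radeon_emitted() is typically used to skip a
 * flush when nothing has been written past some caller-tracked baseline
 * (e.g. the size of an initial state preamble). `initial_cdw` is assumed to
 * be maintained by the driver.
 *
 *    if (radeon_emitted(cs, initial_cdw))
 *       ws->cs_flush(cs, PIPE_FLUSH_ASYNC, NULL);
 */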

enum radeon_heap {
    RADEON_HEAP_VRAM_NO_CPU_ACCESS,
    RADEON_HEAP_VRAM_READ_ONLY,
    RADEON_HEAP_VRAM_READ_ONLY_32BIT,
    RADEON_HEAP_VRAM_32BIT,
    RADEON_HEAP_VRAM,
    RADEON_HEAP_GTT_WC,
    RADEON_HEAP_GTT_WC_READ_ONLY,
    RADEON_HEAP_GTT_WC_READ_ONLY_32BIT,
    RADEON_HEAP_GTT_WC_32BIT,
    RADEON_HEAP_GTT,
    RADEON_MAX_SLAB_HEAPS,
    RADEON_MAX_CACHED_HEAPS = RADEON_MAX_SLAB_HEAPS,
};

static inline enum radeon_bo_domain radeon_domain_from_heap(enum radeon_heap heap)
{
    switch (heap) {
    case RADEON_HEAP_VRAM_NO_CPU_ACCESS:
    case RADEON_HEAP_VRAM_READ_ONLY:
    case RADEON_HEAP_VRAM_READ_ONLY_32BIT:
    case RADEON_HEAP_VRAM_32BIT:
    case RADEON_HEAP_VRAM:
        return RADEON_DOMAIN_VRAM;
    case RADEON_HEAP_GTT_WC:
    case RADEON_HEAP_GTT_WC_READ_ONLY:
    case RADEON_HEAP_GTT_WC_READ_ONLY_32BIT:
    case RADEON_HEAP_GTT_WC_32BIT:
    case RADEON_HEAP_GTT:
        return RADEON_DOMAIN_GTT;
    default:
        assert(0);
        return (enum radeon_bo_domain)0;
    }
}

static inline unsigned radeon_flags_from_heap(enum radeon_heap heap)
{
    unsigned flags = RADEON_FLAG_NO_INTERPROCESS_SHARING |
                     (heap != RADEON_HEAP_GTT ? RADEON_FLAG_GTT_WC : 0);

    switch (heap) {
    case RADEON_HEAP_VRAM_NO_CPU_ACCESS:
        return flags |
               RADEON_FLAG_NO_CPU_ACCESS;

    case RADEON_HEAP_VRAM_READ_ONLY:
    case RADEON_HEAP_GTT_WC_READ_ONLY:
        return flags |
               RADEON_FLAG_READ_ONLY;

    case RADEON_HEAP_VRAM_READ_ONLY_32BIT:
    case RADEON_HEAP_GTT_WC_READ_ONLY_32BIT:
        return flags |
               RADEON_FLAG_READ_ONLY |
               RADEON_FLAG_32BIT;

    case RADEON_HEAP_VRAM_32BIT:
    case RADEON_HEAP_GTT_WC_32BIT:
        return flags |
               RADEON_FLAG_32BIT;

    case RADEON_HEAP_VRAM:
    case RADEON_HEAP_GTT_WC:
    case RADEON_HEAP_GTT:
    default:
        return flags;
    }
}

/* Return the heap index for winsys allocators, or -1 on failure. */
static inline int radeon_get_heap_index(enum radeon_bo_domain domain,
                                        enum radeon_bo_flag flags)
{
    /* VRAM implies WC (write combining) */
    assert(!(domain & RADEON_DOMAIN_VRAM) || flags & RADEON_FLAG_GTT_WC);
    /* NO_CPU_ACCESS implies VRAM only. */
    assert(!(flags & RADEON_FLAG_NO_CPU_ACCESS) || domain == RADEON_DOMAIN_VRAM);

    /* Resources with interprocess sharing don't use any winsys allocators. */
    if (!(flags & RADEON_FLAG_NO_INTERPROCESS_SHARING))
        return -1;

    /* Unsupported flags: NO_SUBALLOC, SPARSE. */
    if (flags & ~(RADEON_FLAG_GTT_WC |
                  RADEON_FLAG_NO_CPU_ACCESS |
                  RADEON_FLAG_NO_INTERPROCESS_SHARING |
                  RADEON_FLAG_READ_ONLY |
                  RADEON_FLAG_32BIT))
        return -1;

    switch (domain) {
    case RADEON_DOMAIN_VRAM:
        switch (flags & (RADEON_FLAG_NO_CPU_ACCESS |
                         RADEON_FLAG_READ_ONLY |
                         RADEON_FLAG_32BIT)) {
        case RADEON_FLAG_NO_CPU_ACCESS | RADEON_FLAG_READ_ONLY | RADEON_FLAG_32BIT:
        case RADEON_FLAG_NO_CPU_ACCESS | RADEON_FLAG_READ_ONLY:
            assert(!"NO_CPU_ACCESS | READ_ONLY doesn't make sense");
            return -1;
        case RADEON_FLAG_NO_CPU_ACCESS | RADEON_FLAG_32BIT:
            assert(!"NO_CPU_ACCESS with 32BIT is disallowed");
            return -1;
        case RADEON_FLAG_NO_CPU_ACCESS:
            return RADEON_HEAP_VRAM_NO_CPU_ACCESS;
        case RADEON_FLAG_READ_ONLY | RADEON_FLAG_32BIT:
            return RADEON_HEAP_VRAM_READ_ONLY_32BIT;
        case RADEON_FLAG_READ_ONLY:
            return RADEON_HEAP_VRAM_READ_ONLY;
        case RADEON_FLAG_32BIT:
            return RADEON_HEAP_VRAM_32BIT;
        case 0:
            return RADEON_HEAP_VRAM;
        }
        break;
    case RADEON_DOMAIN_GTT:
        switch (flags & (RADEON_FLAG_GTT_WC |
                         RADEON_FLAG_READ_ONLY |
                         RADEON_FLAG_32BIT)) {
        case RADEON_FLAG_GTT_WC | RADEON_FLAG_READ_ONLY | RADEON_FLAG_32BIT:
            return RADEON_HEAP_GTT_WC_READ_ONLY_32BIT;
        case RADEON_FLAG_GTT_WC | RADEON_FLAG_READ_ONLY:
            return RADEON_HEAP_GTT_WC_READ_ONLY;
        case RADEON_FLAG_GTT_WC | RADEON_FLAG_32BIT:
            return RADEON_HEAP_GTT_WC_32BIT;
        case RADEON_FLAG_GTT_WC:
            return RADEON_HEAP_GTT_WC;
        case RADEON_FLAG_READ_ONLY | RADEON_FLAG_32BIT:
        case RADEON_FLAG_READ_ONLY:
            assert(!"READ_ONLY without WC is disallowed");
            return -1;
        case RADEON_FLAG_32BIT:
            assert(!"32BIT without WC is disallowed");
            return -1;
        case 0:
            return RADEON_HEAP_GTT;
        }
        break;
    default:
        break;
    }
    return -1;
}
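
/* Usage sketch (illustrative): the heap helpers above are meant to round-trip.
 * The domain and flags derived from a slab heap map back to the same heap
 * index:
 *
 *    enum radeon_heap heap = RADEON_HEAP_GTT_WC_READ_ONLY;
 *    enum radeon_bo_domain domain = radeon_domain_from_heap(heap);
 *    unsigned flags = radeon_flags_from_heap(heap);
 *
 *    assert(radeon_get_heap_index(domain, (enum radeon_bo_flag)flags) ==
 *           (int)heap);
 */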

#endif
