1848b8605Smrg/**************************************************************************
2848b8605Smrg *
3848b8605Smrg * Copyright 2007 VMware, Inc.
4848b8605Smrg * All Rights Reserved.
5848b8605Smrg *
6848b8605Smrg * Permission is hereby granted, free of charge, to any person obtaining a
7848b8605Smrg * copy of this software and associated documentation files (the
8848b8605Smrg * "Software"), to deal in the Software without restriction, including
9848b8605Smrg * without limitation the rights to use, copy, modify, merge, publish,
10848b8605Smrg * distribute, sub license, and/or sell copies of the Software, and to
11848b8605Smrg * permit persons to whom the Software is furnished to do so, subject to
12848b8605Smrg * the following conditions:
13848b8605Smrg *
14848b8605Smrg * The above copyright notice and this permission notice (including the
15848b8605Smrg * next paragraph) shall be included in all copies or substantial portions
16848b8605Smrg * of the Software.
17848b8605Smrg *
18848b8605Smrg * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19848b8605Smrg * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20848b8605Smrg * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
21848b8605Smrg * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
22848b8605Smrg * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
23848b8605Smrg * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
24848b8605Smrg * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25848b8605Smrg *
26848b8605Smrg **************************************************************************/
27848b8605Smrg
/**
 * \file
 * Buffer management.
 *
 * A buffer manager does only one basic thing: it creates buffers. Actually,
 * "buffer factory" would probably be a more accurate description.
 *
 * You can chain buffer managers so that you can have a finer grained memory
 * management.
 *
 * For example, for a simple batch buffer manager you would chain:
 * - the native buffer manager, which provides DMA memory from the graphics
 * memory space;
 * - the fenced buffer manager, which will delay buffer destruction until
 * the moment the card finishes processing it.
 *
 * \author Jose Fonseca <jfonseca@vmware.com>
 */
46848b8605Smrg
47848b8605Smrg#ifndef PB_BUFMGR_H_
48848b8605Smrg#define PB_BUFMGR_H_
49848b8605Smrg
50848b8605Smrg
51848b8605Smrg#include "pb_buffer.h"
52848b8605Smrg
53848b8605Smrg
54848b8605Smrg#ifdef __cplusplus
55848b8605Smrgextern "C" {
56848b8605Smrg#endif
57848b8605Smrg
58848b8605Smrg
59848b8605Smrgstruct pb_desc;
60848b8605Smrg
61848b8605Smrg
62848b8605Smrg/**
63848b8605Smrg * Abstract base class for all buffer managers.
64848b8605Smrg */
65848b8605Smrgstruct pb_manager
66848b8605Smrg{
67848b8605Smrg   void
68848b8605Smrg   (*destroy)( struct pb_manager *mgr );
69848b8605Smrg
70848b8605Smrg   struct pb_buffer *
71848b8605Smrg   (*create_buffer)( struct pb_manager *mgr,
72848b8605Smrg	             pb_size size,
73848b8605Smrg	             const struct pb_desc *desc);
74848b8605Smrg
75848b8605Smrg   /**
76848b8605Smrg    * Flush all temporary-held buffers.
77848b8605Smrg    *
78848b8605Smrg    * Used mostly to aid debugging memory issues or to clean up resources when
79848b8605Smrg    * the drivers are long lived.
80848b8605Smrg    */
81848b8605Smrg   void
82848b8605Smrg   (*flush)( struct pb_manager *mgr );
83848b8605Smrg
84848b8605Smrg   boolean
85848b8605Smrg   (*is_buffer_busy)( struct pb_manager *mgr,
86848b8605Smrg                      struct pb_buffer *buf );
87848b8605Smrg};
88848b8605Smrg
89848b8605Smrg/**
90848b8605Smrg * Static sub-allocator based the old memory manager.
91848b8605Smrg *
92848b8605Smrg * It managers buffers of different sizes. It does so by allocating a buffer
93848b8605Smrg * with the size of the heap, and then using the old mm memory manager to manage
94848b8605Smrg * that heap.
95848b8605Smrg */
96848b8605Smrgstruct pb_manager *
97848b8605Smrgmm_bufmgr_create(struct pb_manager *provider,
98848b8605Smrg                 pb_size size, pb_size align2);
99848b8605Smrg
100848b8605Smrg/**
101848b8605Smrg * Same as mm_bufmgr_create.
102848b8605Smrg *
103848b8605Smrg * Buffer will be release when the manager is destroyed.
104848b8605Smrg */
105848b8605Smrgstruct pb_manager *
106848b8605Smrgmm_bufmgr_create_from_buffer(struct pb_buffer *buffer,
107848b8605Smrg                             pb_size size, pb_size align2);
108848b8605Smrg
109848b8605Smrg
110848b8605Smrg/**
111848b8605Smrg * Slab sub-allocator.
112848b8605Smrg */
113848b8605Smrgstruct pb_manager *
114848b8605Smrgpb_slab_manager_create(struct pb_manager *provider,
115848b8605Smrg                       pb_size bufSize,
116848b8605Smrg                       pb_size slabSize,
117848b8605Smrg                       const struct pb_desc *desc);
118848b8605Smrg
119848b8605Smrg/**
120848b8605Smrg * Allow a range of buffer size, by aggregating multiple slabs sub-allocators
121848b8605Smrg * with different bucket sizes.
122848b8605Smrg */
123848b8605Smrgstruct pb_manager *
124848b8605Smrgpb_slab_range_manager_create(struct pb_manager *provider,
125848b8605Smrg                             pb_size minBufSize,
126848b8605Smrg                             pb_size maxBufSize,
127848b8605Smrg                             pb_size slabSize,
128848b8605Smrg                             const struct pb_desc *desc);
129848b8605Smrg
130848b8605Smrg
131848b8605Smrg/**
132848b8605Smrg * Time-based buffer cache.
133848b8605Smrg *
134848b8605Smrg * This manager keeps a cache of destroyed buffers during a time interval.
135848b8605Smrg */
136848b8605Smrgstruct pb_manager *
137848b8605Smrgpb_cache_manager_create(struct pb_manager *provider,
138848b8605Smrg                        unsigned usecs,
139848b8605Smrg                        float size_factor,
140b8e80941Smrg                        unsigned bypass_usage,
141b8e80941Smrg                        uint64_t maximum_cache_size);
142848b8605Smrg
143b8e80941Smrg/**
144b8e80941Smrg * Remove a buffer from the cache, but keep it alive.
145b8e80941Smrg */
146b8e80941Smrgvoid
147b8e80941Smrgpb_cache_manager_remove_buffer(struct pb_buffer *buf);
148848b8605Smrg
149848b8605Smrgstruct pb_fence_ops;
150848b8605Smrg
151848b8605Smrg/**
152848b8605Smrg * Fenced buffer manager.
153848b8605Smrg *
154848b8605Smrg * This manager is just meant for convenience. It wraps the buffers returned
155848b8605Smrg * by another manager in fenced buffers, so that
156848b8605Smrg *
157848b8605Smrg * NOTE: the buffer manager that provides the buffers will be destroyed
158848b8605Smrg * at the same time.
159848b8605Smrg */
160848b8605Smrgstruct pb_manager *
161848b8605Smrgfenced_bufmgr_create(struct pb_manager *provider,
162848b8605Smrg                     struct pb_fence_ops *ops,
163848b8605Smrg                     pb_size max_buffer_size,
164848b8605Smrg                     pb_size max_cpu_total_size);
165848b8605Smrg
166848b8605Smrg/**
167848b8605Smrg * Debug buffer manager to detect buffer under- and overflows.
168848b8605Smrg *
169848b8605Smrg * Under/overflow sizes should be a multiple of the largest alignment
170848b8605Smrg */
171848b8605Smrgstruct pb_manager *
172848b8605Smrgpb_debug_manager_create(struct pb_manager *provider,
173848b8605Smrg                        pb_size underflow_size, pb_size overflow_size);
174848b8605Smrg
175848b8605Smrg
176848b8605Smrg#ifdef __cplusplus
177848b8605Smrg}
178848b8605Smrg#endif
179848b8605Smrg
180848b8605Smrg#endif /*PB_BUFMGR_H_*/
181