/**************************************************************************
 *
 * Copyright 2007 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * \file
 * Buffer management.
 *
 * A buffer manager does only one basic thing: it creates buffers. Actually,
 * "buffer factory" would probably be a more accurate description.
 *
 * You can chain buffer managers so that you can have finer-grained memory
 * management.
 *
 * For example, for a simple batch buffer manager you would chain:
 * - the native buffer manager, which provides DMA memory from the graphics
 * memory space;
 * - the fenced buffer manager, which will delay buffer destruction until the
 * moment the card finishes processing it.
 *
 * \author Jose Fonseca <jfonseca@vmware.com>
 */
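
/*
 * Illustrative sketch only (not part of this header's API guarantees): one
 * possible chain along the lines of the example above.  "winsys_mgr" and
 * "fence_ops" are hypothetical names for a driver-provided native manager
 * and its fence callbacks.
 *
 *    struct pb_manager *fenced, *cached;
 *
 *    fenced = fenced_bufmgr_create(winsys_mgr, fence_ops,
 *                                  max_buffer_size, max_cpu_total_size);
 *    cached = pb_cache_manager_create(fenced, usecs, size_factor,
 *                                     bypass_usage, maximum_cache_size);
 *
 * Buffers are then requested from the outermost manager ("cached" here),
 * which forwards allocations down the chain as needed.
 */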

#ifndef PB_BUFMGR_H_
#define PB_BUFMGR_H_


#include "pb_buffer.h"


#ifdef __cplusplus
extern "C" {
#endif


struct pb_desc;


/**
 * Abstract base class for all buffer managers.
 */
struct pb_manager
{
   void
   (*destroy)( struct pb_manager *mgr );

   struct pb_buffer *
   (*create_buffer)( struct pb_manager *mgr,
                     pb_size size,
                     const struct pb_desc *desc);

   /**
    * Flush all temporarily held buffers.
    *
    * Used mostly to aid debugging memory issues or to clean up resources when
    * the drivers are long-lived.
    */
   void
   (*flush)( struct pb_manager *mgr );

   /**
    * Check whether the given buffer is still busy (e.g. still being
    * accessed by the GPU).
    */
   boolean
   (*is_buffer_busy)( struct pb_manager *mgr,
                      struct pb_buffer *buf );
};
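
/*
 * Minimal usage sketch (assumptions, not API guarantees): "mgr" stands in for
 * any manager declared in this header, and struct pb_desc is assumed to carry
 * the alignment/usage fields declared in pb_buffer.h.
 *
 *    struct pb_desc desc;
 *    struct pb_buffer *buf;
 *
 *    desc.alignment = 4096;
 *    desc.usage = 0;
 *
 *    buf = mgr->create_buffer(mgr, size, &desc);
 *    if (buf) {
 *       ... map and use the buffer through the pb_buffer interface ...
 *       pb_reference(&buf, NULL);    release our reference
 *    }
 *
 *    mgr->destroy(mgr);              tear down the manager itself
 */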

/**
 * Static sub-allocator based on the old memory manager.
 *
 * It manages buffers of different sizes. It does so by allocating a buffer
 * with the size of the heap, and then using the old mm memory manager to
 * manage that heap.
 */
struct pb_manager *
mm_bufmgr_create(struct pb_manager *provider,
                 pb_size size, pb_size align2);

/**
 * Same as mm_bufmgr_create, but sub-allocating from the given buffer instead
 * of allocating a new one from a provider.
 *
 * The buffer will be released when the manager is destroyed.
 */
struct pb_manager *
mm_bufmgr_create_from_buffer(struct pb_buffer *buffer,
                             pb_size size, pb_size align2);
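
/*
 * Illustrative sketch (the values and the "provider" name are assumptions):
 * carve a fixed-size heap out of a provider and sub-allocate from it.  align2
 * is believed to be the log2 of the allocation alignment; check the
 * implementation before relying on that.
 *
 *    struct pb_manager *mm;
 *
 *    mm = mm_bufmgr_create(provider, 4 * 1024 * 1024, 12);
 *    ... request buffers smaller than the heap through "mm" ...
 *    mm->destroy(mm);
 */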


/**
 * Slab sub-allocator.
 */
struct pb_manager *
pb_slab_manager_create(struct pb_manager *provider,
                       pb_size bufSize,
                       pb_size slabSize,
                       const struct pb_desc *desc);
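
/*
 * Illustrative sketch (parameter values are assumptions): the slab manager
 * hands out fixed-size buffers of bufSize bytes, slicing them out of
 * slabSize-byte slabs obtained from the provider.  struct pb_desc is assumed
 * to carry the alignment/usage fields from pb_buffer.h.
 *
 *    struct pb_desc desc;
 *    struct pb_manager *slab;
 *
 *    desc.alignment = 64;
 *    desc.usage = 0;
 *    slab = pb_slab_manager_create(provider, 4096, 64 * 1024, &desc);
 */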

/**
 * Allows a range of buffer sizes by aggregating multiple slab sub-allocators
 * with different bucket sizes.
 */
struct pb_manager *
pb_slab_range_manager_create(struct pb_manager *provider,
                             pb_size minBufSize,
                             pb_size maxBufSize,
                             pb_size slabSize,
                             const struct pb_desc *desc);
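
/*
 * Illustrative sketch (values are assumptions): serve requests between 4 KiB
 * and 64 KiB from size buckets, each backed by 512 KiB slabs; "desc" is a
 * caller-filled struct pb_desc as in the previous example.
 *
 *    struct pb_manager *slabs;
 *
 *    slabs = pb_slab_range_manager_create(provider, 4096, 64 * 1024,
 *                                         512 * 1024, &desc);
 */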


/**
 * Time-based buffer cache.
 *
 * This manager keeps recently destroyed buffers alive for a time interval so
 * that they can be reused for new allocation requests.
 */
struct pb_manager *
pb_cache_manager_create(struct pb_manager *provider,
                        unsigned usecs,
                        float size_factor,
                        unsigned bypass_usage,
                        uint64_t maximum_cache_size);
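
/*
 * Illustrative sketch (the values and parameter meanings are stated as
 * assumptions; check the cache implementation for exact semantics): keep
 * freed buffers around for roughly one second, reuse a cached buffer for a
 * request as long as it is at most size_factor times larger than the request,
 * never bypass the cache, and cap the cache at 256 MiB.
 *
 *    struct pb_manager *cache;
 *
 *    cache = pb_cache_manager_create(provider, 1000000, 2.0f, 0,
 *                                    256 * 1024 * 1024);
 */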

/**
 * Remove a buffer from the cache, but keep it alive.
 */
void
pb_cache_manager_remove_buffer(struct pb_buffer *buf);
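
/*
 * Illustrative use (the scenario is an assumption): pull a buffer out of the
 * cache when it must not be recycled for future allocations, for example
 * after its storage has been shared outside the driver.
 *
 *    pb_cache_manager_remove_buffer(buf);
 */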

struct pb_fence_ops;

/**
 * Fenced buffer manager.
 *
 * This manager is just meant for convenience. It wraps the buffers returned
 * by another manager in fenced buffers, so that their storage is not released
 * while the card may still be processing them.
 *
 * NOTE: the buffer manager that provides the buffers will be destroyed
 * at the same time as this manager.
 */
struct pb_manager *
fenced_bufmgr_create(struct pb_manager *provider,
                     struct pb_fence_ops *ops,
                     pb_size max_buffer_size,
                     pb_size max_cpu_total_size);
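
/*
 * Illustrative sketch ("winsys_fence_ops" is a hypothetical name and the size
 * limits are placeholders): wrap the provider so that buffer destruction
 * waits for the GPU to finish with each buffer.
 *
 *    struct pb_manager *fenced;
 *
 *    fenced = fenced_bufmgr_create(provider, winsys_fence_ops,
 *                                  max_buffer_size, max_cpu_total_size);
 */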

/**
 * Debug buffer manager to detect buffer under- and overflows.
 *
 * Under/overflow sizes should be a multiple of the largest alignment.
 */
struct pb_manager *
pb_debug_manager_create(struct pb_manager *provider,
                        pb_size underflow_size, pb_size overflow_size);
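
/*
 * Illustrative sketch (sizes are assumptions): pad every allocation with
 * guard areas so that out-of-bounds writes can be detected, typically only
 * in debug builds.
 *
 *    struct pb_manager *dbg;
 *
 *    dbg = pb_debug_manager_create(provider, 4096, 4096);
 */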


#ifdef __cplusplus
}
#endif

#endif /*PB_BUFMGR_H_*/