/*
 * Copyright (C) 2019 Collabora, Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *   Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
 */

#include "util/u_math.h"
#include "util/macros.h"
#include "pan_encoder.h"

/* Midgard has a small register file, so shaders with high register pressure
 * need to spill from the register file onto the stack. In addition to
 * spilling, it is desirable to allocate temporary arrays on the stack (for
 * instance because the register file does not support indirect access but the
 * stack does).
 *
 * The stack is located in "Thread Local Storage", sometimes abbreviated TLS in
 * the kernel source code. Thread local storage is allocated per-thread,
 * per-core, so threads executing concurrently do not interfere with each
 * other's stacks. On modern kernels, we may query
 * DRM_PANFROST_PARAM_THREAD_TLS_ALLOC for the number of threads per core we
 * must allocate for, and DRM_PANFROST_PARAM_SHADER_PRESENT for a bitmask of
 * shader cores (so take a popcount of that mask for the number of shader
 * cores). On older kernels that do not support querying these values,
 * following kbase, we may use the worst-case value of 256 threads for
 * THREAD_TLS_ALLOC, and the worst-case value of 16 cores for Midgard per the
 * "shader core count" column of the implementations table in
 * https://en.wikipedia.org/wiki/Mali_%28GPU%29 [citation needed]
 *
 * Within a particular thread, there is stack allocated. If it is present, its
 * size is a power-of-two, and it is at least 16 bytes. Stack is allocated
 * with the shared memory descriptor used for all shaders within a frame (note
 * that they don't execute concurrently so it's fine). So, consider the maximum
 * stack size used by any shader within a job, and then compute (where npot
 * denotes the next power of two):
 *
 *      bytes/thread = npot(max(size, 16))
 *      allocated = (# of bytes/thread) * (# of threads/core) * (# of cores)
 *
 * The size of Thread Local Storage is signaled to the GPU in the tls_size
 * field, which has a log2 modifier and is in units of 16 bytes.
 */

/* Computes log_stack_size = log2(ceil(s / 16)), i.e. the log2-encoded stack
 * size in 16-byte units expected by the hardware's tls_size field. A zero
 * stack size (no stack in use) maps to a shift of 0. */

unsigned
panfrost_get_stack_shift(unsigned stack_size)
{
        if (!stack_size)
                return 0;

        return util_logbase2_ceil(DIV_ROUND_UP(stack_size, 16));
}
747ec681f3Smrg
/* Computes the total TLS allocation for stacks across the GPU: the
 * per-thread stack size, padded to a 16-byte-aligned power of two, multiplied
 * out by the number of hardware threads per core and the number of shader
 * cores. A zero per-thread size yields zero total (no stack needed). */

unsigned
panfrost_get_total_stack_size(
                unsigned thread_size,
                unsigned threads_per_core,
                unsigned core_count)
{
        if (thread_size == 0)
                return 0;

        /* Hardware requires a power-of-two stack of at least 16 bytes */
        unsigned padded = util_next_power_of_two(ALIGN_POT(thread_size, 16));

        return padded * threads_per_core * core_count;
}