#
# Copyright (C) 2018 Alyssa Rosenzweig
# Copyright (C) 2019-2020 Collabora, Ltd.
#
# Copyright (C) 2016 Intel Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.

import argparse
import sys
import math

a = 'a'
b = 'b'
c = 'c'

algebraic = [
   # Allows us to schedule as a multiply by 2
   (('~fadd', ('fadd', a, b), a), ('fadd', ('fadd', a, a), b)),
]
algebraic_late = [
    # ineg must be lowered late, and only for integers; floats get a negate
    # modifier attached instead, which is why this lives here rather than in
    # a more standard lower_negate pass.

    (('ineg', a), ('isub', 0, a)),

    # Likewise we want fsub lowered but not isub
    (('fsub', a, b), ('fadd', a, ('fneg', b))),

    # These two special cases save an op (and space) compared to an actual
    # csel, and give the scheduler more flexibility.

    (('b32csel', a, 'b@32', 0), ('iand', a, b)),
    (('b32csel', a, 0, 'b@32'), ('iand', ('inot', a), b)),

    # Fuse sat_signed. This should probably be shared with Bifrost
    (('~fmin', ('fmax', a, -1.0), 1.0), ('fsat_signed_mali', a)),
    (('~fmax', ('fmin', a, 1.0), -1.0), ('fsat_signed_mali', a)),

    # Fuse clamp_positive. This should probably be shared with Utgard/Bifrost
    (('fmax', a, 0.0), ('fclamp_pos_mali', a)),

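    # Narrow (8/16-bit) shifts are lowered by widening the shifted value to
    # 32 bits and narrowing the result back down; the shift amount itself is
    # left untouched.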
    (('ishl', 'a@16', b), ('u2u16', ('ishl', ('u2u32', a), b))),
    (('ishr', 'a@16', b), ('i2i16', ('ishr', ('i2i32', a), b))),
    (('ushr', 'a@16', b), ('u2u16', ('ushr', ('u2u32', a), b))),

    (('ishl', 'a@8', b), ('u2u8', ('u2u16', ('ishl', ('u2u32', ('u2u16', a)), b)))),
    (('ishr', 'a@8', b), ('i2i8', ('i2i16', ('ishr', ('i2i32', ('i2i16', a)), b)))),
    (('ushr', 'a@8', b), ('u2u8', ('u2u16', ('ushr', ('u2u32', ('u2u16', a)), b)))),

    # Canonical form. The scheduler will convert back if it makes sense.
    (('fmul', a, 2.0), ('fadd', a, a))
]

# The size conversion is redundant on Midgard but required by NIR, and writing
# this lowering in MIR would be painful without a proper builder, so eat the
# extra instruction
for sz in ('8', '16', '32'):
    converted = ('u2u32', a) if sz != '32' else a
    algebraic_late += [(('ufind_msb', 'a@' + sz), ('isub', 31, ('uclz', converted)))]
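# For example, for sz == '16' the loop above appends:
#   (('ufind_msb', 'a@16'), ('isub', 31, ('uclz', ('u2u32', a))))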

# Midgard can only type-convert down by one "step" per instruction; if NIR
# wants more than one step, we need to break it up into multiple instructions.
# Nevertheless, we can do both a size step and a float/int step at once.

converts = []

for op in ('u2u', 'i2i', 'f2f', 'i2f', 'u2f', 'f2i', 'f2u'):
    srcsz_max = 64
    dstsz_max = 64
    # 8-bit floats don't exist
    srcsz_min = 8 if op[0] != 'f' else 16
    dstsz_min = 8 if op[2] != 'f' else 16
    dstsz = dstsz_min
    # Iterate over all possible destination and source sizes
    while dstsz <= dstsz_max:
        srcsz = srcsz_min
        while srcsz <= srcsz_max:
            # Size conversion lowering is only needed if the src and dst sizes
            # are spaced by a factor > 2.
            if srcsz != dstsz and (srcsz * 2 != dstsz and srcsz != dstsz * 2):
                cursz = srcsz
                rule = a
                # When converting down we first do the type conversion followed
                # by one or more size conversions. When converting up, we do
                # the type conversion at the end. This way we don't have to
                # deal with the fact that f2f8 doesn't exist.
                sizeconvop = op[0] + '2' + op[0] if srcsz < dstsz else op[2] + '2' + op[2]
                if srcsz > dstsz and op[0] != op[2]:
                    rule = (op + str(int(cursz)), rule)
                while cursz != dstsz:
                    cursz = cursz // 2 if dstsz < srcsz else cursz * 2
                    rule = (sizeconvop + str(int(cursz)), rule)
                if srcsz < dstsz and op[0] != op[2]:
                    rule = (op + str(int(cursz)), rule)
                converts += [((op + str(int(dstsz)), 'a@' + str(int(srcsz))), rule)]
            srcsz *= 2
        dstsz *= 2
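
# As an example of the generated rules, a 32-bit to 8-bit float-to-unsigned
# conversion is split as
#   ('f2u8', 'a@32')  ->  ('u2u8', ('u2u16', ('f2u32', a)))
# i.e. the float/int step happens first (since we are converting down),
# followed by one halving size step per instruction.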

# Try to force constants to the right
constant_switch = [
        # fge gets flipped to fle, so invert instead to keep the operand order
        (('fge', 'a', '#b'), ('inot', ('flt', a, b))),
        (('fge32', 'a', '#b'), ('inot', ('flt32', a, b))),
        (('ige32', 'a', '#b'), ('inot', ('ilt32', a, b))),
        (('uge32', 'a', '#b'), ('inot', ('ult32', a, b))),

        # fge gets mapped to fle with a flip
        (('flt32', '#a', 'b'), ('inot', ('fge32', a, b))),
        (('ilt32', '#a', 'b'), ('inot', ('ige32', a, b))),
        (('ult32', '#a', 'b'), ('inot', ('uge32', a, b)))
]

# ...since the switching above happens after the main algebraic optimizations
# have already run, cancel the resulting double-inots ourselves
cancel_inot = [
        (('inot', ('inot', a)), a)
]

# Midgard scales fsin/fcos arguments by pi internally, so divide by pi here
# to compensate. This pass must be run only once, after the main loop,
# so the division is not applied repeatedly.

scale_trig = [
        (('fsin', a), ('fsin', ('fdiv', a, math.pi))),
        (('fcos', a), ('fcos', ('fdiv', a, math.pi))),
]

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-p', '--import-path', required=True)
    args = parser.parse_args()
    sys.path.insert(0, args.import_path)
    run()


def run():
    import nir_algebraic  # pylint: disable=import-error

    print('#include "midgard_nir.h"')

    print(nir_algebraic.AlgebraicPass("midgard_nir_lower_algebraic_early",
                                      algebraic).render())

    print(nir_algebraic.AlgebraicPass("midgard_nir_lower_algebraic_late",
                                      algebraic_late + converts + constant_switch).render())

    print(nir_algebraic.AlgebraicPass("midgard_nir_scale_trig",
                                      scale_trig).render())

    print(nir_algebraic.AlgebraicPass("midgard_nir_cancel_inot",
                                      cancel_inot).render())


if __name__ == '__main__':
    main()