/*
 * Copyright © 2021 Google, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifdef X
#undef X
#endif

#if PTRSZ == 32
#define X(n) n##_32
#else
#define X(n) n##_64
#endif
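
/*
 * This file is a template: X() suffixes each function name with _32 or
 * _64 according to PTRSZ, so the same code can be compiled once per
 * pointer size.  A minimal sketch of how the enclosing .c file is
 * assumed to instantiate it (the include name here is illustrative,
 * not taken from this header):
 *
 *    #define PTRSZ 32
 *    #include "msm_ringbuffer_sp.h"
 *    #undef PTRSZ
 *    #define PTRSZ 64
 *    #include "msm_ringbuffer_sp.h"
 *
 * which yields e.g. emit_reloc_common_32 and emit_reloc_common_64.
 */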

static void X(emit_reloc_common)(struct fd_ringbuffer *ring,
                                 const struct fd_reloc *reloc)
{
   (*ring->cur++) = (uint32_t)reloc->iova;
#if PTRSZ == 64
   (*ring->cur++) = (uint32_t)(reloc->iova >> 32);
#endif
}

static void X(msm_ringbuffer_sp_emit_reloc_nonobj)(struct fd_ringbuffer *ring,
                                                   const struct fd_reloc *reloc)
{
   X(emit_reloc_common)(ring, reloc);

   assert(!(ring->flags & _FD_RINGBUFFER_OBJECT));

   struct msm_ringbuffer_sp *msm_ring = to_msm_ringbuffer_sp(ring);

   struct msm_submit_sp *msm_submit = to_msm_submit_sp(msm_ring->u.submit);

   msm_submit_append_bo(msm_submit, reloc->bo);
}

static void X(msm_ringbuffer_sp_emit_reloc_obj)(struct fd_ringbuffer *ring,
                                                const struct fd_reloc *reloc)
{
   X(emit_reloc_common)(ring, reloc);

   assert(ring->flags & _FD_RINGBUFFER_OBJECT);

   struct msm_ringbuffer_sp *msm_ring = to_msm_ringbuffer_sp(ring);

   /* Avoid emitting duplicate BO references into the list.  Ringbuffer
    * objects are long-lived, so this saves ongoing work at draw time in
    * exchange for a bit at context setup/first draw.  And the number of
    * relocs per ringbuffer object is fairly small, so the O(n^2) doesn't
    * hurt much.
    */
   if (!msm_ringbuffer_references_bo(ring, reloc->bo)) {
      APPEND(&msm_ring->u, reloc_bos, fd_bo_ref(reloc->bo));
   }
}

static uint32_t X(msm_ringbuffer_sp_emit_reloc_ring)(
   struct fd_ringbuffer *ring, struct fd_ringbuffer *target, uint32_t cmd_idx)
{
   struct msm_ringbuffer_sp *msm_target = to_msm_ringbuffer_sp(target);
   struct fd_bo *bo;
   uint32_t size;

   if ((target->flags & FD_RINGBUFFER_GROWABLE) &&
       (cmd_idx < msm_target->u.nr_cmds)) {
      bo = msm_target->u.cmds[cmd_idx].ring_bo;
      size = msm_target->u.cmds[cmd_idx].size;
   } else {
      bo = msm_target->ring_bo;
      size = offset_bytes(target->cur, target->start);
   }

   if (ring->flags & _FD_RINGBUFFER_OBJECT) {
      X(msm_ringbuffer_sp_emit_reloc_obj)(ring, &(struct fd_reloc){
         .bo = bo,
         .iova = bo->iova + msm_target->offset,
         .offset = msm_target->offset,
      });
   } else {
      X(msm_ringbuffer_sp_emit_reloc_nonobj)(ring, &(struct fd_reloc){
         .bo = bo,
         .iova = bo->iova + msm_target->offset,
         .offset = msm_target->offset,
      });
   }

   if (!(target->flags & _FD_RINGBUFFER_OBJECT))
      return size;

   struct msm_ringbuffer_sp *msm_ring = to_msm_ringbuffer_sp(ring);

   if (ring->flags & _FD_RINGBUFFER_OBJECT) {
      for (unsigned i = 0; i < msm_target->u.nr_reloc_bos; i++) {
         struct fd_bo *target_bo = msm_target->u.reloc_bos[i];
         if (!msm_ringbuffer_references_bo(ring, target_bo))
            APPEND(&msm_ring->u, reloc_bos, fd_bo_ref(target_bo));
      }
   } else {
      /* TODO it would be nice to know whether we have already
       * seen this target before.  But hopefully we hit the
       * append_bo() fast path enough for this to not matter:
       */
      struct msm_submit_sp *msm_submit = to_msm_submit_sp(msm_ring->u.submit);

      for (unsigned i = 0; i < msm_target->u.nr_reloc_bos; i++) {
         msm_submit_append_bo(msm_submit, msm_target->u.reloc_bos[i]);
      }
   }

   return size;
}
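
/*
 * For reference: msm_ringbuffer_references_bo() and msm_submit_append_bo()
 * are provided by the non-template part of the driver, not by this file.
 * A minimal sketch of what the dedup helper used above is assumed to look
 * like (a linear scan over the ring's reloc_bos list, which is what makes
 * the ringbuffer-object path O(n^2) in the number of relocs):
 *
 *    static bool
 *    msm_ringbuffer_references_bo(struct fd_ringbuffer *ring, struct fd_bo *bo)
 *    {
 *       struct msm_ringbuffer_sp *msm_ring = to_msm_ringbuffer_sp(ring);
 *
 *       for (unsigned i = 0; i < msm_ring->u.nr_reloc_bos; i++) {
 *          if (msm_ring->u.reloc_bos[i] == bo)
 *             return true;
 *       }
 *       return false;
 *    }
 */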