/* Home | History | Annotate | Line # | Download | only in falcon */
      1 /*	$NetBSD: nouveau_nvkm_falcon_cmdq.c,v 1.4 2021/12/19 11:34:45 riastradh Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
      5  *
      6  * Permission is hereby granted, free of charge, to any person obtaining a
      7  * copy of this software and associated documentation files (the "Software"),
      8  * to deal in the Software without restriction, including without limitation
      9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
     10  * and/or sell copies of the Software, and to permit persons to whom the
     11  * Software is furnished to do so, subject to the following conditions:
     12  *
     13  * The above copyright notice and this permission notice shall be included in
     14  * all copies or substantial portions of the Software.
     15  *
     16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
     20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
     21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
     22  * OTHER DEALINGS IN THE SOFTWARE.
     23  *
     24  */
     25 #include <sys/cdefs.h>
     26 __KERNEL_RCSID(0, "$NetBSD: nouveau_nvkm_falcon_cmdq.c,v 1.4 2021/12/19 11:34:45 riastradh Exp $");
     27 
     28 #include "qmgr.h"
     29 
     30 #include <linux/nbsd-namespace.h>
     31 
/*
 * Check whether the command queue ring buffer has room for a command of
 * "size" bytes.  The queue occupies [offset, offset + size) in falcon
 * DMEM; HEAD/TAIL registers hold DMEM addresses.  If the contiguous
 * space between head and the end of the queue is too small, the write
 * position must first be rewound to the start of the queue; *rewind is
 * set so the caller can emit a REWIND command before its payload.
 */
static bool
nvkm_falcon_cmdq_has_room(struct nvkm_falcon_cmdq *cmdq, u32 size, bool *rewind)
{
	u32 head = nvkm_falcon_rd32(cmdq->qmgr->falcon, cmdq->head_reg);
	u32 tail = nvkm_falcon_rd32(cmdq->qmgr->falcon, cmdq->tail_reg);
	u32 free;

	size = ALIGN(size, QUEUE_ALIGNMENT);

	if (head >= tail) {
		/* Writer is ahead of reader: free space runs from head to
		 * the end of the queue, minus room for a REWIND header. */
		free = cmdq->offset + cmdq->size - head;
		free -= HDR_SIZE;

		if (size > free) {
			/* Not enough contiguous space at the tail end;
			 * pretend we rewound and re-check below. */
			*rewind = true;
			head = cmdq->offset;
		}
	}

	/* Deliberately NOT an "else": after a rewind above, head may now
	 * be below tail, in which case the usable space is the gap up to
	 * (but not including) tail.  If head == tail after the rewind,
	 * "free" keeps the too-small value from above and we report full. */
	if (head < tail)
		free = tail - head - 1;

	return size <= free;
}
     56 
     57 static void
     58 nvkm_falcon_cmdq_push(struct nvkm_falcon_cmdq *cmdq, void *data, u32 size)
     59 {
     60 	struct nvkm_falcon *falcon = cmdq->qmgr->falcon;
     61 	nvkm_falcon_load_dmem(falcon, data, cmdq->position, size, 0);
     62 	cmdq->position += ALIGN(size, QUEUE_ALIGNMENT);
     63 }
     64 
     65 static void
     66 nvkm_falcon_cmdq_rewind(struct nvkm_falcon_cmdq *cmdq)
     67 {
     68 	struct nv_falcon_cmd cmd;
     69 
     70 	cmd.unit_id = NV_FALCON_CMD_UNIT_ID_REWIND;
     71 	cmd.size = sizeof(cmd);
     72 	nvkm_falcon_cmdq_push(cmdq, &cmd, cmd.size);
     73 
     74 	cmdq->position = cmdq->offset;
     75 }
     76 
     77 static int
     78 nvkm_falcon_cmdq_open(struct nvkm_falcon_cmdq *cmdq, u32 size)
     79 {
     80 	struct nvkm_falcon *falcon = cmdq->qmgr->falcon;
     81 	bool rewind = false;
     82 
     83 	mutex_lock(&cmdq->mutex);
     84 
     85 	if (!nvkm_falcon_cmdq_has_room(cmdq, size, &rewind)) {
     86 		FLCNQ_DBG(cmdq, "queue full");
     87 		mutex_unlock(&cmdq->mutex);
     88 		return -EAGAIN;
     89 	}
     90 
     91 	cmdq->position = nvkm_falcon_rd32(falcon, cmdq->head_reg);
     92 
     93 	if (rewind)
     94 		nvkm_falcon_cmdq_rewind(cmdq);
     95 
     96 	return 0;
     97 }
     98 
     99 static void
    100 nvkm_falcon_cmdq_close(struct nvkm_falcon_cmdq *cmdq)
    101 {
    102 	nvkm_falcon_wr32(cmdq->qmgr->falcon, cmdq->head_reg, cmdq->position);
    103 	mutex_unlock(&cmdq->mutex);
    104 }
    105 
    106 static int
    107 nvkm_falcon_cmdq_write(struct nvkm_falcon_cmdq *cmdq, struct nv_falcon_cmd *cmd)
    108 {
    109 	static unsigned timeout = 2000;
    110 	unsigned long end_jiffies = jiffies + msecs_to_jiffies(timeout);
    111 	int ret = -EAGAIN;
    112 
    113 	while (ret == -EAGAIN && time_before(jiffies, end_jiffies))
    114 		ret = nvkm_falcon_cmdq_open(cmdq, cmd->size);
    115 	if (ret) {
    116 		FLCNQ_ERR(cmdq, "timeout waiting for queue space");
    117 		return ret;
    118 	}
    119 
    120 	nvkm_falcon_cmdq_push(cmdq, cmd, cmd->size);
    121 	nvkm_falcon_cmdq_close(cmdq);
    122 	return ret;
    123 }
    124 
    125 /* specifies that we want to know the command status in the answer message */
    126 #define CMD_FLAGS_STATUS BIT(0)
    127 /* specifies that we want an interrupt when the answer message is queued */
    128 #define CMD_FLAGS_INTR BIT(1)
    129 
/*
 * Send a command to the falcon and (unless timeout == 0) wait for the
 * firmware's reply.
 *
 * cmd:     command to send; seq_id and ctrl_flags are filled in here.
 * cb/priv: callback invoked by the message handler when the reply
 *          arrives, with priv as its argument.
 * timeout: jiffies to wait for the reply; 0 means asynchronous — the
 *          caller does not wait and the sequence is completed/released
 *          from the reply path.
 *
 * Returns 0 (async), the firmware's result (sync), or a negative errno.
 */
int
nvkm_falcon_cmdq_send(struct nvkm_falcon_cmdq *cmdq, struct nv_falcon_cmd *cmd,
		      nvkm_falcon_qmgr_callback cb, void *priv,
		      unsigned long timeout)
{
	struct nvkm_falcon_qmgr_seq *seq;
	int ret;

	/* Queue must have been initialised (nvkm_falcon_cmdq_init()). */
	if (!wait_for_completion_timeout(&cmdq->ready,
					 msecs_to_jiffies(1000))) {
		FLCNQ_ERR(cmdq, "timeout waiting for queue ready");
		return -ETIMEDOUT;
	}

	/* Allocate a sequence slot to match the reply to this command. */
	seq = nvkm_falcon_qmgr_seq_acquire(cmdq->qmgr);
	if (IS_ERR(seq))
		return PTR_ERR(seq);

	cmd->seq_id = seq->id;
	/* Request a status reply and an interrupt when it is queued. */
	cmd->ctrl_flags = CMD_FLAGS_STATUS | CMD_FLAGS_INTR;

	seq->state = SEQ_STATE_USED;
	seq->async = !timeout;
	seq->callback = cb;
	seq->priv = priv;

	ret = nvkm_falcon_cmdq_write(cmdq, cmd);
	if (ret) {
		/* Command never reached the firmware: no reply will come,
		 * so the slot can be released immediately. */
		seq->state = SEQ_STATE_PENDING;
		nvkm_falcon_qmgr_seq_release(cmdq->qmgr, seq);
		return ret;
	}

	if (!seq->async) {
		if (!wait_for_completion_timeout(&seq->done, timeout)) {
			FLCNQ_ERR(cmdq, "timeout waiting for reply");
			/* NOTE(review): seq is deliberately NOT released
			 * here — the firmware may still deliver a late
			 * reply referencing this seq_id; releasing now
			 * could let the slot be reused.  The slot is
			 * leaked instead — confirm against the message
			 * handler in msgq/qmgr. */
			return -ETIMEDOUT;
		}
		ret = seq->result;
		nvkm_falcon_qmgr_seq_release(cmdq->qmgr, seq);
	}

	return ret;
}
    174 
/*
 * Mark the queue as not ready: after this, nvkm_falcon_cmdq_send()
 * blocks (and eventually times out) until nvkm_falcon_cmdq_init()
 * signals readiness again via complete_all().
 */
void
nvkm_falcon_cmdq_fini(struct nvkm_falcon_cmdq *cmdq)
{
	reinit_completion(&cmdq->ready);
}
    180 
    181 void
    182 nvkm_falcon_cmdq_init(struct nvkm_falcon_cmdq *cmdq,
    183 		      u32 index, u32 offset, u32 size)
    184 {
    185 	const struct nvkm_falcon_func *func = cmdq->qmgr->falcon->func;
    186 
    187 	cmdq->head_reg = func->cmdq.head + index * func->cmdq.stride;
    188 	cmdq->tail_reg = func->cmdq.tail + index * func->cmdq.stride;
    189 	cmdq->offset = offset;
    190 	cmdq->size = size;
    191 	complete_all(&cmdq->ready);
    192 
    193 	FLCNQ_DBG(cmdq, "initialised @ index %d offset 0x%08x size 0x%08x",
    194 		  index, cmdq->offset, cmdq->size);
    195 }
    196 
    197 void
    198 nvkm_falcon_cmdq_del(struct nvkm_falcon_cmdq **pcmdq)
    199 {
    200 	struct nvkm_falcon_cmdq *cmdq = *pcmdq;
    201 	if (cmdq) {
    202 		destroy_completion(&cmdq->ready);
    203 		mutex_destroy(&cmdq->mutex);
    204 		kfree(*pcmdq);
    205 		*pcmdq = NULL;
    206 	}
    207 }
    208 
    209 int
    210 nvkm_falcon_cmdq_new(struct nvkm_falcon_qmgr *qmgr, const char *name,
    211 		     struct nvkm_falcon_cmdq **pcmdq)
    212 {
    213 	struct nvkm_falcon_cmdq *cmdq = *pcmdq;
    214 
    215 	if (!(cmdq = *pcmdq = kzalloc(sizeof(*cmdq), GFP_KERNEL)))
    216 		return -ENOMEM;
    217 
    218 	cmdq->qmgr = qmgr;
    219 	cmdq->name = name;
    220 	mutex_init(&cmdq->mutex);
    221 	init_completion(&cmdq->ready);
    222 	return 0;
    223 }
    224