/*	$NetBSD: amdgpu_pci.c,v 1.12 2023/08/07 16:34:47 riastradh Exp $	*/

/*-
 * Copyright (c) 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_pci.c,v 1.12 2023/08/07 16:34:47 riastradh Exp $");

#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/workqueue.h>

#include <dev/pci/pcivar.h>

#include <linux/pci.h>

#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_pci.h>

#include <amdgpu.h>
#include "amdgpu_drv.h"
#include "amdgpu_task.h"

struct drm_device;

SIMPLEQ_HEAD(amdgpu_task_head, amdgpu_task);

struct amdgpu_softc {
	device_t			sc_dev;
	struct pci_attach_args		sc_pa;
	struct lwp			*sc_task_thread;
	struct amdgpu_task_head		sc_tasks;
	struct workqueue		*sc_task_wq;
	struct drm_device		*sc_drm_dev;
	struct pci_dev			sc_pci_dev;
	bool				sc_pci_attached;
	bool				sc_dev_registered;
};

static bool	amdgpu_pci_lookup(const struct pci_attach_args *,
		    unsigned long *);

static int	amdgpu_match(device_t, cfdata_t, void *);
static void	amdgpu_attach(device_t, device_t, void *);
static void	amdgpu_attach_real(device_t);
static int	amdgpu_detach(device_t, int);
static bool	amdgpu_do_suspend(device_t, const pmf_qual_t *);
static bool	amdgpu_do_resume(device_t, const pmf_qual_t *);

static void	amdgpu_task_work(struct work *, void *);

CFATTACH_DECL_NEW(amdgpu, sizeof(struct amdgpu_softc),
    amdgpu_match, amdgpu_attach, amdgpu_detach, NULL);

/* XXX Kludge to get these from amdgpu_drv.c.  */
extern struct drm_driver *const amdgpu_drm_driver;
extern const struct pci_device_id *const amdgpu_device_ids;
extern const size_t amdgpu_n_device_ids;

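/*
 * amdgpu_pci_lookup(pa, flags)
 *
 *	Return true if pa matches an entry in the amdgpu PCI device id
 *	table, false if not.  If there is a match and flags is nonnull,
 *	store the matching entry's driver data in *flags.
 */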
static bool
amdgpu_pci_lookup(const struct pci_attach_args *pa, unsigned long *flags)
{
	size_t i;

	for (i = 0; i < amdgpu_n_device_ids; i++) {
		if ((PCI_VENDOR(pa->pa_id) == amdgpu_device_ids[i].vendor) &&
		    (PCI_PRODUCT(pa->pa_id) == amdgpu_device_ids[i].device))
			break;
	}

	/* Did we find it?  */
	if (i == amdgpu_n_device_ids)
		return false;

	if (flags)
		*flags = amdgpu_device_ids[i].driver_data;
	return true;
}

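/*
 * amdgpu_match(parent, match, aux)
 *
 *	Autoconf match routine: return nonzero if the PCI device at aux
 *	is one this driver knows how to handle, zero if not.
 */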
static int
amdgpu_match(device_t parent, cfdata_t match, void *aux)
{
	extern int amdgpu_guarantee_initialized(void);
	const struct pci_attach_args *const pa = aux;
	int error;

	error = amdgpu_guarantee_initialized();
	if (error) {
		aprint_error("amdgpu: failed to initialize: %d\n", error);
		return 0;
	}

	if (!amdgpu_pci_lookup(pa, NULL))
		return 0;

	return 7;		/* beat genfb_pci and radeon */
}

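/*
 * amdgpu_attach(parent, self, aux)
 *
 *	Autoconf attach routine: initialize the softc and the task
 *	workqueue, then defer the rest of attachment to
 *	amdgpu_attach_real until the root file system is mounted and
 *	firmware images can be loaded.
 */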
static void
amdgpu_attach(device_t parent, device_t self, void *aux)
{
	struct amdgpu_softc *const sc = device_private(self);
	const struct pci_attach_args *const pa = aux;
	int error;

	pci_aprint_devinfo(pa, NULL);

	/* Initialize the Linux PCI device descriptor.  */
	linux_pci_dev_init(&sc->sc_pci_dev, self, device_parent(self), pa, 0);

	sc->sc_dev = self;
	sc->sc_pa = *pa;
	sc->sc_task_thread = NULL;
	SIMPLEQ_INIT(&sc->sc_tasks);
	error = workqueue_create(&sc->sc_task_wq, "amdgpufb",
	    &amdgpu_task_work, NULL, PRI_NONE, IPL_NONE, WQ_MPSAFE);
	if (error) {
		aprint_error_dev(self, "unable to create workqueue: %d\n",
		    error);
		sc->sc_task_wq = NULL;
		return;
	}

	/*
	 * Defer the remainder of initialization until we have mounted
	 * the root file system and can load firmware images.
	 */
	config_mountroot(self, &amdgpu_attach_real);
}

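/*
 * amdgpu_attach_real(self)
 *
 *	Deferred attach: allocate, attach, and register the drm device,
 *	hook up power management, and then run any tasks that were
 *	queued synchronously during attach so a console framebuffer can
 *	appear before autoconfiguration completes.
 */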
static void
amdgpu_attach_real(device_t self)
{
	struct amdgpu_softc *const sc = device_private(self);
	const struct pci_attach_args *const pa = &sc->sc_pa;
	bool ok __diagused;
	unsigned long flags = 0; /* XXXGCC */
	int error;

	ok = amdgpu_pci_lookup(pa, &flags);
	KASSERT(ok);

	/*
	 * Cause any tasks issued synchronously during attach to be
	 * processed at the end of this function.
	 */
	sc->sc_task_thread = curlwp;

	sc->sc_drm_dev = drm_dev_alloc(amdgpu_drm_driver, self);
	if (IS_ERR(sc->sc_drm_dev)) {
		aprint_error_dev(self, "unable to create drm device: %ld\n",
		    PTR_ERR(sc->sc_drm_dev));
		sc->sc_drm_dev = NULL;
		goto out;
	}

	/* XXX errno Linux->NetBSD */
	error = -drm_pci_attach(sc->sc_drm_dev, &sc->sc_pci_dev);
	if (error) {
		aprint_error_dev(self, "unable to attach drm: %d\n", error);
		goto out;
	}
	sc->sc_pci_attached = true;

	/* XXX errno Linux->NetBSD */
	error = -drm_dev_register(sc->sc_drm_dev, flags);
	if (error) {
		aprint_error_dev(self, "unable to register drm: %d\n", error);
		goto out;
	}
	sc->sc_dev_registered = true;

	if (!pmf_device_register(self, &amdgpu_do_suspend, &amdgpu_do_resume))
		aprint_error_dev(self, "unable to establish power handler\n");

	/*
	 * Process asynchronous tasks queued synchronously during
	 * attach.  This will be for display detection to attach a
	 * framebuffer, so we have the opportunity for a console device
	 * to attach before autoconf has completed, in time for init(8)
	 * to find that console without panicking.
	 */
	while (!SIMPLEQ_EMPTY(&sc->sc_tasks)) {
		struct amdgpu_task *const task = SIMPLEQ_FIRST(&sc->sc_tasks);

		SIMPLEQ_REMOVE_HEAD(&sc->sc_tasks, rt_u.queue);
		(*task->rt_fn)(task);
	}

out:	/* Cause any subsequent tasks to be processed by the workqueue.  */
	atomic_store_relaxed(&sc->sc_task_thread, NULL);
}

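/*
 * amdgpu_detach(self, flags)
 *
 *	Autoconf detach routine: tear down whatever attach managed to
 *	set up, in reverse order, and return 0 on success or an error
 *	code on failure.
 */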
static int
amdgpu_detach(device_t self, int flags)
{
	struct amdgpu_softc *const sc = device_private(self);
	int error;

	/* XXX Check for in-use before tearing it all down...  */
	error = config_detach_children(self, flags);
	if (error)
		return error;

	KASSERT(sc->sc_task_thread == NULL);
	KASSERT(SIMPLEQ_EMPTY(&sc->sc_tasks));

	pmf_device_deregister(self);
	if (sc->sc_dev_registered)
		drm_dev_unregister(sc->sc_drm_dev);
	if (sc->sc_pci_attached)
		drm_pci_detach(sc->sc_drm_dev);
	if (sc->sc_drm_dev) {
		drm_dev_put(sc->sc_drm_dev);
		sc->sc_drm_dev = NULL;
	}
	if (sc->sc_task_wq) {
		workqueue_destroy(sc->sc_task_wq);
		sc->sc_task_wq = NULL;
	}
	linux_pci_dev_destroy(&sc->sc_pci_dev);

	return 0;
}

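/*
 * amdgpu_do_suspend(self, qual)
 *
 *	pmf(9) suspend handler: suspend ioctl processing and then the
 *	device itself.  Return true on success, false on failure.
 */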
static bool
amdgpu_do_suspend(device_t self, const pmf_qual_t *qual)
{
	struct amdgpu_softc *const sc = device_private(self);
	struct drm_device *const dev = sc->sc_drm_dev;
	int ret;

	drm_suspend_ioctl(dev);

	ret = amdgpu_device_suspend(dev, /*fbcon*/true);
	if (ret)
		return false;

	return true;
}

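/*
 * amdgpu_do_resume(self, qual)
 *
 *	pmf(9) resume handler: resume the device and then resume ioctl
 *	processing.  Return true on success, false on failure.
 */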
static bool
amdgpu_do_resume(device_t self, const pmf_qual_t *qual)
{
	struct amdgpu_softc *const sc = device_private(self);
	struct drm_device *const dev = sc->sc_drm_dev;
	int ret;

	ret = amdgpu_device_resume(dev, /*fbcon*/true);
	if (ret)
		goto out;

out:	drm_resume_ioctl(dev);
	return ret == 0;
}

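/*
 * amdgpu_task_work(work, cookie)
 *
 *	Workqueue handler: run one task that amdgpu_task_schedule
 *	enqueued after attach had completed.
 */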
static void
amdgpu_task_work(struct work *work, void *cookie __unused)
{
	struct amdgpu_task *const task = container_of(work, struct amdgpu_task,
	    rt_u.work);

	(*task->rt_fn)(task);
}

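/*
 * amdgpu_task_schedule(self, task)
 *
 *	Schedule task to run asynchronously.  If the caller is the lwp
 *	that is still inside amdgpu_attach_real, queue the task to run
 *	at the end of attach; otherwise hand it to the task workqueue.
 *
 *	Illustrative sketch of a caller (the callback name here is
 *	hypothetical, not part of this file): the caller embeds a
 *	struct amdgpu_task, points rt_fn at its callback, and passes
 *	the task here:
 *
 *		task->rt_fn = &example_task_fn;
 *		amdgpu_task_schedule(sc->sc_dev, task);
 */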
void
amdgpu_task_schedule(device_t self, struct amdgpu_task *task)
{
	struct amdgpu_softc *const sc = device_private(self);

	if (atomic_load_relaxed(&sc->sc_task_thread) == curlwp)
		SIMPLEQ_INSERT_TAIL(&sc->sc_tasks, task, rt_u.queue);
	else
		workqueue_enqueue(sc->sc_task_wq, &task->rt_u.work, NULL);
}
    307