/*	$NetBSD: amdgpu_pci.c,v 1.10 2021/12/19 12:28:12 riastradh Exp $	*/

/*-
 * Copyright (c) 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_pci.c,v 1.10 2021/12/19 12:28:12 riastradh Exp $");

#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/workqueue.h>

#include <dev/pci/pcivar.h>

#include <linux/pci.h>

#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_pci.h>

#include <amdgpu.h>
#include "amdgpu_drv.h"
#include "amdgpu_task.h"

struct drm_device;

SIMPLEQ_HEAD(amdgpu_task_head, amdgpu_task);

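/*
 * Driver private state.  sc_task_thread and sc_tasks serialize tasks
 * issued synchronously while amdgpu_attach_real runs on the config
 * thread; once attach completes, tasks are dispatched to sc_task_wq
 * instead.
 */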
struct amdgpu_softc {
	device_t			sc_dev;
	struct pci_attach_args		sc_pa;
	struct lwp			*sc_task_thread;
	struct amdgpu_task_head		sc_tasks;
	struct workqueue		*sc_task_wq;
	struct drm_device		*sc_drm_dev;
	struct pci_dev			sc_pci_dev;
	bool				sc_pci_attached;
	bool				sc_dev_registered;
};

static bool	amdgpu_pci_lookup(const struct pci_attach_args *,
		    unsigned long *);

static int	amdgpu_match(device_t, cfdata_t, void *);
static void	amdgpu_attach(device_t, device_t, void *);
static void	amdgpu_attach_real(device_t);
static int	amdgpu_detach(device_t, int);
static bool	amdgpu_do_suspend(device_t, const pmf_qual_t *);
static bool	amdgpu_do_resume(device_t, const pmf_qual_t *);

static void	amdgpu_task_work(struct work *, void *);

CFATTACH_DECL_NEW(amdgpu, sizeof(struct amdgpu_softc),
    amdgpu_match, amdgpu_attach, amdgpu_detach, NULL);

/* XXX Kludge to get these from amdgpu_drv.c.  */
extern struct drm_driver *const amdgpu_drm_driver;
extern const struct pci_device_id *const amdgpu_device_ids;
extern const size_t amdgpu_n_device_ids;

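/*
 * Scan the PCI ID table exported by amdgpu_drv.c for this device.
 * Return true if the device is one of ours, and optionally hand back
 * the table entry's driver_data via *flags.
 */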
static bool
amdgpu_pci_lookup(const struct pci_attach_args *pa, unsigned long *flags)
{
	size_t i;

	for (i = 0; i < amdgpu_n_device_ids; i++) {
		if ((PCI_VENDOR(pa->pa_id) == amdgpu_device_ids[i].vendor) &&
		    (PCI_PRODUCT(pa->pa_id) == amdgpu_device_ids[i].device))
			break;
	}

	/* Did we find it?  */
	if (i == amdgpu_n_device_ids)
		return false;

	if (flags)
		*flags = amdgpu_device_ids[i].driver_data;
	return true;
}

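/*
 * Autoconf match: make sure the Linux-side driver structures are
 * initialized, then claim any device that appears in the PCI ID table.
 */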
static int
amdgpu_match(device_t parent, cfdata_t match, void *aux)
{
	extern int amdgpu_guarantee_initialized(void);
	const struct pci_attach_args *const pa = aux;
	int error;

	error = amdgpu_guarantee_initialized();
	if (error) {
		aprint_error("amdgpu: failed to initialize: %d\n", error);
		return 0;
	}

	if (!amdgpu_pci_lookup(pa, NULL))
		return 0;

	return 7;		/* beat genfb_pci and radeon  */
}

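/*
 * Autoconf attach: set up the Linux PCI device shim and the task
 * workqueue, then defer the rest of initialization until the root
 * file system is mounted and firmware images can be loaded.
 */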
static void
amdgpu_attach(device_t parent, device_t self, void *aux)
{
	struct amdgpu_softc *const sc = device_private(self);
	const struct pci_attach_args *const pa = aux;
	int error;

	pci_aprint_devinfo(pa, NULL);

	/* Initialize the Linux PCI device descriptor.  */
	linux_pci_dev_init(&sc->sc_pci_dev, self, device_parent(self), pa, 0);

	sc->sc_dev = self;
	sc->sc_pa = *pa;
	sc->sc_task_thread = NULL;
	SIMPLEQ_INIT(&sc->sc_tasks);
	error = workqueue_create(&sc->sc_task_wq, "amdgpufb",
	    &amdgpu_task_work, NULL, PRI_NONE, IPL_NONE, WQ_MPSAFE);
	if (error) {
		aprint_error_dev(self, "unable to create workqueue: %d\n",
		    error);
		sc->sc_task_wq = NULL;
		return;
	}

	/*
	 * Defer the remainder of initialization until we have mounted
	 * the root file system and can load firmware images.
	 */
	config_mountroot(self, &amdgpu_attach_real);
}

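/*
 * Deferred attach, run from config_mountroot: allocate and register
 * the drm device, hook up power management, and finally run any tasks
 * that were queued synchronously while this function was executing.
 */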
static void
amdgpu_attach_real(device_t self)
{
	struct amdgpu_softc *const sc = device_private(self);
	const struct pci_attach_args *const pa = &sc->sc_pa;
	bool ok __diagused;
	unsigned long flags = 0; /* XXXGCC */
	int error;

	ok = amdgpu_pci_lookup(pa, &flags);
	KASSERT(ok);

	/*
	 * Cause any tasks issued synchronously during attach to be
	 * processed at the end of this function.
	 */
	sc->sc_task_thread = curlwp;

	sc->sc_drm_dev = drm_dev_alloc(amdgpu_drm_driver, self);
	if (IS_ERR(sc->sc_drm_dev)) {
		aprint_error_dev(self, "unable to create drm device: %ld\n",
		    PTR_ERR(sc->sc_drm_dev));
		sc->sc_drm_dev = NULL;
		goto out;
	}

	/* XXX errno Linux->NetBSD */
	error = -drm_pci_attach(sc->sc_drm_dev, &sc->sc_pci_dev);
	if (error) {
		aprint_error_dev(self, "unable to attach drm: %d\n", error);
		goto out;
	}
	sc->sc_pci_attached = true;

	/* XXX errno Linux->NetBSD */
	error = -drm_dev_register(sc->sc_drm_dev, flags);
	if (error) {
		aprint_error_dev(self, "unable to register drm: %d\n", error);
		goto out;
	}
	sc->sc_dev_registered = true;

	if (!pmf_device_register(self, &amdgpu_do_suspend, &amdgpu_do_resume))
		aprint_error_dev(self, "unable to establish power handler\n");

	/*
	 * Process asynchronous tasks queued synchronously during
	 * attach.  This will be for display detection to attach a
	 * framebuffer, so we have the opportunity for a console device
	 * to attach before autoconf has completed, in time for init(8)
	 * to find that console without panicking.
	 */
	while (!SIMPLEQ_EMPTY(&sc->sc_tasks)) {
		struct amdgpu_task *const task = SIMPLEQ_FIRST(&sc->sc_tasks);

		SIMPLEQ_REMOVE_HEAD(&sc->sc_tasks, rt_u.queue);
		(*task->rt_fn)(task);
	}

out:	/* Cause any subsequent tasks to be processed by the workqueue.  */
	atomic_store_relaxed(&sc->sc_task_thread, NULL);
}

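/*
 * Autoconf detach: detach children first, then unwind whatever parts
 * of attach completed, in reverse order.
 */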
static int
amdgpu_detach(device_t self, int flags)
{
	struct amdgpu_softc *const sc = device_private(self);
	int error;

	/* XXX Check for in-use before tearing it all down...  */
	error = config_detach_children(self, flags);
	if (error)
		return error;

	KASSERT(sc->sc_task_thread == NULL);
	KASSERT(SIMPLEQ_EMPTY(&sc->sc_tasks));

	pmf_device_deregister(self);
	if (sc->sc_dev_registered)
		drm_dev_unregister(sc->sc_drm_dev);
	if (sc->sc_pci_attached)
		drm_pci_detach(sc->sc_drm_dev);
	if (sc->sc_drm_dev) {
		drm_dev_put(sc->sc_drm_dev);
		sc->sc_drm_dev = NULL;
	}
	if (sc->sc_task_wq) {
		workqueue_destroy(sc->sc_task_wq);
		sc->sc_task_wq = NULL;
	}
	linux_pci_dev_destroy(&sc->sc_pci_dev);

	return 0;
}

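/* pmf suspend handler: let the amdgpu core quiesce the device.  */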
static bool
amdgpu_do_suspend(device_t self, const pmf_qual_t *qual)
{
	struct amdgpu_softc *const sc = device_private(self);
	struct drm_device *const dev = sc->sc_drm_dev;
	int ret;

	ret = amdgpu_device_suspend(dev, /*fbcon*/true);
	if (ret)
		return false;

	return true;
}

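/* pmf resume handler: let the amdgpu core bring the device back up.  */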
static bool
amdgpu_do_resume(device_t self, const pmf_qual_t *qual)
{
	struct amdgpu_softc *const sc = device_private(self);
	struct drm_device *const dev = sc->sc_drm_dev;
	int ret;

	ret = amdgpu_device_resume(dev, /*fbcon*/true);
	if (ret)
		return false;

	return true;
}

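/*
 * Workqueue handler: recover the task from its embedded struct work
 * and run its callback.
 */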
static void
amdgpu_task_work(struct work *work, void *cookie __unused)
{
	struct amdgpu_task *const task = container_of(work, struct amdgpu_task,
	    rt_u.work);

	(*task->rt_fn)(task);
}

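/*
 * Schedule an asynchronous task.  While amdgpu_attach_real is running
 * on the config thread, tasks are queued on sc_tasks and run at the
 * end of attach; afterwards they are handed to the workqueue.
 *
 * Illustrative caller (hypothetical names), assuming the task's
 * callback has already been set up:
 *
 *	static void my_callback(struct amdgpu_task *t) { ... }
 *	...
 *	task->rt_fn = my_callback;
 *	error = amdgpu_task_schedule(sc->sc_dev, task);
 */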
int
amdgpu_task_schedule(device_t self, struct amdgpu_task *task)
{
	struct amdgpu_softc *const sc = device_private(self);

	if (atomic_load_relaxed(&sc->sc_task_thread) == curlwp)
		SIMPLEQ_INSERT_TAIL(&sc->sc_tasks, task, rt_u.queue);
	else
		workqueue_enqueue(sc->sc_task_wq, &task->rt_u.work, NULL);

	return 0;
}