Lines matching refs:uvd
79 INIT_DELAYED_WORK(&rdev->uvd.idle_work, radeon_uvd_idle_work_handler);
144 rdev->uvd.fw_header_present = false;
145 rdev->uvd.max_handles = RADEON_DEFAULT_UVD_HANDLES;
160 rdev->uvd.fw_header_present = true;
165 DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
169 * Limit the number of UVD handles depending on
173 rdev->uvd.max_handles = RADEON_MAX_UVD_HANDLES;
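
Lines 144-173 come from the firmware probe in the init path: the session-handle limit starts at the conservative RADEON_DEFAULT_UVD_HANDLES and is only raised to RADEON_MAX_UVD_HANDLES when a firmware header with a new enough version is found. A minimal sketch of that decision, where uvd_fw_version_ok() is a hypothetical stand-in for the driver's actual major/minor comparison:

    static void uvd_apply_fw_limits_sketch(struct radeon_device *rdev,
                                           unsigned short major,
                                           unsigned short minor,
                                           unsigned short family_id,
                                           bool has_header)
    {
        /* Conservative defaults for legacy firmware images. */
        rdev->uvd.fw_header_present = false;
        rdev->uvd.max_handles = RADEON_DEFAULT_UVD_HANDLES;

        if (!has_header)
            return;

        rdev->uvd.fw_header_present = true;
        DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
                 major, minor, family_id);

        /* Only new enough microcode gets the full handle table. */
        if (uvd_fw_version_ok(major, minor))    /* hypothetical check */
            rdev->uvd.max_handles = RADEON_MAX_UVD_HANDLES;
    }
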
193 RADEON_UVD_SESSION_SIZE * rdev->uvd.max_handles;
196 NULL, &rdev->uvd.vcpu_bo);
198 dev_err(rdev->dev, "(%d) failed to allocate UVD bo\n", r);
202 r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
204 radeon_bo_unref(&rdev->uvd.vcpu_bo);
205 dev_err(rdev->dev, "(%d) failed to reserve UVD bo\n", r);
209 r = radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_VRAM,
210 &rdev->uvd.gpu_addr);
212 radeon_bo_unreserve(rdev->uvd.vcpu_bo);
213 radeon_bo_unref(&rdev->uvd.vcpu_bo);
214 dev_err(rdev->dev, "(%d) UVD bo pin failed\n", r);
218 r = radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr);
220 dev_err(rdev->dev, "(%d) UVD map failed\n", r);
224 radeon_bo_unreserve(rdev->uvd.vcpu_bo);
226 for (i = 0; i < rdev->uvd.max_handles; ++i) {
227 atomic_set(&rdev->uvd.handles[i], 0);
228 rdev->uvd.filp[i] = NULL;
229 rdev->uvd.img_size[i] = 0;
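
Lines 193-229 are the allocation step of the init path: the VCPU buffer object is sized for the firmware image plus one RADEON_UVD_SESSION_SIZE slice per handle (line 193), then pinned in VRAM, CPU-mapped, and the handle table is cleared. A condensed sketch of the pin/map/init tail, reusing the calls shown above and collapsing the error unwinding into one label:

    static int uvd_pin_map_sketch(struct radeon_device *rdev)
    {
        int i, r;

        r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
        if (r) {
            dev_err(rdev->dev, "(%d) failed to reserve UVD bo\n", r);
            return r;
        }

        /* Pin the VCPU image in VRAM; gpu_addr is what the ring sees. */
        r = radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_VRAM,
                          &rdev->uvd.gpu_addr);
        if (r) {
            dev_err(rdev->dev, "(%d) UVD bo pin failed\n", r);
            goto out_unreserve;
        }

        /* CPU mapping, used later to copy firmware and messages in. */
        r = radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr);
        if (r)
            dev_err(rdev->dev, "(%d) UVD map failed\n", r);

    out_unreserve:
        radeon_bo_unreserve(rdev->uvd.vcpu_bo);
        if (r)
            return r;

        /* Handle value 0 marks a session slot as free. */
        for (i = 0; i < rdev->uvd.max_handles; ++i) {
            atomic_set(&rdev->uvd.handles[i], 0);
            rdev->uvd.filp[i] = NULL;
            rdev->uvd.img_size[i] = 0;
        }
        return 0;
    }
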
239 if (rdev->uvd.vcpu_bo == NULL)
242 r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
244 radeon_bo_kunmap(rdev->uvd.vcpu_bo);
245 radeon_bo_unpin(rdev->uvd.vcpu_bo);
246 radeon_bo_unreserve(rdev->uvd.vcpu_bo);
249 radeon_bo_unref(&rdev->uvd.vcpu_bo);
260 if (rdev->uvd.vcpu_bo == NULL)
263 for (i = 0; i < rdev->uvd.max_handles; ++i) {
264 uint32_t handle = atomic_read(&rdev->uvd.handles[i]);
273 DRM_ERROR("Error destroying UVD (%d)!\n", r);
280 rdev->uvd.filp[i] = NULL;
281 atomic_set(&rdev->uvd.handles[i], 0);
293 if (rdev->uvd.vcpu_bo == NULL)
296 memcpy(rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size);
298 size = radeon_bo_size(rdev->uvd.vcpu_bo);
301 ptr = rdev->uvd.cpu_addr;
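
The resume path at 293-301 reloads the firmware image into the still-pinned VCPU buffer and zeroes everything behind it (the per-session state), rather than reallocating anything. A condensed sketch:

    static int uvd_resume_sketch(struct radeon_device *rdev)
    {
        unsigned long size;
        void *ptr;

        if (rdev->uvd.vcpu_bo == NULL)
            return -EINVAL;

        /* The firmware image lives at the start of the BO... */
        memcpy(rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size);

        /* ...and the remaining VCPU state area is simply cleared. */
        size = radeon_bo_size(rdev->uvd.vcpu_bo) - rdev->uvd_fw->size;
        ptr = rdev->uvd.cpu_addr + rdev->uvd_fw->size;
        memset(ptr, 0, size);

        return 0;
    }
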
338 for (i = 0; i < rdev->uvd.max_handles; ++i) {
339 uint32_t handle = atomic_read(&rdev->uvd.handles[i]);
340 if (handle != 0 && rdev->uvd.filp[i] == filp) {
348 DRM_ERROR("Error destroying UVD (%d)!\n", r);
355 rdev->uvd.filp[i] = NULL;
356 atomic_set(&rdev->uvd.handles[i], 0);
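
Both the suspend path (263-281) and the per-file cleanup (338-356) end lingering sessions the same way: emit a destroy message for each live handle, wait on its fence, then free the slot. A sketch of that shared pattern; the radeon_uvd_get_destroy_msg() signature and the R600_RING_TYPE_UVD_INDEX ring index are taken as given from the driver:

    static void uvd_kill_session_sketch(struct radeon_device *rdev, int i,
                                        uint32_t handle)
    {
        struct radeon_fence *fence;
        int r;

        r = radeon_uvd_get_destroy_msg(rdev, R600_RING_TYPE_UVD_INDEX,
                                       handle, &fence);
        if (r) {
            DRM_ERROR("Error destroying UVD (%d)!\n", r);
        } else {
            /* Block until the engine has really dropped the session. */
            radeon_fence_wait(fence, false);
            radeon_fence_unref(&fence);
        }

        /* Free the slot: clear the owner, then the handle word. */
        rdev->uvd.filp[i] = NULL;
        atomic_set(&rdev->uvd.handles[i], 0);
    }
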
429 DRM_ERROR("UVD codec not handled %d!\n", stream_type);
434 DRM_ERROR("Invalid UVD decoding target pitch!\n");
439 DRM_ERROR("Invalid dpb_size in UVD message (%d / %d)!\n",
460 /* only since UVD 3 */
466 DRM_ERROR("UVD codec not supported by hardware %d!\n",
483 DRM_ERROR("UVD messages must be 64 byte aligned!\n");
491 DRM_ERROR("Failed waiting for UVD message (%d)!\n", r);
498 DRM_ERROR("Failed mapping the UVD message (%d)!\n", r);
508 DRM_ERROR("Invalid UVD handle!\n");
523 for (i = 0; i < p->rdev->uvd.max_handles; ++i) {
524 if (atomic_read(&p->rdev->uvd.handles[i]) == handle) {
529 if (!atomic_cmpxchg(&p->rdev->uvd.handles[i], 0, handle)) {
530 p->rdev->uvd.filp[i] = p->filp;
531 p->rdev->uvd.img_size[i] = img_size;
536 DRM_ERROR("No more free UVD handles!\n");
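
The create-message path at 523-536 claims a session slot without taking a lock: atomic_cmpxchg(&slot, 0, handle) returns the old value, so a zero return means this caller won the free slot and may fill in the owner and image size. A minimal sketch of that claim loop:

    static int uvd_claim_handle_sketch(struct radeon_cs_parser *p,
                                       uint32_t handle, uint32_t img_size)
    {
        unsigned i;

        for (i = 0; i < p->rdev->uvd.max_handles; ++i) {
            /* A duplicate create for a live handle is an error. */
            if (atomic_read(&p->rdev->uvd.handles[i]) == handle)
                return -EINVAL;

            /* Old value 0 means the slot was free and is now ours. */
            if (!atomic_cmpxchg(&p->rdev->uvd.handles[i], 0, handle)) {
                p->rdev->uvd.filp[i] = p->filp;
                p->rdev->uvd.img_size[i] = img_size;
                return 0;
            }
        }

        DRM_ERROR("No more free UVD handles!\n");
        return -EINVAL;
    }
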
549 for (i = 0; i < p->rdev->uvd.max_handles; ++i) {
550 if (atomic_read(&p->rdev->uvd.handles[i]) == handle) {
551 if (p->rdev->uvd.filp[i] != p->filp) {
552 DRM_ERROR("UVD handle collision detected!\n");
559 DRM_ERROR("Invalid UVD handle 0x%x!\n", handle);
564 for (i = 0; i < p->rdev->uvd.max_handles; ++i)
565 atomic_cmpxchg(&p->rdev->uvd.handles[i], handle, 0);
571 DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
620 DRM_ERROR("invalid UVD command %X!\n", cmd);
632 (start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) {
640 DRM_ERROR("More than one message in a UVD-IB!\n");
708 DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
740 DRM_ERROR("UVD-IBs need a msg command!\n");
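
The check at 632 encodes a UVD addressing limitation: the engine can only reach buffers inside one 256 MB segment, so the command-stream validator compares the address bits above bit 27 against the VCPU BO's segment. A sketch of that comparison, assuming start and end bound the relocated buffer:

    static int uvd_check_segment_sketch(struct radeon_cs_parser *p,
                                        uint64_t start, uint64_t end)
    {
        /* Bits [27:0] address within a 256 MB segment; the bits
         * above select the segment itself. */
        if ((start >> 28) != ((end - 1) >> 28))
            return -EINVAL;     /* buffer crosses a 256 MB boundary */

        if ((start >> 28) != (p->rdev->uvd.gpu_addr >> 28))
            return -EINVAL;     /* outside the VCPU BO's segment */

        return 0;
    }
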
787 /* we use the last page of the vcpu bo for the UVD message */
788 uint64_t offs = radeon_bo_size(rdev->uvd.vcpu_bo) -
791 uint32_t *msg = rdev->uvd.cpu_addr + offs;
792 uint64_t addr = rdev->uvd.gpu_addr + offs;
796 r = radeon_bo_reserve(rdev->uvd.vcpu_bo, true);
800 /* stitch together a UVD create msg */
816 radeon_bo_unreserve(rdev->uvd.vcpu_bo);
823 /* we use the last page of the vcpu bo for the UVD message */
824 uint64_t offs = radeon_bo_size(rdev->uvd.vcpu_bo) -
827 uint32_t *msg = rdev->uvd.cpu_addr + offs;
828 uint64_t addr = rdev->uvd.gpu_addr + offs;
832 r = radeon_bo_reserve(rdev->uvd.vcpu_bo, true);
836 /* stitch together a UVD destroy msg */
845 radeon_bo_unreserve(rdev->uvd.vcpu_bo);
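
The create (787-816) and destroy (823-845) helpers share one trick: instead of allocating a buffer per message, they reuse the last GPU page of the already pinned VCPU BO as scratch space, writing through the CPU mapping and handing the matching GPU address to the ring. A sketch of the addressing; radeon_uvd_send_msg() is assumed to be the driver's internal submit helper, and the actual command words are elided:

    static int uvd_scratch_msg_sketch(struct radeon_device *rdev, int ring,
                                      struct radeon_fence **fence)
    {
        /* Last GPU page of the pinned VCPU BO doubles as scratch. */
        uint64_t offs = radeon_bo_size(rdev->uvd.vcpu_bo) -
                        RADEON_GPU_PAGE_SIZE;
        uint32_t *msg = rdev->uvd.cpu_addr + offs;  /* CPU-side view */
        uint64_t addr = rdev->uvd.gpu_addr + offs;  /* GPU-side view */
        int r;

        /* Even a pinned BO must be reserved before it is written. */
        r = radeon_bo_reserve(rdev->uvd.vcpu_bo, true);
        if (r)
            return r;

        /* The real helpers stitch the UVD command words into msg[]
         * here; zero-filling stands in for them in this sketch. */
        memset(msg, 0, RADEON_GPU_PAGE_SIZE);

        r = radeon_uvd_send_msg(rdev, ring, addr, fence); /* assumed helper */
        radeon_bo_unreserve(rdev->uvd.vcpu_bo);
        return r;
    }
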
866 for (i = 0; i < rdev->uvd.max_handles; ++i) {
867 if (!atomic_read(&rdev->uvd.handles[i]))
870 if (rdev->uvd.img_size[i] >= 720*576)
880 container_of(work, struct radeon_device, uvd.idle_work.work);
891 schedule_delayed_work(&rdev->uvd.idle_work,
899 bool set_clocks = !cancel_delayed_work_sync(&rdev->uvd.idle_work);
900 set_clocks &= schedule_delayed_work(&rdev->uvd.idle_work,
1054 DRM_ERROR("Timeout setting UVD clocks!\n");
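
The closing matches implement on-demand clocking: 866-870 count active sessions (classifying anything 720*576 or larger as HD), the idle handler at 880-891 reschedules itself while fences are still outstanding, and 1054 is a timeout in the low-level clock programming underneath. The usage hook at 899-900 flips the idle logic around: cancel_delayed_work_sync() returns true when the idle work was still queued, meaning UVD was recently active and its clocks are already up. A sketch of that hook using the driver's non-DPM clock bump; the one-second literal stands in for the driver's named idle-timeout constant:

    static void uvd_note_usage_sketch(struct radeon_device *rdev)
    {
        /* False here means the idle work already ran: UVD went idle,
         * its clocks were dropped, and they must come back up. */
        bool set_clocks = !cancel_delayed_work_sync(&rdev->uvd.idle_work);

        /* Re-arm the idle timer; &= guards against another caller
         * having re-queued the work in the meantime. */
        set_clocks &= schedule_delayed_work(&rdev->uvd.idle_work,
                                            msecs_to_jiffies(1000));

        if (set_clocks)
            radeon_set_uvd_clocks(rdev, 53300, 40000); /* VCLK/DCLK, 10 kHz units */
    }
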