Cross-reference matches for the identifier dqm under
/src/sys/external/bsd/drm2/dist/drm/amd/amdkfd/:
kfd_device_queue_manager.c:
      47  static int set_pasid_vmid_mapping(struct device_queue_manager *dqm,
      50  static int execute_queues_cpsch(struct device_queue_manager *dqm,
      53  static int unmap_queues_cpsch(struct device_queue_manager *dqm,
      57  static int map_queues_cpsch(struct device_queue_manager *dqm);
      59  static void deallocate_sdma_queue(struct device_queue_manager *dqm,
      62  static inline void deallocate_hqd(struct device_queue_manager *dqm,
      64  static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q);
      65  static int allocate_sdma_queue(struct device_queue_manager *dqm,
      77  static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe)
      80  int pipe_offset = mec * dqm->dev->shared_resources.num_pipe_per_mec
    1764  struct device_queue_manager *dqm;        local in function:device_queue_manager_init
    1928  struct device_queue_manager *dqm = container_of(work,        local in function:kfd_process_hw_exception
    1958  struct device_queue_manager *dqm = data;        local in function:dqm_debugfs_hqds
    [all...]
kfd_device_queue_manager.h:
      89  int (*create_queue)(struct device_queue_manager *dqm,
      93  int (*destroy_queue)(struct device_queue_manager *dqm,
      97  int (*update_queue)(struct device_queue_manager *dqm,
     100  int (*register_process)(struct device_queue_manager *dqm,
     103  int (*unregister_process)(struct device_queue_manager *dqm,
     106  int (*initialize)(struct device_queue_manager *dqm);
     107  int (*start)(struct device_queue_manager *dqm);
     108  int (*stop)(struct device_queue_manager *dqm);
     109  void (*pre_reset)(struct device_queue_manager *dqm);
     110  void (*uninitialize)(struct device_queue_manager *dqm);
    [all...]
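The header entry above is the dqm ops table: every operation is a function pointer taking the device_queue_manager itself as its first argument, and callers throughout this listing dispatch through it (dev->dqm->ops.start(dev->dqm), and so on). A minimal compilable sketch of that pattern, with heavily simplified stand-in types and only a few of the listed members:

    /* Forward declarations stand in for the real kfd types. */
    struct queue;
    struct qcm_process_device;
    struct device_queue_manager;

    struct device_queue_manager_ops {
            int (*create_queue)(struct device_queue_manager *dqm,
                                struct queue *q,
                                struct qcm_process_device *qpd);
            int (*start)(struct device_queue_manager *dqm);
            int (*stop)(struct device_queue_manager *dqm);
    };

    struct device_queue_manager {
            struct device_queue_manager_ops ops;
    };

    /* Callers never name a backend; they go through the table, so the
     * same call site can serve different scheduling modes. */
    static int dqm_start(struct device_queue_manager *dqm)
    {
            return dqm->ops.start(dqm);
    }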
kfd_device_queue_manager_v9.c:
      35  static int update_qpd_v9(struct device_queue_manager *dqm,
      37  static void init_sdma_vm_v9(struct device_queue_manager *dqm, struct queue *q,
      57  static int update_qpd_v9(struct device_queue_manager *dqm,
      70  !dqm->dev->device_info->needs_iommu_device)
      85  static void init_sdma_vm_v9(struct device_queue_manager *dqm, struct queue *q,
kfd_device_queue_manager_vi.c:
      34  static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
      40  static bool set_cache_memory_policy_vi_tonga(struct device_queue_manager *dqm,
      46  static int update_qpd_vi(struct device_queue_manager *dqm,
      48  static int update_qpd_vi_tonga(struct device_queue_manager *dqm,
      50  static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
      52  static void init_sdma_vm_tonga(struct device_queue_manager *dqm,
     102  static bool set_cache_memory_policy_vi(struct device_queue_manager *dqm,
     131  static bool set_cache_memory_policy_vi_tonga(struct device_queue_manager *dqm,
     158  static int update_qpd_vi(struct device_queue_manager *dqm,
     199  static int update_qpd_vi_tonga(struct device_queue_manager *dqm,
    [all...]
kfd_mqd_manager.c:
      61  mqd_mem_obj->gtt_mem = dev->dqm->hiq_sdma_mqd.gtt_mem;
      62  mqd_mem_obj->gpu_addr = dev->dqm->hiq_sdma_mqd.gpu_addr;
      63  mqd_mem_obj->cpu_ptr = dev->dqm->hiq_sdma_mqd.cpu_ptr;
      81  dev->dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size;
      83  offset += dev->dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size;
      85  mqd_mem_obj->gtt_mem = (void *)((uint64_t)dev->dqm->hiq_sdma_mqd.gtt_mem
      87  mqd_mem_obj->gpu_addr = dev->dqm->hiq_sdma_mqd.gpu_addr + offset;
      89  dev->dqm->hiq_sdma_mqd.cpu_ptr + offset);
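The lines above show SDMA MQDs being carved out of the device's single hiq_sdma_mqd allocation: a byte offset is accumulated from the HIQ and SDMA MQD sizes, then applied identically to the GTT handle, the GPU address, and the CPU mapping. A small sketch of that suballocation arithmetic, with an illustrative mqd_mem stand-in type:

    #include <stddef.h>
    #include <stdint.h>

    /* Illustrative stand-in for the shared hiq_sdma_mqd backing buffer. */
    struct mqd_mem {
            void     *gtt_mem;   /* allocation handle */
            uint64_t  gpu_addr;  /* GPU virtual address of the buffer */
            void     *cpu_ptr;   /* CPU mapping of the same buffer */
    };

    /* Place the idx-th SDMA MQD after the HIQ MQD, keeping the GPU and
     * CPU views at the same byte offset into the shared buffer. */
    static void sdma_mqd_slot(const struct mqd_mem *base, size_t hiq_mqd_size,
                              size_t sdma_mqd_size, unsigned int idx,
                              struct mqd_mem *out)
    {
            size_t offset = hiq_mqd_size + (size_t)idx * sdma_mqd_size;

            out->gtt_mem  = (void *)((uintptr_t)base->gtt_mem + offset);
            out->gpu_addr = base->gpu_addr + offset;
            out->cpu_ptr  = (void *)((uintptr_t)base->cpu_ptr + offset);
    }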
kfd_process_queue_manager.c:
      79  dev->dqm->ops.process_termination(dev->dqm, &pdd->qpd);
     128  return pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
     180  /* let DQM handle it */
     238  dev->dqm->ops.register_process(dev->dqm, &pdd->qpd);
     249  if ((type == KFD_QUEUE_TYPE_SDMA && dev->dqm->sdma_queue_count
     250          >= get_num_sdma_queues(dev->dqm)) ||
     252  dev->dqm->xgmi_sdma_queue_count
     350  struct device_queue_manager *dqm;        local in function:pqm_destroy_queue
    [all...]
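Lines 249-252 above are the admission check for new SDMA queues: creation is refused once the dqm's per-type counters reach the device limits reported by get_num_sdma_queues() (and, on the elided lines, the matching XGMI SDMA limit). A minimal sketch of that check, using simplified counters in place of the real dqm; the -ENOMEM return is an assumption about the error code:

    #include <errno.h>

    enum kfd_queue_type { KFD_QUEUE_TYPE_COMPUTE, KFD_QUEUE_TYPE_SDMA };

    struct sdma_counters {
            unsigned int sdma_queue_count;  /* SDMA queues currently allocated */
            unsigned int num_sdma_queues;   /* device limit */
    };

    /* Refuse creation up front when the SDMA pool is exhausted; the
     * scheduler does not allow over-subscription of SDMA engines. */
    static int check_sdma_capacity(const struct sdma_counters *c,
                                   enum kfd_queue_type type)
    {
            if (type == KFD_QUEUE_TYPE_SDMA &&
                c->sdma_queue_count >= c->num_sdma_queues)
                    return -ENOMEM;
            return 0;
    }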
kfd_device_queue_manager_v10.c:
      34  static int update_qpd_v10(struct device_queue_manager *dqm,
      36  static void init_sdma_vm_v10(struct device_queue_manager *dqm, struct queue *q,
      56  static int update_qpd_v10(struct device_queue_manager *dqm,
      88  static void init_sdma_vm_v10(struct device_queue_manager *dqm, struct queue *q,
kfd_device_queue_manager_cik.c:
      34  static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm,
      40  static int update_qpd_cik(struct device_queue_manager *dqm,
      42  static int update_qpd_cik_hawaii(struct device_queue_manager *dqm,
      44  static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
      46  static void init_sdma_vm_hawaii(struct device_queue_manager *dqm,
      95  static bool set_cache_memory_policy_cik(struct device_queue_manager *dqm,
     121  static int update_qpd_cik(struct device_queue_manager *dqm,
     155  static int update_qpd_cik_hawaii(struct device_queue_manager *dqm,
     185  static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
     201  static void init_sdma_vm_hawaii(struct device_queue_manager *dqm,
    [all...]
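The _cik, _vi, _v9, and _v10 files above re-implement the same small set of callbacks (update_qpd, init_sdma_vm, set_cache_memory_policy) once per GPU generation, and the dqm picks one set at init time based on the ASIC family. A hedged sketch of that selection pattern; the table layout, stub bodies, and pick_asic_ops() helper are illustrative, not the driver's actual code:

    struct device_queue_manager;
    struct qcm_process_device;

    /* Per-generation callbacks collected in one table (simplified). */
    struct dqm_asic_ops {
            int (*update_qpd)(struct device_queue_manager *dqm,
                              struct qcm_process_device *qpd);
    };

    /* Stubs standing in for the per-ASIC implementations listed above. */
    static int update_qpd_cik(struct device_queue_manager *dqm,
                              struct qcm_process_device *qpd) { return 0; }
    static int update_qpd_vi(struct device_queue_manager *dqm,
                             struct qcm_process_device *qpd) { return 0; }

    enum asic_family { CHIP_KAVERI, CHIP_CARRIZO };

    /* One switch at init time; every later call goes through the table. */
    static void pick_asic_ops(enum asic_family family,
                              struct dqm_asic_ops *asic_ops)
    {
            switch (family) {
            case CHIP_KAVERI:
                    asic_ops->update_qpd = update_qpd_cik;
                    break;
            case CHIP_CARRIZO:
                    asic_ops->update_qpd = update_qpd_vi;
                    break;
            }
    }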
kfd_packet_manager.c:
      52  struct kfd_dev *dev = pm->dqm->dev;
      54  process_count = pm->dqm->processes_count;
      55  queue_count = pm->dqm->queue_count;
      56  compute_queue_count = queue_count - pm->dqm->sdma_queue_count -
      57          pm->dqm->xgmi_sdma_queue_count;
      70  compute_queue_count > get_queues_num(pm->dqm)) {
     105  retval = kfd_gtt_sa_allocate(pm->dqm->dev, *rl_buffer_size,
     149  pm->dqm->processes_count, pm->dqm->queue_count);
     155  if (proccesses_mapped >= pm->dqm->processes_count)
    [all...]
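Lines 54-70 above size the runlist from the dqm counters: the compute queue count is the total minus both SDMA counts, and runlist construction bails out when it exceeds what get_queues_num() says the scheduler can map. A small sketch of that accounting (field names mirror the counters quoted above; the hqd_slots parameter stands in for get_queues_num()):

    #include <stdbool.h>

    struct dqm_counters {
            unsigned int queue_count;            /* all active queues */
            unsigned int sdma_queue_count;       /* plain SDMA queues */
            unsigned int xgmi_sdma_queue_count;  /* XGMI SDMA queues */
    };

    /* Compute queues are whatever remains after both SDMA pools. */
    static unsigned int compute_queue_count(const struct dqm_counters *c)
    {
            return c->queue_count - c->sdma_queue_count -
                   c->xgmi_sdma_queue_count;
    }

    /* Over-subscribed: more compute queues than the scheduler can map. */
    static bool is_over_subscribed(const struct dqm_counters *c,
                                   unsigned int hqd_slots)
    {
            return compute_queue_count(c) > hqd_slots;
    }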
kfd_int_process_v9.c:
      55  if (!pasid && dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
      62  pasid = dev->dqm->vmid_pasid[vmid];
     125  kfd_process_vm_fault(dev->dqm, pasid);
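When an interrupt arrives without a PASID and the device runs without the hardware scheduler (KFD_SCHED_POLICY_NO_HWS), the handler above recovers it from the dqm's vmid_pasid[] table, which the driver filled when it bound the VMID itself. A minimal sketch of that lookup; the 16-entry table size is an assumption:

    #include <stdint.h>

    #define VMID_NUM 16  /* assumed number of hardware VMIDs */

    struct vmid_map {
            uint16_t vmid_pasid[VMID_NUM];  /* 0 = no process bound */
    };

    /* Without the HW scheduler the driver did the VMID binding, so the
     * interrupt path can translate VMID back to PASID by table lookup. */
    static uint16_t pasid_from_vmid(const struct vmid_map *m, unsigned int vmid)
    {
            return vmid < VMID_NUM ? m->vmid_pasid[vmid] : 0;
    }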
kfd_device.c:
     661  kfd->dqm = device_queue_manager_init(kfd);
     662  if (!kfd->dqm) {
     689  kfd->dqm->sched_policy);
     696  device_queue_manager_uninit(kfd->dqm);
     719  device_queue_manager_uninit(kfd->dqm);
     737  kfd->dqm->ops.pre_reset(kfd->dqm);
     782  kfd->dqm->ops.stop(kfd->dqm);
     818  err = kfd->dqm->ops.start(kfd->dqm);
    [all...]
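kfd_device.c drives the dqm through its whole lifecycle: device_queue_manager_init() at probe (with device_queue_manager_uninit() on the error path and at teardown), ops.stop()/ops.start() around suspend and resume, and ops.pre_reset() before a GPU reset. A self-contained toy sketch of the bring-up ordering and its error unwinding; the bodies here are trivial stand-ins, not driver code:

    #include <stdlib.h>

    struct dqm { int started; };  /* opaque stand-in */

    static struct dqm *device_queue_manager_init(void)
    {
            return calloc(1, sizeof(struct dqm));
    }
    static void device_queue_manager_uninit(struct dqm *d) { free(d); }
    static int dqm_start(struct dqm *d) { d->started = 1; return 0; }

    static int kfd_bringup(struct dqm **out)
    {
            struct dqm *dqm = device_queue_manager_init();
            if (!dqm)
                    return -1;      /* nothing to undo yet */
            if (dqm_start(dqm) != 0) {
                    /* unwind in reverse order, as the error path above does */
                    device_queue_manager_uninit(dqm);
                    return -1;
            }
            *out = dqm;
            return 0;
    }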
kfd_priv.h:
     272  struct device_queue_manager *dqm;        member in struct:kfd_dev
     409  * @is_evicted are protected by the DQM lock.
     545  struct device_queue_manager *dqm;        member in struct:qcm_process_device
     645  /* Flag used to tell the pdd has dequeued from the dqm.
     646   * This is used to prevent dev->dqm->ops.process_termination() from
     885  void device_queue_manager_uninit(struct device_queue_manager *dqm);
     889  int kfd_process_vm_fault(struct device_queue_manager *dqm, unsigned int pasid);
     933  struct device_queue_manager *dqm;        member in struct:packet_manager
     976  int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm);
    1069  int dqm_debugfs_execute_queues(struct device_queue_manager *dqm);
    [all...]
cik_event_interrupt.c:
     115  kfd_process_vm_fault(dev->dqm, pasid);
kfd_dbgmgr.c:
      92  if (pdev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS)
kfd_process.c:
     756  pdd->qpd.dqm = dev->dqm;
     964  r = pdd->dev->dqm->ops.evict_process_queues(pdd->dev->dqm,
     982  if (pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
     999  r = pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
    1162  if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
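Eviction and restore in kfd_process.c iterate over every per-device data (pdd) a process owns and call evict_process_queues()/restore_process_queues() through that device's dqm ops. A minimal sketch of the eviction loop; the types are simplified stand-ins, and the stop-on-first-failure policy is an assumption about the error handling:

    #include <stddef.h>

    struct qpd { int dummy; };  /* per-process-per-device queue data */

    struct dqm_ops {
            int (*evict_process_queues)(struct qpd *qpd);
            int (*restore_process_queues)(struct qpd *qpd);
    };

    struct pdd {  /* one entry per GPU the process uses */
            const struct dqm_ops *ops;
            struct qpd qpd;
    };

    /* Evict the process's queues on every device; report the first
     * failure so the caller can restore what was already evicted. */
    static int process_evict_queues(struct pdd *pdds, size_t n)
    {
            for (size_t i = 0; i < n; i++) {
                    int r = pdds[i].ops->evict_process_queues(&pdds[i].qpd);
                    if (r)
                            return r;
            }
            return 0;
    }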
kfd_kernel_queue.c:
      69  kq->mqd_mgr = dev->dqm->mqd_mgrs[KFD_MQD_TYPE_DIQ];
      72  kq->mqd_mgr = dev->dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ];
kfd_packet_manager_v9.c:
      87  struct kfd_dev *kfd = pm->dqm->dev;
      98  concurrent_proc_cnt = min(pm->dqm->processes_count,
kfd_packet_manager_vi.c:
      84  struct kfd_dev *kfd = pm->dqm->dev;
      98  concurrent_proc_cnt = min(pm->dqm->processes_count,
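Both packet-manager backends above fill the runlist's concurrent-process field the same way: the dqm's live process count clamped by min() to a second, truncated argument, assumed here to be a per-device limit such as max_proc_per_quantum. A one-function sketch of the clamp:

    /* Clamp the number of processes the scheduler runs concurrently to
     * the device limit (assumed name: max_proc_per_quantum). */
    static unsigned int concurrent_proc_cnt(unsigned int processes_count,
                                            unsigned int max_proc_per_quantum)
    {
            return processes_count < max_proc_per_quantum ?
                   processes_count : max_proc_per_quantum;
    }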
kfd_topology.c:
    1324  dev->gpu->dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) ?
    1326  dev->node_props.num_cp_queues = get_queues_num(dev->gpu->dqm);
    1516  r = dqm_debugfs_hqds(m, dev->gpu->dqm);
    1541  r = pm_debugfs_runlist(m, &dev->gpu->dqm->packets);
kfd_chardev.c:
     525  if (!dev->dqm->ops.set_cache_memory_policy(dev->dqm,
     559  if (dev->dqm->ops.set_trap_handler(dev->dqm,
    1151  if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS &&