struct drm_gpu_scheduler;

int drm_sched_init(struct drm_gpu_scheduler *sched,      /* scheduler instance */
                   const struct drm_sched_backend_ops *ops, /* backend operations for this scheduler */
                   unsigned hw_submission,  /* number of hw submissions that can be in flight */
                   unsigned hang_limit,     /* number of times to allow a job to hang before dropping it */
                   long timeout,            /* timeout value in jiffies for the scheduler */
                   const char *name);       /* name used for debugging */
struct drm_sched_backend_ops {
        /* Called repeatedly while the scheduler considers running this job;
         * each call returns the next fence to wait on, or NULL once the job
         * has no remaining dependencies and may run. */
        struct dma_fence *(*dependency)(struct drm_sched_job *sched_job,
                                        struct drm_sched_entity *s_entity);
        /* Starts the job on the hardware and returns the fence that will be
         * signalled when the hardware finishes it. */
        struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);
        /* Called when a job has been running longer than the scheduler's
         * timeout; expected to trigger recovery. */
        void (*timedout_job)(struct drm_sched_job *sched_job);
        /* Called once the job is no longer needed, to release its resources. */
        void (*free_job)(struct drm_sched_job *sched_job);
};
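Of these hooks, run_job carries the core contract: it kicks the job off on the hardware and hands back the fence the scheduler will wait on before signalling the job's finished fence. A minimal sketch for a hypothetical driver (the foo_* names and the to_foo_job() helper are made up for illustration; they are not part of vc4 or the scheduler API):

static struct dma_fence *foo_run_job(struct drm_sched_job *sched_job)
{
        /* to_foo_job() would be a container_of() wrapper around the
         * driver's job struct, which embeds drm_sched_job. */
        struct foo_job *job = to_foo_job(sched_job);
        struct dma_fence *fence;

        /* Allocate the fence that the driver's IRQ handler will signal
         * when the hardware finishes this job. */
        fence = foo_fence_create(job->dev);
        if (IS_ERR(fence))
                return NULL;

        job->irq_fence = dma_fence_get(fence);

        /* Kick the hardware. By the time we get here, the scheduler
         * has already waited on every fence returned by .dependency. */
        foo_hw_submit(job);

        return fence;
}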
int drm_sched_entity_init(struct drm_sched_entity *entity,   /* scheduler entity to init */
                          enum drm_sched_priority priority,  /* priority of the entity's run queue */
                          struct drm_gpu_scheduler **sched_list, /* schedulers this entity can be pushed to */
                          unsigned int num_sched_list,       /* number of entries in sched_list */
                          atomic_t *guilty);                 /* set when a job on this entity is found guilty of a hang; may be NULL */
enum drm_sched_priority {
        DRM_SCHED_PRIORITY_MIN,     /* lowest */
        DRM_SCHED_PRIORITY_NORMAL,
        DRM_SCHED_PRIORITY_HIGH,
        DRM_SCHED_PRIORITY_KERNEL,  /* highest, for work queued by the kernel itself */
};
int drm_sched_job_init(struct drm_sched_job *job,        /* scheduler job to init */
                       struct drm_sched_entity *entity,  /* scheduler entity to submit the job through */
                       void *owner);                     /* job owner, used for debugging */

void drm_sched_entity_push_job(struct drm_sched_job *sched_job,  /* initialized scheduler job to push */
                               struct drm_sched_entity *entity); /* scheduler entity to push the job to */
static const struct drm_sched_backend_ops vc4_bin_sched_ops = {
        .dependency = vc4_job_dependency,
        .run_job = vc4_bin_job_run,
        .timedout_job = NULL,
        .free_job = vc4_job_free,
};

static const struct drm_sched_backend_ops vc4_render_sched_ops = {
        .dependency = vc4_job_dependency,
        .run_job = vc4_render_job_run,
        .timedout_job = NULL,
        .free_job = vc4_job_free,
};

int vc4_sched_init(struct vc4_dev *vc4)
{
        int hw_jobs_limit = 1;
        int job_hang_limit = 0;
        int hang_limit_ms = 500;
        int ret;

        ret = drm_sched_init(&vc4->queue[VC4_BIN].sched,
                             &vc4_bin_sched_ops,
                             hw_jobs_limit,
                             job_hang_limit,
                             msecs_to_jiffies(hang_limit_ms),
                             "vc4_bin");
        if (ret) {
                dev_err(vc4->base.dev, "Failed to create bin scheduler: %d.\n", ret);
                return ret;
        }

        ret = drm_sched_init(&vc4->queue[VC4_RENDER].sched,
                             &vc4_render_sched_ops,
                             hw_jobs_limit,
                             job_hang_limit,
                             msecs_to_jiffies(hang_limit_ms),
                             "vc4_render");
        if (ret) {
                dev_err(vc4->base.dev, "Failed to create render scheduler: %d.\n", ret);
                vc4_sched_fini(vc4);
                return ret;
        }

        return 0;
}
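Note that both ops tables leave .timedout_job as NULL, so a job that blows past the 500 ms timeout gets no recovery handling. For reference, a driver that wanted recovery would typically park the scheduler, reset the hardware, and resubmit the in-flight jobs. A rough sketch using the scheduler helpers of this kernel era (to_vc4_job() and vc4_hw_reset() are hypothetical placeholders, not functions from the patch):

static void vc4_sched_timedout_job(struct drm_sched_job *sched_job)
{
        struct vc4_job *job = to_vc4_job(sched_job); /* hypothetical container_of() helper */
        struct drm_gpu_scheduler *sched = sched_job->sched;

        /* Park the scheduler and detach the hung job's hardware-fence
         * callback so nothing completes behind our back. */
        drm_sched_stop(sched, sched_job);

        /* Hypothetical reset of the V3D hardware block. */
        vc4_hw_reset(job->dev);

        /* Push the jobs that were on the hardware back onto it. */
        drm_sched_resubmit_jobs(sched);

        /* Unpark the scheduler; full recovery re-arms timeout and
         * completion handling. */
        drm_sched_start(sched, true);
}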
static int vc4_open(struct drm_device *dev, struct drm_file *file)
{
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        struct vc4_file *vc4file;
        struct drm_gpu_scheduler *sched;
        int i, ret;

        vc4file = kzalloc(sizeof(*vc4file), GFP_KERNEL);
        if (!vc4file)
                return -ENOMEM;

        vc4_perfmon_open_file(vc4file);

        for (i = 0; i < VC4_MAX_QUEUES; i++) {
                sched = &vc4->queue[i].sched;
                ret = drm_sched_entity_init(&vc4file->sched_entity[i],
                                            DRM_SCHED_PRIORITY_NORMAL,
                                            &sched, 1,
                                            NULL);
                if (ret)
                        goto fail;
        }

        file->driver_priv = vc4file;

        return 0;

fail:
        /* Tear down any entities that were successfully initialized. */
        while (i--)
                drm_sched_entity_destroy(&vc4file->sched_entity[i]);
        vc4_perfmon_close_file(vc4file);
        kfree(vc4file);
        return ret;
}
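The matching teardown isn't shown here: when the file is closed, each per-file entity has to be destroyed before the vc4_file is freed, or a straggling push could touch freed memory. A sketch of what the corresponding vc4_close() might look like, assembled from the drm_sched_entity_destroy() API rather than quoted from the patch:

static void vc4_close(struct drm_device *dev, struct drm_file *file)
{
        struct vc4_file *vc4file = file->driver_priv;
        int i;

        /* Flushes each entity's remaining queued jobs, then kills it. */
        for (i = 0; i < VC4_MAX_QUEUES; i++)
                drm_sched_entity_destroy(&vc4file->sched_entity[i]);

        vc4_perfmon_close_file(vc4file);
        kfree(vc4file);
}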
static void vc4_job_free(struct kref *ref)
{
        struct vc4_job *job = container_of(ref, struct vc4_job, refcount);
        struct vc4_dev *vc4 = job->dev;
        struct vc4_exec_info *exec = job->exec;
        struct vc4_seqno_cb *cb, *cb_temp;
        struct dma_fence *fence;
        unsigned long index;
        unsigned long irqflags;

        /* Drop references to any dependencies the scheduler never consumed. */
        xa_for_each(&job->deps, index, fence) {
                dma_fence_put(fence);
        }
        xa_destroy(&job->deps);

        dma_fence_put(job->irq_fence);
        dma_fence_put(job->done_fence);

        if (exec)
                vc4_complete_exec(&job->dev->base, exec);

        /* Fire any seqno callbacks that are now satisfied. */
        spin_lock_irqsave(&vc4->job_lock, irqflags);
        list_for_each_entry_safe(cb, cb_temp, &vc4->seqno_cb_list, work.entry) {
                if (cb->seqno <= vc4->finished_seqno) {
                        list_del_init(&cb->work.entry);
                        schedule_work(&cb->work);
                }
        }
        spin_unlock_irqrestore(&vc4->job_lock, irqflags);

        kfree(job);
}

void vc4_job_put(struct vc4_job *job)
{
        kref_put(&job->refcount, job->free);
}

static int vc4_job_init(struct vc4_dev *vc4, struct drm_file *file_priv,
                        struct vc4_job *job, void (*free)(struct kref *ref), u32 in_sync)
{
        struct dma_fence *in_fence = NULL;
        int ret;

        xa_init_flags(&job->deps, XA_FLAGS_ALLOC);

        if (in_sync) {
                ret = drm_syncobj_find_fence(file_priv, in_sync, 0, 0, &in_fence);
                if (ret == -EINVAL)
                        goto fail;

                ret = drm_gem_fence_array_add(&job->deps, in_fence);
                if (ret)
                        goto fail;
        }

        kref_init(&job->refcount);
        job->free = free;

        return 0;

fail:
        xa_destroy(&job->deps);
        return ret;
}

static int vc4_push_job(struct drm_file *file_priv, struct vc4_job *job, enum vc4_queue queue)
{
        struct vc4_file *vc4file = file_priv->driver_priv;
        int ret;

        ret = drm_sched_job_init(&job->base, &vc4file->sched_entity[queue], vc4file);
        if (ret)
                return ret;

        job->done_fence = dma_fence_get(&job->base.s_fence->finished);

        /* One reference for the scheduler; dropped via vc4_job_free(). */
        kref_get(&job->refcount);

        drm_sched_entity_push_job(&job->base, &vc4file->sched_entity[queue]);

        return 0;
}

/* Queues a struct vc4_exec_info for execution. If no job is
 * currently executing, then submits it.
 *
 * Unlike most GPUs, our hardware only handles one command list at a
 * time. To queue multiple jobs at once, we'd need to edit the
 * previous command list to have a jump to the new one at the end, and
 * then bump the end address. That's a change for a later date,
 * though.
 */
static int
vc4_queue_submit_to_scheduler(struct drm_device *dev,
                              struct drm_file *file_priv,
                              struct vc4_exec_info *exec,
                              struct ww_acquire_ctx *acquire_ctx)
{
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        struct drm_vc4_submit_cl *args = exec->args;
        struct vc4_job *bin = NULL;
        struct vc4_job *render = NULL;
        struct drm_syncobj *out_sync;
        uint64_t seqno;
        unsigned long irqflags;
        int ret;

        spin_lock_irqsave(&vc4->job_lock, irqflags);

        seqno = ++vc4->emit_seqno;
        exec->seqno = seqno;

        spin_unlock_irqrestore(&vc4->job_lock, irqflags);

        render = kcalloc(1, sizeof(*render), GFP_KERNEL);
        if (!render)
                return -ENOMEM;

        render->exec = exec;

        ret = vc4_job_init(vc4, file_priv, render, vc4_job_free, args->in_sync);
        if (ret) {
                kfree(render);
                return ret;
        }

        if (args->bin_cl_size != 0) {
                bin = kcalloc(1, sizeof(*bin), GFP_KERNEL);
                if (!bin) {
                        vc4_job_put(render);
                        return -ENOMEM;
                }

                bin->exec = exec;

                ret = vc4_job_init(vc4, file_priv, bin, vc4_job_free, args->in_sync);
                if (ret) {
                        vc4_job_put(render);
                        kfree(bin);
                        return ret;
                }
        }

        mutex_lock(&vc4->sched_lock);

        if (bin) {
                ret = vc4_push_job(file_priv, bin, VC4_BIN);
                if (ret)
                        goto fail_unlock;

                /* The render job must not start until binning is done. */
                ret = drm_gem_fence_array_add(&render->deps, dma_fence_get(bin->done_fence));
                if (ret)
                        goto fail_unlock;
        }

        ret = vc4_push_job(file_priv, render, VC4_RENDER);
        if (ret)
                goto fail_unlock;

        mutex_unlock(&vc4->sched_lock);

        if (args->out_sync) {
                out_sync = drm_syncobj_find(file_priv, args->out_sync);
                if (!out_sync) {
                        ret = -EINVAL;
                        goto fail;
                }

                /* Signal the syncobj once the whole submission, i.e. the
                 * render job, has finished. */
                exec->fence = render->done_fence;
                drm_syncobj_replace_fence(out_sync, exec->fence);

                drm_syncobj_put(out_sync);
        }

        vc4_update_bo_seqnos(exec, seqno);

        vc4_unlock_bo_reservations(dev, exec, acquire_ctx);

        if (bin)
                vc4_job_put(bin);
        vc4_job_put(render);

        return 0;

fail_unlock:
        mutex_unlock(&vc4->sched_lock);
fail:
        if (bin)
                vc4_job_put(bin);
        vc4_job_put(render);
        return ret;
}
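One piece both ops tables reference but the listings never show is vc4_job_dependency. Since vc4_job_init() collects the input fences into the job->deps xarray, the hook presumably just hands them to the scheduler one at a time, the way v3d's equivalent does. A sketch under that assumption (to_vc4_job() and the last_dep cursor field are assumed for illustration, not taken from the patch):

static struct dma_fence *vc4_job_dependency(struct drm_sched_job *sched_job,
                                            struct drm_sched_entity *s_entity)
{
        struct vc4_job *job = to_vc4_job(sched_job); /* hypothetical container_of() helper */

        /* Hand the scheduler one dependency per call; ownership of the
         * fence reference moves to the scheduler, which puts it. */
        if (!xa_empty(&job->deps))
                return xa_erase(&job->deps, job->last_dep++);

        /* Returning NULL tells the scheduler the job is ready to run. */
        return NULL;
}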