Diffstat (limited to 'drivers/gpu/drm/scheduler/sched_main.c')
-rw-r--r--  drivers/gpu/drm/scheduler/sched_main.c  96
1 file changed, 93 insertions, 3 deletions
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index e5a4ecde0063..d0ff9e11cb69 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -62,6 +62,55 @@
#define to_drm_sched_job(sched_job)		\
		container_of((sched_job), struct drm_sched_job, queue_node)
+int drm_sched_policy = DRM_SCHED_POLICY_FIFO;
+
+/**
+ * DOC: sched_policy (int)
+ * Used to override the default scheduling policy for entities in a run queue.
+ */
+MODULE_PARM_DESC(sched_policy, "Specify the scheduling policy for entities on a run-queue, " __stringify(DRM_SCHED_POLICY_RR) " = Round Robin, " __stringify(DRM_SCHED_POLICY_FIFO) " = FIFO (default).");
+module_param_named(sched_policy, drm_sched_policy, int, 0444);
+
+static __always_inline bool drm_sched_entity_compare_before(struct rb_node *a,
+							     const struct rb_node *b)
+{
+	struct drm_sched_entity *ent_a = rb_entry((a), struct drm_sched_entity, rb_tree_node);
+	struct drm_sched_entity *ent_b = rb_entry((b), struct drm_sched_entity, rb_tree_node);
+
+	return ktime_before(ent_a->oldest_job_waiting, ent_b->oldest_job_waiting);
+}
+
+static inline void drm_sched_rq_remove_fifo_locked(struct drm_sched_entity *entity)
+{
+	struct drm_sched_rq *rq = entity->rq;
+
+	if (!RB_EMPTY_NODE(&entity->rb_tree_node)) {
+		rb_erase_cached(&entity->rb_tree_node, &rq->rb_tree_root);
+		RB_CLEAR_NODE(&entity->rb_tree_node);
+	}
+}
+
+void drm_sched_rq_update_fifo(struct drm_sched_entity *entity, ktime_t ts)
+{
+	/*
+	 * Both locks need to be grabbed: one to protect against entity->rq
+	 * changing from within a concurrent drm_sched_entity_select_rq, and
+	 * the other to serialize updates to the rb tree structure.
+	 */
+	spin_lock(&entity->rq_lock);
+	spin_lock(&entity->rq->lock);
+
+	drm_sched_rq_remove_fifo_locked(entity);
+
+	entity->oldest_job_waiting = ts;
+
+	rb_add_cached(&entity->rb_tree_node, &entity->rq->rb_tree_root,
+		      drm_sched_entity_compare_before);
+
+	spin_unlock(&entity->rq->lock);
+	spin_unlock(&entity->rq_lock);
+}
+
/**
* drm_sched_rq_init - initialize a given run queue struct
*
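Note: the helpers above use several members that this file never declares: entity->rb_tree_node, entity->oldest_job_waiting and rq->rb_tree_root, plus the DRM_SCHED_POLICY_* constants. Those come from the include/drm/gpu_scheduler.h side of the series, which is not shown in this diff. A minimal sketch of what those declarations look like, for reference only (names taken from the code above, exact layout assumed):

/* Sketch only -- the real declarations live in include/drm/gpu_scheduler.h. */

/* Scheduling policies selectable through the sched_policy module parameter. */
#define DRM_SCHED_POLICY_RR	0
#define DRM_SCHED_POLICY_FIFO	1

extern int drm_sched_policy;

struct drm_sched_entity {
	/* ... existing members ... */

	/* Links the entity into its run queue's FIFO rb tree. */
	struct rb_node		rb_tree_node;

	/* Submit time of the oldest job still waiting on this entity. */
	ktime_t			oldest_job_waiting;
};

struct drm_sched_rq {
	/* ... existing members ... */

	/* Entities ordered by oldest_job_waiting; rb_first_cached() is the oldest. */
	struct rb_root_cached	rb_tree_root;
};
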
@@ -75,6 +124,7 @@ static void drm_sched_rq_init(struct drm_gpu_scheduler *sched,
{
	spin_lock_init(&rq->lock);
	INIT_LIST_HEAD(&rq->entities);
+	rq->rb_tree_root = RB_ROOT_CACHED;
	rq->current_entity = NULL;
	rq->sched = sched;
}
@@ -92,9 +142,12 @@ void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
{
	if (!list_empty(&entity->list))
		return;
+
	spin_lock(&rq->lock);
+
	atomic_inc(rq->sched->score);
	list_add_tail(&entity->list, &rq->entities);
+
	spin_unlock(&rq->lock);
}
@@ -111,23 +164,30 @@ void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
{
	if (list_empty(&entity->list))
		return;
+
	spin_lock(&rq->lock);
+
	atomic_dec(rq->sched->score);
	list_del_init(&entity->list);
+
	if (rq->current_entity == entity)
		rq->current_entity = NULL;
+
+	if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
+		drm_sched_rq_remove_fifo_locked(entity);
+
	spin_unlock(&rq->lock);
}
/**
- * drm_sched_rq_select_entity - Select an entity which could provide a job to run
+ * drm_sched_rq_select_entity_rr - Select an entity which could provide a job to run
*
* @rq: scheduler run queue to check.
*
* Try to find a ready entity, returns NULL if none found.
*/
static struct drm_sched_entity *
-drm_sched_rq_select_entity(struct drm_sched_rq *rq)
+drm_sched_rq_select_entity_rr(struct drm_sched_rq *rq)
{
	struct drm_sched_entity *entity;
@@ -164,6 +224,34 @@ drm_sched_rq_select_entity(struct drm_sched_rq *rq)
}
/**
+ * drm_sched_rq_select_entity_fifo - Select an entity which provides a job to run
+ *
+ * @rq: scheduler run queue to check.
+ *
+ * Find the oldest waiting ready entity; returns NULL if none is found.
+ */
+static struct drm_sched_entity *
+drm_sched_rq_select_entity_fifo(struct drm_sched_rq *rq)
+{
+	struct rb_node *rb;
+
+	spin_lock(&rq->lock);
+	for (rb = rb_first_cached(&rq->rb_tree_root); rb; rb = rb_next(rb)) {
+		struct drm_sched_entity *entity;
+
+		entity = rb_entry(rb, struct drm_sched_entity, rb_tree_node);
+		if (drm_sched_entity_is_ready(entity)) {
+			rq->current_entity = entity;
+			reinit_completion(&entity->entity_idle);
+			break;
+		}
+	}
+	spin_unlock(&rq->lock);
+
+	return rb ? rb_entry(rb, struct drm_sched_entity, rb_tree_node) : NULL;
+}
+
+/**
* drm_sched_job_done - complete a job
* @s_job: pointer to the job which is done
*
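Note: drm_sched_rq_select_entity_fifo() only works if something feeds timestamps into drm_sched_rq_update_fifo(). That happens on the entity side (drm_sched_entity.c), which is outside this file's diff. The sketch below shows, greatly simplified, how a push-job path would stamp the job and update the rb tree; the submit_ts field on struct drm_sched_job and the exact call sites are assumptions here, not taken from this diff (locking, tracing and error handling omitted):

/* Simplified sketch of the entity-side caller, not part of this diff. */
void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
{
	struct drm_sched_entity *entity = sched_job->entity;
	bool first;

	/* Remember when the job was queued; this is what FIFO orders by. */
	sched_job->submit_ts = ktime_get();

	/* Returns true if the queue was empty, i.e. the entity was idle. */
	first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);

	if (first) {
		/* First job on an idle entity: put it back on the run queue... */
		drm_sched_rq_add_entity(entity->rq, entity);

		/* ...and (re)position it in the FIFO rb tree by submit time. */
		if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
			drm_sched_rq_update_fifo(entity, sched_job->submit_ts);

		drm_sched_wakeup(entity->rq->sched);
	}
}
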
@@ -803,7 +891,9 @@ drm_sched_select_entity(struct drm_gpu_scheduler *sched)
/* Kernel run queue has higher priority than normal run queue*/
	for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
-		entity = drm_sched_rq_select_entity(&sched->sched_rq[i]);
+		entity = drm_sched_policy == DRM_SCHED_POLICY_FIFO ?
+			drm_sched_rq_select_entity_fifo(&sched->sched_rq[i]) :
+			drm_sched_rq_select_entity_rr(&sched->sched_rq[i]);
		if (entity)
			break;
	}
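
Usage note: FIFO is the default with this change, so nothing needs to be configured to get it. Because the parameter is registered with mode 0444 it is read-only at runtime and has to be chosen at boot or module load time; to fall back to round robin one would pass sched_policy=0 to the scheduler module (e.g. gpu_sched.sched_policy=0 on the kernel command line, assuming the usual gpu-sched module name).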