sched: Consider misfit tasks when load-balancing

With the new group_misfit_task load-balancing scenario, additional policy
conditions are needed when load-balancing. Misfit task balancing only
makes sense between a source group with lower per-cpu capacity than the
target group. If the capacities are the same, fall back to normal
group_other balancing. The aim is to balance tasks such that no task has
its throughput hindered by compute capacity while a cpu with more
capacity is available. Load-balancing is generally based on the average
load in the sched_groups, but for misfit tasks it is necessary to
introduce exceptions that migrate tasks against the usual metrics in
order to optimize throughput.

This patch ensures the following load-balance behaviour for
mixed-capacity systems (e.g. ARM big.LITTLE) with always-running tasks:

1. Place a task on each cpu starting in order from cpus with highest
capacity to lowest until all cpus are in use (i.e. one task on each
cpu).

2. Once all cpus are in use balance according to compute capacity such
that load per capacity is approximately the same regardless of the
compute capacity (i.e. big cpus get more tasks than little cpus).

Necessary changes are introduced in find_busiest_group(),
calculate_imbalance(), and find_busiest_queue(). This includes passing
the group_type on to find_busiest_queue() through struct lb_env, as
find_busiest_queue() currently only considers the amount of imbalance
and not the type of imbalance (group_type).

To avoid taking remote rq locks to examine source sched_groups for
misfit tasks, each cpu is responsible for tracking its own misfit tasks
and updating the rq->misfit_task flag. This means checking task
utilization when tasks are scheduled and on each sched_tick.
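
For reference, a minimal sketch of this per-cpu tracking (the helper
names task_fits_capacity() and update_misfit_task() are illustrative
assumptions and not hunks in this diff; capacity_margin, task_util(),
capacity_of() and task_tick_fair() are existing fair.c symbols):

static inline bool task_fits_capacity(struct task_struct *p,
				      unsigned long capacity)
{
	/* Require ~20% headroom, reusing the capacity_margin notion. */
	return capacity * SCHED_CAPACITY_SCALE >
					task_util(p) * capacity_margin;
}

static inline void update_misfit_task(struct rq *rq, struct task_struct *p)
{
	/* Each cpu updates only its own rq, so no remote rq locks are taken. */
	rq->misfit_task = !task_fits_capacity(p, capacity_of(rq->cpu));
}

Such helpers would be called from the enqueue path and from
task_tick_fair(), matching the "when tasks are scheduled and on each
sched_tick" description above.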

Change-Id: I29b35783765b0e51ae3ede74a8160b07f885bfde
Signed-off-by: Morten Rasmussen <morten.rasmussen@arm.com>
(cherry picked from commit 0d2b1cdfa11fd62d3358eef332e78b95b6a3e9ec)
[Fixed cherry-pick issue]
Signed-off-by: Quentin Perret <quentin.perret@arm.com>
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 61a2e75..1a67af4 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7143,6 +7143,7 @@
 	unsigned int		loop_max;
 
 	enum fbq_type		fbq_type;
+	enum group_type		busiest_group_type;
 	struct list_head	tasks;
 };
 
@@ -7918,6 +7919,18 @@
 	return false;
 }
 
+
+/*
+ * group_smaller_cpu_capacity: Returns true if sched_group sg has smaller
+ * per-cpu capacity than sched_group ref.
+ */
+static inline bool
+group_smaller_cpu_capacity(struct sched_group *sg, struct sched_group *ref)
+{
+	return sg->sgc->max_capacity + capacity_margin - SCHED_CAPACITY_SCALE <
+							ref->sgc->max_capacity;
+}
+
 static inline enum
 group_type group_classify(struct sched_group *group,
 			  struct sg_lb_stats *sgs)
@@ -8028,9 +8041,25 @@
 	if (sgs->group_type < busiest->group_type)
 		return false;
 
+	/*
+	 * Candidate sg doesn't face any serious load-balance problems
+	 * so don't pick it if the local sg is already filled up.
+	 */
+	if (sgs->group_type == group_other &&
+	    !group_has_capacity(env, &sds->local_stat))
+		return false;
+
 	if (sgs->avg_load <= busiest->avg_load)
 		return false;
 
+	/*
+	 * Candidate sg has no more than one task per cpu and has higher
+	 * per-cpu capacity. No reason to pull tasks to less capable cpus.
+	 */
+	if (sgs->sum_nr_running <= sgs->group_weight &&
+	    group_smaller_cpu_capacity(sds->local, sg))
+		return false;
+
 	/* This is the busiest node in its class. */
 	if (!(env->sd->flags & SD_ASYM_PACKING))
 		return true;
@@ -8140,6 +8169,15 @@
 			sgs->group_type = group_classify(sg, sgs);
 		}
 
+		/*
+		 * Ignore task groups with misfit tasks if local group has no
+		 * capacity or if per-cpu capacity isn't higher.
+		 */
+		if (sgs->group_type == group_misfit_task &&
+		    (!group_has_capacity(env, &sds->local_stat) ||
+		     !group_smaller_cpu_capacity(sg, sds->local)))
+			sgs->group_type = group_other;
+
 		if (update_sd_pick_busiest(env, sds, sg, sgs)) {
 			sds->busiest = sg;
 			sds->busiest_stat = *sgs;
@@ -8325,6 +8363,22 @@
 	 */
 	if (busiest->avg_load <= sds->avg_load ||
 	    local->avg_load >= sds->avg_load) {
+		/* Misfitting tasks should be migrated in any case */
+		if (busiest->group_type == group_misfit_task) {
+			env->imbalance = busiest->group_misfit_task;
+			return;
+		}
+
+		/*
+		 * Busiest group is overloaded, local is not, use the spare
+		 * cycles to maximize throughput
+		 */
+		if (busiest->group_type == group_overloaded &&
+		    local->group_type <= group_misfit_task) {
+			env->imbalance = busiest->load_per_task;
+			return;
+		}
+
 		env->imbalance = 0;
 		return fix_small_imbalance(env, sds);
 	}
@@ -8358,6 +8412,11 @@
 		(sds->avg_load - local->avg_load) * local->group_capacity
 	) / SCHED_CAPACITY_SCALE;
 
+	/* Boost imbalance to allow misfit task to be balanced. */
+	if (busiest->group_type == group_misfit_task)
+		env->imbalance = max_t(long, env->imbalance,
+				     busiest->group_misfit_task);
+
 	/*
 	 * if *imbalance is less than the average load per runnable task
 	 * there is no guarantee that any tasks will be moved so we'll have
@@ -8424,6 +8483,11 @@
 	    busiest->group_no_capacity)
 		goto force_balance;
 
+	/* Misfitting tasks should be dealt with regardless of the avg load */
+	if (busiest->group_type == group_misfit_task) {
+		goto force_balance;
+	}
+
 	/*
 	 * If the local group is busier than the selected busiest group
 	 * don't try and pull any tasks.
@@ -8447,7 +8511,8 @@
 		 * might end up to just move the imbalance on another group
 		 */
 		if ((busiest->group_type != group_overloaded) &&
-				(local->idle_cpus <= (busiest->idle_cpus + 1)))
+		    (local->idle_cpus <= (busiest->idle_cpus + 1)) &&
+		    !group_smaller_cpu_capacity(sds.busiest, sds.local))
 			goto out_balanced;
 	} else {
 		/*
@@ -8460,6 +8525,7 @@
 	}
 
 force_balance:
+	env->busiest_group_type = busiest->group_type;
 	/* Looks like there is an imbalance. Compute it */
 	calculate_imbalance(env, &sds);
 	return sds.busiest;
@@ -8518,7 +8584,8 @@
 		 */
 
 		if (rq->nr_running == 1 && wl > env->imbalance &&
-		    !check_cpu_capacity(rq, env->sd))
+		    !check_cpu_capacity(rq, env->sd) &&
+		    env->busiest_group_type != group_misfit_task)
 			continue;
 
 		/*