summaryrefslogtreecommitdiff
author Rick Yiu <rickyiu@google.com> 2018-09-26 08:45:50 (GMT)
committer Alistair Strachan <astrachan@google.com>2019-05-08 23:52:03 (GMT)
commit 07e40a31febc7d7dc272e4745d8304d1ffd9923c (patch)
tree 72f462b21e7ce1c7e58d0532cea976a1e0151b53
parent 9370915ea55ff00a9053d823e28786848d6f1a38 (diff)
download common-07e40a31febc7d7dc272e4745d8304d1ffd9923c.zip
download common-07e40a31febc7d7dc272e4745d8304d1ffd9923c.tar.gz
download common-07e40a31febc7d7dc272e4745d8304d1ffd9923c.tar.bz2
ANDROID: block/cfq-iosched: make group_idle per io cgroup tunable
If group_idle is made per io cgroup tunable, it gives more flexibility in
tuning the performance of each group. If no value is set, it will just use
the original default value.

Bug: 117857342
Test: values could be set to each group correctly
Signed-off-by: Rick Yiu <rickyiu@google.com>
Change-Id: I0b13c5da618a66da9274a59f8a85686aad3e110f
Diffstat
-rw-r--r--block/cfq-iosched.c81
1 file changed, 72 insertions, 9 deletions
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index a4e2d01..650d69e 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -222,6 +222,7 @@ struct cfq_group_data {
unsigned int weight;
unsigned int leaf_weight;
+ u64 group_idle;
};
/* This is per cgroup per device grouping structure */
@@ -307,6 +308,7 @@ struct cfq_group {
struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
struct cfq_queue *async_idle_cfqq;
+ u64 group_idle;
};
struct cfq_io_cq {
@@ -803,6 +805,17 @@ static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
#endif /* CONFIG_CFQ_GROUP_IOSCHED */
+static inline u64 get_group_idle(struct cfq_data *cfqd)
+{
+#ifdef CONFIG_CFQ_GROUP_IOSCHED
+ struct cfq_queue *cfqq = cfqd->active_queue;
+
+ if (cfqq && cfqq->cfqg)
+ return cfqq->cfqg->group_idle;
+#endif
+ return cfqd->cfq_group_idle;
+}
+
#define cfq_log(cfqd, fmt, args...) \
blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
@@ -823,7 +836,7 @@ static inline bool cfq_io_thinktime_big(struct cfq_data *cfqd,
if (!sample_valid(ttime->ttime_samples))
return false;
if (group_idle)
- slice = cfqd->cfq_group_idle;
+ slice = get_group_idle(cfqd);
else
slice = cfqd->cfq_slice_idle;
return ttime->ttime_mean > slice;
@@ -1626,6 +1639,7 @@ static void cfq_cpd_init(struct blkcg_policy_data *cpd)
cgd->weight = weight;
cgd->leaf_weight = weight;
+ cgd->group_idle = cfq_group_idle;
}
static void cfq_cpd_free(struct blkcg_policy_data *cpd)
@@ -1670,6 +1684,7 @@ static void cfq_pd_init(struct blkg_policy_data *pd)
cfqg->weight = cgd->weight;
cfqg->leaf_weight = cgd->leaf_weight;
+ cfqg->group_idle = cgd->group_idle;
}
static void cfq_pd_offline(struct blkg_policy_data *pd)
@@ -1791,6 +1806,19 @@ static int cfq_print_leaf_weight(struct seq_file *sf, void *v)
return 0;
}
+static int cfq_print_group_idle(struct seq_file *sf, void *v)
+{
+ struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
+ struct cfq_group_data *cgd = blkcg_to_cfqgd(blkcg);
+ u64 val = 0;
+
+ if (cgd)
+ val = cgd->group_idle;
+
+ seq_printf(sf, "%llu\n", div_u64(val, NSEC_PER_USEC));
+ return 0;
+}
+
static ssize_t __cfqg_set_weight_device(struct kernfs_open_file *of,
char *buf, size_t nbytes, loff_t off,
bool on_dfl, bool is_leaf_weight)
@@ -1912,6 +1940,37 @@ static int cfq_set_leaf_weight(struct cgroup_subsys_state *css,
return __cfq_set_weight(css, val, false, false, true);
}
+static int cfq_set_group_idle(struct cgroup_subsys_state *css,
+ struct cftype *cft, u64 val)
+{
+ struct blkcg *blkcg = css_to_blkcg(css);
+ struct cfq_group_data *cfqgd;
+ struct blkcg_gq *blkg;
+ int ret = 0;
+
+ spin_lock_irq(&blkcg->lock);
+ cfqgd = blkcg_to_cfqgd(blkcg);
+ if (!cfqgd) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ cfqgd->group_idle = val * NSEC_PER_USEC;
+
+ hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
+ struct cfq_group *cfqg = blkg_to_cfqg(blkg);
+
+ if (!cfqg)
+ continue;
+
+ cfqg->group_idle = cfqgd->group_idle;
+ }
+
+out:
+ spin_unlock_irq(&blkcg->lock);
+ return ret;
+}
+
static int cfqg_print_stat(struct seq_file *sf, void *v)
{
blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_stat,
@@ -2057,6 +2116,11 @@ static struct cftype cfq_blkcg_legacy_files[] = {
.seq_show = cfq_print_leaf_weight,
.write_u64 = cfq_set_leaf_weight,
},
+ {
+ .name = "group_idle",
+ .seq_show = cfq_print_group_idle,
+ .write_u64 = cfq_set_group_idle,
+ },
/* statistics, covers only the tasks in the cfqg */
{
@@ -2952,7 +3016,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
* with sync vs async workloads.
*/
if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag &&
- !cfqd->cfq_group_idle)
+ !get_group_idle(cfqd))
return;
WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
@@ -2963,9 +3027,8 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
*/
if (!cfq_should_idle(cfqd, cfqq)) {
/* no queue idling. Check for group idling */
- if (cfqd->cfq_group_idle)
- group_idle = cfqd->cfq_group_idle;
- else
+ group_idle = get_group_idle(cfqd);
+ if (!group_idle)
return;
}
@@ -3006,7 +3069,7 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
cfq_mark_cfqq_wait_request(cfqq);
if (group_idle)
- sl = cfqd->cfq_group_idle;
+ sl = group_idle;
else
sl = cfqd->cfq_slice_idle;
@@ -3355,7 +3418,7 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
* this group, wait for requests to complete.
*/
check_group_idle:
- if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1 &&
+ if (get_group_idle(cfqd) && cfqq->cfqg->nr_cfqq == 1 &&
cfqq->cfqg->dispatched &&
!cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true)) {
cfqq = NULL;
@@ -3914,7 +3977,7 @@ cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
cfqd->cfq_slice_idle);
}
#ifdef CONFIG_CFQ_GROUP_IOSCHED
- __cfq_update_io_thinktime(&cfqq->cfqg->ttime, cfqd->cfq_group_idle);
+ __cfq_update_io_thinktime(&cfqq->cfqg->ttime, get_group_idle(cfqd));
#endif
}
@@ -4308,7 +4371,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
if (cfq_should_wait_busy(cfqd, cfqq)) {
u64 extend_sl = cfqd->cfq_slice_idle;
if (!cfqd->cfq_slice_idle)
- extend_sl = cfqd->cfq_group_idle;
+ extend_sl = get_group_idle(cfqd);
cfqq->slice_end = now + extend_sl;
cfq_mark_cfqq_wait_busy(cfqq);
cfq_log_cfqq(cfqd, cfqq, "will busy wait");