blk-throtl: stop using blkg->stat_bytes and ->stat_ios
When used on cgroup1, blk-throtl uses the blkg->stat_bytes and
->stat_ios from blk-cgroup core to populate four stat knobs.
blk-cgroup core is moving away from blkg_rwstat to improve scalability
and won't be able to support this usage.

The sharing doesn't gain all that much anyway.  Let's break these stats out
into dedicated rwstat counters, which are updated only when on cgroup1.

Signed-off-by: Tejun Heo <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
htejun authored and axboe committed Nov 7, 2019
1 parent fd41e60 commit 7ca4643
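
For orientation before the diff: blkg_rwstat is a small per-direction statistics
object built on percpu counters, and the patch follows its usual three-step
lifecycle (init on allocation, add in the I/O path, exit on free). Below is a
minimal kernel-style sketch of that pattern; the my_pd* names are hypothetical,
only the blkg_rwstat_* calls are the real blk-cgroup API of this era, and the
sketch is not part of the commit itself.

/* Hypothetical policy data using the blkg_rwstat lifecycle (sketch only). */
struct my_pd {
	struct blkg_rwstat stat_bytes;	/* bytes, bucketed by op direction */
	struct blkg_rwstat stat_ios;	/* I/O counts, bucketed the same way */
};

static int my_pd_setup(struct my_pd *pd, gfp_t gfp)
{
	/* 1. allocate percpu counters; this can fail and must be unwound */
	if (blkg_rwstat_init(&pd->stat_bytes, gfp))
		return -ENOMEM;
	if (blkg_rwstat_init(&pd->stat_ios, gfp)) {
		blkg_rwstat_exit(&pd->stat_bytes);
		return -ENOMEM;
	}
	return 0;
}

static void my_pd_account(struct my_pd *pd, struct bio *bio)
{
	/* 2. charge the bio; bi_opf selects the read/write/discard bucket */
	blkg_rwstat_add(&pd->stat_bytes, bio->bi_opf, bio->bi_iter.bi_size);
	blkg_rwstat_add(&pd->stat_ios, bio->bi_opf, 1);
}

static void my_pd_teardown(struct my_pd *pd)
{
	/* 3. release the percpu counters when the policy data is freed */
	blkg_rwstat_exit(&pd->stat_bytes);
	blkg_rwstat_exit(&pd->stat_ios);
}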
Showing 1 changed file: block/blk-throttle.c (+61, -9).
@@ -176,6 +176,9 @@ struct throtl_grp {
 	unsigned int bio_cnt; /* total bios */
 	unsigned int bad_bio_cnt; /* bios exceeding latency threshold */
 	unsigned long bio_cnt_reset_time;
+
+	struct blkg_rwstat stat_bytes;
+	struct blkg_rwstat stat_ios;
 };
 
 /* We measure latency for request size from <= 4k to >= 1M */
@@ -489,6 +492,12 @@ static struct blkg_policy_data *throtl_pd_alloc(gfp_t gfp,
 	if (!tg)
 		return NULL;
 
+	if (blkg_rwstat_init(&tg->stat_bytes, gfp))
+		goto err_free_tg;
+
+	if (blkg_rwstat_init(&tg->stat_ios, gfp))
+		goto err_exit_stat_bytes;
+
 	throtl_service_queue_init(&tg->service_queue);
 
 	for (rw = READ; rw <= WRITE; rw++) {
@@ -513,6 +522,12 @@ static struct blkg_policy_data *throtl_pd_alloc(gfp_t gfp,
 	tg->idletime_threshold_conf = DFL_IDLE_THRESHOLD;
 
 	return &tg->pd;
+
+err_exit_stat_bytes:
+	blkg_rwstat_exit(&tg->stat_bytes);
+err_free_tg:
+	kfree(tg);
+	return NULL;
 }
 
 static void throtl_pd_init(struct blkg_policy_data *pd)
@@ -611,6 +626,8 @@ static void throtl_pd_free(struct blkg_policy_data *pd)
 	struct throtl_grp *tg = pd_to_tg(pd);
 
 	del_timer_sync(&tg->service_queue.pending_timer);
+	blkg_rwstat_exit(&tg->stat_bytes);
+	blkg_rwstat_exit(&tg->stat_ios);
 	kfree(tg);
 }

@@ -1464,6 +1481,32 @@ static ssize_t tg_set_conf_uint(struct kernfs_open_file *of,
 	return tg_set_conf(of, buf, nbytes, off, false);
 }
 
+static int tg_print_rwstat(struct seq_file *sf, void *v)
+{
+	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
+			  blkg_prfill_rwstat, &blkcg_policy_throtl,
+			  seq_cft(sf)->private, true);
+	return 0;
+}
+
+static u64 tg_prfill_rwstat_recursive(struct seq_file *sf,
+				      struct blkg_policy_data *pd, int off)
+{
+	struct blkg_rwstat_sample sum;
+
+	blkg_rwstat_recursive_sum(pd_to_blkg(pd), &blkcg_policy_throtl, off,
+				  &sum);
+	return __blkg_prfill_rwstat(sf, pd, &sum);
+}
+
+static int tg_print_rwstat_recursive(struct seq_file *sf, void *v)
+{
+	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
+			  tg_prfill_rwstat_recursive, &blkcg_policy_throtl,
+			  seq_cft(sf)->private, true);
+	return 0;
+}
+
 static struct cftype throtl_legacy_files[] = {
 	{
 		.name = "throttle.read_bps_device",
@@ -1491,23 +1534,23 @@ static struct cftype throtl_legacy_files[] = {
 	},
 	{
 		.name = "throttle.io_service_bytes",
-		.private = (unsigned long)&blkcg_policy_throtl,
-		.seq_show = blkg_print_stat_bytes,
+		.private = offsetof(struct throtl_grp, stat_bytes),
+		.seq_show = tg_print_rwstat,
 	},
 	{
 		.name = "throttle.io_service_bytes_recursive",
-		.private = (unsigned long)&blkcg_policy_throtl,
-		.seq_show = blkg_print_stat_bytes_recursive,
+		.private = offsetof(struct throtl_grp, stat_bytes),
+		.seq_show = tg_print_rwstat_recursive,
 	},
 	{
 		.name = "throttle.io_serviced",
-		.private = (unsigned long)&blkcg_policy_throtl,
-		.seq_show = blkg_print_stat_ios,
+		.private = offsetof(struct throtl_grp, stat_ios),
+		.seq_show = tg_print_rwstat,
 	},
 	{
 		.name = "throttle.io_serviced_recursive",
-		.private = (unsigned long)&blkcg_policy_throtl,
-		.seq_show = blkg_print_stat_ios_recursive,
+		.private = offsetof(struct throtl_grp, stat_ios),
+		.seq_show = tg_print_rwstat_recursive,
 	},
 	{ }	/* terminate */
 };
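
A note on the cftype changes above: .private previously held a pointer to
blkcg_policy_throtl and the files were rendered by blk-cgroup core helpers; it
now holds offsetof(struct throtl_grp, stat_bytes/stat_ios). That offset travels
through seq_cft(sf)->private into the prfill callbacks, which resolve it
relative to the policy data; because struct blkg_policy_data pd is the first
member of struct throtl_grp, pd + off lands exactly on the embedded counter
(the recursive variant does the same per descendant via
blkg_rwstat_recursive_sum()). For reference, blkg_prfill_rwstat() as it reads
in block/blk-cgroup.c of this era, with comments added here:

u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off)
{
	struct blkg_rwstat_sample rwstat = { };

	/* 'off' is the offsetof() stashed in cftype->private; pd is the
	 * policy data embedded at the start of struct throtl_grp, so
	 * pd + off points at tg->stat_bytes or tg->stat_ios */
	blkg_rwstat_read((void *)pd + off, &rwstat);
	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}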
@@ -2127,7 +2170,16 @@ bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
 	WARN_ON_ONCE(!rcu_read_lock_held());
 
 	/* see throtl_charge_bio() */
-	if (bio_flagged(bio, BIO_THROTTLED) || !tg->has_rules[rw])
+	if (bio_flagged(bio, BIO_THROTTLED))
+		goto out;
+
+	if (!cgroup_subsys_on_dfl(io_cgrp_subsys)) {
+		blkg_rwstat_add(&tg->stat_bytes, bio->bi_opf,
+				bio->bi_iter.bi_size);
+		blkg_rwstat_add(&tg->stat_ios, bio->bi_opf, 1);
+	}
+
+	if (!tg->has_rules[rw])
 		goto out;
 
 	spin_lock_irq(&q->queue_lock);
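
A note on the new branch above: cgroup_subsys_on_dfl(io_cgrp_subsys) is true
when the io controller sits on the cgroup2 default hierarchy, so the negated
test charges the dedicated counters only on cgroup1, the only hierarchy where
the throttle.io_service* knobs exist. The per-bio cost is a handful of percpu
additions; abridged from include/linux/blk-cgroup.h of this era, with comments
added here:

static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
				   unsigned int op, uint64_t val)
{
	struct percpu_counter *cnt;

	/* bucket by direction: discard / write / read */
	if (op_is_discard(op))
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_DISCARD];
	else if (op_is_write(op))
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_WRITE];
	else
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_READ];

	percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH);

	/* and by sync vs async */
	if (op_is_sync(op))
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_SYNC];
	else
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_ASYNC];

	percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH);
}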
