Merge tag 'for-linus-20181102' of git://git.kernel.dk/linux-block
Pull block layer fixes from Jens Axboe:
 "The biggest part of this pull request is the revert of the blkcg
  cleanup series. It had one fix earlier for a stacked device issue, but
  another one was reported. Rather than play whack-a-mole with this,
  revert the entire series and try again for the next kernel release.

  Apart from that, only small fixes/changes.

  Summary:

   - Indentation fixup for mtip32xx (Colin Ian King)

   - The blkcg cleanup series revert (Dennis Zhou)

   - Two NVMe fixes. One fixing a regression in the nvme request
     initialization in this merge window, causing nvme-fc to not work.
     The other is a suspend/resume p2p resource issue (James, Keith)

   - Fix sg discard merge, allowing us to merge in cases where we didn't
     before (Jianchao Wang)

   - Call rq_qos_exit() after the queue is frozen, preventing a hang
     (Ming)

   - Fix brd queue setup, fixing an oops if we fail setting up all
     devices (Ming)"

* tag 'for-linus-20181102' of git://git.kernel.dk/linux-block:
  nvme-pci: fix conflicting p2p resource adds
  nvme-fc: fix request private initialization
  blkcg: revert blkcg cleanups series
  block: brd: associate with queue until adding disk
  block: call rq_qos_exit() after queue is frozen
  mtip32xx: clean an indentation issue, remove extraneous tabs
  block: fix the DISCARD request merge
torvalds committed Nov 2, 2018
2 parents (fcc37f7 + 9fe5c59); commit 5f21585
Showing 28 changed files with 265 additions and 424 deletions.
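For readers who want to poke at this merge locally, it can be inspected with ordinary git commands. This is only a convenience sketch; it assumes a clone of the mainline kernel tree and that the abbreviated hashes shown on this page are unambiguous there:

  git log --oneline 5f21585^..5f21585^2    # list the seven commits pulled in by this merge
  git diff --stat 5f21585^ 5f21585         # reproduce the 28-file diffstat summarized above
  git show 9fe5c59                         # inspect the head of the pulled block branch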
8 changes: 3 additions & 5 deletions Documentation/admin-guide/cgroup-v2.rst
@@ -1879,10 +1879,8 @@ following two functions.
 
   wbc_init_bio(@wbc, @bio)
 	Should be called for each bio carrying writeback data and
-	associates the bio with the inode's owner cgroup and the
-	corresponding request queue. This must be called after
-	a queue (device) has been associated with the bio and
-	before submission.
+	associates the bio with the inode's owner cgroup. Can be
+	called anytime between bio allocation and submission.
 
   wbc_account_io(@wbc, @page, @bytes)
 	Should be called for each data segment being written out.
@@ -1901,7 +1899,7 @@ the configuration, the bio may be executed at a lower priority and if
 the writeback session is holding shared resources, e.g. a journal
 entry, may lead to priority inversion. There is no one easy solution
 for the problem. Filesystems can try to work around specific problem
-cases by skipping wbc_init_bio() or using bio_associate_create_blkg()
+cases by skipping wbc_init_bio() or using bio_associate_blkcg()
 directly.
 
 
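As the restored documentation above says, wbc_init_bio() now only ties the bio to the inode's owner cgroup, so a filesystem may call it anywhere between bio allocation and submission. Below is a minimal, hypothetical write-out sketch against the 4.19-era helpers; my_fs_writepage(), bdev and sector are illustrative placeholders, not something introduced by this commit:

#include <linux/bio.h>
#include <linux/writeback.h>

static int my_fs_writepage(struct page *page, struct writeback_control *wbc,
			   struct block_device *bdev, sector_t sector)
{
	/* one-page write bio; GFP_NOFS since we may already be in writeback */
	struct bio *bio = bio_alloc(GFP_NOFS, 1);

	if (!bio)
		return -ENOMEM;

	bio_set_dev(bio, bdev);			/* device association */
	bio->bi_iter.bi_sector = sector;
	bio_add_page(bio, page, PAGE_SIZE, 0);	/* always fits: bio has one vec */
	bio_set_op_attrs(bio, REQ_OP_WRITE, wbc_to_write_flags(wbc));

	wbc_init_bio(wbc, bio);			/* cgroup association, any time before submit */
	wbc_account_io(wbc, page, PAGE_SIZE);	/* charge the bytes to the owner cgroup */

	submit_bio(bio);
	return 0;
}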
4 changes: 2 additions & 2 deletions block/bfq-cgroup.c
@@ -642,7 +642,7 @@ void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
 	uint64_t serial_nr;
 
 	rcu_read_lock();
-	serial_nr = __bio_blkcg(bio)->css.serial_nr;
+	serial_nr = bio_blkcg(bio)->css.serial_nr;
 
 	/*
 	 * Check whether blkcg has changed. The condition may trigger
@@ -651,7 +651,7 @@ void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
 	if (unlikely(!bfqd) || likely(bic->blkcg_serial_nr == serial_nr))
 		goto out;
 
-	bfqg = __bfq_bic_change_cgroup(bfqd, bic, __bio_blkcg(bio));
+	bfqg = __bfq_bic_change_cgroup(bfqd, bic, bio_blkcg(bio));
 	/*
 	 * Update blkg_path for bfq_log_* functions. We cache this
 	 * path, and update it here, for the following
2 changes: 1 addition & 1 deletion block/bfq-iosched.c
@@ -4384,7 +4384,7 @@ static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd,
 
 	rcu_read_lock();
 
-	bfqg = bfq_find_set_group(bfqd, __bio_blkcg(bio));
+	bfqg = bfq_find_set_group(bfqd, bio_blkcg(bio));
 	if (!bfqg) {
 		bfqq = &bfqd->oom_bfqq;
 		goto out;
174 changes: 47 additions & 127 deletions block/bio.c
@@ -609,9 +609,7 @@ void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
 	bio->bi_iter = bio_src->bi_iter;
 	bio->bi_io_vec = bio_src->bi_io_vec;
 
-	bio_clone_blkg_association(bio, bio_src);
-
-	blkcg_bio_issue_init(bio);
+	bio_clone_blkcg_association(bio, bio_src);
 }
 EXPORT_SYMBOL(__bio_clone_fast);
 
@@ -1956,151 +1954,69 @@ EXPORT_SYMBOL(bioset_init_from_src);
 
 #ifdef CONFIG_BLK_CGROUP
 
-/**
- * bio_associate_blkg - associate a bio with the a blkg
- * @bio: target bio
- * @blkg: the blkg to associate
- *
- * This tries to associate @bio with the specified blkg. Association failure
- * is handled by walking up the blkg tree. Therefore, the blkg associated can
- * be anything between @blkg and the root_blkg. This situation only happens
- * when a cgroup is dying and then the remaining bios will spill to the closest
- * alive blkg.
- *
- * A reference will be taken on the @blkg and will be released when @bio is
- * freed.
- */
-int bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg)
-{
-	if (unlikely(bio->bi_blkg))
-		return -EBUSY;
-	bio->bi_blkg = blkg_tryget_closest(blkg);
-	return 0;
-}
-
-/**
- * __bio_associate_blkg_from_css - internal blkg association function
- *
- * This in the core association function that all association paths rely on.
- * A blkg reference is taken which is released upon freeing of the bio.
- */
-static int __bio_associate_blkg_from_css(struct bio *bio,
-					 struct cgroup_subsys_state *css)
-{
-	struct request_queue *q = bio->bi_disk->queue;
-	struct blkcg_gq *blkg;
-	int ret;
-
-	rcu_read_lock();
-
-	if (!css || !css->parent)
-		blkg = q->root_blkg;
-	else
-		blkg = blkg_lookup_create(css_to_blkcg(css), q);
-
-	ret = bio_associate_blkg(bio, blkg);
-
-	rcu_read_unlock();
-	return ret;
-}
-
-/**
- * bio_associate_blkg_from_css - associate a bio with a specified css
- * @bio: target bio
- * @css: target css
- *
- * Associate @bio with the blkg found by combining the css's blkg and the
- * request_queue of the @bio. This falls back to the queue's root_blkg if
- * the association fails with the css.
- */
-int bio_associate_blkg_from_css(struct bio *bio,
-				struct cgroup_subsys_state *css)
-{
-	if (unlikely(bio->bi_blkg))
-		return -EBUSY;
-	return __bio_associate_blkg_from_css(bio, css);
-}
-EXPORT_SYMBOL_GPL(bio_associate_blkg_from_css);
-
 #ifdef CONFIG_MEMCG
 /**
- * bio_associate_blkg_from_page - associate a bio with the page's blkg
+ * bio_associate_blkcg_from_page - associate a bio with the page's blkcg
  * @bio: target bio
  * @page: the page to lookup the blkcg from
  *
- * Associate @bio with the blkg from @page's owning memcg and the respective
- * request_queue. If cgroup_e_css returns NULL, fall back to the queue's
- * root_blkg.
- *
- * Note: this must be called after bio has an associated device.
+ * Associate @bio with the blkcg from @page's owning memcg. This works like
+ * every other associate function wrt references.
 */
-int bio_associate_blkg_from_page(struct bio *bio, struct page *page)
+int bio_associate_blkcg_from_page(struct bio *bio, struct page *page)
 {
-	struct cgroup_subsys_state *css;
-	int ret;
+	struct cgroup_subsys_state *blkcg_css;
 
-	if (unlikely(bio->bi_blkg))
+	if (unlikely(bio->bi_css))
 		return -EBUSY;
 	if (!page->mem_cgroup)
 		return 0;
-
-	rcu_read_lock();
-
-	css = cgroup_e_css(page->mem_cgroup->css.cgroup, &io_cgrp_subsys);
-
-	ret = __bio_associate_blkg_from_css(bio, css);
-
-	rcu_read_unlock();
-	return ret;
+	blkcg_css = cgroup_get_e_css(page->mem_cgroup->css.cgroup,
+				     &io_cgrp_subsys);
+	bio->bi_css = blkcg_css;
+	return 0;
 }
 #endif /* CONFIG_MEMCG */
 
 /**
- * bio_associate_create_blkg - associate a bio with a blkg from q
- * @q: request_queue where bio is going
+ * bio_associate_blkcg - associate a bio with the specified blkcg
  * @bio: target bio
+ * @blkcg_css: css of the blkcg to associate
+ *
+ * Associate @bio with the blkcg specified by @blkcg_css. Block layer will
+ * treat @bio as if it were issued by a task which belongs to the blkcg.
  *
- * Associate @bio with the blkg found from the bio's css and the request_queue.
- * If one is not found, bio_lookup_blkg creates the blkg. This falls back to
- * the queue's root_blkg if association fails.
+ * This function takes an extra reference of @blkcg_css which will be put
+ * when @bio is released. The caller must own @bio and is responsible for
+ * synchronizing calls to this function.
 */
-int bio_associate_create_blkg(struct request_queue *q, struct bio *bio)
+int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css)
 {
-	struct cgroup_subsys_state *css;
-	int ret = 0;
-
-	/* someone has already associated this bio with a blkg */
-	if (bio->bi_blkg)
-		return ret;
-
-	rcu_read_lock();
-
-	css = blkcg_css();
-
-	ret = __bio_associate_blkg_from_css(bio, css);
-
-	rcu_read_unlock();
-	return ret;
+	if (unlikely(bio->bi_css))
+		return -EBUSY;
+	css_get(blkcg_css);
+	bio->bi_css = blkcg_css;
+	return 0;
 }
+EXPORT_SYMBOL_GPL(bio_associate_blkcg);
 
 /**
- * bio_reassociate_blkg - reassociate a bio with a blkg from q
- * @q: request_queue where bio is going
+ * bio_associate_blkg - associate a bio with the specified blkg
  * @bio: target bio
+ * @blkg: the blkg to associate
  *
- * When submitting a bio, multiple recursive calls to make_request() may occur.
- * This causes the initial associate done in blkcg_bio_issue_check() to be
- * incorrect and reference the prior request_queue. This performs reassociation
- * when this situation happens.
+ * Associate @bio with the blkg specified by @blkg. This is the queue specific
+ * blkcg information associated with the @bio, a reference will be taken on the
+ * @blkg and will be freed when the bio is freed.
 */
-int bio_reassociate_blkg(struct request_queue *q, struct bio *bio)
+int bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg)
 {
-	if (bio->bi_blkg) {
-		blkg_put(bio->bi_blkg);
-		bio->bi_blkg = NULL;
-	}
-
-	return bio_associate_create_blkg(q, bio);
+	if (unlikely(bio->bi_blkg))
+		return -EBUSY;
+	if (!blkg_try_get(blkg))
+		return -ENODEV;
+	bio->bi_blkg = blkg;
+	return 0;
 }
 
 /**
@@ -2113,23 +2029,27 @@ void bio_disassociate_task(struct bio *bio)
 		put_io_context(bio->bi_ioc);
 		bio->bi_ioc = NULL;
 	}
+	if (bio->bi_css) {
+		css_put(bio->bi_css);
+		bio->bi_css = NULL;
+	}
 	if (bio->bi_blkg) {
 		blkg_put(bio->bi_blkg);
 		bio->bi_blkg = NULL;
 	}
 }
 
 /**
- * bio_clone_blkg_association - clone blkg association from src to dst bio
+ * bio_clone_blkcg_association - clone blkcg association from src to dst bio
  * @dst: destination bio
  * @src: source bio
 */
-void bio_clone_blkg_association(struct bio *dst, struct bio *src)
+void bio_clone_blkcg_association(struct bio *dst, struct bio *src)
 {
-	if (src->bi_blkg)
-		bio_associate_blkg(dst, src->bi_blkg);
+	if (src->bi_css)
+		WARN_ON(bio_associate_blkcg(dst, src->bi_css));
 }
-EXPORT_SYMBOL_GPL(bio_clone_blkg_association);
+EXPORT_SYMBOL_GPL(bio_clone_blkcg_association);
 #endif /* CONFIG_BLK_CGROUP */
 
 static void __init biovec_init_slabs(void)
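Taken together, the restored interface above has a simple ownership model: a submitter attaches a css with bio_associate_blkcg(), clones inherit it through bio_clone_blkcg_association() (called from __bio_clone_fast()), and the css reference is dropped by bio_disassociate_task() when the bio is freed. A small, hypothetical caller sketch follows; my_submit_tagged_bio() is a placeholder name, and target_css stands for whatever css the caller already holds (for example wbc->wb->blkcg_css in the writeback path):

static void my_submit_tagged_bio(struct bio *bio,
				 struct cgroup_subsys_state *target_css)
{
	/*
	 * bio_associate_blkcg() takes its own reference on target_css;
	 * it is put again by bio_disassociate_task() when the bio is freed.
	 * A non-zero return (-EBUSY) means the bio was already associated.
	 */
	WARN_ON_ONCE(bio_associate_blkcg(bio, target_css));

	submit_bio(bio);
}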
(Diffs for the remaining changed files in this merge are not shown here.)
