Skip to content

Commit

Permalink
Merge branch 'for-4.9/block' of git://git.kernel.dk/linux-block
Browse files Browse the repository at this point in the history
Pull block layer updates from Jens Axboe:
 "This is the main pull request for block layer changes in 4.9.

  As mentioned at the last merge window, I've changed things up and now
  do just one branch for core block layer changes, and driver changes.
  This avoids dependencies between the two branches. Outside of this
  main pull request, there are two topical branches coming as well.

  This pull request contains:

   - A set of fixes, and a conversion to blk-mq, of nbd. From Josef.

   - Set of fixes and updates for lightnvm from Matias, Simon, and Arnd.
     Followup dependency fix from Geert.

   - General fixes from Bart, Baoyou, Guoqing, and Linus W.

   - CFQ async write starvation fix from Glauber.

   - Add support for delayed kick of the requeue list, from Mike.

   - Pull out the scalable bitmap code from blk-mq-tag.c and make it
     generally available under the name of sbitmap. Only blk-mq-tag uses
     it for now, but the blk-mq scheduling bits will use it as well.
     From Omar.

   - bdev thaw error propagation from Pierre.

   - Improve the blk polling statistics, and allow the user to clear
     them. From Stephen.

   - Set of minor cleanups from Christoph in block/blk-mq.

   - Set of cleanups and optimizations from me for block/blk-mq.

   - Various nvme/nvmet/nvmeof fixes from the various folks"

* 'for-4.9/block' of git://git.kernel.dk/linux-block: (54 commits)
  fs/block_dev.c: return the right error in thaw_bdev()
  nvme: Pass pointers, not dma addresses, to nvme_get/set_features()
  nvme/scsi: Remove power management support
  nvmet: Make dsm number of ranges zero based
  nvmet: Use direct IO for writes
  admin-cmd: Added smart-log command support.
  nvme-fabrics: Add host_traddr options field to host infrastructure
  nvme-fabrics: revise host transport option descriptions
  nvme-fabrics: rework nvmf_get_address() for variable options
  nbd: use BLK_MQ_F_BLOCKING
  blkcg: Annotate blkg_hint correctly
  cfq: fix starvation of asynchronous writes
  blk-mq: add flag for drivers wanting blocking ->queue_rq()
  blk-mq: remove non-blocking pass in blk_mq_map_request
  blk-mq: get rid of manual run of queue with __blk_mq_run_hw_queue()
  block: export bio_free_pages to other modules
  lightnvm: propagate device_add() error code
  lightnvm: expose device geometry through sysfs
  lightnvm: control life of nvm_dev in driver
  blk-mq: register device instead of disk
  ...
  • Loading branch information
torvalds committed Oct 7, 2016
2 parents 87840a2 + 997198b commit 513a4be
Show file tree
Hide file tree
Showing 53 changed files with 1,828 additions and 1,170 deletions.
4 changes: 2 additions & 2 deletions Documentation/block/biodoc.txt
Original file line number Diff line number Diff line change
Expand Up @@ -115,7 +115,7 @@ i. Per-queue limits/values exported to the generic layer by the driver

Various parameters that the generic i/o scheduler logic uses are set at
a per-queue level (e.g maximum request size, maximum number of segments in
a scatter-gather list, hardsect size)
a scatter-gather list, logical block size)

Some parameters that were earlier available as global arrays indexed by
major/minor are now directly associated with the queue. Some of these may
Expand Down Expand Up @@ -156,7 +156,7 @@ Some new queue property settings:
blk_queue_max_segment_size(q, max_seg_size)
Maximum size of a clustered segment, 64kB default.

blk_queue_hardsect_size(q, hardsect_size)
blk_queue_logical_block_size(q, logical_block_size)
Lowest possible sector size that the hardware can operate
on, 512 bytes default.

Expand Down
1 change: 1 addition & 0 deletions MAINTAINERS
Original file line number Diff line number Diff line change
Expand Up @@ -2472,6 +2472,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
S: Maintained
F: block/
F: kernel/trace/blktrace.c
F: lib/sbitmap.c

BLOCK2MTD DRIVER
M: Joern Engel <[email protected]>
Expand Down
1 change: 1 addition & 0 deletions block/Kconfig
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@
menuconfig BLOCK
bool "Enable the block layer" if EXPERT
default y
select SBITMAP
help
Provide block layer support for the kernel.

Expand Down
5 changes: 3 additions & 2 deletions block/bio.c
Original file line number Diff line number Diff line change
Expand Up @@ -1068,14 +1068,15 @@ static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
return 0;
}

static void bio_free_pages(struct bio *bio)
void bio_free_pages(struct bio *bio)
{
struct bio_vec *bvec;
int i;

bio_for_each_segment_all(bvec, bio, i)
__free_page(bvec->bv_page);
}
EXPORT_SYMBOL(bio_free_pages);

/**
* bio_uncopy_user - finish previously mapped bio
Expand Down Expand Up @@ -1274,7 +1275,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,

nr_pages += end - start;
/*
* buffer must be aligned to at least hardsector size for now
* buffer must be aligned to at least logical block size for now
*/
if (uaddr & queue_dma_alignment(q))
return ERR_PTR(-EINVAL);
Expand Down
16 changes: 13 additions & 3 deletions block/blk-core.c
Original file line number Diff line number Diff line change
Expand Up @@ -288,7 +288,7 @@ void blk_sync_queue(struct request_queue *q)
int i;

queue_for_each_hw_ctx(q, hctx, i) {
cancel_delayed_work_sync(&hctx->run_work);
cancel_work_sync(&hctx->run_work);
cancel_delayed_work_sync(&hctx->delay_work);
}
} else {
Expand Down Expand Up @@ -3097,6 +3097,12 @@ int kblockd_schedule_work(struct work_struct *work)
}
EXPORT_SYMBOL(kblockd_schedule_work);

/*
 * kblockd_schedule_work_on - queue @work on the kblockd workqueue, pinned
 * to @cpu.  Thin wrapper around queue_work_on(); returns its result
 * (nonzero if the work was queued, 0 if it was already pending).
 * Exported so drivers/blk-mq can kick per-CPU deferred block work.
 */
int kblockd_schedule_work_on(int cpu, struct work_struct *work)
{
return queue_work_on(cpu, kblockd_workqueue, work);
}
EXPORT_SYMBOL(kblockd_schedule_work_on);

int kblockd_schedule_delayed_work(struct delayed_work *dwork,
unsigned long delay)
{
Expand Down Expand Up @@ -3301,19 +3307,23 @@ bool blk_poll(struct request_queue *q, blk_qc_t cookie)
{
struct blk_plug *plug;
long state;
unsigned int queue_num;
struct blk_mq_hw_ctx *hctx;

if (!q->mq_ops || !q->mq_ops->poll || !blk_qc_t_valid(cookie) ||
!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
return false;

queue_num = blk_qc_t_to_queue_num(cookie);
hctx = q->queue_hw_ctx[queue_num];
hctx->poll_considered++;

plug = current->plug;
if (plug)
blk_flush_plug_list(plug, false);

state = current->state;
while (!need_resched()) {
unsigned int queue_num = blk_qc_t_to_queue_num(cookie);
struct blk_mq_hw_ctx *hctx = q->queue_hw_ctx[queue_num];
int ret;

hctx->poll_invoked++;
Expand Down
40 changes: 25 additions & 15 deletions block/blk-mq-sysfs.c
Original file line number Diff line number Diff line change
Expand Up @@ -176,7 +176,17 @@ static ssize_t blk_mq_sysfs_rq_list_show(struct blk_mq_ctx *ctx, char *page)

static ssize_t blk_mq_hw_sysfs_poll_show(struct blk_mq_hw_ctx *hctx, char *page)
{
return sprintf(page, "invoked=%lu, success=%lu\n", hctx->poll_invoked, hctx->poll_success);
return sprintf(page, "considered=%lu, invoked=%lu, success=%lu\n",
hctx->poll_considered, hctx->poll_invoked,
hctx->poll_success);
}

/*
 * Sysfs "io_poll" store handler: any write to the attribute clears the
 * hctx poll statistics (considered/invoked/success) back to zero.
 * The written bytes themselves are ignored; returning @size tells the
 * sysfs core the whole buffer was consumed.
 */
static ssize_t blk_mq_hw_sysfs_poll_store(struct blk_mq_hw_ctx *hctx,
const char *page, size_t size)
{
hctx->poll_considered = hctx->poll_invoked = hctx->poll_success = 0;

return size;
}

static ssize_t blk_mq_hw_sysfs_queued_show(struct blk_mq_hw_ctx *hctx,
Expand All @@ -198,12 +208,14 @@ static ssize_t blk_mq_hw_sysfs_dispatched_show(struct blk_mq_hw_ctx *hctx,

page += sprintf(page, "%8u\t%lu\n", 0U, hctx->dispatched[0]);

for (i = 1; i < BLK_MQ_MAX_DISPATCH_ORDER; i++) {
unsigned long d = 1U << (i - 1);
for (i = 1; i < BLK_MQ_MAX_DISPATCH_ORDER - 1; i++) {
unsigned int d = 1U << (i - 1);

page += sprintf(page, "%8lu\t%lu\n", d, hctx->dispatched[i]);
page += sprintf(page, "%8u\t%lu\n", d, hctx->dispatched[i]);
}

page += sprintf(page, "%8u+\t%lu\n", 1U << (i - 1),
hctx->dispatched[i]);
return page - start_page;
}

Expand Down Expand Up @@ -301,8 +313,9 @@ static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = {
.show = blk_mq_hw_sysfs_cpus_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_poll = {
.attr = {.name = "io_poll", .mode = S_IRUGO },
.attr = {.name = "io_poll", .mode = S_IWUSR | S_IRUGO },
.show = blk_mq_hw_sysfs_poll_show,
.store = blk_mq_hw_sysfs_poll_store,
};

static struct attribute *default_hw_ctx_attrs[] = {
Expand Down Expand Up @@ -380,9 +393,8 @@ static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
return ret;
}

static void __blk_mq_unregister_disk(struct gendisk *disk)
static void __blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
{
struct request_queue *q = disk->queue;
struct blk_mq_hw_ctx *hctx;
struct blk_mq_ctx *ctx;
int i, j;
Expand All @@ -400,15 +412,15 @@ static void __blk_mq_unregister_disk(struct gendisk *disk)
kobject_del(&q->mq_kobj);
kobject_put(&q->mq_kobj);

kobject_put(&disk_to_dev(disk)->kobj);
kobject_put(&dev->kobj);

q->mq_sysfs_init_done = false;
}

void blk_mq_unregister_disk(struct gendisk *disk)
void blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
{
blk_mq_disable_hotplug();
__blk_mq_unregister_disk(disk);
__blk_mq_unregister_dev(dev, q);
blk_mq_enable_hotplug();
}

Expand All @@ -430,10 +442,8 @@ static void blk_mq_sysfs_init(struct request_queue *q)
}
}

int blk_mq_register_disk(struct gendisk *disk)
int blk_mq_register_dev(struct device *dev, struct request_queue *q)
{
struct device *dev = disk_to_dev(disk);
struct request_queue *q = disk->queue;
struct blk_mq_hw_ctx *hctx;
int ret, i;

Expand All @@ -454,15 +464,15 @@ int blk_mq_register_disk(struct gendisk *disk)
}

if (ret)
__blk_mq_unregister_disk(disk);
__blk_mq_unregister_dev(dev, q);
else
q->mq_sysfs_init_done = true;
out:
blk_mq_enable_hotplug();

return ret;
}
EXPORT_SYMBOL_GPL(blk_mq_register_disk);
EXPORT_SYMBOL_GPL(blk_mq_register_dev);

void blk_mq_sysfs_unregister(struct request_queue *q)
{
Expand Down
Loading

0 comments on commit 513a4be

Please sign in to comment.