Merge tag 'for-linus-20180616' of git://git.kernel.dk/linux-block
Pull block fixes from Jens Axboe:
 "A collection of fixes that should go into -rc1. This contains:

   - bsg_open vs bsg_unregister race fix (Anatoliy)

   - NVMe pull request from Christoph, with fixes for regressions in
     this window, FC connect/reconnect path code unification, and a
     trace point addition.

   - timeout fix (Christoph)

   - remove a few unused functions (Christoph)

   - blk-mq tag_set reinit fix (Roman)"

* tag 'for-linus-20180616' of git://git.kernel.dk/linux-block:
  bsg: fix race of bsg_open and bsg_unregister
  block: remov blk_queue_invalidate_tags
  nvme-fabrics: fix and refine state checks in __nvmf_check_ready
  nvme-fabrics: handle the admin-only case properly in nvmf_check_ready
  nvme-fabrics: refactor queue ready check
  blk-mq: remove blk_mq_tagset_iter
  nvme: remove nvme_reinit_tagset
  nvme-fc: fix nulling of queue data on reconnect
  nvme-fc: remove reinit_request routine
  blk-mq: don't time out requests again that are in the timeout handler
  nvme-fc: change controllers first connect to use reconnect path
  nvme: don't rely on the changed namespace list log
  nvmet: free smart-log buffer after use
  nvme-rdma: fix error flow during mapping request data
  nvme: add bio remapping tracepoint
  nvme: fix NULL pointer dereference in nvme_init_subsystem
  blk-mq: reinit q->tag_set_list entry only after grace period
torvalds committed Jun 16, 2018
2 parents 5e7b921 + d6c7396 commit 265c559
Showing 16 changed files with 174 additions and 275 deletions.
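
Of the fixes listed above, the blk-mq timeout one is the least self-describing: a request whose driver ->timeout handler is still running must not be picked up again by a subsequent timeout sweep. A condensed sketch of the guard that the block/blk-mq.c hunks below implement (illustrative only, with hypothetical function names, and with the BLK_EH_* early-return path omitted):

	/* Flag the request for the duration of its timeout handling so a
	 * concurrent expiry scan skips it instead of timing it out twice.
	 */
	static void handle_timed_out_request(struct request *rq, bool reserved)
	{
		rq->rq_flags |= RQF_TIMED_OUT;
		rq->q->mq_ops->timeout(rq, reserved);	/* driver's handler */
		rq->rq_flags &= ~RQF_TIMED_OUT;
		blk_add_timer(rq);			/* re-arm the timer */
	}

	static bool request_has_expired(struct request *rq)
	{
		if (rq->rq_flags & RQF_TIMED_OUT)	/* handler in progress */
			return false;
		return time_after_eq(jiffies, blk_rq_deadline(rq));
	}
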
15 changes: 1 addition & 14 deletions Documentation/block/biodoc.txt
@@ -752,18 +752,6 @@ completion of the request to the block layer. This means ending tag
operations before calling end_that_request_last()! For an example of a user
of these helpers, see the IDE tagged command queueing support.

Certain hardware conditions may dictate a need to invalidate the block tag
queue. For instance, on IDE any tagged request error needs to clear both
the hardware and software block queue and enable the driver to sanely restart
all the outstanding requests. There's a third helper to do that:

blk_queue_invalidate_tags(struct request_queue *q)

Clear the internal block tag queue and re-add all the pending requests
to the request queue. The driver will receive them again on the
next request_fn run, just like it did the first time it encountered
them.

3.2.5.2 Tag info

Some block functions exist to query current tag status or to go from a
@@ -805,8 +793,7 @@ Internally, block manages tags in the blk_queue_tag structure:
Most of the above is simple and straight forward, however busy_list may need
a bit of explaining. Normally we don't care too much about request ordering,
but in the event of any barrier requests in the tag queue we need to ensure
that requests are restarted in the order they were queue. This may happen
if the driver needs to use blk_queue_invalidate_tags().
that requests are restarted in the order they were queue.

3.3 I/O Submission

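For context on what the biodoc.txt hunk above removes: a minimal sketch of how a legacy tagged-queueing driver's error-recovery path might have called the now-deleted helper (a hypothetical driver function, not code from this tree; the helper had to be called with queue_lock held):

	static void example_drv_recover(struct request_queue *q)
	{
		unsigned long flags;

		spin_lock_irqsave(q->queue_lock, flags);
		/* Re-queue every outstanding tagged request; the driver sees
		 * them again on the next request_fn run.
		 */
		blk_queue_invalidate_tags(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}

With no remaining callers in the tree, the helper and its documentation go away together.
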
29 changes: 0 additions & 29 deletions block/blk-mq-tag.c
@@ -311,35 +311,6 @@ void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
}
EXPORT_SYMBOL(blk_mq_tagset_busy_iter);

int blk_mq_tagset_iter(struct blk_mq_tag_set *set, void *data,
int (fn)(void *, struct request *))
{
int i, j, ret = 0;

if (WARN_ON_ONCE(!fn))
goto out;

for (i = 0; i < set->nr_hw_queues; i++) {
struct blk_mq_tags *tags = set->tags[i];

if (!tags)
continue;

for (j = 0; j < tags->nr_tags; j++) {
if (!tags->static_rqs[j])
continue;

ret = fn(data, tags->static_rqs[j]);
if (ret)
goto out;
}
}

out:
return ret;
}
EXPORT_SYMBOL_GPL(blk_mq_tagset_iter);

void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
void *priv)
{
8 changes: 6 additions & 2 deletions block/blk-mq.c
@@ -671,6 +671,7 @@ static void __blk_mq_requeue_request(struct request *rq)

if (blk_mq_request_started(rq)) {
WRITE_ONCE(rq->state, MQ_RQ_IDLE);
rq->rq_flags &= ~RQF_TIMED_OUT;
if (q->dma_drain_size && blk_rq_bytes(rq))
rq->nr_phys_segments--;
}
@@ -770,6 +771,7 @@ EXPORT_SYMBOL(blk_mq_tag_to_rq);

static void blk_mq_rq_timed_out(struct request *req, bool reserved)
{
req->rq_flags |= RQF_TIMED_OUT;
if (req->q->mq_ops->timeout) {
enum blk_eh_timer_return ret;

@@ -779,6 +781,7 @@ static void blk_mq_rq_timed_out(struct request *req, bool reserved)
WARN_ON_ONCE(ret != BLK_EH_RESET_TIMER);
}

req->rq_flags &= ~RQF_TIMED_OUT;
blk_add_timer(req);
}

@@ -788,6 +791,8 @@ static bool blk_mq_req_expired(struct request *rq, unsigned long *next)

if (blk_mq_rq_state(rq) != MQ_RQ_IN_FLIGHT)
return false;
if (rq->rq_flags & RQF_TIMED_OUT)
return false;

deadline = blk_rq_deadline(rq);
if (time_after_eq(jiffies, deadline))
@@ -2349,16 +2354,15 @@ static void blk_mq_del_queue_tag_set(struct request_queue *q)

mutex_lock(&set->tag_list_lock);
list_del_rcu(&q->tag_set_list);
INIT_LIST_HEAD(&q->tag_set_list);
if (list_is_singular(&set->tag_list)) {
/* just transitioned to unshared */
set->flags &= ~BLK_MQ_F_TAG_SHARED;
/* update existing queue */
blk_mq_update_tag_set_depth(set, false);
}
mutex_unlock(&set->tag_list_lock);

synchronize_rcu();
INIT_LIST_HEAD(&q->tag_set_list);
}

static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
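The last hunk above is the "reinit q->tag_set_list entry only after grace period" fix from the shortlog. An entry removed with list_del_rcu() may still be traversed by RCU readers, so its list pointers must not be reinitialized until a grace period has elapsed. The fixed ordering, reduced to its essentials (the tag-sharing bookkeeping is omitted):

	mutex_lock(&set->tag_list_lock);
	list_del_rcu(&q->tag_set_list);		/* unlink; concurrent readers may still walk it */
	mutex_unlock(&set->tag_list_lock);

	synchronize_rcu();			/* wait for all pre-existing RCU read-side sections */
	INIT_LIST_HEAD(&q->tag_set_list);	/* only now is it safe to reinitialize the entry */
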
22 changes: 0 additions & 22 deletions block/blk-tag.c
@@ -188,7 +188,6 @@ int blk_queue_init_tags(struct request_queue *q, int depth,
*/
q->queue_tags = tags;
queue_flag_set_unlocked(QUEUE_FLAG_QUEUED, q);
INIT_LIST_HEAD(&q->tag_busy_list);
return 0;
}
EXPORT_SYMBOL(blk_queue_init_tags);
@@ -374,27 +373,6 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
rq->tag = tag;
bqt->tag_index[tag] = rq;
blk_start_request(rq);
list_add(&rq->queuelist, &q->tag_busy_list);
return 0;
}
EXPORT_SYMBOL(blk_queue_start_tag);

/**
* blk_queue_invalidate_tags - invalidate all pending tags
* @q: the request queue for the device
*
* Description:
* Hardware conditions may dictate a need to stop all pending requests.
* In this case, we will safely clear the block side of the tag queue and
* readd all requests to the request queue in the right order.
**/
void blk_queue_invalidate_tags(struct request_queue *q)
{
struct list_head *tmp, *n;

lockdep_assert_held(q->queue_lock);

list_for_each_safe(tmp, n, &q->tag_busy_list)
blk_requeue_request(q, list_entry_rq(tmp));
}
EXPORT_SYMBOL(blk_queue_invalidate_tags);
22 changes: 11 additions & 11 deletions block/bsg.c
@@ -693,6 +693,8 @@ static struct bsg_device *bsg_add_device(struct inode *inode,
struct bsg_device *bd;
unsigned char buf[32];

lockdep_assert_held(&bsg_mutex);

if (!blk_get_queue(rq))
return ERR_PTR(-ENXIO);

@@ -707,22 +709,20 @@ static struct bsg_device *bsg_add_device(struct inode *inode,
bsg_set_block(bd, file);

atomic_set(&bd->ref_count, 1);
mutex_lock(&bsg_mutex);
hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode)));

strncpy(bd->name, dev_name(rq->bsg_dev.class_dev), sizeof(bd->name) - 1);
bsg_dbg(bd, "bound to <%s>, max queue %d\n",
format_dev_t(buf, inode->i_rdev), bd->max_queue);

mutex_unlock(&bsg_mutex);
return bd;
}

static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q)
{
struct bsg_device *bd;

mutex_lock(&bsg_mutex);
lockdep_assert_held(&bsg_mutex);

hlist_for_each_entry(bd, bsg_dev_idx_hash(minor), dev_list) {
if (bd->queue == q) {
@@ -732,7 +732,6 @@ static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q)
}
bd = NULL;
found:
mutex_unlock(&bsg_mutex);
return bd;
}

@@ -746,17 +745,18 @@ static struct bsg_device *bsg_get_device(struct inode *inode, struct file *file)
*/
mutex_lock(&bsg_mutex);
bcd = idr_find(&bsg_minor_idr, iminor(inode));
mutex_unlock(&bsg_mutex);

if (!bcd)
return ERR_PTR(-ENODEV);
if (!bcd) {
bd = ERR_PTR(-ENODEV);
goto out_unlock;
}

bd = __bsg_get_device(iminor(inode), bcd->queue);
if (bd)
return bd;

bd = bsg_add_device(inode, bcd->queue, file);
if (!bd)
bd = bsg_add_device(inode, bcd->queue, file);

out_unlock:
mutex_unlock(&bsg_mutex);
return bd;
}

48 changes: 12 additions & 36 deletions drivers/nvme/host/core.c
@@ -2208,7 +2208,7 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
* Verify that the subsystem actually supports multiple
* controllers, else bail out.
*/
if (!ctrl->opts->discovery_nqn &&
if (!(ctrl->opts && ctrl->opts->discovery_nqn) &&
nvme_active_ctrls(found) && !(id->cmic & (1 << 1))) {
dev_err(ctrl->device,
"ignoring ctrl due to duplicate subnqn (%s).\n",
@@ -3197,40 +3197,28 @@ static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl, unsigned nn)
nvme_remove_invalid_namespaces(ctrl, nn);
}

static bool nvme_scan_changed_ns_log(struct nvme_ctrl *ctrl)
static void nvme_clear_changed_ns_log(struct nvme_ctrl *ctrl)
{
size_t log_size = NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32);
__le32 *log;
int error, i;
bool ret = false;
int error;

log = kzalloc(log_size, GFP_KERNEL);
if (!log)
return false;
return;

/*
* We need to read the log to clear the AEN, but we don't want to rely
* on it for the changed namespace information as userspace could have
* raced with us in reading the log page, which could cause us to miss
* updates.
*/
error = nvme_get_log(ctrl, NVME_LOG_CHANGED_NS, log, log_size);
if (error) {
if (error)
dev_warn(ctrl->device,
"reading changed ns log failed: %d\n", error);
goto out_free_log;
}

if (log[0] == cpu_to_le32(0xffffffff))
goto out_free_log;

for (i = 0; i < NVME_MAX_CHANGED_NAMESPACES; i++) {
u32 nsid = le32_to_cpu(log[i]);

if (nsid == 0)
break;
dev_info(ctrl->device, "rescanning namespace %d.\n", nsid);
nvme_validate_ns(ctrl, nsid);
}
ret = true;

out_free_log:
kfree(log);
return ret;
}

static void nvme_scan_work(struct work_struct *work)
@@ -3246,9 +3234,8 @@ static void nvme_scan_work(struct work_struct *work)
WARN_ON_ONCE(!ctrl->tagset);

if (test_and_clear_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events)) {
if (nvme_scan_changed_ns_log(ctrl))
goto out_sort_namespaces;
dev_info(ctrl->device, "rescanning namespaces.\n");
nvme_clear_changed_ns_log(ctrl);
}

if (nvme_identify_ctrl(ctrl, &id))
Expand All @@ -3263,7 +3250,6 @@ static void nvme_scan_work(struct work_struct *work)
nvme_scan_ns_sequential(ctrl, nn);
out_free_id:
kfree(id);
out_sort_namespaces:
down_write(&ctrl->namespaces_rwsem);
list_sort(NULL, &ctrl->namespaces, ns_cmp);
up_write(&ctrl->namespaces_rwsem);
@@ -3641,16 +3627,6 @@ void nvme_start_queues(struct nvme_ctrl *ctrl)
}
EXPORT_SYMBOL_GPL(nvme_start_queues);

int nvme_reinit_tagset(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set)
{
if (!ctrl->ops->reinit_request)
return 0;

return blk_mq_tagset_iter(set, set->driver_data,
ctrl->ops->reinit_request);
}
EXPORT_SYMBOL_GPL(nvme_reinit_tagset);

int __init nvme_core_init(void)
{
int result = -ENOMEM;
