Merge branch 'for-3.6/drivers' of git://git.kernel.dk/linux-block
Pull block driver changes from Jens Axboe:

 - Making the plugging support for drivers a bit more sane from Neil.
   This supersedes the plugging change from Shaohua as well.  (A short
   usage sketch of the new blk_check_plugged() helper follows this list.)

 - The usual round of drbd updates.

 - Using a tail add instead of a head add in the request completion for
   nbd, making us find the most completed request more quickly.  (The
   idea is sketched after the shortlog below.)

 - A few floppy changes, getting rid of a duplicated flag and also
   running the floppy init async (since it takes forever in boot terms)
   from Andi.
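
  A rough driver-side sketch of the helper the first item refers to,
  blk_check_plugged() (added in the block/blk-core.c hunk below).  The
  names my_dev, my_dev_kick_queue and my_unplug are made up for
  illustration, and locking/error handling are omitted; md is the driver
  this series reworks to use it:

    #include <linux/blkdev.h>
    #include <linux/slab.h>

    /* Unplug callback: runs when the task's plug is flushed.  blk-core only
     * kzalloc()s the blk_plug_cb in blk_check_plugged() and never frees it,
     * so the callback is presumably expected to do the kfree(). */
    static void my_unplug(struct blk_plug_cb *cb, bool from_schedule)
    {
            struct my_dev *dev = cb->data;

            kfree(cb);
            /* from_schedule means we were flushed from the scheduler path
             * and must not block; a real driver would defer heavy work. */
            my_dev_kick_queue(dev, from_schedule);
    }

    static void my_submit(struct my_dev *dev, struct bio *bio)
    {
            /* Registers at most one callback per plug for this
             * (callback, data) pair; returns NULL if the current task
             * holds no plug, in which case we dispatch immediately. */
            if (!blk_check_plugged(my_unplug, dev, sizeof(struct blk_plug_cb)))
                    my_dev_kick_queue(dev, false);

            /* ... queue the bio internally; it gets kicked at unplug time ... */
    }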

* 'for-3.6/drivers' of git://git.kernel.dk/linux-block:
  floppy: remove duplicated flag FD_RAW_NEED_DISK
  blk: pass from_schedule to non-request unplug functions.
  block: stack unplug
  blk: centralize non-request unplug handling.
  md: remove plug_cnt feature of plugging.
  block/nbd: micro-optimization in nbd request completion
  drbd: announce FLUSH/FUA capability to upper layers
  drbd: fix max_bio_size to be unsigned
  drbd: flush drbd work queue before invalidate/invalidate remote
  drbd: fix potential access after free
  drbd: call local-io-error handler early
  drbd: do not reset rs_pending_cnt too early
  drbd: reset congestion information before reporting it in /proc/drbd
  drbd: report congestion if we are waiting for some userland callback
  drbd: differentiate between normal and forced detach
  drbd: cleanup, remove two unused global flags
  floppy: Run floppy initialization asynchronous
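
  The nbd item above boils down to a single list-insertion change.  This is
  only a generic sketch of the idea, not the actual drivers/block/nbd.c hunk
  (which is not included in the excerpt); pending_req, track_request and
  find_request are illustrative names and locking is omitted:

    #include <linux/list.h>
    #include <linux/types.h>

    struct pending_req {
            struct list_head list;
            u64 handle;     /* matched against the handle echoed in the reply */
    };

    /* Submission side: list_add() used to put the newest request at the
     * head, leaving the oldest at the tail.  list_add_tail() keeps the
     * list in submission order, with the oldest request at the head. */
    static void track_request(struct list_head *queue, struct pending_req *req)
    {
            list_add_tail(&req->list, queue);   /* was: list_add(&req->list, queue) */
    }

    /* Completion side: replies tend to arrive roughly in submission order,
     * so a head-first walk now finds the matching request after a step or
     * two instead of traversing most of the list. */
    static struct pending_req *find_request(struct list_head *queue, u64 handle)
    {
            struct pending_req *req;

            list_for_each_entry(req, queue, list)
                    if (req->handle == handle)
                            return req;
            return NULL;
    }
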
torvalds committed Aug 1, 2012
2 parents 8cf1a3f + 10af813 commit eff0d13
Showing 19 changed files with 238 additions and 177 deletions.
44 changes: 34 additions & 10 deletions block/blk-core.c
@@ -2909,23 +2909,47 @@ static void queue_unplugged(struct request_queue *q, unsigned int depth,

}

static void flush_plug_callbacks(struct blk_plug *plug)
static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
{
LIST_HEAD(callbacks);

if (list_empty(&plug->cb_list))
return;

list_splice_init(&plug->cb_list, &callbacks);
while (!list_empty(&plug->cb_list)) {
list_splice_init(&plug->cb_list, &callbacks);

while (!list_empty(&callbacks)) {
struct blk_plug_cb *cb = list_first_entry(&callbacks,
while (!list_empty(&callbacks)) {
struct blk_plug_cb *cb = list_first_entry(&callbacks,
struct blk_plug_cb,
list);
list_del(&cb->list);
cb->callback(cb);
list_del(&cb->list);
cb->callback(cb, from_schedule);
}
}
}

struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug, void *data,
int size)
{
struct blk_plug *plug = current->plug;
struct blk_plug_cb *cb;

if (!plug)
return NULL;

list_for_each_entry(cb, &plug->cb_list, list)
if (cb->callback == unplug && cb->data == data)
return cb;

/* Not currently on the callback list */
BUG_ON(size < sizeof(*cb));
cb = kzalloc(size, GFP_ATOMIC);
if (cb) {
cb->data = data;
cb->callback = unplug;
list_add(&cb->list, &plug->cb_list);
}
return cb;
}
EXPORT_SYMBOL(blk_check_plugged);

void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
@@ -2937,7 +2961,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)

BUG_ON(plug->magic != PLUG_MAGIC);

flush_plug_callbacks(plug);
flush_plug_callbacks(plug, from_schedule);
if (list_empty(&plug->list))
return;

8 changes: 6 additions & 2 deletions drivers/block/drbd/drbd_actlog.c
@@ -411,7 +411,7 @@ w_al_write_transaction(struct drbd_conf *mdev, struct drbd_work *w, int unused)
+ mdev->ldev->md.al_offset + mdev->al_tr_pos;

if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE))
drbd_chk_io_error(mdev, 1, true);
drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);

if (++mdev->al_tr_pos >
div_ceil(mdev->act_log->nr_elements, AL_EXTENTS_PT))
@@ -876,7 +876,11 @@ int __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, int size,
unsigned int enr, count = 0;
struct lc_element *e;

if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) {
/* this should be an empty REQ_FLUSH */
if (size == 0)
return 0;

if (size < 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) {
dev_err(DEV, "sector: %llus, size: %d\n",
(unsigned long long)sector, size);
return 0;
4 changes: 2 additions & 2 deletions drivers/block/drbd/drbd_bitmap.c
@@ -1096,7 +1096,7 @@ static int bm_rw(struct drbd_conf *mdev, int rw, unsigned flags, unsigned lazy_w

if (ctx->error) {
dev_alert(DEV, "we had at least one MD IO ERROR during bitmap IO\n");
drbd_chk_io_error(mdev, 1, true);
drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
err = -EIO; /* ctx->error ? */
}

@@ -1212,7 +1212,7 @@ int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(loc
wait_until_done_or_disk_failure(mdev, mdev->ldev, &ctx->done);

if (ctx->error)
drbd_chk_io_error(mdev, 1, true);
drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
/* that should force detach, so the in memory bitmap will be
* gone in a moment as well. */

44 changes: 31 additions & 13 deletions drivers/block/drbd/drbd_int.h
@@ -813,7 +813,6 @@ enum {
SIGNAL_ASENDER, /* whether asender wants to be interrupted */
SEND_PING, /* whether asender should send a ping asap */

UNPLUG_QUEUED, /* only relevant with kernel 2.4 */
UNPLUG_REMOTE, /* sending a "UnplugRemote" could help */
MD_DIRTY, /* current uuids and flags not yet on disk */
DISCARD_CONCURRENT, /* Set on one node, cleared on the peer! */
@@ -824,7 +823,6 @@ enum {
CRASHED_PRIMARY, /* This node was a crashed primary.
* Gets cleared when the state.conn
* goes into C_CONNECTED state. */
NO_BARRIER_SUPP, /* underlying block device doesn't implement barriers */
CONSIDER_RESYNC,

MD_NO_FUA, /* Users wants us to not use FUA/FLUSH on meta data dev */
@@ -834,6 +832,7 @@ enum {
BITMAP_IO_QUEUED, /* Started bitmap IO */
GO_DISKLESS, /* Disk is being detached, on io-error or admin request. */
WAS_IO_ERROR, /* Local disk failed returned IO error */
FORCE_DETACH, /* Force-detach from local disk, aborting any pending local IO */
RESYNC_AFTER_NEG, /* Resync after online grow after the attach&negotiate finished. */
NET_CONGESTED, /* The data socket is congested */

@@ -851,6 +850,13 @@ enum {
AL_SUSPENDED, /* Activity logging is currently suspended. */
AHEAD_TO_SYNC_SOURCE, /* Ahead -> SyncSource queued */
STATE_SENT, /* Do not change state/UUIDs while this is set */

CALLBACK_PENDING, /* Whether we have a call_usermodehelper(, UMH_WAIT_PROC)
* pending, from drbd worker context.
* If set, bdi_write_congested() returns true,
* so shrink_page_list() would not recurse into,
* and potentially deadlock on, this drbd worker.
*/
};

struct drbd_bitmap; /* opaque for drbd_conf */
@@ -1130,8 +1136,8 @@ struct drbd_conf {
int rs_in_flight; /* resync sectors in flight (to proxy, in proxy and from proxy) */
int rs_planed; /* resync sectors already planned */
atomic_t ap_in_flight; /* App sectors in flight (waiting for ack) */
int peer_max_bio_size;
int local_max_bio_size;
unsigned int peer_max_bio_size;
unsigned int local_max_bio_size;
};

static inline struct drbd_conf *minor_to_mdev(unsigned int minor)
@@ -1435,9 +1441,9 @@ struct bm_extent {
* hash table. */
#define HT_SHIFT 8
#define DRBD_MAX_BIO_SIZE (1U<<(9+HT_SHIFT))
#define DRBD_MAX_BIO_SIZE_SAFE (1 << 12) /* Works always = 4k */
#define DRBD_MAX_BIO_SIZE_SAFE (1U << 12) /* Works always = 4k */

#define DRBD_MAX_SIZE_H80_PACKET (1 << 15) /* The old header only allows packets up to 32Kib data */
#define DRBD_MAX_SIZE_H80_PACKET (1U << 15) /* The old header only allows packets up to 32Kib data */

/* Number of elements in the app_reads_hash */
#define APP_R_HSIZE 15
@@ -1840,12 +1846,20 @@ static inline int drbd_request_state(struct drbd_conf *mdev,
return _drbd_request_state(mdev, mask, val, CS_VERBOSE + CS_ORDERED);
}

enum drbd_force_detach_flags {
DRBD_IO_ERROR,
DRBD_META_IO_ERROR,
DRBD_FORCE_DETACH,
};

#define __drbd_chk_io_error(m,f) __drbd_chk_io_error_(m,f, __func__)
static inline void __drbd_chk_io_error_(struct drbd_conf *mdev, int forcedetach, const char *where)
static inline void __drbd_chk_io_error_(struct drbd_conf *mdev,
enum drbd_force_detach_flags forcedetach,
const char *where)
{
switch (mdev->ldev->dc.on_io_error) {
case EP_PASS_ON:
if (!forcedetach) {
if (forcedetach == DRBD_IO_ERROR) {
if (__ratelimit(&drbd_ratelimit_state))
dev_err(DEV, "Local IO failed in %s.\n", where);
if (mdev->state.disk > D_INCONSISTENT)
@@ -1856,6 +1870,8 @@ static inline void __drbd_chk_io_error_(struct drbd_conf *mdev, int forcedetach,
case EP_DETACH:
case EP_CALL_HELPER:
set_bit(WAS_IO_ERROR, &mdev->flags);
if (forcedetach == DRBD_FORCE_DETACH)
set_bit(FORCE_DETACH, &mdev->flags);
if (mdev->state.disk > D_FAILED) {
_drbd_set_state(_NS(mdev, disk, D_FAILED), CS_HARD, NULL);
dev_err(DEV,
@@ -1875,7 +1891,7 @@ static inline void __drbd_chk_io_error_(struct drbd_conf *mdev, int forcedetach,
*/
#define drbd_chk_io_error(m,e,f) drbd_chk_io_error_(m,e,f, __func__)
static inline void drbd_chk_io_error_(struct drbd_conf *mdev,
int error, int forcedetach, const char *where)
int error, enum drbd_force_detach_flags forcedetach, const char *where)
{
if (error) {
unsigned long flags;
@@ -2405,15 +2421,17 @@ static inline void dec_ap_bio(struct drbd_conf *mdev)
int ap_bio = atomic_dec_return(&mdev->ap_bio_cnt);

D_ASSERT(ap_bio >= 0);

if (ap_bio == 0 && test_bit(BITMAP_IO, &mdev->flags)) {
if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w);
}

/* this currently does wake_up for every dec_ap_bio!
* maybe rather introduce some type of hysteresis?
* e.g. (ap_bio == mxb/2 || ap_bio == 0) ? */
if (ap_bio < mxb)
wake_up(&mdev->misc_wait);
if (ap_bio == 0 && test_bit(BITMAP_IO, &mdev->flags)) {
if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w);
}
}

static inline int drbd_set_ed_uuid(struct drbd_conf *mdev, u64 val)
65 changes: 49 additions & 16 deletions drivers/block/drbd/drbd_main.c
@@ -1514,6 +1514,13 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,

/* Do not change the order of the if above and the two below... */
if (os.pdsk == D_DISKLESS && ns.pdsk > D_DISKLESS) { /* attach on the peer */
/* we probably will start a resync soon.
* make sure those things are properly reset. */
mdev->rs_total = 0;
mdev->rs_failed = 0;
atomic_set(&mdev->rs_pending_cnt, 0);
drbd_rs_cancel_all(mdev);

drbd_send_uuids(mdev);
drbd_send_state(mdev, ns);
}
@@ -1630,9 +1637,24 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
eh = mdev->ldev->dc.on_io_error;
was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags);

/* Immediately allow completion of all application IO, that waits
for completion from the local disk. */
tl_abort_disk_io(mdev);
if (was_io_error && eh == EP_CALL_HELPER)
drbd_khelper(mdev, "local-io-error");

/* Immediately allow completion of all application IO,
* that waits for completion from the local disk,
* if this was a force-detach due to disk_timeout
* or administrator request (drbdsetup detach --force).
* Do NOT abort otherwise.
* Aborting local requests may cause serious problems,
* if requests are completed to upper layers already,
* and then later the already submitted local bio completes.
* This can cause DMA into former bio pages that meanwhile
* have been re-used for other things.
* So aborting local requests may cause crashes,
* or even worse, silent data corruption.
*/
if (test_and_clear_bit(FORCE_DETACH, &mdev->flags))
tl_abort_disk_io(mdev);

/* current state still has to be D_FAILED,
* there is only one way out: to D_DISKLESS,
@@ -1653,9 +1675,6 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
drbd_md_sync(mdev);
}
put_ldev(mdev);

if (was_io_error && eh == EP_CALL_HELPER)
drbd_khelper(mdev, "local-io-error");
}

/* second half of local IO error, failure to attach,
@@ -1669,10 +1688,6 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
"ASSERT FAILED: disk is %s while going diskless\n",
drbd_disk_str(mdev->state.disk));

mdev->rs_total = 0;
mdev->rs_failed = 0;
atomic_set(&mdev->rs_pending_cnt, 0);

if (ns.conn >= C_CONNECTED)
drbd_send_state(mdev, ns);

@@ -2194,7 +2209,8 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags fl
{
struct p_sizes p;
sector_t d_size, u_size;
int q_order_type, max_bio_size;
int q_order_type;
unsigned int max_bio_size;
int ok;

if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
@@ -2203,7 +2219,7 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags fl
u_size = mdev->ldev->dc.disk_size;
q_order_type = drbd_queue_order_type(mdev);
max_bio_size = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
max_bio_size = min_t(int, max_bio_size, DRBD_MAX_BIO_SIZE);
max_bio_size = min(max_bio_size, DRBD_MAX_BIO_SIZE);
put_ldev(mdev);
} else {
d_size = 0;
@@ -2214,7 +2230,7 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags fl

/* Never allow old drbd (up to 8.3.7) to see more than 32KiB */
if (mdev->agreed_pro_version <= 94)
max_bio_size = min_t(int, max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
max_bio_size = min(max_bio_size, DRBD_MAX_SIZE_H80_PACKET);

p.d_size = cpu_to_be64(d_size);
p.u_size = cpu_to_be64(u_size);
@@ -3541,6 +3557,22 @@ static int drbd_congested(void *congested_data, int bdi_bits)
goto out;
}

if (test_bit(CALLBACK_PENDING, &mdev->flags)) {
r |= (1 << BDI_async_congested);
/* Without good local data, we would need to read from remote,
* and that would need the worker thread as well, which is
* currently blocked waiting for that usermode helper to
* finish.
*/
if (!get_ldev_if_state(mdev, D_UP_TO_DATE))
r |= (1 << BDI_sync_congested);
else
put_ldev(mdev);
r &= bdi_bits;
reason = 'c';
goto out;
}

if (get_ldev(mdev)) {
q = bdev_get_queue(mdev->ldev->backing_bdev);
r = bdi_congested(&q->backing_dev_info, bdi_bits);
@@ -3604,6 +3636,7 @@ struct drbd_conf *drbd_new_device(unsigned int minor)
q->backing_dev_info.congested_data = mdev;

blk_queue_make_request(q, drbd_make_request);
blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
/* Setting the max_hw_sectors to an odd value of 8kibyte here
This triggers a max_bio_size message upon first attach or connect */
blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
@@ -3870,7 +3903,7 @@ void drbd_md_sync(struct drbd_conf *mdev)
if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
/* this was a try anyways ... */
dev_err(DEV, "meta data update failed!\n");
drbd_chk_io_error(mdev, 1, true);
drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
}

/* Update mdev->ldev->md.la_size_sect,
@@ -3950,9 +3983,9 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)

spin_lock_irq(&mdev->req_lock);
if (mdev->state.conn < C_CONNECTED) {
int peer;
unsigned int peer;
peer = be32_to_cpu(buffer->la_peer_max_bio_size);
peer = max_t(int, peer, DRBD_MAX_BIO_SIZE_SAFE);
peer = max(peer, DRBD_MAX_BIO_SIZE_SAFE);
mdev->peer_max_bio_size = peer;
}
spin_unlock_irq(&mdev->req_lock);
[diff for the remaining changed files not shown]