block: Use blk_queue_flag_*() in drivers instead of queue_flag_*()
This patch has been generated as follows:

for verb in set_unlocked clear_unlocked set clear; do
  replace-in-files queue_flag_${verb} blk_queue_flag_${verb%_unlocked} \
    $(git grep -lw queue_flag_${verb} drivers block/bsg*)
done

Except for protecting all queue flag changes with the queue lock,
this patch does not change any functionality.
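
For reference, the only behavioral difference between the old and new helpers
is the locking: blk_queue_flag_set() and blk_queue_flag_clear() take the queue
lock internally, so callers no longer need the *_unlocked variants. A minimal
sketch of the set helper, assuming it simply wraps the legacy queue_flag_set()
with the queue lock (the real definition was added earlier in this series in
block/blk-core.c):

#include <linux/blkdev.h>

/*
 * Sketch only: set a queue flag while holding the queue lock.
 * blk_queue_flag_clear() is the analogous wrapper around queue_flag_clear().
 */
void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	queue_flag_set(flag, q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

The _unlocked suffix disappears from the call sites because the locking
decision now lives in the helper rather than in each driver.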

Cc: Mike Snitzer <[email protected]>
Cc: Shaohua Li <[email protected]>
Cc: Christoph Hellwig <[email protected]>
Cc: Hannes Reinecke <[email protected]>
Cc: Ming Lei <[email protected]>
Signed-off-by: Bart Van Assche <[email protected]>
Reviewed-by: Martin K. Petersen <[email protected]>
Reviewed-by: Johannes Thumshirn <[email protected]>
Acked-by: Martin K. Petersen <[email protected]>
Signed-off-by: Jens Axboe <[email protected]>
KAGA-KOKO authored and axboe committed Mar 8, 2018
1 parent bf3a2b3 commit 8b904b5
Showing 38 changed files with 89 additions and 89 deletions.
4 changes: 2 additions & 2 deletions block/bsg-lib.c
@@ -275,8 +275,8 @@ struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
 
 	q->queuedata = dev;
 	q->bsg_job_fn = job_fn;
-	queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
-	queue_flag_set_unlocked(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
+	blk_queue_flag_set(QUEUE_FLAG_BIDI, q);
+	blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
 	blk_queue_softirq_done(q, bsg_softirq_done);
 	blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);
 
4 changes: 2 additions & 2 deletions drivers/block/drbd/drbd_nl.c
@@ -1212,10 +1212,10 @@ static void decide_on_discard_support(struct drbd_device *device,
 		 * topology on all peers. */
 		blk_queue_discard_granularity(q, 512);
 		q->limits.max_discard_sectors = drbd_max_discard_sectors(connection);
-		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+		blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
 		q->limits.max_write_zeroes_sectors = drbd_max_discard_sectors(connection);
 	} else {
-		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
+		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
 		blk_queue_discard_granularity(q, 0);
 		q->limits.max_discard_sectors = 0;
 		q->limits.max_write_zeroes_sectors = 0;
10 changes: 5 additions & 5 deletions drivers/block/loop.c
@@ -214,10 +214,10 @@ static void __loop_update_dio(struct loop_device *lo, bool dio)
 	blk_mq_freeze_queue(lo->lo_queue);
 	lo->use_dio = use_dio;
 	if (use_dio) {
-		queue_flag_clear_unlocked(QUEUE_FLAG_NOMERGES, lo->lo_queue);
+		blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, lo->lo_queue);
 		lo->lo_flags |= LO_FLAGS_DIRECT_IO;
 	} else {
-		queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, lo->lo_queue);
+		blk_queue_flag_set(QUEUE_FLAG_NOMERGES, lo->lo_queue);
 		lo->lo_flags &= ~LO_FLAGS_DIRECT_IO;
 	}
 	blk_mq_unfreeze_queue(lo->lo_queue);
@@ -817,7 +817,7 @@ static void loop_config_discard(struct loop_device *lo)
 		q->limits.discard_alignment = 0;
 		blk_queue_max_discard_sectors(q, 0);
 		blk_queue_max_write_zeroes_sectors(q, 0);
-		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
+		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
 		return;
 	}
 
@@ -826,7 +826,7 @@ static void loop_config_discard(struct loop_device *lo)
 
 	blk_queue_max_discard_sectors(q, UINT_MAX >> 9);
 	blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> 9);
-	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+	blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
 }
 
 static void loop_unprepare_queue(struct loop_device *lo)
@@ -1808,7 +1808,7 @@ static int loop_add(struct loop_device **l, int i)
 	 * page. For directio mode, merge does help to dispatch bigger request
 	 * to underlayer disk. We will enable merge once directio is enabled.
 	 */
-	queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, lo->lo_queue);
+	blk_queue_flag_set(QUEUE_FLAG_NOMERGES, lo->lo_queue);
 
 	err = -ENOMEM;
 	disk = lo->lo_disk = alloc_disk(1 << part_shift);
8 changes: 4 additions & 4 deletions drivers/block/nbd.c
@@ -964,7 +964,7 @@ static void nbd_parse_flags(struct nbd_device *nbd)
 	else
 		set_disk_ro(nbd->disk, false);
 	if (config->flags & NBD_FLAG_SEND_TRIM)
-		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
+		blk_queue_flag_set(QUEUE_FLAG_DISCARD, nbd->disk->queue);
 	if (config->flags & NBD_FLAG_SEND_FLUSH) {
 		if (config->flags & NBD_FLAG_SEND_FUA)
 			blk_queue_write_cache(nbd->disk->queue, true, true);
@@ -1040,7 +1040,7 @@ static void nbd_config_put(struct nbd_device *nbd)
 		nbd->config = NULL;
 
 		nbd->tag_set.timeout = 0;
-		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue);
+		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, nbd->disk->queue);
 
 		mutex_unlock(&nbd->config_lock);
 		nbd_put(nbd);
@@ -1488,8 +1488,8 @@ static int nbd_dev_add(int index)
 	/*
 	 * Tell the block layer that we are not a rotational device
 	 */
-	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue);
-	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, disk->queue);
+	blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
+	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, disk->queue);
 	disk->queue->limits.discard_granularity = 512;
 	blk_queue_max_discard_sectors(disk->queue, UINT_MAX);
 	blk_queue_max_segment_size(disk->queue, UINT_MAX);
6 changes: 3 additions & 3 deletions drivers/block/null_blk.c
@@ -1525,7 +1525,7 @@ static void null_config_discard(struct nullb *nullb)
 	nullb->q->limits.discard_granularity = nullb->dev->blocksize;
 	nullb->q->limits.discard_alignment = nullb->dev->blocksize;
 	blk_queue_max_discard_sectors(nullb->q, UINT_MAX >> 9);
-	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, nullb->q);
+	blk_queue_flag_set(QUEUE_FLAG_DISCARD, nullb->q);
 }
 
 static int null_open(struct block_device *bdev, fmode_t mode)
@@ -1810,8 +1810,8 @@ static int null_add_dev(struct nullb_device *dev)
 	}
 
 	nullb->q->queuedata = nullb;
-	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
-	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q);
+	blk_queue_flag_set(QUEUE_FLAG_NONROT, nullb->q);
+	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, nullb->q);
 
 	mutex_lock(&lock);
 	nullb->index = ida_simple_get(&nullb_indexes, 0, 0, GFP_KERNEL);
4 changes: 2 additions & 2 deletions drivers/block/rbd.c
@@ -4370,7 +4370,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
 		goto out_tag_set;
 	}
 
-	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
+	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
 	/* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */
 
 	/* set io sizes to object size */
@@ -4383,7 +4383,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
 	blk_queue_io_opt(q, segment_size);
 
 	/* enable the discard support */
-	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+	blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
 	q->limits.discard_granularity = segment_size;
 	blk_queue_max_discard_sectors(q, segment_size / SECTOR_SIZE);
 	blk_queue_max_write_zeroes_sectors(q, segment_size / SECTOR_SIZE);
6 changes: 3 additions & 3 deletions drivers/block/rsxx/dev.c
@@ -287,10 +287,10 @@ int rsxx_setup_dev(struct rsxx_cardinfo *card)
 	blk_queue_max_hw_sectors(card->queue, blkdev_max_hw_sectors);
 	blk_queue_physical_block_size(card->queue, RSXX_HW_BLK_SIZE);
 
-	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, card->queue);
-	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, card->queue);
+	blk_queue_flag_set(QUEUE_FLAG_NONROT, card->queue);
+	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, card->queue);
 	if (rsxx_discard_supported(card)) {
-		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, card->queue);
+		blk_queue_flag_set(QUEUE_FLAG_DISCARD, card->queue);
 		blk_queue_max_discard_sectors(card->queue,
 						RSXX_HW_BLK_SIZE >> 9);
 		card->queue->limits.discard_granularity = RSXX_HW_BLK_SIZE;
4 changes: 2 additions & 2 deletions drivers/block/skd_main.c
@@ -2858,8 +2858,8 @@ static int skd_cons_disk(struct skd_device *skdev)
 	/* set optimal I/O size to 8KB */
 	blk_queue_io_opt(q, 8192);
 
-	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
-	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
+	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
+	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
 
 	blk_queue_rq_timeout(q, 8 * HZ);
 
10 changes: 5 additions & 5 deletions drivers/block/xen-blkfront.c
@@ -931,15 +931,15 @@ static void blkif_set_queue_limits(struct blkfront_info *info)
 	unsigned int segments = info->max_indirect_segments ? :
 				BLKIF_MAX_SEGMENTS_PER_REQUEST;
 
-	queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);
+	blk_queue_flag_set(QUEUE_FLAG_VIRT, rq);
 
 	if (info->feature_discard) {
-		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, rq);
+		blk_queue_flag_set(QUEUE_FLAG_DISCARD, rq);
 		blk_queue_max_discard_sectors(rq, get_capacity(gd));
 		rq->limits.discard_granularity = info->discard_granularity;
 		rq->limits.discard_alignment = info->discard_alignment;
 		if (info->feature_secdiscard)
-			queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, rq);
+			blk_queue_flag_set(QUEUE_FLAG_SECERASE, rq);
 	}
 
 	/* Hard sector size and max sectors impersonate the equiv. hardware. */
@@ -1610,8 +1610,8 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
 				blkif_req(req)->error = BLK_STS_NOTSUPP;
 				info->feature_discard = 0;
 				info->feature_secdiscard = 0;
-				queue_flag_clear(QUEUE_FLAG_DISCARD, rq);
-				queue_flag_clear(QUEUE_FLAG_SECERASE, rq);
+				blk_queue_flag_clear(QUEUE_FLAG_DISCARD, rq);
+				blk_queue_flag_clear(QUEUE_FLAG_SECERASE, rq);
 			}
 			break;
 		case BLKIF_OP_FLUSH_DISKCACHE:
6 changes: 3 additions & 3 deletions drivers/block/zram/zram_drv.c
@@ -1530,8 +1530,8 @@ static int zram_add(void)
 	/* Actual capacity set using syfs (/sys/block/zram<id>/disksize */
 	set_capacity(zram->disk, 0);
 	/* zram devices sort of resembles non-rotational disks */
-	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
-	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue);
+	blk_queue_flag_set(QUEUE_FLAG_NONROT, zram->disk->queue);
+	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue);
 
 	/*
 	 * To ensure that we always get PAGE_SIZE aligned
@@ -1544,7 +1544,7 @@ static int zram_add(void)
 	blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
 	zram->disk->queue->limits.discard_granularity = PAGE_SIZE;
 	blk_queue_max_discard_sectors(zram->disk->queue, UINT_MAX);
-	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zram->disk->queue);
+	blk_queue_flag_set(QUEUE_FLAG_DISCARD, zram->disk->queue);
 
 	/*
 	 * zram_bio_discard() will clear all logical blocks if logical block
4 changes: 2 additions & 2 deletions drivers/ide/ide-disk.c
@@ -687,8 +687,8 @@ static void ide_disk_setup(ide_drive_t *drive)
 	       queue_max_sectors(q) / 2);
 
 	if (ata_id_is_ssd(id)) {
-		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
-		queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
+		blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
+		blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
 	}
 
 	/* calculate drive capacity, and select LBA if possible */
2 changes: 1 addition & 1 deletion drivers/ide/ide-probe.c
@@ -773,7 +773,7 @@ static int ide_init_queue(ide_drive_t *drive)
 	q->request_fn = do_ide_request;
 	q->initialize_rq_fn = ide_initialize_rq;
 	q->cmd_size = sizeof(struct ide_request);
-	queue_flag_set_unlocked(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
+	blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
 	if (blk_init_allocated_queue(q) < 0) {
 		blk_cleanup_queue(q);
 		return 1;
2 changes: 1 addition & 1 deletion drivers/lightnvm/pblk-init.c
@@ -1067,7 +1067,7 @@ static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
 	tqueue->limits.discard_granularity = geo->sec_per_chk * geo->sec_size;
 	tqueue->limits.discard_alignment = 0;
 	blk_queue_max_discard_sectors(tqueue, UINT_MAX >> 9);
-	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, tqueue);
+	blk_queue_flag_set(QUEUE_FLAG_DISCARD, tqueue);
 
 	pr_info("pblk(%s): luns:%u, lines:%d, secs:%llu, buf entries:%u\n",
 			tdisk->disk_name,
16 changes: 8 additions & 8 deletions drivers/md/dm-table.c
@@ -1861,15 +1861,15 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	q->limits = *limits;
 
 	if (!dm_table_supports_discards(t)) {
-		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
+		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
 		/* Must also clear discard limits... */
 		q->limits.max_discard_sectors = 0;
 		q->limits.max_hw_discard_sectors = 0;
 		q->limits.discard_granularity = 0;
 		q->limits.discard_alignment = 0;
 		q->limits.discard_misaligned = 0;
 	} else
-		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+		blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
 
 	if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_WC))) {
 		wc = true;
@@ -1879,25 +1879,25 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	blk_queue_write_cache(q, wc, fua);
 
 	if (dm_table_supports_dax(t))
-		queue_flag_set_unlocked(QUEUE_FLAG_DAX, q);
+		blk_queue_flag_set(QUEUE_FLAG_DAX, q);
 	if (dm_table_supports_dax_write_cache(t))
 		dax_write_cache(t->md->dax_dev, true);
 
 	/* Ensure that all underlying devices are non-rotational. */
 	if (dm_table_all_devices_attribute(t, device_is_nonrot))
-		queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
+		blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
 	else
-		queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, q);
+		blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
 
 	if (!dm_table_supports_write_same(t))
 		q->limits.max_write_same_sectors = 0;
 	if (!dm_table_supports_write_zeroes(t))
 		q->limits.max_write_zeroes_sectors = 0;
 
 	if (dm_table_all_devices_attribute(t, queue_supports_sg_merge))
-		queue_flag_clear_unlocked(QUEUE_FLAG_NO_SG_MERGE, q);
+		blk_queue_flag_clear(QUEUE_FLAG_NO_SG_MERGE, q);
 	else
-		queue_flag_set_unlocked(QUEUE_FLAG_NO_SG_MERGE, q);
+		blk_queue_flag_set(QUEUE_FLAG_NO_SG_MERGE, q);
 
 	dm_table_verify_integrity(t);
 
@@ -1908,7 +1908,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	 * have it set.
 	 */
 	if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random))
-		queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
+		blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
 }
 
 unsigned int dm_table_get_num_targets(struct dm_table *t)
4 changes: 2 additions & 2 deletions drivers/md/md-linear.c
@@ -138,9 +138,9 @@ static struct linear_conf *linear_conf(struct mddev *mddev, int raid_disks)
 	}
 
 	if (!discard_supported)
-		queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, mddev->queue);
 	else
-		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+		blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue);
 
 	/*
 	 * Here we calculate the device offsets.
4 changes: 2 additions & 2 deletions drivers/md/md.c
@@ -5608,9 +5608,9 @@ int md_run(struct mddev *mddev)
 		if (mddev->degraded)
 			nonrot = false;
 		if (nonrot)
-			queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mddev->queue);
+			blk_queue_flag_set(QUEUE_FLAG_NONROT, mddev->queue);
 		else
-			queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, mddev->queue);
+			blk_queue_flag_clear(QUEUE_FLAG_NONROT, mddev->queue);
 		mddev->queue->backing_dev_info->congested_data = mddev;
 		mddev->queue->backing_dev_info->congested_fn = md_congested;
 	}
4 changes: 2 additions & 2 deletions drivers/md/raid0.c
@@ -399,9 +399,9 @@ static int raid0_run(struct mddev *mddev)
 				discard_supported = true;
 		}
 		if (!discard_supported)
-			queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+			blk_queue_flag_clear(QUEUE_FLAG_DISCARD, mddev->queue);
 		else
-			queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+			blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue);
 	}
 
 	/* calculate array device size */
6 changes: 3 additions & 3 deletions drivers/md/raid1.c
@@ -1760,7 +1760,7 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
 		}
 	}
 	if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
-		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+		blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue);
 	print_conf(conf);
 	return err;
 }
@@ -3099,10 +3099,10 @@ static int raid1_run(struct mddev *mddev)
 
 	if (mddev->queue) {
 		if (discard_supported)
-			queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
+			blk_queue_flag_set(QUEUE_FLAG_DISCARD,
 						mddev->queue);
 		else
-			queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD,
+			blk_queue_flag_clear(QUEUE_FLAG_DISCARD,
 						mddev->queue);
 	}
 
6 changes: 3 additions & 3 deletions drivers/md/raid10.c
@@ -1845,7 +1845,7 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
 		break;
 	}
 	if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
-		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
+		blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue);
 
 	print_conf(conf);
 	return err;
@@ -3844,10 +3844,10 @@ static int raid10_run(struct mddev *mddev)
 
 	if (mddev->queue) {
 		if (discard_supported)
-			queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
+			blk_queue_flag_set(QUEUE_FLAG_DISCARD,
 						mddev->queue);
 		else
-			queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD,
+			blk_queue_flag_clear(QUEUE_FLAG_DISCARD,
 						mddev->queue);
 	}
 	/* need to check that every block has at least one working mirror */
4 changes: 2 additions & 2 deletions drivers/md/raid5.c
@@ -7444,10 +7444,10 @@ static int raid5_run(struct mddev *mddev)
 		if (devices_handle_discard_safely &&
 		    mddev->queue->limits.max_discard_sectors >= (stripe >> 9) &&
 		    mddev->queue->limits.discard_granularity >= stripe)
-			queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
+			blk_queue_flag_set(QUEUE_FLAG_DISCARD,
 						mddev->queue);
 		else
-			queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD,
+			blk_queue_flag_clear(QUEUE_FLAG_DISCARD,
 						mddev->queue);
 
 		blk_queue_max_hw_sectors(mddev->queue, UINT_MAX);
[Diffs for the remaining changed files are not shown.]
