Merge branch 'for-linus' of git://neil.brown.name/md
* 'for-linus' of git://neil.brown.name/md:
  async_tx: fix asynchronous raid6 recovery for ddf layouts
  async_pq: rename scribble page
  async_pq: kill a stray dma_map() call and other cleanups
  md/raid6: kill a gcc-4.0.1 'uninitialized variable' warning
  raid6/async_tx: handle holes in block list in async_syndrome_val
  md/async: don't pass a memory pointer as a page pointer.
  md: Fix handling of raid5 array which is being reshaped to fewer devices.
  md: fix problems with RAID6 calculations for DDF.
  md/raid456: downlevel multicore operations to raid_run_ops
  md: drivers/md/unroll.pl replaced with awk analog
  md: remove clumsy usage of do_sync_mapping_range from bitmap code
  md: raid1/raid10: handle allocation errors during array setup.
  md/raid5: initialize conf->device_lock earlier
  md/raid1/raid10: add a cond_resched
  Revert "md: do not progress the resync process if the stripe was blocked"
torvalds committed Oct 31, 2009
2 parents aefba41 + da17bf4 commit bf699c9
Showing 15 changed files with 284 additions and 224 deletions.
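The async_tx changes in this merge share one semantic shift: a missing ("hole") block in a blocks[] list is now represented by a NULL entry rather than a pointer to raid6_empty_zero_page. The synchronous paths substitute the zero page on the fly, while the dma paths skip holes and compact their source (and coefficient) lists. A minimal, userspace-style sketch of that compaction pattern; the names here are illustrative, not kernel API:

/*
 * Illustrative sketch only (not code from this merge): NULL entries in a
 * source list are dropped and the remaining sources compacted, keeping a
 * parallel coefficient array in step.
 */
static int compact_sources(void **src, const unsigned char *coef_in,
			   void **out, unsigned char *coef_out, int cnt)
{
	int i, n = 0;

	for (i = 0; i < cnt; i++) {
		if (!src[i])
			continue;	/* hole: contributes nothing */
		out[n] = src[i];
		coef_out[n] = coef_in[i];
		n++;
	}

	return n;	/* number of real sources */
}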
60 changes: 36 additions & 24 deletions crypto/async_tx/async_pq.c
@@ -26,14 +26,10 @@
 #include <linux/async_tx.h>
 
 /**
- * scribble - space to hold throwaway P buffer for synchronous gen_syndrome
+ * pq_scribble_page - space to hold throwaway P or Q buffer for
+ * synchronous gen_syndrome
  */
-static struct page *scribble;
-
-static bool is_raid6_zero_block(struct page *p)
-{
-	return p == (void *) raid6_empty_zero_page;
-}
+static struct page *pq_scribble_page;
 
 /* the struct page *blocks[] parameter passed to async_gen_syndrome()
  * and async_syndrome_val() contains the 'P' destination address at
@@ -83,7 +79,7 @@ do_async_gen_syndrome(struct dma_chan *chan, struct page **blocks,
 	 * sources and update the coefficients accordingly
 	 */
 	for (i = 0, idx = 0; i < src_cnt; i++) {
-		if (is_raid6_zero_block(blocks[i]))
+		if (blocks[i] == NULL)
 			continue;
 		dma_src[idx] = dma_map_page(dma->dev, blocks[i], offset, len,
 					    DMA_TO_DEVICE);
@@ -160,9 +156,9 @@ do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
 	srcs = (void **) blocks;
 
 	for (i = 0; i < disks; i++) {
-		if (is_raid6_zero_block(blocks[i])) {
+		if (blocks[i] == NULL) {
 			BUG_ON(i > disks - 3); /* P or Q can't be zero */
-			srcs[i] = blocks[i];
+			srcs[i] = (void*)raid6_empty_zero_page;
 		} else
 			srcs[i] = page_address(blocks[i]) + offset;
 	}
@@ -186,10 +182,14 @@ do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
  * blocks[disks-1] to NULL. When P or Q is omitted 'len' must be <=
  * PAGE_SIZE as a temporary buffer of this size is used in the
  * synchronous path. 'disks' always accounts for both destination
- * buffers.
+ * buffers. If any source buffers (blocks[i] where i < disks - 2) are
+ * set to NULL those buffers will be replaced with the raid6_zero_page
+ * in the synchronous path and omitted in the hardware-asynchronous
+ * path.
  *
  * 'blocks' note: if submit->scribble is NULL then the contents of
- * 'blocks' may be overridden
+ * 'blocks' may be overwritten to perform address conversions
+ * (dma_map_page() or page_address()).
  */
 struct dma_async_tx_descriptor *
 async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
@@ -227,11 +227,11 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
 	async_tx_quiesce(&submit->depend_tx);
 
 	if (!P(blocks, disks)) {
-		P(blocks, disks) = scribble;
+		P(blocks, disks) = pq_scribble_page;
 		BUG_ON(len + offset > PAGE_SIZE);
 	}
 	if (!Q(blocks, disks)) {
-		Q(blocks, disks) = scribble;
+		Q(blocks, disks) = pq_scribble_page;
 		BUG_ON(len + offset > PAGE_SIZE);
 	}
 	do_sync_gen_syndrome(blocks, offset, disks, len, submit);
@@ -265,8 +265,10 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
 						      len);
 	struct dma_device *device = chan ? chan->device : NULL;
 	struct dma_async_tx_descriptor *tx;
+	unsigned char coefs[disks-2];
 	enum dma_ctrl_flags dma_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0;
 	dma_addr_t *dma_src = NULL;
+	int src_cnt = 0;
 
 	BUG_ON(disks < 4);
 
@@ -285,22 +287,32 @@
 			 __func__, disks, len);
 		if (!P(blocks, disks))
 			dma_flags |= DMA_PREP_PQ_DISABLE_P;
+		else
+			pq[0] = dma_map_page(dev, P(blocks, disks),
+					     offset, len,
+					     DMA_TO_DEVICE);
 		if (!Q(blocks, disks))
 			dma_flags |= DMA_PREP_PQ_DISABLE_Q;
+		else
+			pq[1] = dma_map_page(dev, Q(blocks, disks),
+					     offset, len,
+					     DMA_TO_DEVICE);
+
 		if (submit->flags & ASYNC_TX_FENCE)
 			dma_flags |= DMA_PREP_FENCE;
-		for (i = 0; i < disks; i++)
+		for (i = 0; i < disks-2; i++)
 			if (likely(blocks[i])) {
-				BUG_ON(is_raid6_zero_block(blocks[i]));
-				dma_src[i] = dma_map_page(dev, blocks[i],
-							  offset, len,
-							  DMA_TO_DEVICE);
+				dma_src[src_cnt] = dma_map_page(dev, blocks[i],
+								offset, len,
+								DMA_TO_DEVICE);
+				coefs[src_cnt] = raid6_gfexp[i];
+				src_cnt++;
 			}
 
 		for (;;) {
 			tx = device->device_prep_dma_pq_val(chan, pq, dma_src,
-							    disks - 2,
-							    raid6_gfexp,
+							    src_cnt,
+							    coefs,
 							    len, pqres,
 							    dma_flags);
 			if (likely(tx))
@@ -373,9 +385,9 @@ EXPORT_SYMBOL_GPL(async_syndrome_val);
 
 static int __init async_pq_init(void)
 {
-	scribble = alloc_page(GFP_KERNEL);
+	pq_scribble_page = alloc_page(GFP_KERNEL);
 
-	if (scribble)
+	if (pq_scribble_page)
 		return 0;
 
 	pr_err("%s: failed to allocate required spare page\n", __func__);
@@ -385,7 +397,7 @@ static int __init async_pq_init(void)
 
 static void __exit async_pq_exit(void)
 {
-	put_page(scribble);
+	put_page(pq_scribble_page);
 }
 
 module_init(async_pq_init);
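The kernel-doc above spells out the async_gen_syndrome() call convention: P sits at blocks[disks-2] and Q at blocks[disks-1]; either destination may be NULL, in which case pq_scribble_page absorbs the unwanted result and len + offset must stay within PAGE_SIZE; NULL sources are zero-substituted in the synchronous path and skipped in the dma path. A hedged caller sketch under those rules; the function name, array layout, and scribble setup are assumptions for illustration, not code from this diff:

#include <linux/async_tx.h>
#include <linux/mm.h>

/*
 * Illustrative sketch only, not code from this commit: generate P/Q over a
 * 6-slot array (4 data slots + P + Q) in which data slot 2 is a hole.
 * 'spare' supplies scribble space so the contents of blocks[] survive.
 */
static struct dma_async_tx_descriptor *
gen_pq_with_hole(struct page **blocks, struct page *spare, size_t len)
{
	struct async_submit_ctl submit;
	const int disks = 6;

	blocks[2] = NULL;	/* hole: zero-filled (sync) or skipped (dma) */
	/* blocks[disks-2] is P and blocks[disks-1] is Q; leaving either
	 * NULL would route that result to pq_scribble_page instead. */
	init_async_submit(&submit, ASYNC_TX_ACK, NULL, NULL, NULL,
			  (addr_conv_t *) page_address(spare));
	return async_gen_syndrome(blocks, 0, disks, len, &submit);
}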
100 changes: 66 additions & 34 deletions crypto/async_tx/async_raid6_recov.c
@@ -131,8 +131,8 @@ async_mult(struct page *dest, struct page *src, u8 coef, size_t len,
 }
 
 static struct dma_async_tx_descriptor *
-__2data_recov_4(size_t bytes, int faila, int failb, struct page **blocks,
-		struct async_submit_ctl *submit)
+__2data_recov_4(int disks, size_t bytes, int faila, int failb,
+		struct page **blocks, struct async_submit_ctl *submit)
 {
 	struct dma_async_tx_descriptor *tx = NULL;
 	struct page *p, *q, *a, *b;
@@ -143,8 +143,8 @@ __2data_recov_4(size_t bytes, int faila, int failb, struct page **blocks,
 	void *cb_param = submit->cb_param;
 	void *scribble = submit->scribble;
 
-	p = blocks[4-2];
-	q = blocks[4-1];
+	p = blocks[disks-2];
+	q = blocks[disks-1];
 
 	a = blocks[faila];
 	b = blocks[failb];
@@ -170,8 +170,8 @@ __2data_recov_4(size_t bytes, int faila, int failb, struct page **blocks,
 }
 
 static struct dma_async_tx_descriptor *
-__2data_recov_5(size_t bytes, int faila, int failb, struct page **blocks,
-		struct async_submit_ctl *submit)
+__2data_recov_5(int disks, size_t bytes, int faila, int failb,
+		struct page **blocks, struct async_submit_ctl *submit)
 {
 	struct dma_async_tx_descriptor *tx = NULL;
 	struct page *p, *q, *g, *dp, *dq;
@@ -181,21 +181,22 @@ __2data_recov_5(size_t bytes, int faila, int failb, struct page **blocks,
 	dma_async_tx_callback cb_fn = submit->cb_fn;
 	void *cb_param = submit->cb_param;
 	void *scribble = submit->scribble;
-	int uninitialized_var(good);
-	int i;
+	int good_srcs, good, i;
 
-	for (i = 0; i < 3; i++) {
+	good_srcs = 0;
+	good = -1;
+	for (i = 0; i < disks-2; i++) {
+		if (blocks[i] == NULL)
+			continue;
 		if (i == faila || i == failb)
 			continue;
-		else {
-			good = i;
-			break;
-		}
+		good = i;
+		good_srcs++;
 	}
-	BUG_ON(i >= 3);
+	BUG_ON(good_srcs > 1);
 
-	p = blocks[5-2];
-	q = blocks[5-1];
+	p = blocks[disks-2];
+	q = blocks[disks-1];
 	g = blocks[good];
 
 	/* Compute syndrome with zero for the missing data pages
@@ -263,10 +264,10 @@ __2data_recov_n(int disks, size_t bytes, int faila, int failb,
 	 * delta p and delta q
 	 */
 	dp = blocks[faila];
-	blocks[faila] = (void *)raid6_empty_zero_page;
+	blocks[faila] = NULL;
 	blocks[disks-2] = dp;
 	dq = blocks[failb];
-	blocks[failb] = (void *)raid6_empty_zero_page;
+	blocks[failb] = NULL;
 	blocks[disks-1] = dq;
 
 	init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble);
@@ -323,6 +324,8 @@ struct dma_async_tx_descriptor *
 async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
 			struct page **blocks, struct async_submit_ctl *submit)
 {
+	int non_zero_srcs, i;
+
 	BUG_ON(faila == failb);
 	if (failb < faila)
 		swap(faila, failb);
@@ -334,11 +337,13 @@ async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb,
 	 */
 	if (!submit->scribble) {
 		void **ptrs = (void **) blocks;
-		int i;
 
 		async_tx_quiesce(&submit->depend_tx);
 		for (i = 0; i < disks; i++)
-			ptrs[i] = page_address(blocks[i]);
+			if (blocks[i] == NULL)
+				ptrs[i] = (void *) raid6_empty_zero_page;
+			else
+				ptrs[i] = page_address(blocks[i]);
 
 		raid6_2data_recov(disks, bytes, faila, failb, ptrs);
 
@@ -347,19 +352,30 @@
 		return NULL;
 	}
 
-	switch (disks) {
-	case 4:
+	non_zero_srcs = 0;
+	for (i = 0; i < disks-2 && non_zero_srcs < 4; i++)
+		if (blocks[i])
+			non_zero_srcs++;
+	switch (non_zero_srcs) {
+	case 0:
+	case 1:
+		/* There must be at least 2 sources - the failed devices. */
+		BUG();
+
+	case 2:
 		/* dma devices do not uniformly understand a zero source pq
 		 * operation (in contrast to the synchronous case), so
-		 * explicitly handle the 4 disk special case
+		 * explicitly handle the special case of a 4 disk array with
+		 * both data disks missing.
 		 */
-		return __2data_recov_4(bytes, faila, failb, blocks, submit);
-	case 5:
+		return __2data_recov_4(disks, bytes, faila, failb, blocks, submit);
+	case 3:
 		/* dma devices do not uniformly understand a single
 		 * source pq operation (in contrast to the synchronous
-		 * case), so explicitly handle the 5 disk special case
+		 * case), so explicitly handle the special case of a 5 disk
+		 * array with 2 of 3 data disks missing.
 		 */
-		return __2data_recov_5(bytes, faila, failb, blocks, submit);
+		return __2data_recov_5(disks, bytes, faila, failb, blocks, submit);
 	default:
 		return __2data_recov_n(disks, bytes, faila, failb, blocks, submit);
 	}
@@ -385,6 +401,7 @@ async_raid6_datap_recov(int disks, size_t bytes, int faila,
 	dma_async_tx_callback cb_fn = submit->cb_fn;
 	void *cb_param = submit->cb_param;
 	void *scribble = submit->scribble;
+	int good_srcs, good, i;
 	struct page *srcs[2];
 
 	pr_debug("%s: disks: %d len: %zu\n", __func__, disks, bytes);
@@ -394,11 +411,13 @@
 	 */
 	if (!scribble) {
 		void **ptrs = (void **) blocks;
-		int i;
 
 		async_tx_quiesce(&submit->depend_tx);
 		for (i = 0; i < disks; i++)
-			ptrs[i] = page_address(blocks[i]);
+			if (blocks[i] == NULL)
+				ptrs[i] = (void*)raid6_empty_zero_page;
+			else
+				ptrs[i] = page_address(blocks[i]);
 
 		raid6_datap_recov(disks, bytes, faila, ptrs);
 
@@ -407,21 +426,34 @@
 		return NULL;
 	}
 
+	good_srcs = 0;
+	good = -1;
+	for (i = 0; i < disks-2; i++) {
+		if (i == faila)
+			continue;
+		if (blocks[i]) {
+			good = i;
+			good_srcs++;
+			if (good_srcs > 1)
+				break;
+		}
+	}
+	BUG_ON(good_srcs == 0);
+
 	p = blocks[disks-2];
 	q = blocks[disks-1];
 
 	/* Compute syndrome with zero for the missing data page
 	 * Use the dead data page as temporary storage for delta q
 	 */
 	dq = blocks[faila];
-	blocks[faila] = (void *)raid6_empty_zero_page;
+	blocks[faila] = NULL;
 	blocks[disks-1] = dq;
 
-	/* in the 4 disk case we only need to perform a single source
-	 * multiplication
+	/* in the 4-disk case we only need to perform a single source
+	 * multiplication with the one good data block.
 	 */
-	if (disks == 4) {
-		int good = faila == 0 ? 1 : 0;
+	if (good_srcs == 1) {
 		struct page *g = blocks[good];
 
 		init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL,
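async_raid6_2data_recov() now picks its strategy by counting non-NULL sources rather than switching on the raw disks count, which is what lets sparse DDF layouts, where unused slots are NULL holes, reach the 4- and 5-disk special cases. A hedged sketch of a caller; the wrapper name and submit setup are illustrative assumptions, not from this diff:

#include <linux/async_tx.h>

/*
 * Illustrative sketch only, not code from this commit: rebuild two failed
 * data blocks.  blocks[faila] and blocks[failb] are the dead pages to
 * reconstruct into; unused slots below disks-2 may simply stay NULL, and
 * the non-NULL source count picks the recovery strategy.
 */
static struct dma_async_tx_descriptor *
recover_two_blocks(struct page **blocks, int disks, int faila, int failb,
		   size_t bytes, addr_conv_t *scribble)
{
	struct async_submit_ctl submit;

	init_async_submit(&submit, ASYNC_TX_ACK, NULL, NULL, NULL, scribble);
	return async_raid6_2data_recov(disks, bytes, faila, failb,
				       blocks, &submit);
}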
18 changes: 11 additions & 7 deletions crypto/async_tx/async_xor.c
@@ -44,20 +44,23 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
 	void *cb_param_orig = submit->cb_param;
 	enum async_tx_flags flags_orig = submit->flags;
 	enum dma_ctrl_flags dma_flags;
-	int xor_src_cnt;
+	int xor_src_cnt = 0;
 	dma_addr_t dma_dest;
 
 	/* map the dest bidrectional in case it is re-used as a source */
 	dma_dest = dma_map_page(dma->dev, dest, offset, len, DMA_BIDIRECTIONAL);
 	for (i = 0; i < src_cnt; i++) {
 		/* only map the dest once */
+		if (!src_list[i])
+			continue;
 		if (unlikely(src_list[i] == dest)) {
-			dma_src[i] = dma_dest;
+			dma_src[xor_src_cnt++] = dma_dest;
 			continue;
 		}
-		dma_src[i] = dma_map_page(dma->dev, src_list[i], offset,
-					  len, DMA_TO_DEVICE);
+		dma_src[xor_src_cnt++] = dma_map_page(dma->dev, src_list[i], offset,
+						      len, DMA_TO_DEVICE);
 	}
+	src_cnt = xor_src_cnt;
 
 	while (src_cnt) {
 		submit->flags = flags_orig;
@@ -123,7 +126,7 @@ do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
 	    int src_cnt, size_t len, struct async_submit_ctl *submit)
 {
 	int i;
-	int xor_src_cnt;
+	int xor_src_cnt = 0;
 	int src_off = 0;
 	void *dest_buf;
 	void **srcs;
@@ -135,8 +138,9 @@ do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
 
 	/* convert to buffer pointers */
 	for (i = 0; i < src_cnt; i++)
-		srcs[i] = page_address(src_list[i]) + offset;
-
+		if (src_list[i])
+			srcs[xor_src_cnt++] = page_address(src_list[i]) + offset;
+	src_cnt = xor_src_cnt;
 	/* set destination address */
 	dest_buf = page_address(dest) + offset;
 
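Both xor paths above now compact NULL sources out of the list before doing any work, which is safe because xor against an all-zero block is a no-op. A userspace-style sketch of the resulting semantics; illustrative only, not the kernel implementation:

#include <stddef.h>
#include <string.h>

/*
 * Userspace-style sketch, not kernel code: xor all non-NULL sources into
 * dest, skipping holes.  Skipping a hole is equivalent to xoring in a
 * zero block.
 */
static void xor_with_holes(unsigned char *dest, void **srcs, int src_cnt,
			   size_t len)
{
	size_t j;
	int i;

	memset(dest, 0, len);
	for (i = 0; i < src_cnt; i++) {
		const unsigned char *s = srcs[i];

		if (!s)
			continue;	/* hole: xor with zero is a no-op */
		for (j = 0; j < len; j++)
			dest[j] ^= s[j];
	}
}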
[Diffs for the remaining 12 changed files are not shown.]
