erofs: kill use_vmap module parameter
As Christoph said [1],
"vm_map_ram is supposed to generally behave better.  So if
it doesn't please report that that to the arch maintainer
and linux-mm so that they can look into the issue.  Having
user make choices of deep down kernel internals is just
a horrible interface.

Please talk to maintainers of other bits of the kernel
if you see issues and / or need enhancements. "

Let's revisit the previous conclusion and kill the vmap
approach.

[1] https://lore.kernel.org/r/[email protected]/
Reported-by: Christoph Hellwig <[email protected]>
Signed-off-by: Gao Xiang <[email protected]>
Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Greg Kroah-Hartman <[email protected]>
Gao Xiang authored and gregkh committed Sep 5, 2019
1 parent e2c71e7 commit 73d0393
Showing 2 changed files with 13 additions and 37 deletions.
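
For orientation before the hunks below, here is a minimal standalone sketch of the mapping pattern the driver now uses unconditionally: try vm_map_ram(), and on failure purge lazily freed areas with vm_unmap_aliases() and retry a couple of times, instead of letting a module parameter switch callers over to vmap(). The helper names map_out_pages()/unmap_out_pages() are illustrative only; erofs open-codes this loop directly in z_erofs_decompress_generic().

    #include <linux/mm.h>
    #include <linux/vmalloc.h>

    /*
     * Sketch only: mirrors the retry loop added in the hunks below and is
     * not part of the erofs sources. vm_map_ram(), vm_unmap_ram() and
     * vm_unmap_aliases() are the real MM APIs (4-argument vm_map_ram()
     * as of this 2019-era kernel).
     */
    static void *map_out_pages(struct page **pages, unsigned int count)
    {
            void *addr;
            int i = 0;

            while (1) {
                    addr = vm_map_ram(pages, count, -1, PAGE_KERNEL);

                    /* retry two more times (totally 3 times) */
                    if (addr || ++i >= 3)
                            return addr;    /* NULL after the third failure */
                    vm_unmap_aliases();     /* purge lazy vmap areas, then retry */
            }
    }

    static void unmap_out_pages(const void *addr, unsigned int count)
    {
            vm_unmap_ram(addr, count);      /* counterpart of vm_map_ram() */
    }

With this in place there is nothing left for a use_vmap knob to select, which is why both the module parameter and its documentation are removed.
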
4 changes: 0 additions & 4 deletions Documentation/filesystems/erofs.txt
@@ -67,10 +67,6 @@ cache_strategy=%s Select a strategy for cached decompression from now on:
                          It still does in-place I/O decompression
                          for the rest compressed physical clusters.
 
-Module parameters
-=================
-use_vmap=[0|1]        Use vmap() instead of vm_map_ram() (default 0).
-
 On-disk details
 ===============
 
46 changes: 13 additions & 33 deletions fs/erofs/decompressor.c
@@ -28,10 +28,6 @@ struct z_erofs_decompressor {
         char *name;
 };
 
-static bool use_vmap;
-module_param(use_vmap, bool, 0444);
-MODULE_PARM_DESC(use_vmap, "Use vmap() instead of vm_map_ram() (default 0)");
-
 static int z_erofs_lz4_prepare_destpages(struct z_erofs_decompress_req *rq,
                                          struct list_head *pagepool)
 {
@@ -221,32 +217,6 @@ static void copy_from_pcpubuf(struct page **out, const char *dst,
         }
 }
 
-static void *erofs_vmap(struct page **pages, unsigned int count)
-{
-        int i = 0;
-
-        if (use_vmap)
-                return vmap(pages, count, VM_MAP, PAGE_KERNEL);
-
-        while (1) {
-                void *addr = vm_map_ram(pages, count, -1, PAGE_KERNEL);
-
-                /* retry two more times (totally 3 times) */
-                if (addr || ++i >= 3)
-                        return addr;
-                vm_unmap_aliases();
-        }
-        return NULL;
-}
-
-static void erofs_vunmap(const void *mem, unsigned int count)
-{
-        if (!use_vmap)
-                vm_unmap_ram(mem, count);
-        else
-                vunmap(mem);
-}
-
 static int z_erofs_decompress_generic(struct z_erofs_decompress_req *rq,
                                       struct list_head *pagepool)
 {
@@ -255,7 +225,7 @@ static int z_erofs_decompress_generic(struct z_erofs_decompress_req *rq,
         const struct z_erofs_decompressor *alg = decompressors + rq->alg;
         unsigned int dst_maptype;
         void *dst;
-        int ret;
+        int ret, i;
 
         if (nrpages_out == 1 && !rq->inplace_io) {
                 DBG_BUGON(!*rq->out);
@@ -293,9 +263,19 @@ static int z_erofs_decompress_generic(struct z_erofs_decompress_req *rq,
                 goto dstmap_out;
         }
 
-        dst = erofs_vmap(rq->out, nrpages_out);
+        i = 0;
+        while (1) {
+                dst = vm_map_ram(rq->out, nrpages_out, -1, PAGE_KERNEL);
+
+                /* retry two more times (totally 3 times) */
+                if (dst || ++i >= 3)
+                        break;
+                vm_unmap_aliases();
+        }
+
         if (!dst)
                 return -ENOMEM;
+
         dst_maptype = 2;
 
 dstmap_out:
@@ -304,7 +284,7 @@ static int z_erofs_decompress_generic(struct z_erofs_decompress_req *rq,
         if (!dst_maptype)
                 kunmap_atomic(dst);
         else if (dst_maptype == 2)
-                erofs_vunmap(dst, nrpages_out);
+                vm_unmap_ram(dst, nrpages_out);
         return ret;
 }
 
