crypto: iaa - Add irq support for the crypto async interface
The existing iaa crypto async support provides an implementation that
satisfies the interface but does so in a synchronous manner - it fills
and submits the IDXD descriptor and then waits for it to complete
before returning.  This isn't a problem at the moment, since all
existing callers (e.g. zswap) wrap any asynchronous callees in a
synchronous wrapper anyway.

This change makes the iaa crypto async implementation truly
asynchronous: it fills and submits the IDXD descriptor, then returns
immediately with -EINPROGRESS.  It also sets the descriptor's 'request
completion irq' bit and sets up a callback with the IDXD driver which
is called when the operation completes and the irq fires.  The
existing callers such as zswap use synchronous wrappers to deal with
-EINPROGRESS and so work as expected without any changes.
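
For reference, the synchronous wrapper pattern those callers use is the
standard crypto_wait_req() idiom; the following is a sketch, not code
from this patch (the acomp request setup is elided, and 'req' and 'ret'
are assumed):

  struct crypto_wait wait;

  crypto_init_wait(&wait);
  acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                             crypto_req_done, &wait);
  /* crypto_wait_req() turns -EINPROGRESS into a sleeping wait */
  ret = crypto_wait_req(crypto_acomp_compress(req), &wait);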

This mode can be enabled by writing 'async_irq' to the sync_mode
iaa_crypto driver attribute:

  echo async_irq > /sys/bus/dsa/drivers/crypto/sync_mode

Async mode without interrupts (caller must poll) can be enabled by
writing 'async' to it:

  echo async > /sys/bus/dsa/drivers/crypto/sync_mode

The default sync mode can be enabled by writing 'sync' to it:

  echo sync > /sys/bus/dsa/drivers/crypto/sync_mode
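
The current setting can be read back from the same attribute:

  cat /sys/bus/dsa/drivers/crypto/sync_mode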

The sync_mode value setting at the time the IAA algorithms are
registered is captured in each algorithm's crypto_ctx and used for all
compresses and decompresses when using a given algorithm.

Signed-off-by: Tom Zanussi <[email protected]>
Signed-off-by: Herbert Xu <[email protected]>
tzanussi authored and herbertx committed Dec 15, 2023
1 parent 2ec6761 commit 09646c9
Showing 2 changed files with 266 additions and 2 deletions.
2 changes: 2 additions & 0 deletions drivers/crypto/intel/iaa/iaa_crypto.h
@@ -153,6 +153,8 @@ enum iaa_mode {
struct iaa_compression_ctx {
        enum iaa_mode mode;
        bool verify_compress;
        bool async_mode;
        bool use_irq;
};

#endif
266 changes: 264 additions & 2 deletions drivers/crypto/intel/iaa/iaa_crypto_main.c
@@ -122,6 +122,102 @@ static ssize_t verify_compress_store(struct device_driver *driver,
}
static DRIVER_ATTR_RW(verify_compress);

/*
 * The iaa crypto driver supports three 'sync' methods determining how
 * compressions and decompressions are performed:
 *
 * - sync:      the compression or decompression completes before
 *              returning. This is the mode used by the async crypto
 *              interface when the sync mode is set to 'sync' and by
 *              the sync crypto interface regardless of setting.
 *
 * - async:     the compression or decompression is submitted and returns
 *              immediately. Completion interrupts are not used so
 *              the caller is responsible for polling the descriptor
 *              for completion. This mode is applicable to only the
 *              async crypto interface and is ignored for anything
 *              else.
 *
 * - async_irq: the compression or decompression is submitted and
 *              returns immediately. Completion interrupts are
 *              enabled so the caller can wait for the completion and
 *              yield to other threads. When the compression or
 *              decompression completes, the completion is signaled
 *              and the caller awakened. This mode is applicable to
 *              only the async crypto interface and is ignored for
 *              anything else.
 *
 * These modes can be set using the iaa_crypto sync_mode driver
 * attribute.
 */

/* Use async mode */
static bool async_mode;
/* Use interrupts */
static bool use_irq;

/**
 * set_iaa_sync_mode - Set IAA sync mode
 * @name: The name of the sync mode
 *
 * Make the IAA sync mode named @name the current sync mode used by
 * compression/decompression.
 */

static int set_iaa_sync_mode(const char *name)
{
        int ret = 0;

        if (sysfs_streq(name, "sync")) {
                async_mode = false;
                use_irq = false;
        } else if (sysfs_streq(name, "async")) {
                async_mode = true;
                use_irq = false;
        } else if (sysfs_streq(name, "async_irq")) {
                async_mode = true;
                use_irq = true;
        } else {
                ret = -EINVAL;
        }

        return ret;
}

static ssize_t sync_mode_show(struct device_driver *driver, char *buf)
{
        int ret = 0;

        if (!async_mode && !use_irq)
                ret = sprintf(buf, "%s\n", "sync");
        else if (async_mode && !use_irq)
                ret = sprintf(buf, "%s\n", "async");
        else if (async_mode && use_irq)
                ret = sprintf(buf, "%s\n", "async_irq");

        return ret;
}

static ssize_t sync_mode_store(struct device_driver *driver,
                               const char *buf, size_t count)
{
        int ret = -EBUSY;

        mutex_lock(&iaa_devices_lock);

        if (iaa_crypto_enabled)
                goto out;

        ret = set_iaa_sync_mode(buf);
        if (ret == 0)
                ret = count;
out:
        mutex_unlock(&iaa_devices_lock);

        return ret;
}
static DRIVER_ATTR_RW(sync_mode);

static struct iaa_compression_mode *iaa_compression_modes[IAA_COMP_MODES_MAX];

static int find_empty_iaa_compression_mode(void)
@@ -1001,6 +1097,111 @@ static int deflate_generic_decompress(struct acomp_req *req)
        return ret;
}

static int iaa_remap_for_verify(struct device *dev, struct iaa_wq *iaa_wq,
                                struct acomp_req *req,
                                dma_addr_t *src_addr, dma_addr_t *dst_addr);

static int iaa_compress_verify(struct crypto_tfm *tfm, struct acomp_req *req,
                               struct idxd_wq *wq,
                               dma_addr_t src_addr, unsigned int slen,
                               dma_addr_t dst_addr, unsigned int *dlen,
                               u32 compression_crc);

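/*
 * Completion callback registered with the IDXD driver; invoked when a
 * submitted descriptor completes and its irq fires (see commit text).
 */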
static void iaa_desc_complete(struct idxd_desc *idxd_desc,
                              enum idxd_complete_type comp_type,
                              bool free_desc, void *__ctx,
                              u32 *status)
{
        struct iaa_device_compression_mode *active_compression_mode;
        struct iaa_compression_ctx *compression_ctx;
        struct crypto_ctx *ctx = __ctx;
        struct iaa_device *iaa_device;
        struct idxd_device *idxd;
        struct iaa_wq *iaa_wq;
        struct pci_dev *pdev;
        struct device *dev;
        int ret, err = 0;

        compression_ctx = crypto_tfm_ctx(ctx->tfm);

        iaa_wq = idxd_wq_get_private(idxd_desc->wq);
        iaa_device = iaa_wq->iaa_device;
        idxd = iaa_device->idxd;
        pdev = idxd->pdev;
        dev = &pdev->dev;

        active_compression_mode = get_iaa_device_compression_mode(iaa_device,
                                                                  compression_ctx->mode);
        dev_dbg(dev, "%s: compression mode %s,"
                " ctx->src_addr %llx, ctx->dst_addr %llx\n", __func__,
                active_compression_mode->name,
                ctx->src_addr, ctx->dst_addr);

        ret = check_completion(dev, idxd_desc->iax_completion,
                               ctx->compress, false);
        if (ret) {
                dev_dbg(dev, "%s: check_completion failed ret=%d\n", __func__, ret);
                if (!ctx->compress &&
                    idxd_desc->iax_completion->status == IAA_ANALYTICS_ERROR) {
                        pr_warn("%s: falling back to deflate-generic decompress, "
                                "analytics error code %x\n", __func__,
                                idxd_desc->iax_completion->error_code);
                        ret = deflate_generic_decompress(ctx->req);
                        if (ret) {
                                dev_dbg(dev, "%s: deflate-generic failed ret=%d\n",
                                        __func__, ret);
                                err = -EIO;
                                goto err;
                        }
                } else {
                        err = -EIO;
                        goto err;
                }
        } else {
                ctx->req->dlen = idxd_desc->iax_completion->output_size;
        }

        if (ctx->compress && compression_ctx->verify_compress) {
                dma_addr_t src_addr, dst_addr;
                u32 compression_crc;

                compression_crc = idxd_desc->iax_completion->crc;

                ret = iaa_remap_for_verify(dev, iaa_wq, ctx->req, &src_addr, &dst_addr);
                if (ret) {
                        dev_dbg(dev, "%s: compress verify remap failed ret=%d\n", __func__, ret);
                        err = -EIO;
                        goto out;
                }

                ret = iaa_compress_verify(ctx->tfm, ctx->req, iaa_wq->wq, src_addr,
                                          ctx->req->slen, dst_addr, &ctx->req->dlen,
                                          compression_crc);
                if (ret) {
                        dev_dbg(dev, "%s: compress verify failed ret=%d\n", __func__, ret);
                        err = -EIO;
                }

                dma_unmap_sg(dev, ctx->req->dst, sg_nents(ctx->req->dst), DMA_TO_DEVICE);
                dma_unmap_sg(dev, ctx->req->src, sg_nents(ctx->req->src), DMA_FROM_DEVICE);

                goto out;
        }
err:
        dma_unmap_sg(dev, ctx->req->dst, sg_nents(ctx->req->dst), DMA_FROM_DEVICE);
        dma_unmap_sg(dev, ctx->req->src, sg_nents(ctx->req->src), DMA_TO_DEVICE);
out:
        if (ret != 0)
                dev_dbg(dev, "asynchronous compress failed ret=%d\n", ret);

        if (ctx->req->base.complete)
                acomp_request_complete(ctx->req, err);

        if (free_desc)
                idxd_free_desc(idxd_desc->wq, idxd_desc);
        iaa_wq_put(idxd_desc->wq);
}

static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req,
                        struct idxd_wq *wq,
                        dma_addr_t src_addr, unsigned int slen,
@@ -1049,6 +1250,22 @@ static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req,
        desc->src2_size = sizeof(struct aecs_comp_table_record);
        desc->completion_addr = idxd_desc->compl_dma;

        if (ctx->use_irq && !disable_async) {
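                /* set the 'request completion irq' bit; iaa_desc_complete() runs when it fires */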
                desc->flags |= IDXD_OP_FLAG_RCI;

                idxd_desc->crypto.req = req;
                idxd_desc->crypto.tfm = tfm;
                idxd_desc->crypto.src_addr = src_addr;
                idxd_desc->crypto.dst_addr = dst_addr;
                idxd_desc->crypto.compress = true;

                dev_dbg(dev, "%s use_async_irq: compression mode %s,"
                        " src_addr %llx, dst_addr %llx\n", __func__,
                        active_compression_mode->name,
                        src_addr, dst_addr);
        } else if (ctx->async_mode && !disable_async)
                req->base.data = idxd_desc;

        dev_dbg(dev, "%s: compression mode %s,"
                " desc->src1_addr %llx, desc->src1_size %d,"
                " desc->dst_addr %llx, desc->max_dst_size %d,"
@@ -1063,6 +1280,12 @@ static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req,
                goto err;
        }

        if (ctx->async_mode && !disable_async) {
                ret = -EINPROGRESS;
                dev_dbg(dev, "%s: returning -EINPROGRESS\n", __func__);
                goto out;
        }

        ret = check_completion(dev, idxd_desc->iax_completion, true, false);
        if (ret) {
                dev_dbg(dev, "check_completion failed ret=%d\n", ret);
@@ -1073,7 +1296,8 @@ static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req,

        *compression_crc = idxd_desc->iax_completion->crc;

-       idxd_free_desc(wq, idxd_desc);
+       if (!ctx->async_mode)
+               idxd_free_desc(wq, idxd_desc);
out:
        return ret;
err:
@@ -1256,6 +1480,22 @@ static int iaa_decompress(struct crypto_tfm *tfm, struct acomp_req *req,
        desc->src1_size = slen;
        desc->completion_addr = idxd_desc->compl_dma;

        if (ctx->use_irq && !disable_async) {
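                /* as in iaa_compress(): request a completion irq and save callback context */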
                desc->flags |= IDXD_OP_FLAG_RCI;

                idxd_desc->crypto.req = req;
                idxd_desc->crypto.tfm = tfm;
                idxd_desc->crypto.src_addr = src_addr;
                idxd_desc->crypto.dst_addr = dst_addr;
                idxd_desc->crypto.compress = false;

                dev_dbg(dev, "%s: use_async_irq compression mode %s,"
                        " src_addr %llx, dst_addr %llx\n", __func__,
                        active_compression_mode->name,
                        src_addr, dst_addr);
        } else if (ctx->async_mode && !disable_async)
                req->base.data = idxd_desc;

        dev_dbg(dev, "%s: decompression mode %s,"
                " desc->src1_addr %llx, desc->src1_size %d,"
                " desc->dst_addr %llx, desc->max_dst_size %d,"
@@ -1270,6 +1510,12 @@ static int iaa_decompress(struct crypto_tfm *tfm, struct acomp_req *req,
                goto err;
        }

        if (ctx->async_mode && !disable_async) {
                ret = -EINPROGRESS;
                dev_dbg(dev, "%s: returning -EINPROGRESS\n", __func__);
                goto out;
        }

        ret = check_completion(dev, idxd_desc->iax_completion, false, false);
        if (ret) {
                dev_dbg(dev, "%s: check_completion failed ret=%d\n", __func__, ret);
@@ -1292,7 +1538,8 @@ static int iaa_decompress(struct crypto_tfm *tfm, struct acomp_req *req,

        *dlen = req->dlen;

-       idxd_free_desc(wq, idxd_desc);
+       if (!ctx->async_mode)
+               idxd_free_desc(wq, idxd_desc);
out:
        return ret;
err:
@@ -1601,6 +1848,8 @@ static int iaa_comp_adecompress(struct acomp_req *req)
static void compression_ctx_init(struct iaa_compression_ctx *ctx)
{
        ctx->verify_compress = iaa_verify_compress;
        ctx->async_mode = async_mode;
        ctx->use_irq = use_irq;
}

static int iaa_comp_init_fixed(struct crypto_acomp *acomp_tfm)
@@ -1809,6 +2058,7 @@ static struct idxd_device_driver iaa_crypto_driver = {
        .remove = iaa_crypto_remove,
        .name = IDXD_SUBDRIVER_NAME,
        .type = dev_types,
        .desc_complete = iaa_desc_complete,
};

static int __init iaa_crypto_init_module(void)
@@ -1847,10 +2097,20 @@ static int __init iaa_crypto_init_module(void)
                goto err_verify_attr_create;
        }

        ret = driver_create_file(&iaa_crypto_driver.drv,
                                 &driver_attr_sync_mode);
        if (ret) {
                pr_debug("IAA sync mode attr creation failed\n");
                goto err_sync_attr_create;
        }

        pr_debug("initialized\n");
out:
        return ret;

err_sync_attr_create:
        driver_remove_file(&iaa_crypto_driver.drv,
                           &driver_attr_verify_compress);
err_verify_attr_create:
        idxd_driver_unregister(&iaa_crypto_driver);
err_driver_reg:
@@ -1866,6 +2126,8 @@ static void __exit iaa_crypto_cleanup_module(void)
        if (iaa_unregister_compression_device())
                pr_debug("IAA compression device unregister failed\n");

        driver_remove_file(&iaa_crypto_driver.drv,
                           &driver_attr_sync_mode);
        driver_remove_file(&iaa_crypto_driver.drv,
                           &driver_attr_verify_compress);
        idxd_driver_unregister(&iaa_crypto_driver);
