crypto: scomp - Remove support for some non-trivial SG lists
As the only user of acomp/scomp uses a trivial single-page SG
list, remove support for everything else in preparation for the
addition of virtual address support.

However, keep support for non-trivial source SG lists as that
user is currently jumping through hoops in order to linearise
the source data.

Limit the source SG linearisation buffer to a single page as
that user never goes over that.  The only other potential user
(IPComp) is also unlikely to exceed that, and it can easily do
its own linearisation if necessary.

Also keep the destination SG linearisation for IPComp.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Herbert Xu committed Mar 21, 2025
1 parent 39a3f23 commit 2d3553e
Showing 4 changed files with 76 additions and 71 deletions.
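
For context, the "trivial single-page SG list" pattern the commit message
above refers to looks roughly like this from the caller's side.  This is a
minimal sketch, not code from the tree: compress_one_page() and its
parameters are invented for illustration, while the acomp calls themselves
(acomp_request_alloc(), acomp_request_set_params(), crypto_acomp_compress())
are the real API this series operates on.

#include <crypto/acompress.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>

/* Illustrative only: one source page in, one page-sized buffer out. */
static int compress_one_page(struct crypto_acomp *tfm, struct page *page,
                             void *dst_buf, unsigned int *dlen)
{
        struct scatterlist input, output;
        struct acomp_req *req;
        DECLARE_CRYPTO_WAIT(wait);
        int err;

        req = acomp_request_alloc(tfm);
        if (!req)
                return -ENOMEM;

        /* Both sides are single-entry SG lists covering at most one page. */
        sg_init_table(&input, 1);
        sg_set_page(&input, page, PAGE_SIZE, 0);
        sg_init_one(&output, dst_buf, PAGE_SIZE);

        acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                   crypto_req_done, &wait);
        acomp_request_set_params(req, &input, &output, PAGE_SIZE, PAGE_SIZE);

        err = crypto_wait_req(crypto_acomp_compress(req), &wait);
        if (!err)
                *dlen = req->dlen;

        acomp_request_free(req);
        return err;
}

Because both sides fit in a single SG entry, the scomp wrapper changed below
can map them directly instead of bouncing the data through its scratch
buffers.
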
1 change: 0 additions & 1 deletion crypto/acompress.c
@@ -73,7 +73,6 @@ static int crypto_acomp_init_tfm(struct crypto_tfm *tfm)

acomp->compress = alg->compress;
acomp->decompress = alg->decompress;
acomp->dst_free = alg->dst_free;
acomp->reqsize = alg->reqsize;

if (alg->exit)
127 changes: 73 additions & 54 deletions crypto/scompress.c
@@ -12,8 +12,10 @@
#include <crypto/scatterwalk.h>
#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/overflow.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
@@ -23,9 +25,14 @@

#include "compress.h"

#define SCOMP_SCRATCH_SIZE 65400

struct scomp_scratch {
spinlock_t lock;
void *src;
union {
void *src;
unsigned long saddr;
};
void *dst;
};

@@ -66,7 +73,7 @@ static void crypto_scomp_free_scratches(void)
for_each_possible_cpu(i) {
scratch = per_cpu_ptr(&scomp_scratch, i);

vfree(scratch->src);
free_page(scratch->saddr);
vfree(scratch->dst);
scratch->src = NULL;
scratch->dst = NULL;
@@ -79,14 +86,15 @@ static int crypto_scomp_alloc_scratches(void)
int i;

for_each_possible_cpu(i) {
struct page *page;
void *mem;

scratch = per_cpu_ptr(&scomp_scratch, i);

mem = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
if (!mem)
page = alloc_pages_node(cpu_to_node(i), GFP_KERNEL, 0);
if (!page)
goto error;
scratch->src = mem;
scratch->src = page_address(page);
mem = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
if (!mem)
goto error;
@@ -161,76 +169,88 @@ static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)

static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
{
struct scomp_scratch *scratch = raw_cpu_ptr(&scomp_scratch);
struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
void **tfm_ctx = acomp_tfm_ctx(tfm);
struct crypto_scomp **tfm_ctx = acomp_tfm_ctx(tfm);
struct crypto_scomp *scomp = *tfm_ctx;
struct crypto_acomp_stream *stream;
struct scomp_scratch *scratch;
unsigned int slen = req->slen;
unsigned int dlen = req->dlen;
struct page *spage, *dpage;
unsigned int soff, doff;
void *src, *dst;
unsigned int dlen;
unsigned int n;
int ret;

if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE)
if (!req->src || !slen)
return -EINVAL;

if (req->dst && !req->dlen)
if (!req->dst || !dlen)
return -EINVAL;

if (!req->dlen || req->dlen > SCOMP_SCRATCH_SIZE)
req->dlen = SCOMP_SCRATCH_SIZE;

dlen = req->dlen;
soff = req->src->offset;
spage = nth_page(sg_page(req->src), soff / PAGE_SIZE);
soff = offset_in_page(soff);

scratch = raw_cpu_ptr(&scomp_scratch);
spin_lock_bh(&scratch->lock);

if (sg_nents(req->src) == 1 && !PageHighMem(sg_page(req->src))) {
src = page_to_virt(sg_page(req->src)) + req->src->offset;
} else {
scatterwalk_map_and_copy(scratch->src, req->src, 0,
req->slen, 0);
n = slen / PAGE_SIZE;
n += (offset_in_page(slen) + soff - 1) / PAGE_SIZE;
if (slen <= req->src->length && (!PageHighMem(nth_page(spage, n)) ||
size_add(soff, slen) <= PAGE_SIZE))
src = kmap_local_page(spage) + soff;
else
src = scratch->src;
}

if (req->dst && sg_nents(req->dst) == 1 && !PageHighMem(sg_page(req->dst)))
dst = page_to_virt(sg_page(req->dst)) + req->dst->offset;
else
doff = req->dst->offset;
dpage = nth_page(sg_page(req->dst), doff / PAGE_SIZE);
doff = offset_in_page(doff);

n = dlen / PAGE_SIZE;
n += (offset_in_page(dlen) + doff - 1) / PAGE_SIZE;
if (dlen <= req->dst->length && (!PageHighMem(nth_page(dpage, n)) ||
size_add(doff, dlen) <= PAGE_SIZE))
dst = kmap_local_page(dpage) + doff;
else {
if (dlen > SCOMP_SCRATCH_SIZE)
dlen = SCOMP_SCRATCH_SIZE;
dst = scratch->dst;
}

spin_lock_bh(&scratch->lock);

if (src == scratch->src)
memcpy_from_sglist(src, req->src, 0, slen);

stream = raw_cpu_ptr(crypto_scomp_alg(scomp)->stream);
spin_lock(&stream->lock);
if (dir)
ret = crypto_scomp_compress(scomp, src, req->slen,
dst, &req->dlen, stream->ctx);
ret = crypto_scomp_compress(scomp, src, slen,
dst, &dlen, stream->ctx);
else
ret = crypto_scomp_decompress(scomp, src, req->slen,
dst, &req->dlen, stream->ctx);
ret = crypto_scomp_decompress(scomp, src, slen,
dst, &dlen, stream->ctx);

if (dst == scratch->dst)
memcpy_to_sglist(req->dst, 0, dst, dlen);

spin_unlock(&stream->lock);
if (!ret) {
if (!req->dst) {
req->dst = sgl_alloc(req->dlen, GFP_ATOMIC, NULL);
if (!req->dst) {
ret = -ENOMEM;
goto out;
}
} else if (req->dlen > dlen) {
ret = -ENOSPC;
goto out;
}
if (dst == scratch->dst) {
scatterwalk_map_and_copy(scratch->dst, req->dst, 0,
req->dlen, 1);
} else {
int nr_pages = DIV_ROUND_UP(req->dst->offset + req->dlen, PAGE_SIZE);
int i;
struct page *dst_page = sg_page(req->dst);

for (i = 0; i < nr_pages; i++)
flush_dcache_page(dst_page + i);
spin_unlock_bh(&scratch->lock);

req->dlen = dlen;

if (dst != scratch->dst) {
kunmap_local(dst);
dlen += doff;
for (;;) {
flush_dcache_page(dpage);
if (dlen <= PAGE_SIZE)
break;
dlen -= PAGE_SIZE;
dpage = nth_page(dpage, 1);
}
}
out:
spin_unlock_bh(&scratch->lock);
if (src != scratch->src)
kunmap_local(src);

return ret;
}

@@ -277,7 +297,6 @@ int crypto_init_scomp_ops_async(struct crypto_tfm *tfm)

crt->compress = scomp_acomp_compress;
crt->decompress = scomp_acomp_decompress;
crt->dst_free = sgl_free;

return 0;
}
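
The mapping decision added to scomp_acomp_comp_decomp() above can be read as
a small predicate.  The sketch below is not part of the patch:
scomp_span_is_mappable() is an invented name and the last-page index is
computed in the simplest equivalent form rather than with the exact
expression used in the hunk.  The check accepts a (page, offset, length)
span taken from a single SG entry when its last page is not highmem, or when
the whole span fits within its first page; anything else falls back to the
per-CPU scratch buffers.

#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>

/* Illustrative predicate, not from the patch. */
static bool scomp_span_is_mappable(struct scatterlist *sg, unsigned int len)
{
        unsigned int off = offset_in_page(sg->offset);
        struct page *page = nth_page(sg_page(sg), sg->offset / PAGE_SIZE);
        unsigned int last;

        if (!len || len > sg->length)
                return false;   /* empty, or spans more than one SG entry */

        /* Index, relative to the first page, of the last page touched. */
        last = (off + len - 1) / PAGE_SIZE;

        return !PageHighMem(nth_page(page, last)) ||
               off + len <= PAGE_SIZE;
}

When the predicate holds, the data is accessed through kmap_local_page() of
the first page plus the in-page offset; otherwise the source is linearised
into the single scratch page and the destination is written to the
SCOMP_SCRATCH_SIZE scratch buffer and copied out afterwards.
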
17 changes: 3 additions & 14 deletions include/crypto/acompress.h
@@ -18,8 +18,6 @@
#include <linux/spinlock_types.h>
#include <linux/types.h>

#define CRYPTO_ACOMP_ALLOC_OUTPUT 0x00000001

/* Set this bit if source is virtual address instead of SG list. */
#define CRYPTO_ACOMP_REQ_SRC_VIRT 0x00000002

@@ -84,15 +82,12 @@ struct acomp_req {
*
* @compress: Function performs a compress operation
* @decompress: Function performs a de-compress operation
* @dst_free: Frees destination buffer if allocated inside the
* algorithm
* @reqsize: Context size for (de)compression requests
* @base: Common crypto API algorithm data structure
*/
struct crypto_acomp {
int (*compress)(struct acomp_req *req);
int (*decompress)(struct acomp_req *req);
void (*dst_free)(struct scatterlist *dst);
unsigned int reqsize;
struct crypto_tfm base;
};
@@ -261,9 +256,8 @@ static inline void acomp_request_set_callback(struct acomp_req *req,
crypto_completion_t cmpl,
void *data)
{
u32 keep = CRYPTO_ACOMP_ALLOC_OUTPUT | CRYPTO_ACOMP_REQ_SRC_VIRT |
CRYPTO_ACOMP_REQ_SRC_NONDMA | CRYPTO_ACOMP_REQ_DST_VIRT |
CRYPTO_ACOMP_REQ_DST_NONDMA;
u32 keep = CRYPTO_ACOMP_REQ_SRC_VIRT | CRYPTO_ACOMP_REQ_SRC_NONDMA |
CRYPTO_ACOMP_REQ_DST_VIRT | CRYPTO_ACOMP_REQ_DST_NONDMA;

req->base.complete = cmpl;
req->base.data = data;
@@ -297,13 +291,10 @@ static inline void acomp_request_set_params(struct acomp_req *req,
req->slen = slen;
req->dlen = dlen;

req->base.flags &= ~(CRYPTO_ACOMP_ALLOC_OUTPUT |
CRYPTO_ACOMP_REQ_SRC_VIRT |
req->base.flags &= ~(CRYPTO_ACOMP_REQ_SRC_VIRT |
CRYPTO_ACOMP_REQ_SRC_NONDMA |
CRYPTO_ACOMP_REQ_DST_VIRT |
CRYPTO_ACOMP_REQ_DST_NONDMA);
if (!req->dst)
req->base.flags |= CRYPTO_ACOMP_ALLOC_OUTPUT;
}

/**
@@ -403,7 +394,6 @@ static inline void acomp_request_set_dst_dma(struct acomp_req *req,
req->dvirt = dst;
req->dlen = dlen;

req->base.flags &= ~CRYPTO_ACOMP_ALLOC_OUTPUT;
req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_NONDMA;
req->base.flags |= CRYPTO_ACOMP_REQ_DST_VIRT;
}
@@ -424,7 +414,6 @@ static inline void acomp_request_set_dst_nondma(struct acomp_req *req,
req->dvirt = dst;
req->dlen = dlen;

req->base.flags &= ~CRYPTO_ACOMP_ALLOC_OUTPUT;
req->base.flags |= CRYPTO_ACOMP_REQ_DST_NONDMA;
req->base.flags |= CRYPTO_ACOMP_REQ_DST_VIRT;
}
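
With CRYPTO_ACOMP_ALLOC_OUTPUT and ->dst_free gone, a NULL destination is no
longer a request for the core to sgl_alloc() the output; scomp now rejects
it with -EINVAL.  A minimal sketch of the caller-visible consequence, with
decompress_to_buffer() being an invented wrapper used purely for
illustration:

#include <crypto/acompress.h>
#include <linux/scatterlist.h>

/* Illustrative wrapper: the destination SG list is now mandatory. */
static int decompress_to_buffer(struct acomp_req *req,
                                struct scatterlist *src, unsigned int slen,
                                struct scatterlist *dst, unsigned int dlen)
{
        /*
         * Before this change, dst == NULL asked the core to allocate the
         * output SG list (and dst_free() released it).  Now the caller
         * must always supply a destination of known size.
         */
        if (!dst || !dlen)
                return -EINVAL;

        acomp_request_set_params(req, src, dst, slen, dlen);
        return crypto_acomp_decompress(req);
}
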
2 changes: 0 additions & 2 deletions include/crypto/internal/scompress.h
@@ -12,8 +12,6 @@
#include <crypto/acompress.h>
#include <crypto/algapi.h>

#define SCOMP_SCRATCH_SIZE 131072

struct acomp_req;

struct crypto_scomp {
