crypto: api - permit users to specify numa node of acomp hardware

For a Linux server with NUMA, there are possibly multiple (de)compressors
which are either local or remote to some NUMA node. Some drivers will
automatically use the (de)compressor near the CPU calling acomp_alloc().
However, this is not necessarily correct because the users who send an
acomp_req could be on a different NUMA node than the CPU which allocates
the acomp.

Just like the kernel has kmalloc() and kmalloc_node(), crypto can have
the same kind of support.

Cc: Seth Jennings <sjenning@redhat.com>
Cc: Dan Streetman <ddstreet@ieee.org>
Cc: Vitaly Wool <vitaly.wool@konsulko.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Signed-off-by: Barry Song <song.bao.hua@hisilicon.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
This commit is contained in:
Barry Song 2020-07-05 21:18:58 +12:00 committed by Herbert Xu
parent 3347c8a079
commit 7bc13b5b60
5 changed files with 62 additions and 13 deletions

View File

@ -109,6 +109,14 @@ struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type,
} }
EXPORT_SYMBOL_GPL(crypto_alloc_acomp); EXPORT_SYMBOL_GPL(crypto_alloc_acomp);
/*
 * Allocate an acomp transform, asking the crypto core to place it on
 * (or near) the given NUMA node.  Identical to crypto_alloc_acomp()
 * except for the extra @node hint, which is forwarded unchanged to
 * crypto_alloc_tfm_node().
 */
struct crypto_acomp *crypto_alloc_acomp_node(const char *alg_name, u32 type,
					     u32 mask, int node)
{
	struct crypto_acomp *acomp;

	acomp = crypto_alloc_tfm_node(alg_name, &crypto_acomp_type, type,
				      mask, node);
	return acomp;
}
EXPORT_SYMBOL_GPL(crypto_alloc_acomp_node);
struct acomp_req *acomp_request_alloc(struct crypto_acomp *acomp) struct acomp_req *acomp_request_alloc(struct crypto_acomp *acomp)
{ {
struct crypto_tfm *tfm = crypto_acomp_tfm(acomp); struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);

View File

@ -433,8 +433,9 @@ struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask)
} }
EXPORT_SYMBOL_GPL(crypto_alloc_base); EXPORT_SYMBOL_GPL(crypto_alloc_base);
void *crypto_create_tfm(struct crypto_alg *alg, void *crypto_create_tfm_node(struct crypto_alg *alg,
const struct crypto_type *frontend) const struct crypto_type *frontend,
int node)
{ {
char *mem; char *mem;
struct crypto_tfm *tfm = NULL; struct crypto_tfm *tfm = NULL;
@ -445,12 +446,13 @@ void *crypto_create_tfm(struct crypto_alg *alg,
tfmsize = frontend->tfmsize; tfmsize = frontend->tfmsize;
total = tfmsize + sizeof(*tfm) + frontend->extsize(alg); total = tfmsize + sizeof(*tfm) + frontend->extsize(alg);
mem = kzalloc(total, GFP_KERNEL); mem = kzalloc_node(total, GFP_KERNEL, node);
if (mem == NULL) if (mem == NULL)
goto out_err; goto out_err;
tfm = (struct crypto_tfm *)(mem + tfmsize); tfm = (struct crypto_tfm *)(mem + tfmsize);
tfm->__crt_alg = alg; tfm->__crt_alg = alg;
tfm->node = node;
err = frontend->init_tfm(tfm); err = frontend->init_tfm(tfm);
if (err) if (err)
@ -472,7 +474,7 @@ void *crypto_create_tfm(struct crypto_alg *alg,
out: out:
return mem; return mem;
} }
EXPORT_SYMBOL_GPL(crypto_create_tfm); EXPORT_SYMBOL_GPL(crypto_create_tfm_node);
struct crypto_alg *crypto_find_alg(const char *alg_name, struct crypto_alg *crypto_find_alg(const char *alg_name,
const struct crypto_type *frontend, const struct crypto_type *frontend,
@ -490,11 +492,13 @@ struct crypto_alg *crypto_find_alg(const char *alg_name,
EXPORT_SYMBOL_GPL(crypto_find_alg); EXPORT_SYMBOL_GPL(crypto_find_alg);
/* /*
* crypto_alloc_tfm - Locate algorithm and allocate transform * crypto_alloc_tfm_node - Locate algorithm and allocate transform
* @alg_name: Name of algorithm * @alg_name: Name of algorithm
* @frontend: Frontend algorithm type * @frontend: Frontend algorithm type
* @type: Type of algorithm * @type: Type of algorithm
* @mask: Mask for type comparison * @mask: Mask for type comparison
* @node: NUMA node in which users desire to put requests, if node is
* NUMA_NO_NODE, it means users have no special requirement.
* *
* crypto_alloc_tfm() will first attempt to locate an already loaded * crypto_alloc_tfm() will first attempt to locate an already loaded
* algorithm. If that fails and the kernel supports dynamically loadable * algorithm. If that fails and the kernel supports dynamically loadable
@ -509,8 +513,10 @@ EXPORT_SYMBOL_GPL(crypto_find_alg);
* *
* In case of error the return value is an error pointer. * In case of error the return value is an error pointer.
*/ */
void *crypto_alloc_tfm(const char *alg_name,
const struct crypto_type *frontend, u32 type, u32 mask) void *crypto_alloc_tfm_node(const char *alg_name,
const struct crypto_type *frontend, u32 type, u32 mask,
int node)
{ {
void *tfm; void *tfm;
int err; int err;
@ -524,7 +530,7 @@ void *crypto_alloc_tfm(const char *alg_name,
goto err; goto err;
} }
tfm = crypto_create_tfm(alg, frontend); tfm = crypto_create_tfm_node(alg, frontend, node);
if (!IS_ERR(tfm)) if (!IS_ERR(tfm))
return tfm; return tfm;
@ -542,7 +548,7 @@ void *crypto_alloc_tfm(const char *alg_name,
return ERR_PTR(err); return ERR_PTR(err);
} }
EXPORT_SYMBOL_GPL(crypto_alloc_tfm); EXPORT_SYMBOL_GPL(crypto_alloc_tfm_node);
/* /*
* crypto_destroy_tfm - Free crypto transform * crypto_destroy_tfm - Free crypto transform

View File

@ -68,13 +68,28 @@ void crypto_remove_final(struct list_head *list);
void crypto_shoot_alg(struct crypto_alg *alg); void crypto_shoot_alg(struct crypto_alg *alg);
struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type, struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg, u32 type,
u32 mask); u32 mask);
void *crypto_create_tfm(struct crypto_alg *alg, void *crypto_create_tfm_node(struct crypto_alg *alg,
const struct crypto_type *frontend); const struct crypto_type *frontend, int node);
/*
 * crypto_create_tfm() - allocate a transform with no NUMA preference.
 *
 * Back-compat wrapper: delegates to crypto_create_tfm_node() with
 * NUMA_NO_NODE, so existing callers keep their old behavior.
 */
static inline void *crypto_create_tfm(struct crypto_alg *alg,
				      const struct crypto_type *frontend)
{
	return crypto_create_tfm_node(alg, frontend, NUMA_NO_NODE);
}
struct crypto_alg *crypto_find_alg(const char *alg_name, struct crypto_alg *crypto_find_alg(const char *alg_name,
const struct crypto_type *frontend, const struct crypto_type *frontend,
u32 type, u32 mask); u32 type, u32 mask);
void *crypto_alloc_tfm(const char *alg_name,
const struct crypto_type *frontend, u32 type, u32 mask); void *crypto_alloc_tfm_node(const char *alg_name,
const struct crypto_type *frontend, u32 type, u32 mask,
int node);
/*
 * crypto_alloc_tfm() - locate an algorithm and allocate a transform
 * with no NUMA preference.
 *
 * Back-compat wrapper: delegates to crypto_alloc_tfm_node() with
 * NUMA_NO_NODE, so existing callers keep their old behavior.
 */
static inline void *crypto_alloc_tfm(const char *alg_name,
				     const struct crypto_type *frontend,
				     u32 type, u32 mask)
{
	return crypto_alloc_tfm_node(alg_name, frontend, type, mask, NUMA_NO_NODE);
}
int crypto_probing_notify(unsigned long val, void *v); int crypto_probing_notify(unsigned long val, void *v);

View File

@ -106,6 +106,24 @@ struct acomp_alg {
*/ */
struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type, struct crypto_acomp *crypto_alloc_acomp(const char *alg_name, u32 type,
u32 mask); u32 mask);
/**
* crypto_alloc_acomp_node() -- allocate ACOMPRESS tfm handle with desired NUMA node
* @alg_name: is the cra_name / name or cra_driver_name / driver name of the
* compression algorithm e.g. "deflate"
* @type: specifies the type of the algorithm
* @mask: specifies the mask for the algorithm
* @node: specifies the NUMA node the ZIP hardware belongs to
*
* Allocate a handle for a compression algorithm. Drivers should try to use
* (de)compressors on the specified NUMA node.
* The returned struct crypto_acomp is the handle that is required for any
* subsequent API invocation for the compression operations.
*
* Return: allocated handle in case of success; IS_ERR() is true in case
* of an error, PTR_ERR() returns the error code.
*/
struct crypto_acomp *crypto_alloc_acomp_node(const char *alg_name, u32 type,
u32 mask, int node);
static inline struct crypto_tfm *crypto_acomp_tfm(struct crypto_acomp *tfm) static inline struct crypto_tfm *crypto_acomp_tfm(struct crypto_acomp *tfm)
{ {

View File

@ -594,6 +594,8 @@ int crypto_has_alg(const char *name, u32 type, u32 mask);
struct crypto_tfm { struct crypto_tfm {
u32 crt_flags; u32 crt_flags;
int node;
void (*exit)(struct crypto_tfm *tfm); void (*exit)(struct crypto_tfm *tfm);