kernel_optimize_test/include/crypto/internal/skcipher.h
Herbert Xu e7cd2514ea [CRYPTO] chainiv: Avoid lock spinning where possible
This patch makes chainiv avoid spinning by postponing requests on lock
contention if the user allows the use of asynchronous algorithms.  If
a synchronous algorithm is requested, we behave as before.

This should improve IPsec performance on SMP when two CPUs attempt to
transmit over the same SA.  Currently one of them will spin, doing
nothing while it waits for the other CPU to finish its encryption.
This patch makes it postpone the request and get on with other work.

If only one CPU is transmitting for a given SA, the request is
processed synchronously as before.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
2008-01-11 08:16:55 +11:00
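
The mechanism can be sketched with the helpers this header provides.  The
fragment below is an illustration only, not the actual chainiv code:
struct chainiv_like_ctx, givencrypt_or_postpone() and do_givencrypt() are
hypothetical names, and whether the asynchronous path is used at all is
decided when the user instantiates the algorithm (a synchronous request
keeps the old behaviour).  On contention the request is parked on a
crypto_queue via skcipher_enqueue_givcrypt() instead of spinning:

/*
 * Minimal sketch of the postpone-on-contention pattern; hypothetical
 * context layout, not the real chainiv implementation.
 */
#include <crypto/internal/skcipher.h>
#include <linux/bitops.h>

struct chainiv_like_ctx {
	unsigned long state;		/* bit 0 set: some CPU is encrypting */
	struct crypto_queue queue;	/* postponed givcrypt requests */
	/* ... IV state would live here ... */
};

/* Hypothetical worker that actually generates the IV and encrypts. */
static int do_givencrypt(struct chainiv_like_ctx *ctx,
			 struct skcipher_givcrypt_request *req);

static int givencrypt_or_postpone(struct chainiv_like_ctx *ctx,
				  struct skcipher_givcrypt_request *req)
{
	/* Uncontended fast path: claim the context and encrypt now. */
	if (!test_and_set_bit(0, &ctx->state))
		return do_givencrypt(ctx, req);

	/*
	 * Contended: instead of spinning until the other CPU finishes,
	 * park the request.  The bit holder drains the queue before
	 * clearing the bit, so the request is not lost.
	 */
	return skcipher_enqueue_givcrypt(&ctx->queue, req);
}

A drain-side counterpart is sketched after the header below.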


/*
 * Symmetric key ciphers.
 *
 * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
#ifndef _CRYPTO_INTERNAL_SKCIPHER_H
#define _CRYPTO_INTERNAL_SKCIPHER_H

#include <crypto/algapi.h>
#include <crypto/skcipher.h>
#include <linux/types.h>

struct rtattr;

/* A template instance's reference to an underlying skcipher algorithm. */
struct crypto_skcipher_spawn {
	struct crypto_spawn base;
};

extern const struct crypto_type crypto_givcipher_type;

static inline void crypto_set_skcipher_spawn(
	struct crypto_skcipher_spawn *spawn, struct crypto_instance *inst)
{
	crypto_set_spawn(&spawn->base, inst);
}

int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, const char *name,
			 u32 type, u32 mask);

static inline void crypto_drop_skcipher(struct crypto_skcipher_spawn *spawn)
{
	crypto_drop_spawn(&spawn->base);
}

static inline struct crypto_alg *crypto_skcipher_spawn_alg(
	struct crypto_skcipher_spawn *spawn)
{
	return spawn->base.alg;
}

static inline struct crypto_ablkcipher *crypto_spawn_skcipher(
	struct crypto_skcipher_spawn *spawn)
{
	return __crypto_ablkcipher_cast(
		crypto_spawn_tfm(&spawn->base, crypto_skcipher_type(0),
				 crypto_skcipher_mask(0)));
}

int skcipher_null_givencrypt(struct skcipher_givcrypt_request *req);
int skcipher_null_givdecrypt(struct skcipher_givcrypt_request *req);

const char *crypto_default_geniv(const struct crypto_alg *alg);

struct crypto_instance *skcipher_geniv_alloc(struct crypto_template *tmpl,
					     struct rtattr **tb, u32 type,
					     u32 mask);
void skcipher_geniv_free(struct crypto_instance *inst);
int skcipher_geniv_init(struct crypto_tfm *tfm);
void skcipher_geniv_exit(struct crypto_tfm *tfm);

static inline struct crypto_ablkcipher *skcipher_geniv_cipher(
	struct crypto_ablkcipher *geniv)
{
	return crypto_ablkcipher_crt(geniv)->base;
}

/*
 * Queue helpers used to postpone givcrypt requests, e.g. on lock
 * contention, instead of spinning.
 */
static inline int skcipher_enqueue_givcrypt(
	struct crypto_queue *queue, struct skcipher_givcrypt_request *request)
{
	return ablkcipher_enqueue_request(queue, &request->creq);
}

static inline struct skcipher_givcrypt_request *skcipher_dequeue_givcrypt(
	struct crypto_queue *queue)
{
	return container_of(ablkcipher_dequeue_request(queue),
			    struct skcipher_givcrypt_request, creq);
}

static inline void *skcipher_givcrypt_reqctx(
	struct skcipher_givcrypt_request *req)
{
	return ablkcipher_request_ctx(&req->creq);
}

/* Invoke the caller's completion callback with the final status. */
static inline void ablkcipher_request_complete(struct ablkcipher_request *req,
					       int err)
{
	req->base.complete(&req->base, err);
}

static inline void skcipher_givcrypt_complete(
	struct skcipher_givcrypt_request *req, int err)
{
	ablkcipher_request_complete(&req->creq, err);
}

static inline u32 ablkcipher_request_flags(struct ablkcipher_request *req)
{
	return req->base.flags;
}

#endif	/* _CRYPTO_INTERNAL_SKCIPHER_H */
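
To complete the sketch started after the commit message: the CPU that owns
the bit eventually drains the queue and reports each postponed request's
status through skcipher_givcrypt_complete().  The names carry over from the
hypothetical fragment above, and the locking needed around the queue is
assumed to be handled by the caller:

/*
 * Illustrative drain side of the earlier sketch; not actual chainiv
 * code.  Assumes do_givencrypt() completes synchronously and that the
 * caller serialises access to ctx->queue.
 */
static void drain_postponed(struct chainiv_like_ctx *ctx)
{
	struct skcipher_givcrypt_request *req;
	int err;

	while (ctx->queue.qlen) {
		req = skcipher_dequeue_givcrypt(&ctx->queue);
		err = do_givencrypt(ctx, req);	/* hypothetical worker */

		/* Hand the final status back to the original caller. */
		skcipher_givcrypt_complete(req, err);
	}

	/* Release the context so the fast path can run again. */
	clear_bit(0, &ctx->state);
}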