#include <crypto/scatterwalk.h>
#include <linux/cryptouser.h>
#include <linux/cpumask.h>
-#include <linux/errno.h>
+#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/page-flags.h>
#include <linux/percpu.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
-#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/string.h>
alg->exit(acomp);
if (acomp_is_async(acomp))
- crypto_free_acomp(acomp->fb);
+ crypto_free_acomp(crypto_acomp_fb(acomp));
}
static int crypto_acomp_init_tfm(struct crypto_tfm *tfm)
struct crypto_acomp *fb = NULL;
int err;
- acomp->fb = acomp;
-
if (tfm->__crt_alg->cra_type != &crypto_acomp_type)
return crypto_init_scomp_ops_async(tfm);
if (crypto_acomp_reqsize(fb) > MAX_SYNC_COMP_REQSIZE)
goto out_free_fb;
- acomp->fb = fb;
+ tfm->fb = crypto_acomp_tfm(fb);
}
acomp->compress = alg->compress;
}
EXPORT_SYMBOL_GPL(acomp_walk_virt);
-struct acomp_req *acomp_request_clone(struct acomp_req *req,
- size_t total, gfp_t gfp)
-{
- struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
- struct acomp_req *nreq;
-
- nreq = kmalloc(total, gfp);
- if (!nreq) {
- acomp_request_set_tfm(req, tfm->fb);
- req->base.flags = CRYPTO_TFM_REQ_ON_STACK;
- return req;
- }
-
- memcpy(nreq, req, total);
- acomp_request_set_tfm(req, tfm);
- return req;
-}
-EXPORT_SYMBOL_GPL(acomp_request_clone);
-
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous compression type");
* Copyright (c) 2008 Loc Ho <lho@amcc.com>
*/
-#include <crypto/scatterwalk.h>
#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
-#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/string.h>
err = alg->setkey(tfm, key, keylen);
if (!err && ahash_is_async(tfm))
- err = crypto_ahash_setkey(tfm->fb, key, keylen);
+ err = crypto_ahash_setkey(crypto_ahash_fb(tfm),
+ key, keylen);
if (unlikely(err)) {
ahash_set_needkey(tfm, alg);
return err;
tfm->__crt_alg->cra_exit(tfm);
if (ahash_is_async(hash))
- crypto_free_ahash(hash->fb);
+ crypto_free_ahash(crypto_ahash_fb(hash));
}
static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
crypto_ahash_set_statesize(hash, alg->halg.statesize);
crypto_ahash_set_reqsize(hash, crypto_tfm_alg_reqsize(tfm));
- hash->fb = hash;
-
if (tfm->__crt_alg->cra_type == &crypto_shash_type)
return crypto_init_ahash_using_shash(tfm);
if (IS_ERR(fb))
return PTR_ERR(fb);
- hash->fb = fb;
+ tfm->fb = crypto_ahash_tfm(fb);
}
ahash_set_needkey(hash, alg);
int crypto_hash_digest(struct crypto_ahash *tfm, const u8 *data,
unsigned int len, u8 *out)
{
- HASH_REQUEST_ON_STACK(req, tfm->fb);
+ HASH_REQUEST_ON_STACK(req, crypto_ahash_fb(tfm));
int err;
ahash_request_set_callback(req, 0, NULL, NULL);
}
EXPORT_SYMBOL_GPL(crypto_hash_digest);
-struct ahash_request *ahash_request_clone(struct ahash_request *req,
- size_t total, gfp_t gfp)
-{
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct ahash_request *nreq;
-
- nreq = kmalloc(total, gfp);
- if (!nreq) {
- ahash_request_set_tfm(req, tfm->fb);
- req->base.flags = CRYPTO_TFM_REQ_ON_STACK;
- return req;
- }
-
- memcpy(nreq, req, total);
- ahash_request_set_tfm(req, tfm);
- return req;
-}
-EXPORT_SYMBOL_GPL(ahash_request_clone);
-
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");
goto out;
tfm = (struct crypto_tfm *)(mem + frontend->tfmsize);
+ tfm->fb = tfm;
err = frontend->init_tfm(tfm);
if (err)
}
EXPORT_SYMBOL_GPL(crypto_destroy_alg);
+/**
+ * crypto_request_clone - Clone an async crypto request, with OOM fallback
+ * @req: Request to clone (may be an on-stack request)
+ * @total: Total size in bytes to copy, including the request context
+ * @gfp: Allocation flags for the copy
+ *
+ * On success, returns a heap-allocated copy of @req with the
+ * CRYPTO_TFM_REQ_ON_STACK flag cleared (the copy is not on the stack).
+ *
+ * If the allocation fails, @req itself is returned, retargeted at the
+ * tfm's fallback (tfm->fb) so processing can still proceed
+ * synchronously on the original request.  Callers must therefore not
+ * assume the return value is a new object; compare it against @req.
+ */
+struct crypto_async_request *crypto_request_clone(
+ struct crypto_async_request *req, size_t total, gfp_t gfp)
+{
+ struct crypto_tfm *tfm = req->tfm;
+ struct crypto_async_request *nreq;
+
+ nreq = kmemdup(req, total, gfp);
+ if (!nreq) {
+ /* OOM: fall back to the synchronous fallback tfm in place. */
+ req->tfm = tfm->fb;
+ return req;
+ }
+
+ /* The copy lives on the heap, so it is no longer "on stack". */
+ nreq->flags &= ~CRYPTO_TFM_REQ_ON_STACK;
+ return nreq;
+}
+EXPORT_SYMBOL_GPL(crypto_request_clone);
+
+
MODULE_DESCRIPTION("Cryptographic core API");
MODULE_LICENSE("GPL");
int (*compress)(struct acomp_req *req);
int (*decompress)(struct acomp_req *req);
unsigned int reqsize;
- struct crypto_acomp *fb;
struct crypto_tfm base;
};
return req;
}
-struct acomp_req *acomp_request_clone(struct acomp_req *req,
- size_t total, gfp_t gfp);
+/*
+ * Type-safe acomp wrapper around crypto_request_clone().  On allocation
+ * failure the original @req is returned, switched to the fallback tfm;
+ * see crypto_request_clone() for the full contract.
+ */
+static inline struct acomp_req *acomp_request_clone(struct acomp_req *req,
+ size_t total, gfp_t gfp)
+{
+ return container_of(crypto_request_clone(&req->base, total, gfp),
+ struct acomp_req, base);
+}
#endif
bool using_shash; /* Underlying algorithm is shash, not ahash */
unsigned int statesize;
unsigned int reqsize;
- struct crypto_ahash *fb;
struct crypto_tfm base;
};
return req;
}
-struct ahash_request *ahash_request_clone(struct ahash_request *req,
- size_t total, gfp_t gfp);
+/*
+ * Type-safe ahash wrapper around crypto_request_clone().  On allocation
+ * failure the original @req is returned, switched to the fallback tfm;
+ * see crypto_request_clone() for the full contract.
+ */
+static inline struct ahash_request *ahash_request_clone(
+ struct ahash_request *req, size_t total, gfp_t gfp)
+{
+ return container_of(crypto_request_clone(&req->base, total, gfp),
+ struct ahash_request, base);
+}
#endif /* _CRYPTO_HASH_H */
return crypto_request_flags(&req->base) & ~CRYPTO_ACOMP_REQ_PRIVATE;
}
+/* Return the fallback tfm stored in the base tfm's fb field, cast to acomp. */
+static inline struct crypto_acomp *crypto_acomp_fb(struct crypto_acomp *tfm)
+{
+ return __crypto_acomp_tfm(crypto_acomp_tfm(tfm)->fb);
+}
+
static inline struct acomp_req *acomp_fbreq_on_stack_init(
char *buf, struct acomp_req *old)
{
struct crypto_acomp *tfm = crypto_acomp_reqtfm(old);
struct acomp_req *req = (void *)buf;
- acomp_request_set_tfm(req, tfm->fb);
+ acomp_request_set_tfm(req, crypto_acomp_fb(tfm));
req->base.flags = CRYPTO_TFM_REQ_ON_STACK;
acomp_request_set_callback(req, acomp_request_flags(old), NULL, NULL);
req->base.flags &= ~CRYPTO_ACOMP_REQ_PRIVATE;
return crypto_tfm_req_chain(&tfm->base);
}
+/* Return the fallback tfm stored in the base tfm's fb field, cast to ahash. */
+static inline struct crypto_ahash *crypto_ahash_fb(struct crypto_ahash *tfm)
+{
+ return __crypto_ahash_cast(crypto_ahash_tfm(tfm)->fb);
+}
+
static inline struct ahash_request *ahash_fbreq_on_stack_init(
char *buf, struct ahash_request *old)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(old);
struct ahash_request *req = (void *)buf;
- ahash_request_set_tfm(req, tfm->fb);
+ ahash_request_set_tfm(req, crypto_ahash_fb(tfm));
req->base.flags = CRYPTO_TFM_REQ_ON_STACK;
ahash_request_set_callback(req, ahash_request_flags(old), NULL, NULL);
req->base.flags &= ~CRYPTO_AHASH_REQ_PRIVATE;
#include <linux/completion.h>
#include <linux/errno.h>
-#include <linux/refcount.h>
+#include <linux/refcount_types.h>
#include <linux/slab.h>
#include <linux/types.h>
u32 crt_flags;
int node;
-
+
+ struct crypto_tfm *fb;
+
void (*exit)(struct crypto_tfm *tfm);
-
+
struct crypto_alg *__crt_alg;
void *__crt_ctx[] CRYPTO_MINALIGN_ATTR;
req->flags &= ~CRYPTO_TFM_REQ_ON_STACK;
}
+/*
+ * Clone @req (@total bytes); on allocation failure returns @req itself,
+ * retargeted at its tfm's fallback (tfm->fb).
+ */
+struct crypto_async_request *crypto_request_clone(
+ struct crypto_async_request *req, size_t total, gfp_t gfp);
+
+
#endif /* _LINUX_CRYPTO_H */