{
	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);

	/* An on-stack request lives in the caller's stack frame and so
	 * cannot outlive the call for an asynchronous completion. */
	if (acomp_req_on_stack(req) && acomp_is_async(tfm))
		return -EAGAIN;
	/* Fix: propagate the driver's result.  The original discarded it
	 * and fell through to acomp_do_req_chain(), processing the
	 * request a second time. */
	if (crypto_acomp_req_chain(tfm) || acomp_request_issg(req))
		return tfm->compress(req);
	return acomp_do_req_chain(req, true);
{
	struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);

	/* On-stack requests must complete synchronously; refuse async
	 * algorithms up front. */
	if (acomp_req_on_stack(req) && acomp_is_async(tfm))
		return -EAGAIN;
	/* Fix: return the driver's result instead of discarding it and
	 * then also running the request through acomp_do_req_chain(). */
	if (crypto_acomp_req_chain(tfm) || acomp_request_issg(req))
		return tfm->decompress(req);
	return acomp_do_req_chain(req, false);
}
EXPORT_SYMBOL_GPL(acomp_walk_virt);
+struct acomp_req *acomp_request_clone(struct acomp_req *req,
+ size_t total, gfp_t gfp)
+{
+ struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
+ struct acomp_req *nreq;
+
+ nreq = kmalloc(total, gfp);
+ if (!nreq) {
+ acomp_request_set_tfm(req, tfm->fb);
+ req->base.flags = CRYPTO_TFM_REQ_ON_STACK;
+ return req;
+ }
+
+ memcpy(nreq, req, total);
+ acomp_request_set_tfm(req, tfm);
+ return req;
+}
+EXPORT_SYMBOL_GPL(acomp_request_clone);
+
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous compression type");
#define MAX_SYNC_COMP_REQSIZE 0
/* Declare an acomp request @name for @tfm: try a heap allocation with
 * @gfp, falling back to the embedded on-stack buffer __##name##_req on
 * failure (see acomp_request_alloc_init()).
 */
#define ACOMP_REQUEST_ALLOC(name, tfm, gfp) \
+ char __##name##_req[sizeof(struct acomp_req) + \
+ MAX_SYNC_COMP_REQSIZE] CRYPTO_MINALIGN_ATTR; \
+ struct acomp_req *name = acomp_request_alloc_init( \
+ __##name##_req, (tfm), (gfp))
+
+/* Declare an acomp request @name for @tfm entirely in the caller's
+ * stack frame; acomp_request_on_stack_init() flags it
+ * CRYPTO_TFM_REQ_ON_STACK, so it must complete synchronously.
+ */
+#define ACOMP_REQUEST_ON_STACK(name, tfm) \
char __##name##_req[sizeof(struct acomp_req) + \
MAX_SYNC_COMP_REQSIZE] CRYPTO_MINALIGN_ATTR; \
struct acomp_req *name = acomp_request_on_stack_init( \
-	__##name##_req, (tfm), (gfp), false)
+	__##name##_req, (tfm))
+
+/* Clone request @name into heap storage with @gfp.  @name must have
+ * been declared with an ACOMP_REQUEST_* macro so that
+ * sizeof(__##name##_req) yields the request's total size.
+ */
+#define ACOMP_REQUEST_CLONE(name, gfp) \
+ acomp_request_clone(name, sizeof(__##name##_req), gfp)
struct acomp_req;
struct folio;
*/
int crypto_acomp_decompress(struct acomp_req *req);
-static inline struct acomp_req *acomp_request_on_stack_init(
- char *buf, struct crypto_acomp *tfm, gfp_t gfp, bool stackonly)
+static inline struct acomp_req *acomp_request_alloc_init(
+ char *buf, struct crypto_acomp *tfm, gfp_t gfp)
{
struct acomp_req *req;
- if (!stackonly && (req = acomp_request_alloc(tfm, gfp)))
+ if ((req = acomp_request_alloc(tfm, gfp)))
return req;
req = (void *)buf;
return req;
}
+/* Initialise the caller-supplied buffer @buf as an on-stack request
+ * for @tfm.  CRYPTO_TFM_REQ_ON_STACK tells the core the request may
+ * not be completed asynchronously (the compress/decompress entry
+ * points return -EAGAIN for async tfms on such requests).
+ */
+static inline struct acomp_req *acomp_request_on_stack_init(
+ char *buf, struct crypto_acomp *tfm)
+{
+ struct acomp_req *req = (void *)buf;
+
+ acomp_request_set_tfm(req, tfm);
+ req->base.flags = CRYPTO_TFM_REQ_ON_STACK;
+ return req;
+}
+
+struct acomp_req *acomp_request_clone(struct acomp_req *req,
+ size_t total, gfp_t gfp);
+
#endif
#include <linux/spinlock.h>
#include <linux/workqueue_types.h>
-#define ACOMP_REQUEST_ON_STACK(name, tfm) \
- char __##name##_req[sizeof(struct acomp_req) + \
- MAX_SYNC_COMP_REQSIZE] CRYPTO_MINALIGN_ATTR; \
- struct acomp_req *name = acomp_request_on_stack_init( \
- __##name##_req, (tfm), 0, true)
-
#define ACOMP_FBREQ_ON_STACK(name, req) \
char __##name##_req[sizeof(struct acomp_req) + \
MAX_SYNC_COMP_REQSIZE] CRYPTO_MINALIGN_ATTR; \
char *buf, struct acomp_req *old)
{
struct crypto_acomp *tfm = crypto_acomp_reqtfm(old);
- struct acomp_req *req;
+ struct acomp_req *req = (void *)buf;
- req = acomp_request_on_stack_init(buf, tfm, 0, true);
+ acomp_request_set_tfm(req, tfm->fb);
+ req->base.flags = CRYPTO_TFM_REQ_ON_STACK;
acomp_request_set_callback(req, acomp_request_flags(old), NULL, NULL);
req->base.flags &= ~CRYPTO_ACOMP_REQ_PRIVATE;
req->base.flags |= old->base.flags & CRYPTO_ACOMP_REQ_PRIVATE;