source: src/linux/universal/linux-3.18/crypto/mcryptd.c @ 31869

/*
 * Software multibuffer async crypto daemon.
 *
 * Copyright (c) 2014 Tim Chen <tim.c.chen@linux.intel.com>
 *
 * Adapted from crypto daemon.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/mcryptd.h>
#include <crypto/crypto_wq.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/hardirq.h>

#define MCRYPTD_MAX_CPU_QLEN 100
#define MCRYPTD_BATCH 9

static void *mcryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
				    unsigned int tail);

struct mcryptd_flush_list {
	struct list_head list;
	struct mutex lock;
};

static struct mcryptd_flush_list __percpu *mcryptd_flist;

struct hashd_instance_ctx {
	struct crypto_shash_spawn spawn;
	struct mcryptd_queue *queue;
};

static void mcryptd_queue_worker(struct work_struct *work);

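/*
 * mcryptd_arm_flusher - schedule a delayed flush for a per-CPU algorithm state
 * @cstate: per-CPU state of the multi-buffer algorithm
 * @delay: delay in jiffies before the flusher should run
 *
 * If no flusher is already pending for @cstate, put it on this CPU's flush
 * list and queue delayed work so that partially filled multi-buffer lanes
 * are eventually completed even when no new requests arrive.
 */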
void mcryptd_arm_flusher(struct mcryptd_alg_cstate *cstate, unsigned long delay)
{
	struct mcryptd_flush_list *flist;

	if (!cstate->flusher_engaged) {
		/* put the flusher on the flush list */
		flist = per_cpu_ptr(mcryptd_flist, smp_processor_id());
		mutex_lock(&flist->lock);
		list_add_tail(&cstate->flush_list, &flist->list);
		cstate->flusher_engaged = true;
		cstate->next_flush = jiffies + delay;
		queue_delayed_work_on(smp_processor_id(), kcrypto_wq,
			&cstate->flush, delay);
		mutex_unlock(&flist->lock);
	}
}
EXPORT_SYMBOL(mcryptd_arm_flusher);

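/*
 * mcryptd_init_queue - allocate and initialize the per-CPU request queues
 *
 * Each possible CPU gets its own crypto_queue (bounded by @max_cpu_qlen)
 * plus a work item to drain it; keeping the queues per-CPU avoids
 * cross-CPU contention on the request path.
 */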
static int mcryptd_init_queue(struct mcryptd_queue *queue,
			      unsigned int max_cpu_qlen)
{
	int cpu;
	struct mcryptd_cpu_queue *cpu_queue;

	queue->cpu_queue = alloc_percpu(struct mcryptd_cpu_queue);
	pr_debug("mqueue:%p mcryptd_cpu_queue %p\n", queue, queue->cpu_queue);
	if (!queue->cpu_queue)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		pr_debug("cpu_queue #%d %p\n", cpu, queue->cpu_queue);
		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
		INIT_WORK(&cpu_queue->work, mcryptd_queue_worker);
	}
	return 0;
}

static void mcryptd_fini_queue(struct mcryptd_queue *queue)
{
	int cpu;
	struct mcryptd_cpu_queue *cpu_queue;

	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		BUG_ON(cpu_queue->queue.qlen);
	}
	free_percpu(queue->cpu_queue);
}

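/*
 * mcryptd_enqueue_request - queue a request on the current CPU's queue
 *
 * The submitting CPU is recorded in the request context tag and the
 * per-CPU worker is kicked on that same CPU, so a request is always
 * processed on the CPU where it was submitted.
 */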
static int mcryptd_enqueue_request(struct mcryptd_queue *queue,
				   struct crypto_async_request *request,
				   struct mcryptd_hash_request_ctx *rctx)
{
	int cpu, err;
	struct mcryptd_cpu_queue *cpu_queue;

	cpu = get_cpu();
	cpu_queue = this_cpu_ptr(queue->cpu_queue);
	rctx->tag.cpu = cpu;

	err = crypto_enqueue_request(&cpu_queue->queue, request);
	pr_debug("enqueue request: cpu %d cpu_queue %p request %p\n",
		 cpu, cpu_queue, request);
	queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
	put_cpu();

	return err;
}

/*
 * Try to opportunistically flush the partially completed jobs if
 * the crypto daemon is the only task running.
 */
static void mcryptd_opportunistic_flush(void)
{
	struct mcryptd_flush_list *flist;
	struct mcryptd_alg_cstate *cstate;

	flist = per_cpu_ptr(mcryptd_flist, smp_processor_id());
	while (single_task_running()) {
		mutex_lock(&flist->lock);
		if (list_empty(&flist->list)) {
			mutex_unlock(&flist->lock);
			return;
		}
		cstate = list_entry(flist->list.next,
				struct mcryptd_alg_cstate, flush_list);
		if (!cstate->flusher_engaged) {
			mutex_unlock(&flist->lock);
			return;
		}
		list_del(&cstate->flush_list);
		cstate->flusher_engaged = false;
		mutex_unlock(&flist->lock);
		cstate->alg_state->flusher(cstate);
	}
}

/*
 * Called in workqueue context: do one real unit of crypto work (via
 * req->complete) and reschedule itself if there is more work to do.
 */
static void mcryptd_queue_worker(struct work_struct *work)
{
	struct mcryptd_cpu_queue *cpu_queue;
	struct crypto_async_request *req, *backlog;
	int i;

	/*
	 * Need to loop through more than once for multi-buffer to
	 * be effective.
	 */

	cpu_queue = container_of(work, struct mcryptd_cpu_queue, work);
	i = 0;
	while (i < MCRYPTD_BATCH || single_task_running()) {
		/*
		 * preempt_disable/enable is used to prevent
		 * being preempted by mcryptd_enqueue_request()
		 */
		local_bh_disable();
		preempt_disable();
		backlog = crypto_get_backlog(&cpu_queue->queue);
		req = crypto_dequeue_request(&cpu_queue->queue);
		preempt_enable();
		local_bh_enable();

		if (!req) {
			mcryptd_opportunistic_flush();
			return;
		}

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);
		req->complete(req, 0);
		if (!cpu_queue->queue.qlen)
			return;
		++i;
	}
	if (cpu_queue->queue.qlen)
		queue_work(kcrypto_wq, &cpu_queue->work);
}

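/*
 * mcryptd_flusher - delayed-work handler that flushes a per-CPU state
 *
 * Runs from the delayed work armed by mcryptd_arm_flusher(); takes the
 * state off this CPU's flush list and invokes the algorithm's flusher
 * to complete any partially filled multi-buffer lanes.
 */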
void mcryptd_flusher(struct work_struct *__work)
{
	struct mcryptd_alg_cstate	*alg_cpu_state;
	struct mcryptd_alg_state	*alg_state;
	struct mcryptd_flush_list	*flist;
	int	cpu;

	cpu = smp_processor_id();
	alg_cpu_state = container_of(to_delayed_work(__work),
				     struct mcryptd_alg_cstate, flush);
	alg_state = alg_cpu_state->alg_state;
	if (alg_cpu_state->cpu != cpu)
		pr_debug("mcryptd error: work on cpu %d, should be cpu %d\n",
				cpu, alg_cpu_state->cpu);

	if (alg_cpu_state->flusher_engaged) {
		flist = per_cpu_ptr(mcryptd_flist, cpu);
		mutex_lock(&flist->lock);
		list_del(&alg_cpu_state->flush_list);
		alg_cpu_state->flusher_engaged = false;
		mutex_unlock(&flist->lock);
		alg_state->flusher(alg_cpu_state);
	}
}
EXPORT_SYMBOL_GPL(mcryptd_flusher);

static inline struct mcryptd_queue *mcryptd_get_queue(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct mcryptd_instance_ctx *ictx = crypto_instance_ctx(inst);

	return ictx->queue;
}

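/*
 * mcryptd_alloc_instance - allocate a crypto instance wrapping @alg
 * @head: bytes to reserve before the crypto_instance
 * @tail: bytes to reserve after it (per-type context)
 *
 * Names the instance "mcryptd(<driver>)" and gives it a priority bump
 * of 50 over the wrapped algorithm so the crypto API prefers it.
 */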
static void *mcryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
				    unsigned int tail)
{
	char *p;
	struct crypto_instance *inst;
	int err;

	p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	inst = (void *)(p + head);

	err = -ENAMETOOLONG;
	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "mcryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		goto out_free_inst;

	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

	inst->alg.cra_priority = alg->cra_priority + 50;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;

out:
	return p;

out_free_inst:
	kfree(p);
	p = ERR_PTR(err);
	goto out;
}

static int mcryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_shash_spawn *spawn = &ictx->spawn;
	struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_shash *hash;

	hash = crypto_spawn_shash(spawn);
	if (IS_ERR(hash))
		return PTR_ERR(hash);

	ctx->child = hash;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct mcryptd_hash_request_ctx) +
				 crypto_shash_descsize(hash));
	return 0;
}

static void mcryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
	struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(ctx->child);
}

static int mcryptd_hash_setkey(struct crypto_ahash *parent,
			       const u8 *key, unsigned int keylen)
{
	struct mcryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
	struct crypto_shash *child = ctx->child;
	int err;

	crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
				      CRYPTO_TFM_REQ_MASK);
	err = crypto_shash_setkey(child, key, keylen);
	crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) &
				       CRYPTO_TFM_RES_MASK);
	return err;
}

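/*
 * mcryptd_hash_enqueue - defer an ahash operation to the daemon
 *
 * Saves the caller's completion callback in the request context and
 * substitutes @complete, which will run the real shash operation from
 * the workqueue and then invoke the saved callback.
 */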
static int mcryptd_hash_enqueue(struct ahash_request *req,
				crypto_completion_t complete)
{
	int ret;

	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct mcryptd_queue *queue =
		mcryptd_get_queue(crypto_ahash_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = complete;

	ret = mcryptd_enqueue_request(queue, &req->base, rctx);

	return ret;
}

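/*
 * The mcryptd_hash_* workers below all run from the workqueue with the
 * submitter's callback saved in rctx->complete.  When the operation
 * fails (or, for init, completes) immediately, they restore that
 * callback and invoke it with bottom halves disabled; otherwise the
 * multi-buffer algorithm completes the request asynchronously once its
 * lanes are flushed.
 */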
static void mcryptd_hash_init(struct crypto_async_request *req_async, int err)
{
	struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_shash_init(desc);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int mcryptd_hash_init_enqueue(struct ahash_request *req)
{
	return mcryptd_hash_enqueue(req, mcryptd_hash_init);
}

static void mcryptd_hash_update(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_mcryptd_update(req, &rctx->desc);
	if (err) {
		req->base.complete = rctx->complete;
		goto out;
	}

	return;
out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int mcryptd_hash_update_enqueue(struct ahash_request *req)
{
	return mcryptd_hash_enqueue(req, mcryptd_hash_update);
}

static void mcryptd_hash_final(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_mcryptd_final(req, &rctx->desc);
	if (err) {
		req->base.complete = rctx->complete;
		goto out;
	}

	return;
out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int mcryptd_hash_final_enqueue(struct ahash_request *req)
{
	return mcryptd_hash_enqueue(req, mcryptd_hash_final);
}

static void mcryptd_hash_finup(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_mcryptd_finup(req, &rctx->desc);

	if (err) {
		req->base.complete = rctx->complete;
		goto out;
	}

	return;
out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int mcryptd_hash_finup_enqueue(struct ahash_request *req)
{
	return mcryptd_hash_enqueue(req, mcryptd_hash_finup);
}

static void mcryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
	struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;  /* check this again */

	err = shash_ahash_mcryptd_digest(req, desc);

	if (err) {
		req->base.complete = rctx->complete;
		goto out;
	}

	return;
out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int mcryptd_hash_digest_enqueue(struct ahash_request *req)
{
	return mcryptd_hash_enqueue(req, mcryptd_hash_digest);
}

static int mcryptd_hash_export(struct ahash_request *req, void *out)
{
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_shash_export(&rctx->desc, out);
}

static int mcryptd_hash_import(struct ahash_request *req, const void *in)
{
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_shash_import(&rctx->desc, in);
}

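/*
 * mcryptd_create_hash - instantiate "mcryptd(<shash>)" as an async hash
 *
 * Wraps the underlying shash in an ahash instance whose operations all
 * enqueue onto the per-CPU mcryptd queue; the instance is marked
 * CRYPTO_ALG_ASYNC since completion happens from the workqueue.
 */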
static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
			       struct mcryptd_queue *queue)
{
	struct hashd_instance_ctx *ctx;
	struct ahash_instance *inst;
	struct shash_alg *salg;
	struct crypto_alg *alg;
	int err;

	salg = shash_attr_alg(tb[1], 0, 0);
	if (IS_ERR(salg))
		return PTR_ERR(salg);

	alg = &salg->base;
	pr_debug("crypto: mcryptd hash alg: %s\n", alg->cra_name);
	inst = mcryptd_alloc_instance(alg, ahash_instance_headroom(),
				      sizeof(*ctx));
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = ahash_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_shash_spawn(&ctx->spawn, salg,
				      ahash_crypto_instance(inst));
	if (err)
		goto out_free_inst;

	inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC;

	inst->alg.halg.digestsize = salg->digestsize;
	inst->alg.halg.statesize = salg->statesize;
	inst->alg.halg.base.cra_ctxsize = sizeof(struct mcryptd_hash_ctx);

	inst->alg.halg.base.cra_init = mcryptd_hash_init_tfm;
	inst->alg.halg.base.cra_exit = mcryptd_hash_exit_tfm;

	inst->alg.init   = mcryptd_hash_init_enqueue;
	inst->alg.update = mcryptd_hash_update_enqueue;
	inst->alg.final  = mcryptd_hash_final_enqueue;
	inst->alg.finup  = mcryptd_hash_finup_enqueue;
	inst->alg.export = mcryptd_hash_export;
	inst->alg.import = mcryptd_hash_import;
	inst->alg.setkey = mcryptd_hash_setkey;
	inst->alg.digest = mcryptd_hash_digest_enqueue;

	err = ahash_register_instance(tmpl, inst);
	if (err) {
		crypto_drop_shash(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}

out_put_alg:
	crypto_mod_put(alg);
	return err;
}

static struct mcryptd_queue mqueue;

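/*
 * mcryptd_create - template instantiation hook
 *
 * Only digest (hash) algorithms are supported by the multi-buffer
 * daemon; anything else is rejected with -EINVAL.
 */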
static int mcryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_DIGEST:
		return mcryptd_create_hash(tmpl, tb, &mqueue);
	}

	return -EINVAL;
}

static void mcryptd_free(struct crypto_instance *inst)
{
	struct mcryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
	struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);

	switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AHASH:
		crypto_drop_shash(&hctx->spawn);
		kfree(ahash_instance(inst));
		return;
	default:
		crypto_drop_spawn(&ctx->spawn);
		kfree(inst);
	}
}

static struct crypto_template mcryptd_tmpl = {
	.name = "mcryptd",
	.create = mcryptd_create,
	.free = mcryptd_free,
	.module = THIS_MODULE,
};

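/*
 * mcryptd_alloc_ahash - allocate an mcryptd-wrapped ahash transform
 *
 * Builds the "mcryptd(<alg>)" name, allocates the ahash and verifies
 * that the resulting transform really belongs to this module before
 * handing it out.
 *
 * A minimal usage sketch (hypothetical caller; assumes a multi-buffer
 * driver named "__intel_sha1-mb" is registered, as the sha1_mb glue
 * code does):
 *
 *	struct mcryptd_ahash *mtfm;
 *
 *	mtfm = mcryptd_alloc_ahash("__intel_sha1-mb", 0, 0);
 *	if (IS_ERR(mtfm))
 *		return PTR_ERR(mtfm);
 *	...
 *	mcryptd_free_ahash(mtfm);
 */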
struct mcryptd_ahash *mcryptd_alloc_ahash(const char *alg_name,
					u32 type, u32 mask)
{
	char mcryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_ahash *tfm;

	if (snprintf(mcryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "mcryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_ahash(mcryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_ahash(tfm);
		return ERR_PTR(-EINVAL);
	}

	return __mcryptd_ahash_cast(tfm);
}
EXPORT_SYMBOL_GPL(mcryptd_alloc_ahash);

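/*
 * The shash_ahash_mcryptd_* helpers below drive the wrapped shash with
 * a NULL data pointer and zero length: the multi-buffer algorithm pulls
 * the real data from the ahash request itself, which is also why any
 * alignment handling is left to that algorithm.
 */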
int shash_ahash_mcryptd_digest(struct ahash_request *req,
			       struct shash_desc *desc)
{
	int err;

	err = crypto_shash_init(desc) ?:
	      shash_ahash_mcryptd_finup(req, desc);

	return err;
}
EXPORT_SYMBOL_GPL(shash_ahash_mcryptd_digest);

int shash_ahash_mcryptd_update(struct ahash_request *req,
			       struct shash_desc *desc)
{
	struct crypto_shash *tfm = desc->tfm;
	struct shash_alg *shash = crypto_shash_alg(tfm);

	/* alignment is to be done by multi-buffer crypto algorithm if needed */

	return shash->update(desc, NULL, 0);
}
EXPORT_SYMBOL_GPL(shash_ahash_mcryptd_update);

int shash_ahash_mcryptd_finup(struct ahash_request *req,
			      struct shash_desc *desc)
{
	struct crypto_shash *tfm = desc->tfm;
	struct shash_alg *shash = crypto_shash_alg(tfm);

	/* alignment is to be done by multi-buffer crypto algorithm if needed */

	return shash->finup(desc, NULL, 0, req->result);
}
EXPORT_SYMBOL_GPL(shash_ahash_mcryptd_finup);

int shash_ahash_mcryptd_final(struct ahash_request *req,
			      struct shash_desc *desc)
{
	struct crypto_shash *tfm = desc->tfm;
	struct shash_alg *shash = crypto_shash_alg(tfm);

	/* alignment is to be done by multi-buffer crypto algorithm if needed */

	return shash->final(desc, req->result);
}
EXPORT_SYMBOL_GPL(shash_ahash_mcryptd_final);

struct crypto_shash *mcryptd_ahash_child(struct mcryptd_ahash *tfm)
{
	struct mcryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(mcryptd_ahash_child);

struct shash_desc *mcryptd_shash_desc(struct ahash_request *req)
{
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return &rctx->desc;
}
EXPORT_SYMBOL_GPL(mcryptd_shash_desc);

void mcryptd_free_ahash(struct mcryptd_ahash *tfm)
{
	crypto_free_ahash(&tfm->base);
}
EXPORT_SYMBOL_GPL(mcryptd_free_ahash);

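/*
 * Module init: set up the per-CPU flush lists and request queues, then
 * register the "mcryptd" template with the crypto API.
 */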
static int __init mcryptd_init(void)
{
	int err, cpu;
	struct mcryptd_flush_list *flist;

	mcryptd_flist = alloc_percpu(struct mcryptd_flush_list);
	if (!mcryptd_flist)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		flist = per_cpu_ptr(mcryptd_flist, cpu);
		INIT_LIST_HEAD(&flist->list);
		mutex_init(&flist->lock);
	}

	err = mcryptd_init_queue(&mqueue, MCRYPTD_MAX_CPU_QLEN);
	if (err) {
		free_percpu(mcryptd_flist);
		return err;
	}

	err = crypto_register_template(&mcryptd_tmpl);
	if (err) {
		mcryptd_fini_queue(&mqueue);
		free_percpu(mcryptd_flist);
	}

	return err;
}

static void __exit mcryptd_exit(void)
{
	mcryptd_fini_queue(&mqueue);
	crypto_unregister_template(&mcryptd_tmpl);
	free_percpu(mcryptd_flist);
}

subsys_initcall(mcryptd_init);
module_exit(mcryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async multibuffer crypto daemon");
MODULE_ALIAS_CRYPTO("mcryptd");