Replace <asm/uaccess.h> with <linux/uaccess.h> globally
[linux-block.git] / drivers / s390 / crypto / zcrypt_api.c
1 /*
2  *  zcrypt 2.1.0
3  *
4  *  Copyright IBM Corp. 2001, 2012
5  *  Author(s): Robert Burroughs
6  *             Eric Rossman (edrossma@us.ibm.com)
7  *             Cornelia Huck <cornelia.huck@de.ibm.com>
8  *
9  *  Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
10  *  Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
11  *                                Ralph Wuerthner <rwuerthn@de.ibm.com>
12  *  MSGTYPE restruct:             Holger Dengler <hd@linux.vnet.ibm.com>
13  *
14  * This program is free software; you can redistribute it and/or modify
15  * it under the terms of the GNU General Public License as published by
16  * the Free Software Foundation; either version 2, or (at your option)
17  * any later version.
18  *
19  * This program is distributed in the hope that it will be useful,
20  * but WITHOUT ANY WARRANTY; without even the implied warranty of
21  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22  * GNU General Public License for more details.
23  *
24  * You should have received a copy of the GNU General Public License
25  * along with this program; if not, write to the Free Software
26  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
27  */
28
29 #include <linux/module.h>
30 #include <linux/init.h>
31 #include <linux/interrupt.h>
32 #include <linux/miscdevice.h>
33 #include <linux/fs.h>
34 #include <linux/proc_fs.h>
35 #include <linux/seq_file.h>
36 #include <linux/compat.h>
37 #include <linux/slab.h>
38 #include <linux/atomic.h>
39 #include <linux/uaccess.h>
40 #include <linux/hw_random.h>
41 #include <linux/debugfs.h>
42 #include <asm/debug.h>
43
44 #define CREATE_TRACE_POINTS
45 #include <asm/trace/zcrypt.h>
46
47 #include "zcrypt_api.h"
48 #include "zcrypt_debug.h"
49
50 #include "zcrypt_msgtype6.h"
51 #include "zcrypt_msgtype50.h"
52
/*
 * Module description.
 */
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("Cryptographic Coprocessor interface, " \
		   "Copyright IBM Corp. 2001, 2012");
MODULE_LICENSE("GPL");

/*
 * zcrypt tracepoint functions
 */
EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_req);
EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_rep);

/* Module parameter: seed the kernel hwrng from the adapter (default on). */
static int zcrypt_hwrng_seed = 1;
module_param_named(hwrng_seed, zcrypt_hwrng_seed, int, S_IRUSR|S_IRGRP);
MODULE_PARM_DESC(hwrng_seed, "Turn on/off hwrng auto seed, default is 1 (on).");

/* Protects zcrypt_card_list and the per-card queue lists. */
DEFINE_SPINLOCK(zcrypt_list_lock);
LIST_HEAD(zcrypt_card_list);
int zcrypt_device_count;

/* Number of open file handles on the zcrypt misc device. */
static atomic_t zcrypt_open_count = ATOMIC_INIT(0);
/* Number of AP bus rescans triggered via zcrypt_process_rescan(). */
static atomic_t zcrypt_rescan_count = ATOMIC_INIT(0);

/* Set elsewhere to request an AP bus rescan before the next retry. */
atomic_t zcrypt_rescan_req = ATOMIC_INIT(0);
EXPORT_SYMBOL(zcrypt_rescan_req);

/* List of registered message type ops vectors (msgtype50, msgtype6). */
static LIST_HEAD(zcrypt_ops_list);

/* Zcrypt related debug feature stuff. */
static struct dentry *zcrypt_dbf_root;
debug_info_t *zcrypt_dbf_info;
86
87 /**
88  * Process a rescan of the transport layer.
89  *
90  * Returns 1, if the rescan has been processed, otherwise 0.
91  */
92 static inline int zcrypt_process_rescan(void)
93 {
94         if (atomic_read(&zcrypt_rescan_req)) {
95                 atomic_set(&zcrypt_rescan_req, 0);
96                 atomic_inc(&zcrypt_rescan_count);
97                 ap_bus_force_rescan();
98                 ZCRYPT_DBF(DBF_INFO, "rescan count=%07d",
99                            atomic_inc_return(&zcrypt_rescan_count));
100                 return 1;
101         }
102         return 0;
103 }
104
/* Register a message type ops vector with the zcrypt API. */
void zcrypt_msgtype_register(struct zcrypt_ops *zops)
{
	list_add_tail(&zops->list, &zcrypt_ops_list);
}
109
/* Remove a previously registered message type ops vector. */
void zcrypt_msgtype_unregister(struct zcrypt_ops *zops)
{
	list_del_init(&zops->list);
}
114
115 struct zcrypt_ops *zcrypt_msgtype(unsigned char *name, int variant)
116 {
117         struct zcrypt_ops *zops;
118
119         list_for_each_entry(zops, &zcrypt_ops_list, list)
120                 if ((zops->variant == variant) &&
121                     (!strncmp(zops->name, name, sizeof(zops->name))))
122                         return zops;
123         return NULL;
124 }
125 EXPORT_SYMBOL(zcrypt_msgtype);
126
/**
 * zcrypt_read (): Not supported beyond zcrypt 1.3.1.
 *
 * This function is not supported beyond zcrypt 1.3.1.
 * Always fails with -EPERM.
 */
static ssize_t zcrypt_read(struct file *filp, char __user *buf,
			   size_t count, loff_t *f_pos)
{
	return -EPERM;
}
137
/**
 * zcrypt_write(): Not allowed.
 *
 * Write is not allowed; always fails with -EPERM.
 */
static ssize_t zcrypt_write(struct file *filp, const char __user *buf,
			    size_t count, loff_t *f_pos)
{
	return -EPERM;
}
148
/**
 * zcrypt_open(): Count number of users.
 *
 * Device open function to count number of users.
 * nonseekable_open() marks the file descriptor non-seekable.
 */
static int zcrypt_open(struct inode *inode, struct file *filp)
{
	atomic_inc(&zcrypt_open_count);
	return nonseekable_open(inode, filp);
}
159
/**
 * zcrypt_release(): Count number of users.
 *
 * Device close function to count number of users.
 */
static int zcrypt_release(struct inode *inode, struct file *filp)
{
	atomic_dec(&zcrypt_open_count);
	return 0;
}
170
/*
 * Pin the selected queue and account the request's load on it.
 *
 * Takes a reference on the queue driver's module (bail out if the
 * driver is being unloaded), on the zcrypt queue object and on its ap
 * device, then adds @weight to the load counters of both card and
 * queue and bumps the queue's request counter.
 *
 * Caller must hold zcrypt_list_lock. Returns @zq on success or NULL
 * if @zq is NULL or the module reference could not be taken.
 */
static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc,
						     struct zcrypt_queue *zq,
						     unsigned int weight)
{
	if (!zq || !try_module_get(zq->queue->ap_dev.drv->driver.owner))
		return NULL;
	zcrypt_queue_get(zq);
	get_device(&zq->queue->ap_dev.device);
	atomic_add(weight, &zc->load);
	atomic_add(weight, &zq->load);
	zq->request_count++;
	return zq;
}
184
/*
 * Undo zcrypt_pick_queue(): remove @weight from the card and queue
 * load counters and drop the device, queue and module references.
 * The module owner is fetched before zcrypt_queue_put() since the
 * queue object may go away with the last reference.
 *
 * Caller must hold zcrypt_list_lock and pass the same @weight that
 * was used when the queue was picked.
 */
static inline void zcrypt_drop_queue(struct zcrypt_card *zc,
				     struct zcrypt_queue *zq,
				     unsigned int weight)
{
	struct module *mod = zq->queue->ap_dev.drv->driver.owner;

	zq->request_count--;
	atomic_sub(weight, &zc->load);
	atomic_sub(weight, &zq->load);
	put_device(&zq->queue->ap_dev.device);
	zcrypt_queue_put(zq);
	module_put(mod);
}
198
199 static inline bool zcrypt_card_compare(struct zcrypt_card *zc,
200                                        struct zcrypt_card *pref_zc,
201                                        unsigned weight, unsigned pref_weight)
202 {
203         if (!pref_zc)
204                 return 0;
205         weight += atomic_read(&zc->load);
206         pref_weight += atomic_read(&pref_zc->load);
207         if (weight == pref_weight)
208                 return atomic_read(&zc->card->total_request_count) >
209                         atomic_read(&pref_zc->card->total_request_count);
210         return weight > pref_weight;
211 }
212
213 static inline bool zcrypt_queue_compare(struct zcrypt_queue *zq,
214                                         struct zcrypt_queue *pref_zq,
215                                         unsigned weight, unsigned pref_weight)
216 {
217         if (!pref_zq)
218                 return 0;
219         weight += atomic_read(&zq->load);
220         pref_weight += atomic_read(&pref_zq->load);
221         if (weight == pref_weight)
222                 return &zq->queue->total_request_count >
223                         &pref_zq->queue->total_request_count;
224         return weight > pref_weight;
225 }
226
/*
 * zcrypt ioctls.
 */

/*
 * Handle an ICARSAMODEXPO request: pick the least loaded eligible
 * accelerator/CCA card+queue pair and run the RSA mod-expo on it.
 * Returns 0 on success, -EINVAL if outputdatalength is too small,
 * -ENODEV if no suitable device is online, or the ops' error code.
 */
static long zcrypt_rsa_modexpo(struct ica_rsa_modexpo *mex)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	unsigned int weight, pref_weight;
	unsigned int func_code;
	int qid = 0, rc = -ENODEV;

	trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO);

	if (mex->outputdatalength < mex->inputdatalength) {
		rc = -EINVAL;
		goto out;
	}

	/*
	 * As long as outputdatalength is big enough, we can set the
	 * outputdatalength equal to the inputdatalength, since that is the
	 * number of bytes we will copy in any case
	 */
	mex->outputdatalength = mex->inputdatalength;

	rc = get_rsa_modex_fc(mex, &func_code);
	if (rc)
		goto out;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for online accelerator and CCA cards */
		if (!zc->online || !(zc->card->functions & 0x18000000))
			continue;
		/* Check for size limits */
		if (zc->min_mod_size > mex->inputdatalength ||
		    zc->max_mod_size < mex->inputdatalength)
			continue;
		/* get weight index of the card device  */
		weight = zc->speed_rating[func_code];
		if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is online and eligible */
			if (!zq->online || !zq->ops->rsa_modexpo)
				continue;
			if (zcrypt_queue_compare(zq, pref_zq,
						 weight, pref_weight))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_weight = weight;
		}
	}
	/*
	 * NOTE(review): if no card passed the filters above, "weight" is
	 * still uninitialized here (zcrypt_pick_queue only reads it after
	 * its NULL check, but an indeterminate value is passed).  When a
	 * queue was picked, "weight" holds the value from the LAST card
	 * iterated, which may differ from pref_weight -- confirm this is
	 * the intended value for the load accounting.
	 */
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		rc = -ENODEV;
		goto out;
	}

	qid = pref_zq->queue->qid;
	rc = pref_zq->ops->rsa_modexpo(pref_zq, mex);

	spin_lock(&zcrypt_list_lock);
	/* drop with the same weight value that was used at pick time */
	zcrypt_drop_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

out:
	trace_s390_zcrypt_rep(mex, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return rc;
}
303
/*
 * Handle an ICARSACRT request: pick the least loaded eligible
 * accelerator/CCA card+queue pair and run the RSA-CRT operation on it.
 * Returns 0 on success, -EINVAL if outputdatalength is too small,
 * -ENODEV if no suitable device is online, or the ops' error code.
 */
static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	unsigned int weight, pref_weight;
	unsigned int func_code;
	int qid = 0, rc = -ENODEV;

	trace_s390_zcrypt_req(crt, TP_ICARSACRT);

	if (crt->outputdatalength < crt->inputdatalength) {
		rc = -EINVAL;
		goto out;
	}

	/*
	 * As long as outputdatalength is big enough, we can set the
	 * outputdatalength equal to the inputdatalength, since that is the
	 * number of bytes we will copy in any case
	 */
	crt->outputdatalength = crt->inputdatalength;

	rc = get_rsa_crt_fc(crt, &func_code);
	if (rc)
		goto out;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for online accelerator and CCA cards */
		if (!zc->online || !(zc->card->functions & 0x18000000))
			continue;
		/* Check for size limits */
		if (zc->min_mod_size > crt->inputdatalength ||
		    zc->max_mod_size < crt->inputdatalength)
			continue;
		/* get weight index of the card device  */
		weight = zc->speed_rating[func_code];
		if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is online and eligible */
			if (!zq->online || !zq->ops->rsa_modexpo_crt)
				continue;
			if (zcrypt_queue_compare(zq, pref_zq,
						 weight, pref_weight))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_weight = weight;
		}
	}
	/*
	 * NOTE(review): "weight" is uninitialized here if no card matched
	 * the filters above, and otherwise holds the value from the last
	 * iterated card rather than pref_weight -- confirm this is the
	 * intended value for the load accounting.
	 */
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		rc = -ENODEV;
		goto out;
	}

	qid = pref_zq->queue->qid;
	rc = pref_zq->ops->rsa_modexpo_crt(pref_zq, crt);

	spin_lock(&zcrypt_list_lock);
	/* drop with the same weight value that was used at pick time */
	zcrypt_drop_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

out:
	trace_s390_zcrypt_rep(crt, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return rc;
}
377
/*
 * Handle a ZSECSENDCPRB request: route the CPRB to an online CCA card,
 * honoring the user-selected card (xcRB->user_defined) and domain, or
 * auto-selecting both if AUTOSELECT is given. On auto-select the
 * actually chosen domain is written back through *domain.
 * Returns 0 on success, -ENODEV if no suitable device is online, or
 * the error from the fc lookup / send ops.
 */
static long zcrypt_send_cprb(struct ica_xcRB *xcRB)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	struct ap_message ap_msg;
	unsigned int weight, pref_weight;
	unsigned int func_code;
	unsigned short *domain;
	int qid = 0, rc = -ENODEV;

	trace_s390_zcrypt_req(xcRB, TB_ZSECSENDCPRB);

	rc = get_cprb_fc(xcRB, &ap_msg, &func_code, &domain);
	if (rc)
		goto out;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for online CCA cards */
		if (!zc->online || !(zc->card->functions & 0x10000000))
			continue;
		/* Check for user selected CCA card */
		if (xcRB->user_defined != AUTOSELECT &&
		    xcRB->user_defined != zc->card->id)
			continue;
		/* get weight index of the card device  */
		weight = speed_idx_cca(func_code) * zc->speed_rating[SECKEY];
		if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is online and eligible */
			if (!zq->online ||
			    !zq->ops->send_cprb ||
			    ((*domain != (unsigned short) AUTOSELECT) &&
			     (*domain != AP_QID_QUEUE(zq->queue->qid))))
				continue;
			if (zcrypt_queue_compare(zq, pref_zq,
						 weight, pref_weight))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_weight = weight;
		}
	}
	/* NOTE(review): "weight" is uninitialized if no card matched above;
	 * see the load accounting caveat in zcrypt_pick_queue(). */
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		rc = -ENODEV;
		goto out;
	}

	/* in case of auto select, provide the correct domain */
	qid = pref_zq->queue->qid;
	if (*domain == (unsigned short) AUTOSELECT)
		*domain = AP_QID_QUEUE(qid);

	rc = pref_zq->ops->send_cprb(pref_zq, xcRB, &ap_msg);

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

out:
	trace_s390_zcrypt_rep(xcRB, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return rc;
}
448
449 static bool is_desired_ep11_card(unsigned int dev_id,
450                                  unsigned short target_num,
451                                  struct ep11_target_dev *targets)
452 {
453         while (target_num-- > 0) {
454                 if (dev_id == targets->ap_id)
455                         return true;
456                 targets++;
457         }
458         return false;
459 }
460
461 static bool is_desired_ep11_queue(unsigned int dev_qid,
462                                   unsigned short target_num,
463                                   struct ep11_target_dev *targets)
464 {
465         while (target_num-- > 0) {
466                 if (AP_MKQID(targets->ap_id, targets->dom_id) == dev_qid)
467                         return true;
468                 targets++;
469         }
470         return false;
471 }
472
473 static long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb)
474 {
475         struct zcrypt_card *zc, *pref_zc;
476         struct zcrypt_queue *zq, *pref_zq;
477         struct ep11_target_dev *targets;
478         unsigned short target_num;
479         unsigned int weight, pref_weight;
480         unsigned int func_code;
481         struct ap_message ap_msg;
482         int qid = 0, rc = -ENODEV;
483
484         trace_s390_zcrypt_req(xcrb, TP_ZSENDEP11CPRB);
485
486         target_num = (unsigned short) xcrb->targets_num;
487
488         /* empty list indicates autoselect (all available targets) */
489         targets = NULL;
490         if (target_num != 0) {
491                 struct ep11_target_dev __user *uptr;
492
493                 targets = kcalloc(target_num, sizeof(*targets), GFP_KERNEL);
494                 if (!targets) {
495                         rc = -ENOMEM;
496                         goto out;
497                 }
498
499                 uptr = (struct ep11_target_dev __force __user *) xcrb->targets;
500                 if (copy_from_user(targets, uptr,
501                                    target_num * sizeof(*targets))) {
502                         rc = -EFAULT;
503                         goto out;
504                 }
505         }
506
507         rc = get_ep11cprb_fc(xcrb, &ap_msg, &func_code);
508         if (rc)
509                 goto out_free;
510
511         pref_zc = NULL;
512         pref_zq = NULL;
513         spin_lock(&zcrypt_list_lock);
514         for_each_zcrypt_card(zc) {
515                 /* Check for online EP11 cards */
516                 if (!zc->online || !(zc->card->functions & 0x04000000))
517                         continue;
518                 /* Check for user selected EP11 card */
519                 if (targets &&
520                     !is_desired_ep11_card(zc->card->id, target_num, targets))
521                         continue;
522                 /* get weight index of the card device  */
523                 weight = speed_idx_ep11(func_code) * zc->speed_rating[SECKEY];
524                 if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
525                         continue;
526                 for_each_zcrypt_queue(zq, zc) {
527                         /* check if device is online and eligible */
528                         if (!zq->online ||
529                             !zq->ops->send_ep11_cprb ||
530                             (targets &&
531                              !is_desired_ep11_queue(zq->queue->qid,
532                                                     target_num, targets)))
533                                 continue;
534                         if (zcrypt_queue_compare(zq, pref_zq,
535                                                  weight, pref_weight))
536                                 continue;
537                         pref_zc = zc;
538                         pref_zq = zq;
539                         pref_weight = weight;
540                 }
541         }
542         pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
543         spin_unlock(&zcrypt_list_lock);
544
545         if (!pref_zq) {
546                 rc = -ENODEV;
547                 goto out_free;
548         }
549
550         qid = pref_zq->queue->qid;
551         rc = pref_zq->ops->send_ep11_cprb(pref_zq, xcrb, &ap_msg);
552
553         spin_lock(&zcrypt_list_lock);
554         zcrypt_drop_queue(pref_zc, pref_zq, weight);
555         spin_unlock(&zcrypt_list_lock);
556
557 out_free:
558         kfree(targets);
559 out:
560         trace_s390_zcrypt_rep(xcrb, func_code, rc,
561                               AP_QID_CARD(qid), AP_QID_QUEUE(qid));
562         return rc;
563 }
564
565 static long zcrypt_rng(char *buffer)
566 {
567         struct zcrypt_card *zc, *pref_zc;
568         struct zcrypt_queue *zq, *pref_zq;
569         unsigned int weight, pref_weight;
570         unsigned int func_code;
571         struct ap_message ap_msg;
572         unsigned int domain;
573         int qid = 0, rc = -ENODEV;
574
575         trace_s390_zcrypt_req(buffer, TP_HWRNGCPRB);
576
577         rc = get_rng_fc(&ap_msg, &func_code, &domain);
578         if (rc)
579                 goto out;
580
581         pref_zc = NULL;
582         pref_zq = NULL;
583         spin_lock(&zcrypt_list_lock);
584         for_each_zcrypt_card(zc) {
585                 /* Check for online CCA cards */
586                 if (!zc->online || !(zc->card->functions & 0x10000000))
587                         continue;
588                 /* get weight index of the card device  */
589                 weight = zc->speed_rating[func_code];
590                 if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
591                         continue;
592                 for_each_zcrypt_queue(zq, zc) {
593                         /* check if device is online and eligible */
594                         if (!zq->online || !zq->ops->rng)
595                                 continue;
596                         if (zcrypt_queue_compare(zq, pref_zq,
597                                                  weight, pref_weight))
598                                 continue;
599                         pref_zc = zc;
600                         pref_zq = zq;
601                         pref_weight = weight;
602                 }
603         }
604         pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
605         spin_unlock(&zcrypt_list_lock);
606
607         if (!pref_zq)
608                 return -ENODEV;
609
610         qid = pref_zq->queue->qid;
611         rc = pref_zq->ops->rng(pref_zq, buffer, &ap_msg);
612
613         spin_lock(&zcrypt_list_lock);
614         zcrypt_drop_queue(pref_zc, pref_zq, weight);
615         spin_unlock(&zcrypt_list_lock);
616
617 out:
618         trace_s390_zcrypt_rep(buffer, func_code, rc,
619                               AP_QID_CARD(qid), AP_QID_QUEUE(qid));
620         return rc;
621 }
622
623 static void zcrypt_device_status_mask(struct zcrypt_device_matrix *matrix)
624 {
625         struct zcrypt_card *zc;
626         struct zcrypt_queue *zq;
627         struct zcrypt_device_status *stat;
628
629         memset(matrix, 0, sizeof(*matrix));
630         spin_lock(&zcrypt_list_lock);
631         for_each_zcrypt_card(zc) {
632                 for_each_zcrypt_queue(zq, zc) {
633                         stat = matrix->device;
634                         stat += AP_QID_CARD(zq->queue->qid) * MAX_ZDEV_DOMAINS;
635                         stat += AP_QID_QUEUE(zq->queue->qid);
636                         stat->hwtype = zc->card->ap_dev.device_type;
637                         stat->functions = zc->card->functions >> 26;
638                         stat->qid = zq->queue->qid;
639                         stat->online = zq->online ? 0x01 : 0x00;
640                 }
641         }
642         spin_unlock(&zcrypt_list_lock);
643 }
644 EXPORT_SYMBOL(zcrypt_device_status_mask);
645
646 static void zcrypt_status_mask(char status[AP_DEVICES])
647 {
648         struct zcrypt_card *zc;
649         struct zcrypt_queue *zq;
650
651         memset(status, 0, sizeof(char) * AP_DEVICES);
652         spin_lock(&zcrypt_list_lock);
653         for_each_zcrypt_card(zc) {
654                 for_each_zcrypt_queue(zq, zc) {
655                         if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
656                                 continue;
657                         status[AP_QID_CARD(zq->queue->qid)] =
658                                 zc->online ? zc->user_space_type : 0x0d;
659                 }
660         }
661         spin_unlock(&zcrypt_list_lock);
662 }
663
664 static void zcrypt_qdepth_mask(char qdepth[AP_DEVICES])
665 {
666         struct zcrypt_card *zc;
667         struct zcrypt_queue *zq;
668
669         memset(qdepth, 0, sizeof(char)  * AP_DEVICES);
670         spin_lock(&zcrypt_list_lock);
671         for_each_zcrypt_card(zc) {
672                 for_each_zcrypt_queue(zq, zc) {
673                         if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
674                                 continue;
675                         spin_lock(&zq->queue->lock);
676                         qdepth[AP_QID_CARD(zq->queue->qid)] =
677                                 zq->queue->pendingq_count +
678                                 zq->queue->requestq_count;
679                         spin_unlock(&zq->queue->lock);
680                 }
681         }
682         spin_unlock(&zcrypt_list_lock);
683 }
684
685 static void zcrypt_perdev_reqcnt(int reqcnt[AP_DEVICES])
686 {
687         struct zcrypt_card *zc;
688         struct zcrypt_queue *zq;
689
690         memset(reqcnt, 0, sizeof(int) * AP_DEVICES);
691         spin_lock(&zcrypt_list_lock);
692         for_each_zcrypt_card(zc) {
693                 for_each_zcrypt_queue(zq, zc) {
694                         if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
695                                 continue;
696                         spin_lock(&zq->queue->lock);
697                         reqcnt[AP_QID_CARD(zq->queue->qid)] =
698                                 zq->queue->total_request_count;
699                         spin_unlock(&zq->queue->lock);
700                 }
701         }
702         spin_unlock(&zcrypt_list_lock);
703 }
704
705 static int zcrypt_pendingq_count(void)
706 {
707         struct zcrypt_card *zc;
708         struct zcrypt_queue *zq;
709         int pendingq_count;
710
711         pendingq_count = 0;
712         spin_lock(&zcrypt_list_lock);
713         for_each_zcrypt_card(zc) {
714                 for_each_zcrypt_queue(zq, zc) {
715                         if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
716                                 continue;
717                         spin_lock(&zq->queue->lock);
718                         pendingq_count += zq->queue->pendingq_count;
719                         spin_unlock(&zq->queue->lock);
720                 }
721         }
722         spin_unlock(&zcrypt_list_lock);
723         return pendingq_count;
724 }
725
726 static int zcrypt_requestq_count(void)
727 {
728         struct zcrypt_card *zc;
729         struct zcrypt_queue *zq;
730         int requestq_count;
731
732         requestq_count = 0;
733         spin_lock(&zcrypt_list_lock);
734         for_each_zcrypt_card(zc) {
735                 for_each_zcrypt_queue(zq, zc) {
736                         if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
737                                 continue;
738                         spin_lock(&zq->queue->lock);
739                         requestq_count += zq->queue->requestq_count;
740                         spin_unlock(&zq->queue->lock);
741                 }
742         }
743         spin_unlock(&zcrypt_list_lock);
744         return requestq_count;
745 }
746
/*
 * Count the queues in the default domain belonging to cards of the
 * given @type. Used only by the deprecated combi status ioctl.
 *
 * NOTE(review): this compares zc->card->id (the AP adapter number)
 * against @type, which callers pass as a ZCRYPT_* device type
 * constant -- presumably zc->user_space_type was intended; confirm
 * against the zcrypt_card definition and the ica status callers.
 */
static int zcrypt_count_type(int type)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int device_count;

	device_count = 0;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		if (zc->card->id != type)
			continue;
		for_each_zcrypt_queue(zq, zc) {
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
				continue;
			device_count++;
		}
	}
	spin_unlock(&zcrypt_list_lock);
	return device_count;
}
767
/**
 * zcrypt_ica_status(): Old, deprecated combi status call.
 *
 * Old, deprecated combi status call. Fills an ica_z90_status snapshot
 * (device counts, queue depths, per-card status) and copies it to the
 * user buffer at @arg. Returns 0, -ENOMEM or -EFAULT.
 */
static long zcrypt_ica_status(struct file *filp, unsigned long arg)
{
	struct ica_z90_status *pstat;
	int ret;

	pstat = kzalloc(sizeof(*pstat), GFP_KERNEL);
	if (!pstat)
		return -ENOMEM;
	pstat->totalcount = zcrypt_device_count;
	pstat->leedslitecount = zcrypt_count_type(ZCRYPT_PCICA);
	pstat->leeds2count = zcrypt_count_type(ZCRYPT_PCICC);
	pstat->requestqWaitCount = zcrypt_requestq_count();
	pstat->pendingqWaitCount = zcrypt_pendingq_count();
	pstat->totalOpenCount = atomic_read(&zcrypt_open_count);
	pstat->cryptoDomain = ap_domain_index;
	zcrypt_status_mask(pstat->status);
	zcrypt_qdepth_mask(pstat->qdepth);
	ret = 0;
	if (copy_to_user((void __user *) arg, pstat, sizeof(*pstat)))
		ret = -EFAULT;
	kfree(pstat);
	return ret;
}
796
/*
 * zcrypt_unlocked_ioctl(): ioctl entry point of the z90crypt misc device.
 *
 * Crypto request ioctls (ICARSAMODEXPO, ICARSACRT, ZSECSENDCPRB,
 * ZSENDEP11CPRB) copy the request from user space, hand it to the
 * backend and retry while the queues report -EAGAIN; after an -ENODEV
 * one rescan of the AP bus is attempted before giving up. The remaining
 * ioctls copy status counters or masks to user space.
 * Returns 0 or a negative errno.
 */
static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
                                  unsigned long arg)
{
        int rc;

        switch (cmd) {
        case ICARSAMODEXPO: {
                struct ica_rsa_modexpo __user *umex = (void __user *) arg;
                struct ica_rsa_modexpo mex;
                if (copy_from_user(&mex, umex, sizeof(mex)))
                        return -EFAULT;
                /* retry while the device queues are busy */
                do {
                        rc = zcrypt_rsa_modexpo(&mex);
                } while (rc == -EAGAIN);
                /* on failure: retry once again after a requested rescan */
                if ((rc == -ENODEV) && (zcrypt_process_rescan()))
                        do {
                                rc = zcrypt_rsa_modexpo(&mex);
                        } while (rc == -EAGAIN);
                if (rc)
                        return rc;
                /* report the actual output length back to the caller */
                return put_user(mex.outputdatalength, &umex->outputdatalength);
        }
        case ICARSACRT: {
                struct ica_rsa_modexpo_crt __user *ucrt = (void __user *) arg;
                struct ica_rsa_modexpo_crt crt;
                if (copy_from_user(&crt, ucrt, sizeof(crt)))
                        return -EFAULT;
                do {
                        rc = zcrypt_rsa_crt(&crt);
                } while (rc == -EAGAIN);
                /* on failure: retry once again after a requested rescan */
                if ((rc == -ENODEV) && (zcrypt_process_rescan()))
                        do {
                                rc = zcrypt_rsa_crt(&crt);
                        } while (rc == -EAGAIN);
                if (rc)
                        return rc;
                return put_user(crt.outputdatalength, &ucrt->outputdatalength);
        }
        case ZSECSENDCPRB: {
                struct ica_xcRB __user *uxcRB = (void __user *) arg;
                struct ica_xcRB xcRB;
                if (copy_from_user(&xcRB, uxcRB, sizeof(xcRB)))
                        return -EFAULT;
                do {
                        rc = zcrypt_send_cprb(&xcRB);
                } while (rc == -EAGAIN);
                /* on failure: retry once again after a requested rescan */
                if ((rc == -ENODEV) && (zcrypt_process_rescan()))
                        do {
                                rc = zcrypt_send_cprb(&xcRB);
                        } while (rc == -EAGAIN);
                /* xcRB is copied back even on error (reply lengths, status) */
                if (copy_to_user(uxcRB, &xcRB, sizeof(xcRB)))
                        return -EFAULT;
                return rc;
        }
        case ZSENDEP11CPRB: {
                struct ep11_urb __user *uxcrb = (void __user *)arg;
                struct ep11_urb xcrb;
                if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb)))
                        return -EFAULT;
                do {
                        rc = zcrypt_send_ep11_cprb(&xcrb);
                } while (rc == -EAGAIN);
                /* on failure: retry once again after a requested rescan */
                if ((rc == -ENODEV) && (zcrypt_process_rescan()))
                        do {
                                rc = zcrypt_send_ep11_cprb(&xcrb);
                        } while (rc == -EAGAIN);
                /* urb is copied back even on error */
                if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb)))
                        return -EFAULT;
                return rc;
        }
        case ZDEVICESTATUS: {
                struct zcrypt_device_matrix *device_status;

                /* fill a kernel copy of the matrix, then hand it out */
                device_status = kzalloc(sizeof(struct zcrypt_device_matrix),
                                        GFP_KERNEL);
                if (!device_status)
                        return -ENOMEM;

                zcrypt_device_status_mask(device_status);

                if (copy_to_user((char __user *) arg, device_status,
                                 sizeof(struct zcrypt_device_matrix))) {
                        kfree(device_status);
                        return -EFAULT;
                }

                kfree(device_status);
                return 0;
        }
        case Z90STAT_STATUS_MASK: {
                char status[AP_DEVICES];
                zcrypt_status_mask(status);
                if (copy_to_user((char __user *) arg, status,
                                 sizeof(char) * AP_DEVICES))
                        return -EFAULT;
                return 0;
        }
        case Z90STAT_QDEPTH_MASK: {
                char qdepth[AP_DEVICES];
                zcrypt_qdepth_mask(qdepth);
                if (copy_to_user((char __user *) arg, qdepth,
                                 sizeof(char) * AP_DEVICES))
                        return -EFAULT;
                return 0;
        }
        case Z90STAT_PERDEV_REQCNT: {
                int reqcnt[AP_DEVICES];
                zcrypt_perdev_reqcnt(reqcnt);
                if (copy_to_user((int __user *) arg, reqcnt,
                                 sizeof(int) * AP_DEVICES))
                        return -EFAULT;
                return 0;
        }
        case Z90STAT_REQUESTQ_COUNT:
                return put_user(zcrypt_requestq_count(), (int __user *) arg);
        case Z90STAT_PENDINGQ_COUNT:
                return put_user(zcrypt_pendingq_count(), (int __user *) arg);
        case Z90STAT_TOTALOPEN_COUNT:
                return put_user(atomic_read(&zcrypt_open_count),
                                (int __user *) arg);
        case Z90STAT_DOMAIN_INDEX:
                return put_user(ap_domain_index, (int __user *) arg);
        /*
         * Deprecated ioctls. Don't add another device count ioctl,
         * you can count them yourself in the user space with the
         * output of the Z90STAT_STATUS_MASK ioctl.
         */
        case ICAZ90STATUS:
                return zcrypt_ica_status(filp, arg);
        case Z90STAT_TOTALCOUNT:
                return put_user(zcrypt_device_count, (int __user *) arg);
        case Z90STAT_PCICACOUNT:
                return put_user(zcrypt_count_type(ZCRYPT_PCICA),
                                (int __user *) arg);
        case Z90STAT_PCICCCOUNT:
                return put_user(zcrypt_count_type(ZCRYPT_PCICC),
                                (int __user *) arg);
        case Z90STAT_PCIXCCMCL2COUNT:
                return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL2),
                                (int __user *) arg);
        case Z90STAT_PCIXCCMCL3COUNT:
                return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL3),
                                (int __user *) arg);
        case Z90STAT_PCIXCCCOUNT:
                return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL2) +
                                zcrypt_count_type(ZCRYPT_PCIXCC_MCL3),
                                (int __user *) arg);
        case Z90STAT_CEX2CCOUNT:
                return put_user(zcrypt_count_type(ZCRYPT_CEX2C),
                                (int __user *) arg);
        case Z90STAT_CEX2ACOUNT:
                return put_user(zcrypt_count_type(ZCRYPT_CEX2A),
                                (int __user *) arg);
        default:
                /* unknown ioctl number */
                return -ENOIOCTLCMD;
        }
}
959
960 #ifdef CONFIG_COMPAT
961 /*
962  * ioctl32 conversion routines
963  */
/*
 * 32-bit layout of struct ica_rsa_modexpo: user-space pointers are
 * carried as compat_uptr_t so a 32-bit process can issue ICARSAMODEXPO
 * against a 64-bit kernel. Field order mirrors the native struct.
 */
struct compat_ica_rsa_modexpo {
        compat_uptr_t   inputdata;
        unsigned int    inputdatalength;
        compat_uptr_t   outputdata;
        unsigned int    outputdatalength;
        compat_uptr_t   b_key;
        compat_uptr_t   n_modulus;
};
972
973 static long trans_modexpo32(struct file *filp, unsigned int cmd,
974                             unsigned long arg)
975 {
976         struct compat_ica_rsa_modexpo __user *umex32 = compat_ptr(arg);
977         struct compat_ica_rsa_modexpo mex32;
978         struct ica_rsa_modexpo mex64;
979         long rc;
980
981         if (copy_from_user(&mex32, umex32, sizeof(mex32)))
982                 return -EFAULT;
983         mex64.inputdata = compat_ptr(mex32.inputdata);
984         mex64.inputdatalength = mex32.inputdatalength;
985         mex64.outputdata = compat_ptr(mex32.outputdata);
986         mex64.outputdatalength = mex32.outputdatalength;
987         mex64.b_key = compat_ptr(mex32.b_key);
988         mex64.n_modulus = compat_ptr(mex32.n_modulus);
989         do {
990                 rc = zcrypt_rsa_modexpo(&mex64);
991         } while (rc == -EAGAIN);
992         /* on failure: retry once again after a requested rescan */
993         if ((rc == -ENODEV) && (zcrypt_process_rescan()))
994                 do {
995                         rc = zcrypt_rsa_modexpo(&mex64);
996                 } while (rc == -EAGAIN);
997         if (rc)
998                 return rc;
999         return put_user(mex64.outputdatalength,
1000                         &umex32->outputdatalength);
1001 }
1002
/*
 * 32-bit layout of struct ica_rsa_modexpo_crt (RSA in CRT form):
 * user-space pointers are carried as compat_uptr_t; field order
 * mirrors the native struct.
 */
struct compat_ica_rsa_modexpo_crt {
        compat_uptr_t   inputdata;
        unsigned int    inputdatalength;
        compat_uptr_t   outputdata;
        unsigned int    outputdatalength;
        compat_uptr_t   bp_key;
        compat_uptr_t   bq_key;
        compat_uptr_t   np_prime;
        compat_uptr_t   nq_prime;
        compat_uptr_t   u_mult_inv;
};
1014
1015 static long trans_modexpo_crt32(struct file *filp, unsigned int cmd,
1016                                 unsigned long arg)
1017 {
1018         struct compat_ica_rsa_modexpo_crt __user *ucrt32 = compat_ptr(arg);
1019         struct compat_ica_rsa_modexpo_crt crt32;
1020         struct ica_rsa_modexpo_crt crt64;
1021         long rc;
1022
1023         if (copy_from_user(&crt32, ucrt32, sizeof(crt32)))
1024                 return -EFAULT;
1025         crt64.inputdata = compat_ptr(crt32.inputdata);
1026         crt64.inputdatalength = crt32.inputdatalength;
1027         crt64.outputdata=  compat_ptr(crt32.outputdata);
1028         crt64.outputdatalength = crt32.outputdatalength;
1029         crt64.bp_key = compat_ptr(crt32.bp_key);
1030         crt64.bq_key = compat_ptr(crt32.bq_key);
1031         crt64.np_prime = compat_ptr(crt32.np_prime);
1032         crt64.nq_prime = compat_ptr(crt32.nq_prime);
1033         crt64.u_mult_inv = compat_ptr(crt32.u_mult_inv);
1034         do {
1035                 rc = zcrypt_rsa_crt(&crt64);
1036         } while (rc == -EAGAIN);
1037         /* on failure: retry once again after a requested rescan */
1038         if ((rc == -ENODEV) && (zcrypt_process_rescan()))
1039                 do {
1040                         rc = zcrypt_rsa_crt(&crt64);
1041                 } while (rc == -EAGAIN);
1042         if (rc)
1043                 return rc;
1044         return put_user(crt64.outputdatalength,
1045                         &ucrt32->outputdatalength);
1046 }
1047
/*
 * 32-bit layout of struct ica_xcRB. The explicit padding arrays keep
 * the compat_uptr_t members at the same offsets the 64-bit pointers
 * occupy in the native struct; the struct is packed so no implicit
 * padding is inserted.
 */
struct compat_ica_xcRB {
        unsigned short  agent_ID;
        unsigned int    user_defined;
        unsigned short  request_ID;
        unsigned int    request_control_blk_length;
        unsigned char   padding1[16 - sizeof (compat_uptr_t)];
        compat_uptr_t   request_control_blk_addr;
        unsigned int    request_data_length;
        char            padding2[16 - sizeof (compat_uptr_t)];
        compat_uptr_t   request_data_address;
        unsigned int    reply_control_blk_length;
        char            padding3[16 - sizeof (compat_uptr_t)];
        compat_uptr_t   reply_control_blk_addr;
        unsigned int    reply_data_length;
        char            padding4[16 - sizeof (compat_uptr_t)];
        compat_uptr_t   reply_data_addr;
        unsigned short  priority_window;
        unsigned int    status;
} __attribute__((packed));
1067
1068 static long trans_xcRB32(struct file *filp, unsigned int cmd,
1069                          unsigned long arg)
1070 {
1071         struct compat_ica_xcRB __user *uxcRB32 = compat_ptr(arg);
1072         struct compat_ica_xcRB xcRB32;
1073         struct ica_xcRB xcRB64;
1074         long rc;
1075
1076         if (copy_from_user(&xcRB32, uxcRB32, sizeof(xcRB32)))
1077                 return -EFAULT;
1078         xcRB64.agent_ID = xcRB32.agent_ID;
1079         xcRB64.user_defined = xcRB32.user_defined;
1080         xcRB64.request_ID = xcRB32.request_ID;
1081         xcRB64.request_control_blk_length =
1082                 xcRB32.request_control_blk_length;
1083         xcRB64.request_control_blk_addr =
1084                 compat_ptr(xcRB32.request_control_blk_addr);
1085         xcRB64.request_data_length =
1086                 xcRB32.request_data_length;
1087         xcRB64.request_data_address =
1088                 compat_ptr(xcRB32.request_data_address);
1089         xcRB64.reply_control_blk_length =
1090                 xcRB32.reply_control_blk_length;
1091         xcRB64.reply_control_blk_addr =
1092                 compat_ptr(xcRB32.reply_control_blk_addr);
1093         xcRB64.reply_data_length = xcRB32.reply_data_length;
1094         xcRB64.reply_data_addr =
1095                 compat_ptr(xcRB32.reply_data_addr);
1096         xcRB64.priority_window = xcRB32.priority_window;
1097         xcRB64.status = xcRB32.status;
1098         do {
1099                 rc = zcrypt_send_cprb(&xcRB64);
1100         } while (rc == -EAGAIN);
1101         /* on failure: retry once again after a requested rescan */
1102         if ((rc == -ENODEV) && (zcrypt_process_rescan()))
1103                 do {
1104                         rc = zcrypt_send_cprb(&xcRB64);
1105                 } while (rc == -EAGAIN);
1106         xcRB32.reply_control_blk_length = xcRB64.reply_control_blk_length;
1107         xcRB32.reply_data_length = xcRB64.reply_data_length;
1108         xcRB32.status = xcRB64.status;
1109         if (copy_to_user(uxcRB32, &xcRB32, sizeof(xcRB32)))
1110                         return -EFAULT;
1111         return rc;
1112 }
1113
1114 static long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd,
1115                          unsigned long arg)
1116 {
1117         if (cmd == ICARSAMODEXPO)
1118                 return trans_modexpo32(filp, cmd, arg);
1119         if (cmd == ICARSACRT)
1120                 return trans_modexpo_crt32(filp, cmd, arg);
1121         if (cmd == ZSECSENDCPRB)
1122                 return trans_xcRB32(filp, cmd, arg);
1123         return zcrypt_unlocked_ioctl(filp, cmd, arg);
1124 }
1125 #endif
1126
1127 /*
1128  * Misc device file operations.
1129  */
/* File operations for the z90crypt misc device node. */
static const struct file_operations zcrypt_fops = {
        .owner          = THIS_MODULE,
        .read           = zcrypt_read,
        .write          = zcrypt_write,
        .unlocked_ioctl = zcrypt_unlocked_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = zcrypt_compat_ioctl,  /* 32-bit ioctl translation */
#endif
        .open           = zcrypt_open,
        .release        = zcrypt_release,
        .llseek         = no_llseek,    /* device is not seekable */
};
1142
1143 /*
1144  * Misc device.
1145  */
/* /dev/z90crypt misc device, minor number assigned dynamically. */
static struct miscdevice zcrypt_misc_device = {
        .minor      = MISC_DYNAMIC_MINOR,
        .name       = "z90crypt",
        .fops       = &zcrypt_fops,
};
1151
1152 /*
1153  * Deprecated /proc entry support.
1154  */
1155 static struct proc_dir_entry *zcrypt_entry;
1156
/* Emit 'len' bytes as single hex digits followed by one space. */
static void sprintcl(struct seq_file *m, unsigned char *addr, unsigned int len)
{
        while (len-- > 0)
                seq_printf(m, "%01x", (unsigned int) *addr++);
        seq_putc(m, ' ');
}
1165
/*
 * sprintrw(): print one indented row of hex digits, emitted in
 * space-separated groups of 16 bytes plus a trailing partial group,
 * terminated by a newline.
 */
static void sprintrw(struct seq_file *m, unsigned char *addr, unsigned int len)
{
        int inl, c, cx;

        /* constant string: seq_puts is the idiomatic (and cheaper) call */
        seq_puts(m, "    ");
        inl = 0;
        for (c = 0; c < (len / 16); c++) {
                sprintcl(m, addr + inl, 16);
                inl += 16;
        }
        cx = len % 16;  /* bytes left over for a final partial group */
        if (cx) {
                sprintcl(m, addr + inl, cx);
                inl += cx;
        }
        seq_putc(m, '\n');
}
1183
/*
 * Print a titled hex block: a header line, then the buffer rendered as
 * rows of up to 64 bytes each, followed by a blank line.
 */
static void sprinthx(unsigned char *title, struct seq_file *m,
                     unsigned char *addr, unsigned int len)
{
        unsigned int offset = 0;
        unsigned int rest;
        int row;

        seq_printf(m, "\n%s\n", title);
        for (row = 0; row < (len / 64); row++) {
                sprintrw(m, addr + offset, 64);
                offset += 64;
        }
        rest = len % 64;        /* final short row, if any */
        if (rest)
                sprintrw(m, addr + offset, rest);
        seq_putc(m, '\n');
}
1202
/*
 * Print a titled dump of 'len' 32-bit words, 32 bytes per line, using
 * the seq_file hex dump helper; surrounded by blank lines like sprinthx.
 */
static void sprinthx4(unsigned char *title, struct seq_file *m,
                      unsigned int *array, unsigned int len)
{
        seq_printf(m, "\n%s\n", title);
        seq_hex_dump(m, "    ", DUMP_PREFIX_NONE, 32, 4, array, len, false);
        seq_putc(m, '\n');
}
1210
/*
 * Show function for /proc/driver/z90crypt (deprecated interface):
 * version and counter summary followed by three per-device tables.
 * The single workarea buffer is reused for all three masks; it is
 * sized for the largest consumer (int per device).
 */
static int zcrypt_proc_show(struct seq_file *m, void *v)
{
        char workarea[sizeof(int) * AP_DEVICES];

        seq_printf(m, "\nzcrypt version: %d.%d.%d\n",
                   ZCRYPT_VERSION, ZCRYPT_RELEASE, ZCRYPT_VARIANT);
        seq_printf(m, "Cryptographic domain: %d\n", ap_domain_index);
        seq_printf(m, "Total device count: %d\n", zcrypt_device_count);
        seq_printf(m, "PCICA count: %d\n", zcrypt_count_type(ZCRYPT_PCICA));
        seq_printf(m, "PCICC count: %d\n", zcrypt_count_type(ZCRYPT_PCICC));
        seq_printf(m, "PCIXCC MCL2 count: %d\n",
                   zcrypt_count_type(ZCRYPT_PCIXCC_MCL2));
        seq_printf(m, "PCIXCC MCL3 count: %d\n",
                   zcrypt_count_type(ZCRYPT_PCIXCC_MCL3));
        seq_printf(m, "CEX2C count: %d\n", zcrypt_count_type(ZCRYPT_CEX2C));
        seq_printf(m, "CEX2A count: %d\n", zcrypt_count_type(ZCRYPT_CEX2A));
        seq_printf(m, "CEX3C count: %d\n", zcrypt_count_type(ZCRYPT_CEX3C));
        seq_printf(m, "CEX3A count: %d\n", zcrypt_count_type(ZCRYPT_CEX3A));
        seq_printf(m, "requestq count: %d\n", zcrypt_requestq_count());
        seq_printf(m, "pendingq count: %d\n", zcrypt_pendingq_count());
        seq_printf(m, "Total open handles: %d\n\n",
                   atomic_read(&zcrypt_open_count));
        /* per-device type digits (this table is what zcrypt_proc_write parses) */
        zcrypt_status_mask(workarea);
        sprinthx("Online devices: 1=PCICA 2=PCICC 3=PCIXCC(MCL2) "
                 "4=PCIXCC(MCL3) 5=CEX2C 6=CEX2A 7=CEX3C 8=CEX3A",
                 m, workarea, AP_DEVICES);
        /* per-device queue depths */
        zcrypt_qdepth_mask(workarea);
        sprinthx("Waiting work element counts", m, workarea, AP_DEVICES);
        /* per-device request counters, printed as 32-bit words */
        zcrypt_perdev_reqcnt((int *) workarea);
        sprinthx4("Per-device successfully completed request counts",
                  m, (unsigned int *) workarea, AP_DEVICES);
        return 0;
}
1244
/* seq_file open helper: single-shot show function, no private data. */
static int zcrypt_proc_open(struct inode *inode, struct file *file)
{
        return single_open(file, zcrypt_proc_show, NULL);
}
1249
/*
 * Take queues offline on behalf of the deprecated /proc write interface.
 *
 * NOTE(review): the 'index' argument is not used; every queue belonging
 * to the configured ap_domain_index is set offline and flushed. Confirm
 * whether filtering by card index was intended here.
 */
static void zcrypt_disable_card(int index)
{
        struct zcrypt_card *zc;
        struct zcrypt_queue *zq;

        spin_lock(&zcrypt_list_lock);
        for_each_zcrypt_card(zc) {
                for_each_zcrypt_queue(zq, zc) {
                        if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
                                continue;
                        zq->online = 0;
                        ap_flush_queue(zq->queue);
                }
        }
        spin_unlock(&zcrypt_list_lock);
}
1266
/*
 * Put queues back online on behalf of the deprecated /proc write
 * interface; mirror image of zcrypt_disable_card().
 *
 * NOTE(review): as with the disable path, 'index' is unused; every queue
 * in the configured ap_domain_index is affected.
 */
static void zcrypt_enable_card(int index)
{
        struct zcrypt_card *zc;
        struct zcrypt_queue *zq;

        spin_lock(&zcrypt_list_lock);
        for_each_zcrypt_card(zc) {
                for_each_zcrypt_queue(zq, zc) {
                        if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
                                continue;
                        zq->online = 1;
                        ap_flush_queue(zq->queue);
                }
        }
        spin_unlock(&zcrypt_list_lock);
}
1283
1284 static ssize_t zcrypt_proc_write(struct file *file, const char __user *buffer,
1285                                  size_t count, loff_t *pos)
1286 {
1287         unsigned char *lbuf, *ptr;
1288         size_t local_count;
1289         int j;
1290
1291         if (count <= 0)
1292                 return 0;
1293
1294 #define LBUFSIZE 1200UL
1295         lbuf = kmalloc(LBUFSIZE, GFP_KERNEL);
1296         if (!lbuf)
1297                 return 0;
1298
1299         local_count = min(LBUFSIZE - 1, count);
1300         if (copy_from_user(lbuf, buffer, local_count) != 0) {
1301                 kfree(lbuf);
1302                 return -EFAULT;
1303         }
1304         lbuf[local_count] = '\0';
1305
1306         ptr = strstr(lbuf, "Online devices");
1307         if (!ptr)
1308                 goto out;
1309         ptr = strstr(ptr, "\n");
1310         if (!ptr)
1311                 goto out;
1312         ptr++;
1313
1314         if (strstr(ptr, "Waiting work element counts") == NULL)
1315                 goto out;
1316
1317         for (j = 0; j < 64 && *ptr; ptr++) {
1318                 /*
1319                  * '0' for no device, '1' for PCICA, '2' for PCICC,
1320                  * '3' for PCIXCC_MCL2, '4' for PCIXCC_MCL3,
1321                  * '5' for CEX2C and '6' for CEX2A'
1322                  * '7' for CEX3C and '8' for CEX3A
1323                  */
1324                 if (*ptr >= '0' && *ptr <= '8')
1325                         j++;
1326                 else if (*ptr == 'd' || *ptr == 'D')
1327                         zcrypt_disable_card(j++);
1328                 else if (*ptr == 'e' || *ptr == 'E')
1329                         zcrypt_enable_card(j++);
1330                 else if (*ptr != ' ' && *ptr != '\t')
1331                         break;
1332         }
1333 out:
1334         kfree(lbuf);
1335         return count;
1336 }
1337
/* File operations for the deprecated /proc/driver/z90crypt entry. */
static const struct file_operations zcrypt_proc_fops = {
        .owner          = THIS_MODULE,
        .open           = zcrypt_proc_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
        .write          = zcrypt_proc_write,
};
1346
static int zcrypt_rng_device_count;     /* hwrng registration refcount */
static u32 *zcrypt_rng_buffer;          /* one zeroed page of random data */
static int zcrypt_rng_buffer_index;     /* u32 entries left in the buffer */
static DEFINE_MUTEX(zcrypt_rng_mutex);  /* serializes device add/remove */
1351
/*
 * hwrng read callback: hand out one u32 of random data per call,
 * refilling the buffer page via zcrypt_rng() whenever it runs empty.
 * Returns sizeof(*data) on success or -EIO if no data could be fetched.
 */
static int zcrypt_rng_data_read(struct hwrng *rng, u32 *data)
{
        int rc;

        /*
         * We don't need locking here because the RNG API guarantees serialized
         * read method calls.
         */
        if (zcrypt_rng_buffer_index == 0) {
                rc = zcrypt_rng((char *) zcrypt_rng_buffer);
                /* on failure: retry once again after a requested rescan */
                if ((rc == -ENODEV) && (zcrypt_process_rescan()))
                        rc = zcrypt_rng((char *) zcrypt_rng_buffer);
                if (rc < 0)
                        return -EIO;
                /* rc is the number of bytes delivered; convert to u32 count */
                zcrypt_rng_buffer_index = rc / sizeof *data;
        }
        *data = zcrypt_rng_buffer[--zcrypt_rng_buffer_index];
        return sizeof *data;
}
1372
/* hwrng device backed by the crypto cards; quality is zeroed at
 * registration time when zcrypt_hwrng_seed is off. */
static struct hwrng zcrypt_rng_dev = {
        .name           = "zcrypt",
        .data_read      = zcrypt_rng_data_read,
        .quality        = 990,
};
1378
/*
 * zcrypt_rng_device_add(): refcounted hwrng registration.
 *
 * The first caller allocates the one-page data buffer and registers
 * the hwrng device; later callers only bump the count. Returns 0 on
 * success, -ENOMEM or the hwrng_register() error otherwise.
 */
int zcrypt_rng_device_add(void)
{
        int rc = 0;

        mutex_lock(&zcrypt_rng_mutex);
        if (zcrypt_rng_device_count == 0) {
                zcrypt_rng_buffer = (u32 *) get_zeroed_page(GFP_KERNEL);
                if (!zcrypt_rng_buffer) {
                        rc = -ENOMEM;
                        goto out;
                }
                zcrypt_rng_buffer_index = 0;
                /* without hwrng seeding, advertise zero entropy quality */
                if (!zcrypt_hwrng_seed)
                        zcrypt_rng_dev.quality = 0;
                rc = hwrng_register(&zcrypt_rng_dev);
                if (rc)
                        goto out_free;
                zcrypt_rng_device_count = 1;
        } else
                zcrypt_rng_device_count++;
        mutex_unlock(&zcrypt_rng_mutex);
        return 0;

out_free:
        free_page((unsigned long) zcrypt_rng_buffer);
out:
        mutex_unlock(&zcrypt_rng_mutex);
        return rc;
}
1408
1409 void zcrypt_rng_device_remove(void)
1410 {
1411         mutex_lock(&zcrypt_rng_mutex);
1412         zcrypt_rng_device_count--;
1413         if (zcrypt_rng_device_count == 0) {
1414                 hwrng_unregister(&zcrypt_rng_dev);
1415                 free_page((unsigned long) zcrypt_rng_buffer);
1416         }
1417         mutex_unlock(&zcrypt_rng_mutex);
1418 }
1419
/*
 * Set up the zcrypt debugfs directory and register the s390 debug
 * feature used for driver tracing; default level is DBF_ERR.
 * Always returns 0.
 */
int __init zcrypt_debug_init(void)
{
        zcrypt_dbf_root = debugfs_create_dir("zcrypt", NULL);
        zcrypt_dbf_info = debug_register("zcrypt", 1, 1,
                                         DBF_MAX_SPRINTF_ARGS * sizeof(long));
        debug_register_view(zcrypt_dbf_info, &debug_sprintf_view);
        debug_set_level(zcrypt_dbf_info, DBF_ERR);

        return 0;
}
1430
/* Tear down the debugfs directory and the s390 debug feature. */
void zcrypt_debug_exit(void)
{
        debugfs_remove(zcrypt_dbf_root);
        debug_unregister(zcrypt_dbf_info);
}
1436
1437 /**
1438  * zcrypt_api_init(): Module initialization.
1439  *
1440  * The module initialization code.
1441  */
int __init zcrypt_api_init(void)
{
        int rc;

        /* debug facilities first so later steps can trace */
        rc = zcrypt_debug_init();
        if (rc)
                goto out;

        atomic_set(&zcrypt_rescan_req, 0);

        /* Register the request sprayer. */
        rc = misc_register(&zcrypt_misc_device);
        if (rc < 0)
                goto out;

        /* Set up the proc file system */
        zcrypt_entry = proc_create("driver/z90crypt", 0644, NULL,
                                   &zcrypt_proc_fops);
        if (!zcrypt_entry) {
                rc = -ENOMEM;
                goto out_misc;
        }

        /* register the message type handlers */
        zcrypt_msgtype6_init();
        zcrypt_msgtype50_init();
        return 0;

out_misc:
        misc_deregister(&zcrypt_misc_device);
out:
        return rc;
}
1474
1475 /**
1476  * zcrypt_api_exit(): Module termination.
1477  *
1478  * The module termination code.
1479  */
void __exit zcrypt_api_exit(void)
{
        /* tear down in reverse order of zcrypt_api_init() */
        remove_proc_entry("driver/z90crypt", NULL);
        misc_deregister(&zcrypt_misc_device);
        zcrypt_msgtype6_exit();
        zcrypt_msgtype50_exit();
        zcrypt_debug_exit();
}
1488
1489 module_init(zcrypt_api_init);
1490 module_exit(zcrypt_api_exit);