crypto: nx - add NX-842 platform frontend driver
drivers/crypto/nx/nx-842-pseries.c
1 /*
2  * Driver for IBM Power 842 compression accelerator
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published by
6  * the Free Software Foundation; either version 2 of the License, or
7  * (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
17  *
18  * Copyright (C) IBM Corporation, 2012
19  *
20  * Authors: Robert Jennings <rcj@linux.vnet.ibm.com>
21  *          Seth Jennings <sjenning@linux.vnet.ibm.com>
22  */
23
24 #include <asm/page.h>
25 #include <asm/vio.h>
26
27 #include "nx-842.h"
28 #include "nx_csbcpb.h" /* struct nx_csbcpb */
29
30 #define MODULE_NAME NX842_PSERIES_MODULE_NAME
31 MODULE_LICENSE("GPL");
32 MODULE_AUTHOR("Robert Jennings <rcj@linux.vnet.ibm.com>");
33 MODULE_DESCRIPTION("842 H/W Compression driver for IBM Power processors");
34
35 #define SHIFT_4K 12
36 #define SHIFT_64K 16
37 #define SIZE_4K (1UL << SHIFT_4K)
38 #define SIZE_64K (1UL << SHIFT_64K)
39
40 /* IO buffer must be 128 byte aligned */
41 #define IO_BUFFER_ALIGN 128
42
43 struct nx842_header {
44         int blocks_nr; /* number of compressed blocks */
45         int offset; /* offset of the first block (from beginning of header) */
46         int sizes[0]; /* size of compressed blocks */
47 };
48
49 static inline int nx842_header_size(const struct nx842_header *hdr)
50 {
51         return sizeof(struct nx842_header) +
52                         hdr->blocks_nr * sizeof(hdr->sizes[0]);
53 }
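/*
 * Worked example (illustrative; assumes a 64K PAGE_SIZE and a 4K
 * max_sync_size): the page is split into 16 blocks, so the header occupies
 * sizeof(struct nx842_header) + 16 * sizeof(int) = 8 + 64 = 72 bytes,
 * followed by padding so that the first compressed block starts on a
 * 128-byte boundary; that resulting offset is stored in hdr->offset.
 */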
54
55 /* Macros for fields within nx_csbcpb */
56 /* Check the valid bit within the csbcpb valid field */
57 #define NX842_CSBCBP_VALID_CHK(x) (x & BIT_MASK(7))
58
59 /* CE macros operate on the completion_extension field bits in the csbcpb.
60  * CE0 0=full completion, 1=partial completion
61  * CE1 0=CE0 indicates completion, 1=termination (output may be modified)
62  * CE2 0=processed_bytes is source bytes, 1=processed_bytes is target bytes */
63 #define NX842_CSBCPB_CE0(x)     (x & BIT_MASK(7))
64 #define NX842_CSBCPB_CE1(x)     (x & BIT_MASK(6))
65 #define NX842_CSBCPB_CE2(x)     (x & BIT_MASK(5))
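/*
 * Worked example (illustrative): a completion_extension value of 0xa0 has
 * CE0 set (partial completion) and CE2 set (processed_byte_count refers to
 * target bytes), while CE1 is clear (CE0 indicates completion rather than
 * termination).
 */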
66
67 /* The NX unit accepts data only on 4K page boundaries */
68 #define NX842_HW_PAGE_SHIFT     SHIFT_4K
69 #define NX842_HW_PAGE_SIZE      (ASM_CONST(1) << NX842_HW_PAGE_SHIFT)
70 #define NX842_HW_PAGE_MASK      (~(NX842_HW_PAGE_SIZE-1))
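/*
 * For illustration: with SHIFT_4K == 12, NX842_HW_PAGE_SIZE is 0x1000 and
 * NX842_HW_PAGE_MASK is ~0xfffUL, so (addr & NX842_HW_PAGE_MASK) gives the
 * start of the 4K hardware page containing addr; the decompression path
 * uses this below to check whether a compressed block crosses a hardware
 * page boundary.
 */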
71
72 enum nx842_status {
73         UNAVAILABLE,
74         AVAILABLE
75 };
76
77 struct ibm_nx842_counters {
78         atomic64_t comp_complete;
79         atomic64_t comp_failed;
80         atomic64_t decomp_complete;
81         atomic64_t decomp_failed;
82         atomic64_t swdecomp;
83         atomic64_t comp_times[32];
84         atomic64_t decomp_times[32];
85 };
86
87 static struct nx842_devdata {
88         struct vio_dev *vdev;
89         struct device *dev;
90         struct ibm_nx842_counters *counters;
91         unsigned int max_sg_len;
92         unsigned int max_sync_size;
93         unsigned int max_sync_sg;
94         enum nx842_status status;
95 } __rcu *devdata;
96 static DEFINE_SPINLOCK(devdata_mutex);
97
98 #define NX842_COUNTER_INC(_x) \
99 static inline void nx842_inc_##_x( \
100         const struct nx842_devdata *dev) { \
101         if (dev) \
102                 atomic64_inc(&dev->counters->_x); \
103 }
104 NX842_COUNTER_INC(comp_complete);
105 NX842_COUNTER_INC(comp_failed);
106 NX842_COUNTER_INC(decomp_complete);
107 NX842_COUNTER_INC(decomp_failed);
108 NX842_COUNTER_INC(swdecomp);
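/*
 * For illustration, NX842_COUNTER_INC(comp_complete) above expands to:
 *
 *	static inline void nx842_inc_comp_complete(
 *		const struct nx842_devdata *dev) {
 *		if (dev)
 *			atomic64_inc(&dev->counters->comp_complete);
 *	}
 *
 * giving one nx842_inc_<counter>() helper per statistics counter.
 */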
109
110 #define NX842_HIST_SLOTS 16
111
112 static void ibm_nx842_incr_hist(atomic64_t *times, unsigned int time)
113 {
114         int bucket = fls(time);
115
116         if (bucket)
117                 bucket = min((NX842_HIST_SLOTS - 1), bucket - 1);
118
119         atomic64_inc(&times[bucket]);
120 }
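/*
 * Worked example (illustrative): a 37us operation gives fls(37) == 6, so it
 * is counted in bucket 5, which the sysfs time histogram below reports as
 * the "32-63us" range; anything taking 32768us or longer is clamped into
 * the final bucket (NX842_HIST_SLOTS - 1).
 */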
121
122 /* NX unit operation flags */
123 #define NX842_OP_COMPRESS       0x0
124 #define NX842_OP_CRC            0x1
125 #define NX842_OP_DECOMPRESS     0x2
126 #define NX842_OP_COMPRESS_CRC   (NX842_OP_COMPRESS | NX842_OP_CRC)
127 #define NX842_OP_DECOMPRESS_CRC (NX842_OP_DECOMPRESS | NX842_OP_CRC)
128 #define NX842_OP_ASYNC          (1<<23)
129 #define NX842_OP_NOTIFY         (1<<22)
130 #define NX842_OP_NOTIFY_INT(x)  ((x & 0xff)<<8)
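/*
 * For illustration: this driver only issues synchronous requests, so
 * op.flags below is simply NX842_OP_COMPRESS or NX842_OP_DECOMPRESS.  A
 * hypothetical asynchronous request with a completion interrupt would OR
 * the bits together, e.g.
 *
 *	NX842_OP_COMPRESS_CRC | NX842_OP_ASYNC | NX842_OP_NOTIFY |
 *	NX842_OP_NOTIFY_INT(5)
 *
 * but nothing in this file builds such a request.
 */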
131
132 static unsigned long nx842_get_desired_dma(struct vio_dev *viodev)
133 {
134         /* No use of DMA mappings within the driver. */
135         return 0;
136 }
137
138 struct nx842_slentry {
139         unsigned long ptr; /* Real address (use __pa()) */
140         unsigned long len;
141 };
142
143 /* pHyp scatterlist entry */
144 struct nx842_scatterlist {
145         int entry_nr; /* number of slentries */
146         struct nx842_slentry *entries; /* ptr to array of slentries */
147 };
148
149 /* Does not include sizeof(entry_nr) in the size */
150 static inline unsigned long nx842_get_scatterlist_size(
151                                 struct nx842_scatterlist *sl)
152 {
153         return sl->entry_nr * sizeof(struct nx842_slentry);
154 }
155
156 static inline unsigned long nx842_get_pa(void *addr)
157 {
158         if (is_vmalloc_addr(addr))
159                 return page_to_phys(vmalloc_to_page(addr))
160                        + offset_in_page(addr);
161         else
162                 return __pa(addr);
163 }
164
165 static int nx842_build_scatterlist(unsigned long buf, int len,
166                         struct nx842_scatterlist *sl)
167 {
168         unsigned long nextpage;
169         struct nx842_slentry *entry;
170
171         sl->entry_nr = 0;
172
173         entry = sl->entries;
174         while (len) {
175                 entry->ptr = nx842_get_pa((void *)buf);
176                 nextpage = ALIGN(buf + 1, NX842_HW_PAGE_SIZE);
177                 if (nextpage < buf + len) {
178                         /* we aren't at the end yet */
179                         if (IS_ALIGNED(buf, NX842_HW_PAGE_SIZE))
180                                 /* we are in the middle (or beginning) */
181                                 entry->len = NX842_HW_PAGE_SIZE;
182                         else
183                                 /* we are at the beginning */
184                                 entry->len = nextpage - buf;
185                 } else {
186                         /* at the end */
187                         entry->len = len;
188                 }
189
190                 len -= entry->len;
191                 buf += entry->len;
192                 sl->entry_nr++;
193                 entry++;
194         }
195
196         return 0;
197 }
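/*
 * Worked example (illustrative): for a 4096-byte buffer starting 0x80 bytes
 * past a 4K page boundary, the loop emits two entries, 0xf80 bytes up to
 * the next hardware page followed by the remaining 0x80 bytes, so a buffer
 * straddling a page boundary becomes a two-element scatterlist.
 */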
198
199 /*
200  * Working memory for software decompression
201  */
202 struct sw842_fifo {
203         union {
204                 char f8[256][8];
205                 char f4[512][4];
206         };
207         char f2[256][2];
208         unsigned char f84_full;
209         unsigned char f2_full;
210         unsigned char f8_count;
211         unsigned char f2_count;
212         unsigned int f4_count;
213 };
214
215 /*
216  * Working memory for crypto API
217  */
218 struct nx842_workmem {
219         char bounce[PAGE_SIZE]; /* bounce buffer for decompression input */
220         union {
221                 /* hardware working memory */
222                 struct {
223                         /* scatterlist */
224                         char slin[SIZE_4K];
225                         char slout[SIZE_4K];
226                         /* coprocessor status/parameter block */
227                         struct nx_csbcpb csbcpb;
228                 };
229                 /* software working memory */
230                 struct sw842_fifo swfifo; /* software decompression fifo */
231         };
232 };
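/*
 * Rough size sketch (illustrative; assumes 64K PAGE_SIZE): the bounce
 * buffer is one page, the two scatterlist areas are 4K each and the
 * csbcpb adds a bit more, and the whole struct must fit within the
 * NX842_MEM_COMPRESS area supplied by the caller; nx842_init() enforces
 * this with a BUILD_BUG_ON().
 */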
233
234 static int nx842_validate_result(struct device *dev,
235         struct cop_status_block *csb)
236 {
237         /* The csb must be valid after returning from vio_h_cop_sync */
238         if (!NX842_CSBCBP_VALID_CHK(csb->valid)) {
239                 dev_err(dev, "%s: cspcbp not valid upon completion.\n",
240                                 __func__);
241                 dev_dbg(dev, "valid:0x%02x cs:0x%02x cc:0x%02x ce:0x%02x\n",
242                                 csb->valid,
243                                 csb->crb_seq_number,
244                                 csb->completion_code,
245                                 csb->completion_extension);
246                 dev_dbg(dev, "processed_bytes:%d address:0x%016lx\n",
247                                 csb->processed_byte_count,
248                                 (unsigned long)csb->address);
249                 return -EIO;
250         }
251
252         /* Check return values from the hardware in the CSB */
253         switch (csb->completion_code) {
254         case 0: /* Completed without error */
255                 break;
256         case 64: /* Target bytes > Source bytes during compression */
257         case 13: /* Output buffer too small */
258                 dev_dbg(dev, "%s: Compression output larger than input\n",
259                                         __func__);
260                 return -ENOSPC;
261         case 66: /* Input data contains an illegal template field */
262         case 67: /* Template indicates data past the end of the input stream */
263                 dev_dbg(dev, "%s: Bad data for decompression (code:%d)\n",
264                                         __func__, csb->completion_code);
265                 return -EINVAL;
266         default:
267                 dev_dbg(dev, "%s: Unspecified error (code:%d)\n",
268                                         __func__, csb->completion_code);
269                 return -EIO;
270         }
271
272         /* Hardware sanity check */
273         if (!NX842_CSBCPB_CE2(csb->completion_extension)) {
274                 dev_err(dev, "%s: No error returned by hardware, but "
275                                 "data returned is unusable, contact support.\n"
276                                 "(Additional info: csbcpb->processed bytes "
277                                 "does not specify processed bytes for the "
278                                 "target buffer.)\n", __func__);
279                 return -EIO;
280         }
281
282         return 0;
283 }
284
285 /**
286  * nx842_pseries_compress - Compress data using the 842 algorithm
287  *
288  * Compression is provided by the NX842 coprocessor on IBM Power systems.
289  * The input buffer is compressed and the result is stored in the
290  * provided output buffer.
291  *
292  * Upon return from this function @outlen contains the length of the
293  * compressed data.  If there is an error then @outlen is left unchanged and an
294  * error will be specified by the return code from this function.
295  *
296  * @in: Pointer to input buffer, must be page aligned
297  * @inlen: Length of input buffer, must be PAGE_SIZE
298  * @out: Pointer to output buffer
299  * @outlen: Length of output buffer
300  * @wmem: ptr to buffer for working memory, size determined by
301  *          NX842_MEM_COMPRESS
302  *
303  * Returns:
304  *   0          Success, output of length @outlen stored in the buffer at @out
305  *   -ENOMEM    Unable to allocate internal buffers
306  *   -ENOSPC    Output buffer is too small
307  *   -EMSGSIZE  XXX Difficult to describe this limitation
308  *   -EIO       Internal error
309  *   -ENODEV    Hardware unavailable
310  */
311 static int nx842_pseries_compress(const unsigned char *in, unsigned int inlen,
312                                   unsigned char *out, unsigned int *outlen,
313                                   void *wmem)
314 {
315         struct nx842_header *hdr;
316         struct nx842_devdata *local_devdata;
317         struct device *dev = NULL;
318         struct nx842_workmem *workmem;
319         struct nx842_scatterlist slin, slout;
320         struct nx_csbcpb *csbcpb;
321         int ret = 0, max_sync_size, i, bytesleft, size, hdrsize;
322         unsigned long inbuf, outbuf, padding;
323         struct vio_pfo_op op = {
324                 .done = NULL,
325                 .handle = 0,
326                 .timeout = 0,
327         };
328         unsigned long start_time = get_tb();
329
330         /*
331          * Make sure the input buffer is page aligned and exactly PAGE_SIZE.
332          * This is assumed since this driver is designed for whole-page
333          * compression only (for now), and it lets us use direct DDE(s) for
334          * the input because the alignment is guaranteed.
335          */
336         inbuf = (unsigned long)in;
337         if (!IS_ALIGNED(inbuf, PAGE_SIZE) || inlen != PAGE_SIZE)
338                 return -EINVAL;
339
340         rcu_read_lock();
341         local_devdata = rcu_dereference(devdata);
342         if (!local_devdata || !local_devdata->dev) {
343                 rcu_read_unlock();
344                 return -ENODEV;
345         }
346         max_sync_size = local_devdata->max_sync_size;
347         dev = local_devdata->dev;
348
349         /* Create the header */
350         hdr = (struct nx842_header *)out;
351         hdr->blocks_nr = PAGE_SIZE / max_sync_size;
352         hdrsize = nx842_header_size(hdr);
353         outbuf = (unsigned long)out + hdrsize;
354         bytesleft = *outlen - hdrsize;
355
356         /* Init scatterlist */
357         workmem = (struct nx842_workmem *)ALIGN((unsigned long)wmem,
358                 NX842_HW_PAGE_SIZE);
359         slin.entries = (struct nx842_slentry *)workmem->slin;
360         slout.entries = (struct nx842_slentry *)workmem->slout;
361
362         /* Init operation */
363         op.flags = NX842_OP_COMPRESS;
364         csbcpb = &workmem->csbcpb;
365         memset(csbcpb, 0, sizeof(*csbcpb));
366         op.csbcpb = nx842_get_pa(csbcpb);
367         op.out = nx842_get_pa(slout.entries);
368
369         for (i = 0; i < hdr->blocks_nr; i++) {
370                 /*
371                  * Aligning the output blocks to 128 bytes does waste space,
372                  * but it prevents the need for bounce buffers and memory
373                  * copies.  It also simplifies the code a lot.  In the worst
374                  * case (64k page, 4k max_sync_size), you lose up to
375                  * (128*16)/64k = ~3% the compression factor. For 64k
376                  * max_sync_size, the loss would be at most 128/64k = ~0.2%.
377                  */
378                 padding = ALIGN(outbuf, IO_BUFFER_ALIGN) - outbuf;
379                 outbuf += padding;
380                 bytesleft -= padding;
381                 if (i == 0)
382                         /* save offset into first block in header */
383                         hdr->offset = padding + hdrsize;
384
385                 if (bytesleft <= 0) {
386                         ret = -ENOSPC;
387                         goto unlock;
388                 }
389
390                 /*
391                  * NOTE: If the default max_sync_size is changed from 4k
392                  * to 64k, remove the "likely" case below, since a
393                  * scatterlist will always be needed.
394                  */
395                 if (likely(max_sync_size == NX842_HW_PAGE_SIZE)) {
396                         /* Create direct DDE */
397                         op.in = nx842_get_pa((void *)inbuf);
398                         op.inlen = max_sync_size;
399
400                 } else {
401                         /* Create indirect DDE (scatterlist) */
402                         nx842_build_scatterlist(inbuf, max_sync_size, &slin);
403                         op.in = nx842_get_pa(slin.entries);
404                         op.inlen = -nx842_get_scatterlist_size(&slin);
405                 }
406
407                 /*
408                  * If max_sync_size != NX842_HW_PAGE_SIZE, an indirect
409                  * DDE is required for the outbuf.
410                  * If max_sync_size == NX842_HW_PAGE_SIZE, outbuf must
411                  * also be page aligned (1 in 128/4k=32 chance) in order
412                  * to use a direct DDE.
413                  * This is unlikely, just use an indirect DDE always.
414                  */
415                 nx842_build_scatterlist(outbuf,
416                         min(bytesleft, max_sync_size), &slout);
417                 /* op.out set before loop */
418                 op.outlen = -nx842_get_scatterlist_size(&slout);
419
420                 /* Send request to pHyp */
421                 ret = vio_h_cop_sync(local_devdata->vdev, &op);
422
423                 /* Check for pHyp error */
424                 if (ret) {
425                         dev_dbg(dev, "%s: vio_h_cop_sync error (ret=%d, hret=%ld)\n",
426                                 __func__, ret, op.hcall_err);
427                         ret = -EIO;
428                         goto unlock;
429                 }
430
431                 /* Check for hardware error */
432                 ret = nx842_validate_result(dev, &csbcpb->csb);
433                 if (ret && ret != -ENOSPC)
434                         goto unlock;
435
436                 /* Handle incompressible data */
437                 if (unlikely(ret == -ENOSPC)) {
438                         if (bytesleft < max_sync_size) {
439                                 /*
440                                  * Not enough space left in the output buffer
441                                  * to store uncompressed block
442                                  */
443                                 goto unlock;
444                         } else {
445                                 /* Store incompressible block */
446                                 memcpy((void *)outbuf, (void *)inbuf,
447                                         max_sync_size);
448                                 hdr->sizes[i] = -max_sync_size;
449                                 outbuf += max_sync_size;
450                                 bytesleft -= max_sync_size;
451                                 /* Reset ret, incompressible data handled */
452                                 ret = 0;
453                         }
454                 } else {
455                         /* Normal case, compression was successful */
456                         size = csbcpb->csb.processed_byte_count;
457                         dev_dbg(dev, "%s: processed_bytes=%d\n",
458                                 __func__, size);
459                         hdr->sizes[i] = size;
460                         outbuf += size;
461                         bytesleft -= size;
462                 }
463
464                 inbuf += max_sync_size;
465         }
466
467         *outlen = (unsigned int)(outbuf - (unsigned long)out);
468
469 unlock:
470         if (ret)
471                 nx842_inc_comp_failed(local_devdata);
472         else {
473                 nx842_inc_comp_complete(local_devdata);
474                 ibm_nx842_incr_hist(local_devdata->counters->comp_times,
475                         (get_tb() - start_time) / tb_ticks_per_usec);
476         }
477         rcu_read_unlock();
478         return ret;
479 }
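/*
 * Illustrative usage sketch (not part of the driver; the buffer names and
 * allocation are made up, only the entry point and its contract come from
 * the code above):
 *
 *	unsigned int clen = out_size;
 *	int rc = nx842_pseries_compress(in_page, PAGE_SIZE, out, &clen, wmem);
 *
 * where in_page is a page-aligned buffer of exactly PAGE_SIZE bytes,
 * out/out_size describe the destination, and wmem points to at least
 * NX842_MEM_COMPRESS bytes of working memory.  On success clen is updated
 * to the compressed length; -ENOSPC means even the uncompressed fallback
 * block would not fit in the output buffer.
 */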
480
481 static int sw842_decompress(const unsigned char *, int, unsigned char *, int *,
482                         const void *);
483
484 /**
485  * nx842_pseries_decompress - Decompress data using the 842 algorithm
486  *
487  * Decompression is provided by the NX842 coprocessor on IBM Power systems.
488  * The input buffer is decompressed and the result is stored in the
489  * provided output buffer.  The size allocated to the output buffer is
490  * provided by the caller of this function in @outlen.  Upon return from
491  * this function @outlen contains the length of the decompressed data.
492  * If there is an error then @outlen is left unchanged and an error will be
493  * specified by the return code from this function.
494  *
495  * @in: Pointer to input buffer, will use bounce buffer if not 128 byte
496  *      aligned
497  * @inlen: Length of input buffer
498  * @out: Pointer to output buffer, must be page aligned
499  * @outlen: Length of output buffer, must be PAGE_SIZE
500  * @wmem: ptr to buffer for working memory, size determined by
501  *          NX842_MEM_COMPRESS
502  *
503  * Returns:
504  *   0          Success, output of length @outlen stored in the buffer at @out
505  *   -ENODEV    Hardware decompression device is unavailable
506  *   -ENOMEM    Unable to allocate internal buffers
507  *   -ENOSPC    Output buffer is too small
508  *   -EINVAL    Bad input data encountered when attempting decompress
509  *   -EIO       Internal error
510  */
511 static int nx842_pseries_decompress(const unsigned char *in, unsigned int inlen,
512                                     unsigned char *out, unsigned int *outlen,
513                                     void *wmem)
514 {
515         struct nx842_header *hdr;
516         struct nx842_devdata *local_devdata;
517         struct device *dev = NULL;
518         struct nx842_workmem *workmem;
519         struct nx842_scatterlist slin, slout;
520         struct nx_csbcpb *csbcpb;
521         int ret = 0, i, size, max_sync_size;
522         unsigned long inbuf, outbuf;
523         struct vio_pfo_op op = {
524                 .done = NULL,
525                 .handle = 0,
526                 .timeout = 0,
527         };
528         unsigned long start_time = get_tb();
529
530         /* Ensure page alignment and size */
531         outbuf = (unsigned long)out;
532         if (!IS_ALIGNED(outbuf, PAGE_SIZE) || *outlen != PAGE_SIZE)
533                 return -EINVAL;
534
535         rcu_read_lock();
536         local_devdata = rcu_dereference(devdata);
537         if (local_devdata)
538                 dev = local_devdata->dev;
539
540         /* Get header */
541         hdr = (struct nx842_header *)in;
542
543         workmem = (struct nx842_workmem *)ALIGN((unsigned long)wmem,
544                 NX842_HW_PAGE_SIZE);
545
546         inbuf = (unsigned long)in + hdr->offset;
547         if (likely(!IS_ALIGNED(inbuf, IO_BUFFER_ALIGN))) {
548                 /* Copy block(s) into bounce buffer for alignment */
549                 memcpy(workmem->bounce, in + hdr->offset, inlen - hdr->offset);
550                 inbuf = (unsigned long)workmem->bounce;
551         }
552
553         /* Init scatterlist */
554         slin.entries = (struct nx842_slentry *)workmem->slin;
555         slout.entries = (struct nx842_slentry *)workmem->slout;
556
557         /* Init operation */
558         op.flags = NX842_OP_DECOMPRESS;
559         csbcpb = &workmem->csbcpb;
560         memset(csbcpb, 0, sizeof(*csbcpb));
561         op.csbcpb = nx842_get_pa(csbcpb);
562
563         /*
564          * max_sync_size may have changed since compression,
565          * so we can't read it from the device info. We need
566          * to derive it from hdr->blocks_nr.
567          */
568         max_sync_size = PAGE_SIZE / hdr->blocks_nr;
569
570         for (i = 0; i < hdr->blocks_nr; i++) {
571                 /* Skip padding */
572                 inbuf = ALIGN(inbuf, IO_BUFFER_ALIGN);
573
574                 if (hdr->sizes[i] < 0) {
575                         /* Negative sizes indicate uncompressed data blocks */
576                         size = abs(hdr->sizes[i]);
577                         memcpy((void *)outbuf, (void *)inbuf, size);
578                         outbuf += size;
579                         inbuf += size;
580                         continue;
581                 }
582
583                 if (!dev)
584                         goto sw;
585
586                 /*
587                  * The better the compression, the more likely the "likely"
588                  * case becomes.
589                  */
590                 if (likely((inbuf & NX842_HW_PAGE_MASK) ==
591                         ((inbuf + hdr->sizes[i] - 1) & NX842_HW_PAGE_MASK))) {
592                         /* Create direct DDE */
593                         op.in = nx842_get_pa((void *)inbuf);
594                         op.inlen = hdr->sizes[i];
595                 } else {
596                         /* Create indirect DDE (scatterlist) */
597                         nx842_build_scatterlist(inbuf, hdr->sizes[i] , &slin);
598                         op.in = nx842_get_pa(slin.entries);
599                         op.inlen = -nx842_get_scatterlist_size(&slin);
600                 }
601
602                 /*
603                  * NOTE: If the default max_sync_size is changed from 4k
604                  * to 64k, remove the "likely" case below, since a
605                  * scatterlist will always be needed.
606                  */
607                 if (likely(max_sync_size == NX842_HW_PAGE_SIZE)) {
608                         /* Create direct DDE */
609                         op.out = nx842_get_pa((void *)outbuf);
610                         op.outlen = max_sync_size;
611                 } else {
612                         /* Create indirect DDE (scatterlist) */
613                         nx842_build_scatterlist(outbuf, max_sync_size, &slout);
614                         op.out = nx842_get_pa(slout.entries);
615                         op.outlen = -nx842_get_scatterlist_size(&slout);
616                 }
617
618                 /* Send request to pHyp */
619                 ret = vio_h_cop_sync(local_devdata->vdev, &op);
620
621                 /* Check for pHyp error */
622                 if (ret) {
623                         dev_dbg(dev, "%s: vio_h_cop_sync error (ret=%d, hret=%ld)\n",
624                                 __func__, ret, op.hcall_err);
625                         dev = NULL;
626                         goto sw;
627                 }
628
629                 /* Check for hardware error */
630                 ret = nx842_validate_result(dev, &csbcpb->csb);
631                 if (ret) {
632                         dev = NULL;
633                         goto sw;
634                 }
635
636                 /* HW decompression success */
637                 inbuf += hdr->sizes[i];
638                 outbuf += csbcpb->csb.processed_byte_count;
639                 continue;
640
641 sw:
642                 /* software decompression */
643                 size = max_sync_size;
644                 ret = sw842_decompress(
645                         (unsigned char *)inbuf, hdr->sizes[i],
646                         (unsigned char *)outbuf, &size, wmem);
647                 if (ret)
648                         pr_debug("%s: sw842_decompress failed with %d\n",
649                                 __func__, ret);
650
651                 if (ret) {
652                         if (ret != -ENOSPC && ret != -EINVAL &&
653                                         ret != -EMSGSIZE)
654                                 ret = -EIO;
655                         goto unlock;
656                 }
657
658                 /* SW decompression success */
659                 inbuf += hdr->sizes[i];
660                 outbuf += size;
661         }
662
663         *outlen = (unsigned int)(outbuf - (unsigned long)out);
664
665 unlock:
666         if (ret)
667                 /* decompress fail */
668                 nx842_inc_decomp_failed(local_devdata);
669         else {
670                 if (!dev)
671                         /* software decompress */
672                         nx842_inc_swdecomp(local_devdata);
673                 nx842_inc_decomp_complete(local_devdata);
674                 ibm_nx842_incr_hist(local_devdata->counters->decomp_times,
675                         (get_tb() - start_time) / tb_ticks_per_usec);
676         }
677
678         rcu_read_unlock();
679         return ret;
680 }
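/*
 * Layout recap (illustrative): the buffer produced by
 * nx842_pseries_compress() and consumed here looks like
 *
 *	[nx842_header][pad][block 0][pad][block 1] ... [pad][block N-1]
 *
 * where each pad aligns the following block to IO_BUFFER_ALIGN (128 bytes),
 * hdr->offset locates block 0 relative to the start of the header, and a
 * negative hdr->sizes[i] marks block i as stored uncompressed.
 */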
681
682 /**
683  * nx842_OF_set_defaults -- Set default (disabled) values for devdata
684  *
685  * @devdata - struct nx842_devdata to update
686  *
687  * Returns:
688  *  0 on success
689  *  -ENOENT if @devdata ptr is NULL
690  */
691 static int nx842_OF_set_defaults(struct nx842_devdata *devdata)
692 {
693         if (devdata) {
694                 devdata->max_sync_size = 0;
695                 devdata->max_sync_sg = 0;
696                 devdata->max_sg_len = 0;
697                 devdata->status = UNAVAILABLE;
698                 return 0;
699         } else
700                 return -ENOENT;
701 }
702
703 /**
704  * nx842_OF_upd_status -- Update the device info from OF status prop
705  *
706  * The status property indicates whether the accelerator is enabled.  The
707  * presence of the device in the OF tree indicates that the hardware is
708  * present; the device is enabled only when the status value is 'okay',
709  * otherwise the device driver will be disabled.
710  *
711  * @devdata - struct nx842_devdata to update
712  * @prop - struct property pointer containing the status value for the update
713  *
714  * Returns:
715  *  0 - Device is available
716  *  -EINVAL - Device is not available
717  */
718 static int nx842_OF_upd_status(struct nx842_devdata *devdata,
719                                         struct property *prop) {
720         int ret = 0;
721         const char *status = (const char *)prop->value;
722
723         if (!strncmp(status, "okay", (size_t)prop->length)) {
724                 devdata->status = AVAILABLE;
725         } else {
726                 dev_info(devdata->dev, "%s: status '%s' is not 'okay'\n",
727                                 __func__, status);
728                 devdata->status = UNAVAILABLE;
729         }
730
731         return ret;
732 }
733
734 /**
735  * nx842_OF_upd_maxsglen -- Update the device info from OF maxsglen prop
736  *
737  * Definition of the 'ibm,max-sg-len' OF property:
738  *  This field indicates the maximum byte length of a scatter list
739  *  for the platform facility. It is a single cell encoded as with encode-int.
740  *
741  * Example:
742  *  # od -x ibm,max-sg-len
743  *  0000000 0000 0ff0
744  *
745  *  In this example, the maximum byte length of a scatter list is
746  *  0x0ff0 (4,080).
747  *
748  * @devdata - struct nx842_devdata to update
749  * @prop - struct property pointer containing the max-sg-len value for the update
750  *
751  * Returns:
752  *  0 on success
753  *  -EINVAL on failure
754  */
755 static int nx842_OF_upd_maxsglen(struct nx842_devdata *devdata,
756                                         struct property *prop) {
757         int ret = 0;
758         const int *maxsglen = prop->value;
759
760         if (prop->length != sizeof(*maxsglen)) {
761                 dev_err(devdata->dev, "%s: unexpected format for ibm,max-sg-len property\n", __func__);
762                 dev_dbg(devdata->dev, "%s: ibm,max-sg-len is %d bytes long, expected %lu bytes\n", __func__,
763                                 prop->length, sizeof(*maxsglen));
764                 ret = -EINVAL;
765         } else {
766                 devdata->max_sg_len = (unsigned int)min(*maxsglen,
767                                 (int)NX842_HW_PAGE_SIZE);
768         }
769
770         return ret;
771 }
772
773 /**
774  * nx842_OF_upd_maxsyncop -- Update the device info from OF maxsyncop prop
775  *
776  * Definition of the 'ibm,max-sync-cop' OF property:
777  *  Two series of cells.  The first series of cells represents the maximums
778  *  that can be synchronously compressed. The second series of cells
779  *  represents the maximums that can be synchronously decompressed.
780  *  1. The first cell in each series contains the number of
781  *     (data length, scatter list elements) pairs that follow - each pair
782  *     being of the form
783  *    a. One cell data byte length
784  *    b. One cell total number of scatter list elements
785  *
786  * Example:
787  *  # od -x ibm,max-sync-cop
788  *  0000000 0000 0001 0000 1000 0000 01fe 0000 0001
789  *  0000020 0000 1000 0000 01fe
790  *
791  *  In this example, compression supports 0x1000 (4,096) data byte length
792  *  and 0x1fe (510) total scatter list elements.  Decompression supports
793  *  0x1000 (4,096) data byte length and 0x1f3 (510) total scatter list
794  *  elements.
795  *
796  * @devdata - struct nx842_devdata to update
797  * @prop - struct property pointer containing the maxsyncop values for the update
798  *
799  * Returns:
800  *  0 on success
801  *  -EINVAL on failure
802  */
803 static int nx842_OF_upd_maxsyncop(struct nx842_devdata *devdata,
804                                         struct property *prop) {
805         int ret = 0;
806         const struct maxsynccop_t {
807                 int comp_elements;
808                 int comp_data_limit;
809                 int comp_sg_limit;
810                 int decomp_elements;
811                 int decomp_data_limit;
812                 int decomp_sg_limit;
813         } *maxsynccop;
814
815         if (prop->length != sizeof(*maxsynccop)) {
816                 dev_err(devdata->dev, "%s: unexpected format for ibm,max-sync-cop property\n", __func__);
817                 dev_dbg(devdata->dev, "%s: ibm,max-sync-cop is %d bytes long, expected %lu bytes\n", __func__, prop->length,
818                                 sizeof(*maxsynccop));
819                 ret = -EINVAL;
820                 goto out;
821         }
822
823         maxsynccop = (const struct maxsynccop_t *)prop->value;
824
825         /* Use one limit rather than separate limits for compression and
826          * decompression.  Cap it at 64K, the largest size the header can
827          * support, and reject anything smaller than the 4K hardware page
828          * size. */
829         devdata->max_sync_size =
830                         (unsigned int)min(maxsynccop->comp_data_limit,
831                                         maxsynccop->decomp_data_limit);
832
833         devdata->max_sync_size = min_t(unsigned int, devdata->max_sync_size,
834                                         SIZE_64K);
835
836         if (devdata->max_sync_size < SIZE_4K) {
837                 dev_err(devdata->dev, "%s: hardware max data size (%u) is "
838                                 "less than the driver minimum, unable to use "
839                                 "the hardware device\n",
840                                 __func__, devdata->max_sync_size);
841                 ret = -EINVAL;
842                 goto out;
843         }
844
845         devdata->max_sync_sg = (unsigned int)min(maxsynccop->comp_sg_limit,
846                                                 maxsynccop->decomp_sg_limit);
847         if (devdata->max_sync_sg < 1) {
848                 dev_err(devdata->dev, "%s: hardware max sg size (%u) is "
849                                 "less than the driver minimum, unable to use "
850                                 "the hardware device\n",
851                                 __func__, devdata->max_sync_sg);
852                 ret = -EINVAL;
853                 goto out;
854         }
855
856 out:
857         return ret;
858 }
859
860 /**
861  * nx842_OF_upd -- Handle OF properties updates for the device.
863  *
864  * Set all properties from the OF tree.  Optionally, a new property
865  * can be provided by the @new_prop pointer to overwrite an existing value.
866  * The device will remain disabled until all values are valid; this function
867  * will return an error for updates unless all values are valid.
868  *
869  * @new_prop: If not NULL, this property is being updated.  If NULL, update
870  *  all properties from the current values in the OF tree.
871  *
872  * Returns:
873  *  0 - Success
874  *  -ENOMEM - Could not allocate memory for new devdata structure
875  *  -EINVAL - property value not found, new_prop is not a recognized
876  *      property for the device or property value is not valid.
877  *  -ENODEV - Device is not available
878  */
879 static int nx842_OF_upd(struct property *new_prop)
880 {
881         struct nx842_devdata *old_devdata = NULL;
882         struct nx842_devdata *new_devdata = NULL;
883         struct device_node *of_node = NULL;
884         struct property *status = NULL;
885         struct property *maxsglen = NULL;
886         struct property *maxsyncop = NULL;
887         int ret = 0;
888         unsigned long flags;
889
890         spin_lock_irqsave(&devdata_mutex, flags);
891         old_devdata = rcu_dereference_check(devdata,
892                         lockdep_is_held(&devdata_mutex));
893         if (old_devdata)
894                 of_node = old_devdata->dev->of_node;
895
896         if (!old_devdata || !of_node) {
897                 pr_err("%s: device is not available\n", __func__);
898                 spin_unlock_irqrestore(&devdata_mutex, flags);
899                 return -ENODEV;
900         }
901
902         new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS);
903         if (!new_devdata) {
904                 dev_err(old_devdata->dev, "%s: Could not allocate memory for device data\n", __func__);
905                 ret = -ENOMEM;
906                 goto error_out;
907         }
908
909         memcpy(new_devdata, old_devdata, sizeof(*old_devdata));
910         new_devdata->counters = old_devdata->counters;
911
912         /* Set ptrs for existing properties */
913         status = of_find_property(of_node, "status", NULL);
914         maxsglen = of_find_property(of_node, "ibm,max-sg-len", NULL);
915         maxsyncop = of_find_property(of_node, "ibm,max-sync-cop", NULL);
916         if (!status || !maxsglen || !maxsyncop) {
917                 dev_err(old_devdata->dev, "%s: Could not locate device properties\n", __func__);
918                 ret = -EINVAL;
919                 goto error_out;
920         }
921
922         /*
923          * If this is a property update, there are only certain properties that
924          * we care about. Bail if it isn't in the below list
925          */
926         if (new_prop && strcmp(new_prop->name, "status") &&
927                         strcmp(new_prop->name, "ibm,max-sg-len") &&
928                         strcmp(new_prop->name, "ibm,max-sync-cop"))
929                 goto out;
930
931         /* Perform property updates */
932         ret = nx842_OF_upd_status(new_devdata, status);
933         if (ret)
934                 goto error_out;
935
936         ret = nx842_OF_upd_maxsglen(new_devdata, maxsglen);
937         if (ret)
938                 goto error_out;
939
940         ret = nx842_OF_upd_maxsyncop(new_devdata, maxsyncop);
941         if (ret)
942                 goto error_out;
943
944 out:
945         dev_info(old_devdata->dev, "%s: max_sync_size new:%u old:%u\n",
946                         __func__, new_devdata->max_sync_size,
947                         old_devdata->max_sync_size);
948         dev_info(old_devdata->dev, "%s: max_sync_sg new:%u old:%u\n",
949                         __func__, new_devdata->max_sync_sg,
950                         old_devdata->max_sync_sg);
951         dev_info(old_devdata->dev, "%s: max_sg_len new:%u old:%u\n",
952                         __func__, new_devdata->max_sg_len,
953                         old_devdata->max_sg_len);
954
955         rcu_assign_pointer(devdata, new_devdata);
956         spin_unlock_irqrestore(&devdata_mutex, flags);
957         synchronize_rcu();
958         dev_set_drvdata(new_devdata->dev, new_devdata);
959         kfree(old_devdata);
960         return 0;
961
962 error_out:
963         if (new_devdata) {
964                 dev_info(old_devdata->dev, "%s: device disabled\n", __func__);
965                 nx842_OF_set_defaults(new_devdata);
966                 rcu_assign_pointer(devdata, new_devdata);
967                 spin_unlock_irqrestore(&devdata_mutex, flags);
968                 synchronize_rcu();
969                 dev_set_drvdata(new_devdata->dev, new_devdata);
970                 kfree(old_devdata);
971         } else {
972                 dev_err(old_devdata->dev, "%s: could not update driver from hardware\n", __func__);
973                 spin_unlock_irqrestore(&devdata_mutex, flags);
974         }
975
976         if (!ret)
977                 ret = -EINVAL;
978         return ret;
979 }
980
981 /**
982  * nx842_OF_notifier - Process updates to OF properties for the device
983  *
984  * @np: notifier block
985  * @action: notifier action
986  * @data: struct of_reconfig_data pointer if action is
987  *      OF_RECONFIG_UPDATE_PROPERTY
988  *
989  * Returns:
990  *      NOTIFY_OK on success
991  *      NOTIFY_BAD encoded with error number on failure, use
992  *              notifier_to_errno() to decode this value
993  */
994 static int nx842_OF_notifier(struct notifier_block *np, unsigned long action,
995                              void *data)
996 {
997         struct of_reconfig_data *upd = data;
998         struct nx842_devdata *local_devdata;
999         struct device_node *node = NULL;
1000
1001         rcu_read_lock();
1002         local_devdata = rcu_dereference(devdata);
1003         if (local_devdata)
1004                 node = local_devdata->dev->of_node;
1005
1006         if (local_devdata &&
1007                         action == OF_RECONFIG_UPDATE_PROPERTY &&
1008                         !strcmp(upd->dn->name, node->name)) {
1009                 rcu_read_unlock();
1010                 nx842_OF_upd(upd->prop);
1011         } else
1012                 rcu_read_unlock();
1013
1014         return NOTIFY_OK;
1015 }
1016
1017 static struct notifier_block nx842_of_nb = {
1018         .notifier_call = nx842_OF_notifier,
1019 };
1020
1021 #define nx842_counter_read(_name)                                       \
1022 static ssize_t nx842_##_name##_show(struct device *dev,         \
1023                 struct device_attribute *attr,                          \
1024                 char *buf) {                                            \
1025         struct nx842_devdata *local_devdata;                    \
1026         int p = 0;                                                      \
1027         rcu_read_lock();                                                \
1028         local_devdata = rcu_dereference(devdata);                       \
1029         if (local_devdata)                                              \
1030                 p = snprintf(buf, PAGE_SIZE, "%ld\n",                   \
1031                        atomic64_read(&local_devdata->counters->_name)); \
1032         rcu_read_unlock();                                              \
1033         return p;                                                       \
1034 }
1035
1036 #define NX842DEV_COUNTER_ATTR_RO(_name)                                 \
1037         nx842_counter_read(_name);                                      \
1038         static struct device_attribute dev_attr_##_name = __ATTR(_name, \
1039                                                 0444,                   \
1040                                                 nx842_##_name##_show,\
1041                                                 NULL);
1042
1043 NX842DEV_COUNTER_ATTR_RO(comp_complete);
1044 NX842DEV_COUNTER_ATTR_RO(comp_failed);
1045 NX842DEV_COUNTER_ATTR_RO(decomp_complete);
1046 NX842DEV_COUNTER_ATTR_RO(decomp_failed);
1047 NX842DEV_COUNTER_ATTR_RO(swdecomp);
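/*
 * For illustration: NX842DEV_COUNTER_ATTR_RO(comp_complete) above expands
 * (via nx842_counter_read) into an nx842_comp_complete_show() function plus
 * a read-only dev_attr_comp_complete attribute, which is then listed in
 * nx842_sysfs_entries[] below so each counter shows up as a file in the
 * device's sysfs directory.
 */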
1048
1049 static ssize_t nx842_timehist_show(struct device *,
1050                 struct device_attribute *, char *);
1051
1052 static struct device_attribute dev_attr_comp_times = __ATTR(comp_times, 0444,
1053                 nx842_timehist_show, NULL);
1054 static struct device_attribute dev_attr_decomp_times = __ATTR(decomp_times,
1055                 0444, nx842_timehist_show, NULL);
1056
1057 static ssize_t nx842_timehist_show(struct device *dev,
1058                 struct device_attribute *attr, char *buf) {
1059         char *p = buf;
1060         struct nx842_devdata *local_devdata;
1061         atomic64_t *times;
1062         int bytes_remain = PAGE_SIZE;
1063         int bytes;
1064         int i;
1065
1066         rcu_read_lock();
1067         local_devdata = rcu_dereference(devdata);
1068         if (!local_devdata) {
1069                 rcu_read_unlock();
1070                 return 0;
1071         }
1072
1073         if (attr == &dev_attr_comp_times)
1074                 times = local_devdata->counters->comp_times;
1075         else if (attr == &dev_attr_decomp_times)
1076                 times = local_devdata->counters->decomp_times;
1077         else {
1078                 rcu_read_unlock();
1079                 return 0;
1080         }
1081
1082         for (i = 0; i < (NX842_HIST_SLOTS - 1); i++) {
1083                 bytes = snprintf(p, bytes_remain, "%u-%uus:\t%ld\n",
1084                                i ? (2<<(i-1)) : 0, (2<<i)-1,
1085                                atomic64_read(&times[i]));
1086                 bytes_remain -= bytes;
1087                 p += bytes;
1088         }
1089         /* The last bucket holds everything over
1090          * 2<<(NX842_HIST_SLOTS - 2) us */
1091         bytes = snprintf(p, bytes_remain, "%uus - :\t%ld\n",
1092                         2<<(NX842_HIST_SLOTS - 2),
1093                         atomic64_read(&times[(NX842_HIST_SLOTS - 1)]));
1094         p += bytes;
1095
1096         rcu_read_unlock();
1097         return p - buf;
1098 }
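/*
 * Example output (the counts are made up, the format comes from the
 * snprintf() calls above):
 *
 *	0-1us:	0
 *	2-3us:	2
 *	4-7us:	15
 *	...
 *	32768us - :	1
 *
 * i.e. one line per latency bucket, with the final line counting every
 * operation slower than 2<<(NX842_HIST_SLOTS - 2) microseconds.
 */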
1099
1100 static struct attribute *nx842_sysfs_entries[] = {
1101         &dev_attr_comp_complete.attr,
1102         &dev_attr_comp_failed.attr,
1103         &dev_attr_decomp_complete.attr,
1104         &dev_attr_decomp_failed.attr,
1105         &dev_attr_swdecomp.attr,
1106         &dev_attr_comp_times.attr,
1107         &dev_attr_decomp_times.attr,
1108         NULL,
1109 };
1110
1111 static struct attribute_group nx842_attribute_group = {
1112         .name = NULL,           /* put in device directory */
1113         .attrs = nx842_sysfs_entries,
1114 };
1115
1116 static struct nx842_driver nx842_pseries_driver = {
1117         .owner =        THIS_MODULE,
1118         .compress =     nx842_pseries_compress,
1119         .decompress =   nx842_pseries_decompress,
1120 };
1121
1122 static int __init nx842_probe(struct vio_dev *viodev,
1123                                   const struct vio_device_id *id)
1124 {
1125         struct nx842_devdata *old_devdata, *new_devdata = NULL;
1126         unsigned long flags;
1127         int ret = 0;
1128
1129         spin_lock_irqsave(&devdata_mutex, flags);
1130         old_devdata = rcu_dereference_check(devdata,
1131                         lockdep_is_held(&devdata_mutex));
1132
1133         if (old_devdata && old_devdata->vdev != NULL) {
1134                 dev_err(&viodev->dev, "%s: Attempt to register more than one instance of the hardware\n", __func__);
1135                 ret = -1;
1136                 goto error_unlock;
1137         }
1138
1139         dev_set_drvdata(&viodev->dev, NULL);
1140
1141         new_devdata = kzalloc(sizeof(*new_devdata), GFP_NOFS);
1142         if (!new_devdata) {
1143                 dev_err(&viodev->dev, "%s: Could not allocate memory for device data\n", __func__);
1144                 ret = -ENOMEM;
1145                 goto error_unlock;
1146         }
1147
1148         new_devdata->counters = kzalloc(sizeof(*new_devdata->counters),
1149                         GFP_NOFS);
1150         if (!new_devdata->counters) {
1151                 dev_err(&viodev->dev, "%s: Could not allocate memory for performance counters\n", __func__);
1152                 ret = -ENOMEM;
1153                 goto error_unlock;
1154         }
1155
1156         new_devdata->vdev = viodev;
1157         new_devdata->dev = &viodev->dev;
1158         nx842_OF_set_defaults(new_devdata);
1159
1160         rcu_assign_pointer(devdata, new_devdata);
1161         spin_unlock_irqrestore(&devdata_mutex, flags);
1162         synchronize_rcu();
1163         kfree(old_devdata);
1164
1165         of_reconfig_notifier_register(&nx842_of_nb);
1166
1167         ret = nx842_OF_upd(NULL);
1168         if (ret && ret != -ENODEV) {
1169                 dev_err(&viodev->dev, "could not parse device tree. %d\n", ret);
1170                 ret = -1;
1171                 goto error;
1172         }
1173
1174         rcu_read_lock();
1175         dev_set_drvdata(&viodev->dev, rcu_dereference(devdata));
1176         rcu_read_unlock();
1177
1178         if (sysfs_create_group(&viodev->dev.kobj, &nx842_attribute_group)) {
1179                 dev_err(&viodev->dev, "could not create sysfs device attributes\n");
1180                 ret = -1;
1181                 goto error;
1182         }
1183
1184         nx842_register_driver(&nx842_pseries_driver);
1185
1186         return 0;
1187
1188 error_unlock:
1189         spin_unlock_irqrestore(&devdata_mutex, flags);
1190         if (new_devdata)
1191                 kfree(new_devdata->counters);
1192         kfree(new_devdata);
1193 error:
1194         return ret;
1195 }
1196
1197 static int __exit nx842_remove(struct vio_dev *viodev)
1198 {
1199         struct nx842_devdata *old_devdata;
1200         unsigned long flags;
1201
1202         pr_info("Removing IBM Power 842 compression device\n");
1203         sysfs_remove_group(&viodev->dev.kobj, &nx842_attribute_group);
1204
1205         spin_lock_irqsave(&devdata_mutex, flags);
1206         old_devdata = rcu_dereference_check(devdata,
1207                         lockdep_is_held(&devdata_mutex));
1208         of_reconfig_notifier_unregister(&nx842_of_nb);
1209         RCU_INIT_POINTER(devdata, NULL);
1210         spin_unlock_irqrestore(&devdata_mutex, flags);
1211         synchronize_rcu();
1212         dev_set_drvdata(&viodev->dev, NULL);
1213         if (old_devdata)
1214                 kfree(old_devdata->counters);
1215         kfree(old_devdata);
1216
1217         nx842_unregister_driver(&nx842_pseries_driver);
1218
1219         return 0;
1220 }
1221
1222 static struct vio_device_id nx842_driver_ids[] = {
1223         {NX842_PSERIES_COMPAT_NAME "-v1", NX842_PSERIES_COMPAT_NAME},
1224         {"", ""},
1225 };
1226
1227 static struct vio_driver nx842_driver = {
1228         .name = MODULE_NAME,
1229         .probe = nx842_probe,
1230         .remove = __exit_p(nx842_remove),
1231         .get_desired_dma = nx842_get_desired_dma,
1232         .id_table = nx842_driver_ids,
1233 };
1234
1235 static int __init nx842_init(void)
1236 {
1237         struct nx842_devdata *new_devdata;
1238         pr_info("Registering IBM Power 842 compression driver\n");
1239
1240         BUILD_BUG_ON(sizeof(struct nx842_workmem) > NX842_MEM_COMPRESS);
1241
1242         RCU_INIT_POINTER(devdata, NULL);
1243         new_devdata = kzalloc(sizeof(*new_devdata), GFP_KERNEL);
1244         if (!new_devdata) {
1245                 pr_err("Could not allocate memory for device data\n");
1246                 return -ENOMEM;
1247         }
1248         new_devdata->status = UNAVAILABLE;
1249         RCU_INIT_POINTER(devdata, new_devdata);
1250
1251         return vio_register_driver(&nx842_driver);
1252 }
1253
1254 module_init(nx842_init);
1255
1256 static void __exit nx842_exit(void)
1257 {
1258         struct nx842_devdata *old_devdata;
1259         unsigned long flags;
1260
1261         pr_info("Exiting IBM Power 842 compression driver\n");
1262         spin_lock_irqsave(&devdata_mutex, flags);
1263         old_devdata = rcu_dereference_check(devdata,
1264                         lockdep_is_held(&devdata_mutex));
1265         RCU_INIT_POINTER(devdata, NULL);
1266         spin_unlock_irqrestore(&devdata_mutex, flags);
1267         synchronize_rcu();
1268         if (old_devdata)
1269                 dev_set_drvdata(old_devdata->dev, NULL);
1270         kfree(old_devdata);
1271         nx842_unregister_driver(&nx842_pseries_driver);
1272         vio_unregister_driver(&nx842_driver);
1273 }
1274
1275 module_exit(nx842_exit);
1276
1277 /*********************************
1278  * 842 software decompressor
1279  *********************************/
1280 typedef int (*sw842_template_op)(const char **, int *, unsigned char **,
1281                                                 struct sw842_fifo *);
1282
1283 static int sw842_data8(const char **, int *, unsigned char **,
1284                                                 struct sw842_fifo *);
1285 static int sw842_data4(const char **, int *, unsigned char **,
1286                                                 struct sw842_fifo *);
1287 static int sw842_data2(const char **, int *, unsigned char **,
1288                                                 struct sw842_fifo *);
1289 static int sw842_ptr8(const char **, int *, unsigned char **,
1290                                                 struct sw842_fifo *);
1291 static int sw842_ptr4(const char **, int *, unsigned char **,
1292                                                 struct sw842_fifo *);
1293 static int sw842_ptr2(const char **, int *, unsigned char **,
1294                                                 struct sw842_fifo *);
1295
1296 /* special templates */
1297 #define SW842_TMPL_REPEAT 0x1B
1298 #define SW842_TMPL_ZEROS 0x1C
1299 #define SW842_TMPL_EOF 0x1E
1300
1301 static sw842_template_op sw842_tmpl_ops[26][4] = {
1302         { sw842_data8, NULL}, /* 0 (00000) */
1303         { sw842_data4, sw842_data2, sw842_ptr2,  NULL},
1304         { sw842_data4, sw842_ptr2,  sw842_data2, NULL},
1305         { sw842_data4, sw842_ptr2,  sw842_ptr2,  NULL},
1306         { sw842_data4, sw842_ptr4,  NULL},
1307         { sw842_data2, sw842_ptr2,  sw842_data4, NULL},
1308         { sw842_data2, sw842_ptr2,  sw842_data2, sw842_ptr2},
1309         { sw842_data2, sw842_ptr2,  sw842_ptr2,  sw842_data2},
1310         { sw842_data2, sw842_ptr2,  sw842_ptr2,  sw842_ptr2,},
1311         { sw842_data2, sw842_ptr2,  sw842_ptr4,  NULL},
1312         { sw842_ptr2,  sw842_data2, sw842_data4, NULL}, /* 10 (01010) */
1313         { sw842_ptr2,  sw842_data4, sw842_ptr2,  NULL},
1314         { sw842_ptr2,  sw842_data2, sw842_ptr2,  sw842_data2},
1315         { sw842_ptr2,  sw842_data2, sw842_ptr2,  sw842_ptr2},
1316         { sw842_ptr2,  sw842_data2, sw842_ptr4,  NULL},
1317         { sw842_ptr2,  sw842_ptr2,  sw842_data4, NULL},
1318         { sw842_ptr2,  sw842_ptr2,  sw842_data2, sw842_ptr2},
1319         { sw842_ptr2,  sw842_ptr2,  sw842_ptr2,  sw842_data2},
1320         { sw842_ptr2,  sw842_ptr2,  sw842_ptr2,  sw842_ptr2},
1321         { sw842_ptr2,  sw842_ptr2,  sw842_ptr4,  NULL},
1322         { sw842_ptr4,  sw842_data4, NULL}, /* 20 (10100) */
1323         { sw842_ptr4,  sw842_data2, sw842_ptr2,  NULL},
1324         { sw842_ptr4,  sw842_ptr2,  sw842_data2, NULL},
1325         { sw842_ptr4,  sw842_ptr2,  sw842_ptr2,  NULL},
1326         { sw842_ptr4,  sw842_ptr4,  NULL},
1327         { sw842_ptr8,  NULL}
1328 };
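/*
 * Worked example (illustrative): template 6 (binary 00110) selects
 * { sw842_data2, sw842_ptr2, sw842_data2, sw842_ptr2 }, i.e. the eight
 * output bytes are rebuilt from two literal 2-byte groups interleaved with
 * two 2-byte back-references into the f2 fifo.  The special templates
 * 0x1B (repeat), 0x1C (zeros) and 0x1E (EOF) lie outside this 26-entry
 * table and are handled separately.
 */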
1329
1330 /* Software decompress helpers */
1331
1332 static uint8_t sw842_get_byte(const char *buf, int bit)
1333 {
1334         uint8_t tmpl;
1335         uint16_t tmp;
1336         tmp = htons(*(uint16_t *)(buf));
1337         tmp = (uint16_t)(tmp << bit);
1338         tmp = ntohs(tmp);
1339         memcpy(&tmpl, &tmp, 1);
1340         return tmpl;
1341 }
1342
1343 static uint8_t sw842_get_template(const char **buf, int *bit)
1344 {
1345         uint8_t byte;
1346         byte = sw842_get_byte(*buf, *bit);
1347         byte = byte >> 3;
1348         byte &= 0x1F;
1349         *buf += (*bit + 5) / 8;
1350         *bit = (*bit + 5) % 8;
1351         return byte;
1352 }
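/*
 * Worked example (illustrative; relies on the big-endian byte order of the
 * Power systems this driver runs on): with *buf pointing at bytes 0xab 0xcd
 * and *bit == 3, sw842_get_byte() returns the eight bits starting at bit
 * offset 3, i.e. 0x5e, so the template is (0x5e >> 3) & 0x1f == 0x0b and
 * the cursor advances by five bits to *buf + 1, *bit == 0.
 */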
1353
1354 /* the repeat count is a 6-bit field, extracted the same way as the 5-bit template */
1355 static uint8_t sw842_get_repeat_count(const char **buf, int *bit)
1356 {
1357         uint8_t byte;
1358         byte = sw842_get_byte(*buf, *bit);
1359         byte = byte >> 2;
1360         byte &= 0x3F;
1361         *buf += (*bit + 6) / 8;
1362         *bit = (*bit + 6) % 8;
1363         return byte;
1364 }
1365
1366 static uint8_t sw842_get_ptr2(const char **buf, int *bit)
1367 {
1368         uint8_t ptr;
1369         ptr = sw842_get_byte(*buf, *bit);
1370         (*buf)++;
1371         return ptr;
1372 }
1373
1374 static uint16_t sw842_get_ptr4(const char **buf, int *bit,
1375                 struct sw842_fifo *fifo)
1376 {
1377         uint16_t ptr;
1378         ptr = htons(*(uint16_t *)(*buf));
1379         ptr = (uint16_t)(ptr << *bit);
1380         ptr = ptr >> 7;
1381         ptr &= 0x01FF;
1382         *buf += (*bit + 9) / 8;
1383         *bit = (*bit + 9) % 8;
1384         return ptr;
1385 }
1386
1387 static uint8_t sw842_get_ptr8(const char **buf, int *bit,
1388                 struct sw842_fifo *fifo)
1389 {
1390         return sw842_get_ptr2(buf, bit);
1391 }
1392
1393 /* Software decompress template ops */
1394
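/*
 * Template ops.  sw842_data8/sw842_data4 are built by composing sw842_data2,
 * which copies two literal bytes at the current bit offset.  The ptrN ops
 * return 1 when the decoded index points at a fifo slot that has not been
 * filled yet; sw842_decompress() treats that as corrupt input (-EINVAL).
 */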
1395 static int sw842_data8(const char **inbuf, int *inbit,
1396                 unsigned char **outbuf, struct sw842_fifo *fifo)
1397 {
1398         int ret;
1399
1400         ret = sw842_data4(inbuf, inbit, outbuf, fifo);
1401         if (ret)
1402                 return ret;
1403         ret = sw842_data4(inbuf, inbit, outbuf, fifo);
1404         return ret;
1405 }
1406
1407 static int sw842_data4(const char **inbuf, int *inbit,
1408                 unsigned char **outbuf, struct sw842_fifo *fifo)
1409 {
1410         int ret;
1411
1412         ret = sw842_data2(inbuf, inbit, outbuf, fifo);
1413         if (ret)
1414                 return ret;
1415         ret = sw842_data2(inbuf, inbit, outbuf, fifo);
1416         return ret;
1417 }
1418
1419 static int sw842_data2(const char **inbuf, int *inbit,
1420                 unsigned char **outbuf, struct sw842_fifo *fifo)
1421 {
1422         **outbuf = sw842_get_byte(*inbuf, *inbit);
1423         (*inbuf)++;
1424         (*outbuf)++;
1425         **outbuf = sw842_get_byte(*inbuf, *inbit);
1426         (*inbuf)++;
1427         (*outbuf)++;
1428         return 0;
1429 }
1430
1431 static int sw842_ptr8(const char **inbuf, int *inbit,
1432                 unsigned char **outbuf, struct sw842_fifo *fifo)
1433 {
1434         uint8_t ptr;
1435         ptr = sw842_get_ptr8(inbuf, inbit, fifo);
1436         if (!fifo->f84_full && (ptr >= fifo->f8_count))
1437                 return 1;
1438         memcpy(*outbuf, fifo->f8[ptr], 8);
1439         *outbuf += 8;
1440         return 0;
1441 }
1442
1443 static int sw842_ptr4(const char **inbuf, int *inbit,
1444                 unsigned char **outbuf, struct sw842_fifo *fifo)
1445 {
1446         uint16_t ptr;
1447         ptr = sw842_get_ptr4(inbuf, inbit, fifo);
1448         if (!fifo->f84_full && (ptr >= fifo->f4_count))
1449                 return 1;
1450         memcpy(*outbuf, fifo->f4[ptr], 4);
1451         *outbuf += 4;
1452         return 0;
1453 }
1454
1455 static int sw842_ptr2(const char **inbuf, int *inbit,
1456                 unsigned char **outbuf, struct sw842_fifo *fifo)
1457 {
1458         uint8_t ptr;
1459         ptr = sw842_get_ptr2(inbuf, inbit);
1460         if (!fifo->f2_full && (ptr >= fifo->f2_count))
1461                 return 1;
1462         memcpy(*outbuf, fifo->f2[ptr], 2);
1463         *outbuf += 2;
1464         return 0;
1465 }
1466
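/*
 * Push one freshly decompressed 8-byte group into the history fifos so that
 * later ptr2/ptr4/ptr8 templates can reference it.  Only the 8-byte table is
 * written directly; the 4-byte entries view the same storage, so f4_count
 * just advances by two per group.  The 2-byte fifo receives four 2-byte
 * copies; its counter is a single byte, so wrap-around shows up as the count
 * going backwards and is recorded in f2_full, after which index validation
 * is skipped.
 */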
1467 static void sw842_copy_to_fifo(const char *buf, struct sw842_fifo *fifo)
1468 {
1469         unsigned char initial_f2count = fifo->f2_count;
1470
1471         memcpy(fifo->f8[fifo->f8_count], buf, 8);
1472         fifo->f4_count += 2;
1473         fifo->f8_count += 1;
1474
1475         if (!fifo->f84_full && fifo->f4_count >= 512) {
1476                 fifo->f84_full = 1;
1477                 fifo->f4_count /= 512;
1478         }
1479
1480         memcpy(fifo->f2[fifo->f2_count++], buf, 2);
1481         memcpy(fifo->f2[fifo->f2_count++], buf + 2, 2);
1482         memcpy(fifo->f2[fifo->f2_count++], buf + 4, 2);
1483         memcpy(fifo->f2[fifo->f2_count++], buf + 6, 2);
1484         if (fifo->f2_count < initial_f2count)
1485                 fifo->f2_full = 1;
1486 }
1487
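/*
 * Software decompressor.  The input is a stream of 5-bit template codes and
 * their payload bits: SW842_TMPL_ZEROS emits an 8-byte group of zeroes,
 * SW842_TMPL_REPEAT emits the previous group (repeat count + 1) times,
 * SW842_TMPL_EOF ends the stream, and everything else is dispatched through
 * sw842_tmpl_ops[].  Each emitted group is also pushed into the history
 * fifos.  Returns 0 on success with *destlen set to the number of bytes
 * written, -EINVAL on malformed input, or -ENOSPC when dst is too small;
 * on error *destlen is set to 0.
 */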
1488 static int sw842_decompress(const unsigned char *src, int srclen,
1489                         unsigned char *dst, int *destlen,
1490                         const void *wrkmem)
1491 {
1492         uint8_t tmpl;
1493         const char *inbuf;
1494         int inbit = 0;
1495         unsigned char *outbuf, *outbuf_end, *origbuf, *prevbuf;
1496         const char *inbuf_end;
1497         sw842_template_op op;
1498         int opindex;
1499         int i, repeat_count;
1500         struct sw842_fifo *fifo;
1501         int ret = 0;
1502
1503         fifo = &((struct nx842_workmem *)(wrkmem))->swfifo;
1504         memset(fifo, 0, sizeof(*fifo));
1505
1506         origbuf = NULL;
1507         inbuf = src;
1508         inbuf_end = src + srclen;
1509         outbuf = dst;
1510         outbuf_end = dst + *destlen;
1511
1512         while ((tmpl = sw842_get_template(&inbuf, &inbit)) != SW842_TMPL_EOF) {
1513                 if (inbuf >= inbuf_end) {
1514                         ret = -EINVAL;
1515                         goto out;
1516                 }
1517
1518                 opindex = 0;
1519                 prevbuf = origbuf;
1520                 origbuf = outbuf;
1521                 switch (tmpl) {
1522                 case SW842_TMPL_REPEAT:
1523                         if (prevbuf == NULL) {
1524                                 ret = -EINVAL;
1525                                 goto out;
1526                         }
1527
1528                         repeat_count = sw842_get_repeat_count(&inbuf,
1529                                                                 &inbit) + 1;
1530
1531                         /* Did reading the repeat count advance past the end of the input? */
1532                         if (inbuf > inbuf_end) {
1533                                 ret = -EINVAL;
1534                                 goto out;
1535                         }
1536
1537                         for (i = 0; i < repeat_count; i++) {
1538                                 /* Would this overflow the output buffer? */
1539                                 if ((outbuf + 8) > outbuf_end) {
1540                                         ret = -ENOSPC;
1541                                         goto out;
1542                                 }
1543
1544                                 memcpy(outbuf, prevbuf, 8);
1545                                 sw842_copy_to_fifo(outbuf, fifo);
1546                                 outbuf += 8;
1547                         }
1548                         break;
1549
1550                 case SW842_TMPL_ZEROS:
1551                         /* Would this overflow the output buffer? */
1552                         if ((outbuf + 8) > outbuf_end) {
1553                                 ret = -ENOSPC;
1554                                 goto out;
1555                         }
1556
1557                         memset(outbuf, 0, 8);
1558                         sw842_copy_to_fifo(outbuf, fifo);
1559                         outbuf += 8;
1560                         break;
1561
1562                 default:
1563                         if (tmpl > 25) {
1564                                 ret = -EINVAL;
1565                                 goto out;
1566                         }
1567
1568                         /* Does this go past the end of the input buffer? */
1569                         if ((inbuf + 2) > inbuf_end) {
1570                                 ret = -EINVAL;
1571                                 goto out;
1572                         }
1573
1574                         /* Would this overflow the output buffer? */
1575                         if ((outbuf + 8) > outbuf_end) {
1576                                 ret = -ENOSPC;
1577                                 goto out;
1578                         }
1579
1580                         while (opindex < 4 &&
1581                                 (op = sw842_tmpl_ops[tmpl][opindex++])
1582                                         != NULL) {
1583                                 ret = (*op)(&inbuf, &inbit, &outbuf, fifo);
1584                                 if (ret) {
1585                                         ret = -EINVAL;
1586                                         goto out;
1587                                 }
1588                                 sw842_copy_to_fifo(origbuf, fifo);
1589                         }
1590                 }
1591         }
1592
1593 out:
1594         if (!ret)
1595                 *destlen = (unsigned int)(outbuf - dst);
1596         else
1597                 *destlen = 0;
1598
1599         return ret;
1600 }