arch/s390/lib/xor.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Optimized xor_block operation for RAID4/5
 *
 * Copyright IBM Corp. 2016
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/types.h>
#include <linux/export.h>
#include <linux/raid/xor.h>
#include <asm/xor.h>

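/*
 * XOR the source buffer p2 into the destination p1 using the XC
 * (exclusive-or character) instruction. XC handles at most 256 bytes per
 * execution, so complete 256-byte chunks are processed in a loop (chunk
 * count in r0) and the remaining 1..256 bytes are handled by EXECUTE (ex),
 * which supplies the residual length to the XC template at local label 2.
 * r1 holds the address of that template.
 */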
static void xor_xc_2(unsigned long bytes, unsigned long * __restrict p1,
                     const unsigned long * __restrict p2)
{
        asm volatile(
                "       larl    1,2f\n"
                "       aghi    %0,-1\n"
                "       jm      3f\n"
                "       srlg    0,%0,8\n"
                "       ltgr    0,0\n"
                "       jz      1f\n"
                "0:     xc      0(256,%1),0(%2)\n"
                "       la      %1,256(%1)\n"
                "       la      %2,256(%2)\n"
                "       brctg   0,0b\n"
                "1:     ex      %0,0(1)\n"
                "       j       3f\n"
                "2:     xc      0(1,%1),0(%2)\n"
                "3:\n"
                : "+d" (bytes), "+a" (p1), "+a" (p2)
                : : "0", "1", "cc", "memory");
}

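/* As xor_xc_2, but with two source buffers XORed into p1. */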
static void xor_xc_3(unsigned long bytes, unsigned long * __restrict p1,
                     const unsigned long * __restrict p2,
                     const unsigned long * __restrict p3)
{
        asm volatile(
                "       larl    1,2f\n"
                "       aghi    %0,-1\n"
                "       jm      3f\n"
                "       srlg    0,%0,8\n"
                "       ltgr    0,0\n"
                "       jz      1f\n"
                "0:     xc      0(256,%1),0(%2)\n"
                "       xc      0(256,%1),0(%3)\n"
                "       la      %1,256(%1)\n"
                "       la      %2,256(%2)\n"
                "       la      %3,256(%3)\n"
                "       brctg   0,0b\n"
                "1:     ex      %0,0(1)\n"
                "       ex      %0,6(1)\n"
                "       j       3f\n"
                "2:     xc      0(1,%1),0(%2)\n"
                "       xc      0(1,%1),0(%3)\n"
                "3:\n"
                : "+d" (bytes), "+a" (p1), "+a" (p2), "+a" (p3)
                : : "0", "1", "cc", "memory");
}

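/* As xor_xc_2, but with three source buffers XORed into p1. */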
static void xor_xc_4(unsigned long bytes, unsigned long * __restrict p1,
                     const unsigned long * __restrict p2,
                     const unsigned long * __restrict p3,
                     const unsigned long * __restrict p4)
{
        asm volatile(
                "       larl    1,2f\n"
                "       aghi    %0,-1\n"
                "       jm      3f\n"
                "       srlg    0,%0,8\n"
                "       ltgr    0,0\n"
                "       jz      1f\n"
                "0:     xc      0(256,%1),0(%2)\n"
                "       xc      0(256,%1),0(%3)\n"
                "       xc      0(256,%1),0(%4)\n"
                "       la      %1,256(%1)\n"
                "       la      %2,256(%2)\n"
                "       la      %3,256(%3)\n"
                "       la      %4,256(%4)\n"
                "       brctg   0,0b\n"
                "1:     ex      %0,0(1)\n"
                "       ex      %0,6(1)\n"
                "       ex      %0,12(1)\n"
                "       j       3f\n"
                "2:     xc      0(1,%1),0(%2)\n"
                "       xc      0(1,%1),0(%3)\n"
                "       xc      0(1,%1),0(%4)\n"
                "3:\n"
                : "+d" (bytes), "+a" (p1), "+a" (p2), "+a" (p3), "+a" (p4)
                : : "0", "1", "cc", "memory");
}

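/* As xor_xc_2, but with four source buffers XORed into p1. */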
static void xor_xc_5(unsigned long bytes, unsigned long * __restrict p1,
                     const unsigned long * __restrict p2,
                     const unsigned long * __restrict p3,
                     const unsigned long * __restrict p4,
                     const unsigned long * __restrict p5)
{
        asm volatile(
                "       larl    1,2f\n"
                "       aghi    %0,-1\n"
                "       jm      3f\n"
                "       srlg    0,%0,8\n"
                "       ltgr    0,0\n"
                "       jz      1f\n"
                "0:     xc      0(256,%1),0(%2)\n"
                "       xc      0(256,%1),0(%3)\n"
                "       xc      0(256,%1),0(%4)\n"
                "       xc      0(256,%1),0(%5)\n"
                "       la      %1,256(%1)\n"
                "       la      %2,256(%2)\n"
                "       la      %3,256(%3)\n"
                "       la      %4,256(%4)\n"
                "       la      %5,256(%5)\n"
                "       brctg   0,0b\n"
                "1:     ex      %0,0(1)\n"
                "       ex      %0,6(1)\n"
                "       ex      %0,12(1)\n"
                "       ex      %0,18(1)\n"
                "       j       3f\n"
                "2:     xc      0(1,%1),0(%2)\n"
                "       xc      0(1,%1),0(%3)\n"
                "       xc      0(1,%1),0(%4)\n"
                "       xc      0(1,%1),0(%5)\n"
                "3:\n"
                : "+d" (bytes), "+a" (p1), "+a" (p2), "+a" (p3), "+a" (p4),
                  "+a" (p5)
                : : "0", "1", "cc", "memory");
}

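/* xor_block template based on the XC instruction (see <asm/xor.h>). */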
struct xor_block_template xor_block_xc = {
        .name = "xc",
        .do_2 = xor_xc_2,
        .do_3 = xor_xc_3,
        .do_4 = xor_xc_4,
        .do_5 = xor_xc_5,
};
EXPORT_SYMBOL(xor_block_xc);