arch/mips/mm/page.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003, 04, 05 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2007  Maciej W. Rozycki
 * Copyright (C) 2008  Thiemo Seufer
 * Copyright (C) 2012  MIPS Technologies, Inc.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>

#include <asm/bugs.h>
#include <asm/cacheops.h>
#include <asm/cpu-type.h>
#include <asm/inst.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/prefetch.h>
#include <asm/bootinfo.h>
#include <asm/mipsregs.h>
#include <asm/mmu_context.h>
#include <asm/regdef.h>
#include <asm/cpu.h>

#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
#include <asm/sibyte/sb1250.h>
#include <asm/sibyte/sb1250_regs.h>
#include <asm/sibyte/sb1250_dma.h>
#endif

#include <asm/uasm.h>

/* Handle labels (which must be positive integers). */
enum label_id {
        label_clear_nopref = 1,
        label_clear_pref,
        label_copy_nopref,
        label_copy_pref_both,
        label_copy_pref_store,
};

UASM_L_LA(_clear_nopref)
UASM_L_LA(_clear_pref)
UASM_L_LA(_copy_nopref)
UASM_L_LA(_copy_pref_both)
UASM_L_LA(_copy_pref_store)

/* We need one branch and therefore one relocation per target label. */
static struct uasm_label labels[5];
static struct uasm_reloc relocs[5];

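/* PRId checks for the R4600 V1.x / V2.x hit-cacheop workarounds below. */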
#define cpu_is_r4600_v1_x()     ((read_c0_prid() & 0xfffffff0) == 0x00002010)
#define cpu_is_r4600_v2_x()     ((read_c0_prid() & 0xfffffff0) == 0x00002020)

/*
 * R6 has a limited offset field in the pref instruction.
 * Skip the prefetch if the offset does not fit in 9 bits.
 */
#define _uasm_i_pref(a, b, c, d)                \
do {                                            \
        if (cpu_has_mips_r6) {                  \
                if (c <= 0xff && c >= -0x100)   \
                        uasm_i_pref(a, b, c, d);\
        } else {                                \
                uasm_i_pref(a, b, c, d);        \
        }                                       \
} while (0)

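/*
 * Prefetch hints, prefetch distances (in bytes) and per-access word sizes,
 * all chosen by set_prefetch_parameters().
 */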
static int pref_bias_clear_store;
static int pref_bias_copy_load;
static int pref_bias_copy_store;

static u32 pref_src_mode;
static u32 pref_dst_mode;

static int clear_word_size;
static int copy_word_size;

static int half_clear_loop_size;
static int half_copy_loop_size;

static int cache_line_size;
#define cache_line_mask() (cache_line_size - 1)

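/*
 * Emit code computing reg1 = reg2 + off.  Offsets that do not fit in a
 * 16-bit immediate are built up in GPR_T9 first; when the DADDI errata
 * workaround applies, the add always goes through GPR_T9 and daddu.
 */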
static inline void
pg_addiu(u32 **buf, unsigned int reg1, unsigned int reg2, unsigned int off)
{
        if (cpu_has_64bit_gp_regs &&
            IS_ENABLED(CONFIG_CPU_DADDI_WORKAROUNDS) &&
            r4k_daddiu_bug()) {
                if (off > 0x7fff) {
                        uasm_i_lui(buf, GPR_T9, uasm_rel_hi(off));
                        uasm_i_addiu(buf, GPR_T9, GPR_T9, uasm_rel_lo(off));
                } else
                        uasm_i_addiu(buf, GPR_T9, GPR_ZERO, off);
                uasm_i_daddu(buf, reg1, reg2, GPR_T9);
        } else {
                if (off > 0x7fff) {
                        uasm_i_lui(buf, GPR_T9, uasm_rel_hi(off));
                        uasm_i_addiu(buf, GPR_T9, GPR_T9, uasm_rel_lo(off));
                        UASM_i_ADDU(buf, reg1, reg2, GPR_T9);
                } else
                        UASM_i_ADDIU(buf, reg1, reg2, off);
        }
}

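/*
 * Pick the clear/copy word sizes, prefetch hints and prefetch distances
 * based on the CPU type and its cache line size.
 */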
static void set_prefetch_parameters(void)
{
        if (cpu_has_64bit_gp_regs || cpu_has_64bit_zero_reg)
                clear_word_size = 8;
        else
                clear_word_size = 4;

        if (cpu_has_64bit_gp_regs)
                copy_word_size = 8;
        else
                copy_word_size = 4;

        /*
         * The prefetches used here use "streaming" hints, which cause the
         * copied data to be kicked out of the cache sooner.  A page copy
         * often ends up copying a lot more data than is commonly used, so
         * this seems to make sense in terms of reducing cache pollution,
         * but I've no real performance data to back this up.
         */
        if (cpu_has_prefetch) {
                /*
                 * XXX: Most prefetch bias values in here are based on
                 * guesswork.
                 */
                cache_line_size = cpu_dcache_line_size();
                switch (current_cpu_type()) {
                case CPU_R5500:
                case CPU_TX49XX:
                        /* These processors only support Pref_Load. */
                        pref_bias_copy_load = 256;
                        break;

                case CPU_R10000:
                case CPU_R12000:
                case CPU_R14000:
                case CPU_R16000:
                        /*
                         * Those values have been experimentally tuned for an
                         * Origin 200.
                         */
                        pref_bias_clear_store = 512;
                        pref_bias_copy_load = 256;
                        pref_bias_copy_store = 256;
                        pref_src_mode = Pref_LoadStreamed;
                        pref_dst_mode = Pref_StoreStreamed;
                        break;

                case CPU_SB1:
                case CPU_SB1A:
                        pref_bias_clear_store = 128;
                        pref_bias_copy_load = 128;
                        pref_bias_copy_store = 128;
                        /*
                         * SB1 pass1 Pref_LoadStreamed/Pref_StoreStreamed
                         * hints are broken.
                         */
                        if (current_cpu_type() == CPU_SB1 &&
                            (current_cpu_data.processor_id & 0xff) < 0x02) {
                                pref_src_mode = Pref_Load;
                                pref_dst_mode = Pref_Store;
                        } else {
                                pref_src_mode = Pref_LoadStreamed;
                                pref_dst_mode = Pref_StoreStreamed;
                        }
                        break;

                case CPU_LOONGSON64:
                        /* Loongson-3 only supports Pref_Load and Pref_Store. */
                        pref_bias_clear_store = 128;
                        pref_bias_copy_load = 128;
                        pref_bias_copy_store = 128;
                        pref_src_mode = Pref_Load;
                        pref_dst_mode = Pref_Store;
                        break;

                default:
                        pref_bias_clear_store = 128;
                        pref_bias_copy_load = 256;
                        pref_bias_copy_store = 128;
                        pref_src_mode = Pref_LoadStreamed;
                        if (cpu_has_mips_r6)
                                /*
                                 * Hint 30 (Pref_PrepareForStore) has been
                                 * removed from MIPS R6.  Use hint 5
                                 * (Pref_StoreStreamed) instead.
                                 */
                                pref_dst_mode = Pref_StoreStreamed;
                        else
                                pref_dst_mode = Pref_PrepareForStore;
                        break;
                }
        } else {
                if (cpu_has_cache_cdex_s)
                        cache_line_size = cpu_scache_line_size();
                else if (cpu_has_cache_cdex_p)
                        cache_line_size = cpu_dcache_line_size();
        }
        /*
         * Too much unrolling will overflow the available space between
         * __clear_page_start/__clear_page_end and
         * __copy_page_start/__copy_page_end.
         */
        half_clear_loop_size = min(16 * clear_word_size,
                                   max(cache_line_size >> 1,
                                       4 * clear_word_size));
        half_copy_loop_size = min(16 * copy_word_size,
                                  max(cache_line_size >> 1,
                                      4 * copy_word_size));
}

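/* Emit a single store of zero (sd or sw) at offset 'off' from GPR_A0. */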
static void build_clear_store(u32 **buf, int off)
{
        if (cpu_has_64bit_gp_regs || cpu_has_64bit_zero_reg) {
                uasm_i_sd(buf, GPR_ZERO, off, GPR_A0);
        } else {
                uasm_i_sw(buf, GPR_ZERO, off, GPR_A0);
        }
}

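/*
 * Emit a prefetch for the clear loop, at most once per cache line.  When
 * no store prefetch distance is configured, fall back to a "create dirty
 * exclusive" cache op, with the R4600 hit-cacheop workarounds applied
 * where configured.
 */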
static inline void build_clear_pref(u32 **buf, int off)
{
        if (off & cache_line_mask())
                return;

        if (pref_bias_clear_store) {
                _uasm_i_pref(buf, pref_dst_mode, pref_bias_clear_store + off,
                            GPR_A0);
        } else if (cache_line_size == (half_clear_loop_size << 1)) {
                if (cpu_has_cache_cdex_s) {
                        uasm_i_cache(buf, Create_Dirty_Excl_SD, off, GPR_A0);
                } else if (cpu_has_cache_cdex_p) {
                        if (IS_ENABLED(CONFIG_WAR_R4600_V1_HIT_CACHEOP) &&
                            cpu_is_r4600_v1_x()) {
                                uasm_i_nop(buf);
                                uasm_i_nop(buf);
                                uasm_i_nop(buf);
                                uasm_i_nop(buf);
                        }

                        if (IS_ENABLED(CONFIG_WAR_R4600_V2_HIT_CACHEOP) &&
                            cpu_is_r4600_v2_x())
                                uasm_i_lw(buf, GPR_ZERO, GPR_ZERO, GPR_AT);

                        uasm_i_cache(buf, Create_Dirty_Excl_D, off, GPR_A0);
                }
        }
}

extern u32 __clear_page_start;
extern u32 __clear_page_end;
extern u32 __copy_page_start;
extern u32 __copy_page_end;

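/*
 * Synthesize the clear page handler into the __clear_page_start ..
 * __clear_page_end area using uasm.
 */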
void build_clear_page(void)
{
        int off;
        u32 *buf = &__clear_page_start;
        struct uasm_label *l = labels;
        struct uasm_reloc *r = relocs;
        int i;
        static atomic_t run_once = ATOMIC_INIT(0);

        if (atomic_xchg(&run_once, 1)) {
                return;
        }

        memset(labels, 0, sizeof(labels));
        memset(relocs, 0, sizeof(relocs));

        set_prefetch_parameters();

        /*
         * This algorithm makes the following assumptions:
         *   - The prefetch bias is a multiple of 2 words.
         *   - The prefetch bias is less than one page.
         */
        BUG_ON(pref_bias_clear_store % (2 * clear_word_size));
        BUG_ON(PAGE_SIZE < pref_bias_clear_store);

        off = PAGE_SIZE - pref_bias_clear_store;
        if (off > 0xffff || !pref_bias_clear_store)
                pg_addiu(&buf, GPR_A2, GPR_A0, off);
        else
                uasm_i_ori(&buf, GPR_A2, GPR_A0, off);

        if (IS_ENABLED(CONFIG_WAR_R4600_V2_HIT_CACHEOP) && cpu_is_r4600_v2_x())
                uasm_i_lui(&buf, GPR_AT, uasm_rel_hi(0xa0000000));

        off = cache_line_size ? min(8, pref_bias_clear_store / cache_line_size)
                                * cache_line_size : 0;
        while (off) {
                build_clear_pref(&buf, -off);
                off -= cache_line_size;
        }
        uasm_l_clear_pref(&l, buf);
        do {
                build_clear_pref(&buf, off);
                build_clear_store(&buf, off);
                off += clear_word_size;
        } while (off < half_clear_loop_size);
        pg_addiu(&buf, GPR_A0, GPR_A0, 2 * off);
        off = -off;
        do {
                build_clear_pref(&buf, off);
                if (off == -clear_word_size)
                        uasm_il_bne(&buf, &r, GPR_A0, GPR_A2, label_clear_pref);
                build_clear_store(&buf, off);
                off += clear_word_size;
        } while (off < 0);

        if (pref_bias_clear_store) {
                pg_addiu(&buf, GPR_A2, GPR_A0, pref_bias_clear_store);
                uasm_l_clear_nopref(&l, buf);
                off = 0;
                do {
                        build_clear_store(&buf, off);
                        off += clear_word_size;
                } while (off < half_clear_loop_size);
                pg_addiu(&buf, GPR_A0, GPR_A0, 2 * off);
                off = -off;
                do {
                        if (off == -clear_word_size)
                                uasm_il_bne(&buf, &r, GPR_A0, GPR_A2,
                                            label_clear_nopref);
                        build_clear_store(&buf, off);
                        off += clear_word_size;
                } while (off < 0);
        }

        uasm_i_jr(&buf, GPR_RA);
        uasm_i_nop(&buf);

        BUG_ON(buf > &__clear_page_end);

        uasm_resolve_relocs(relocs, labels);

        pr_debug("Synthesized clear page handler (%u instructions).\n",
                 (u32)(buf - &__clear_page_start));

        pr_debug("\t.set push\n");
        pr_debug("\t.set noreorder\n");
        for (i = 0; i < (buf - &__clear_page_start); i++)
                pr_debug("\t.word 0x%08x\n", (&__clear_page_start)[i]);
        pr_debug("\t.set pop\n");
}

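/* Emit a load of 'reg' from offset 'off' in the source page (GPR_A1). */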
static void build_copy_load(u32 **buf, int reg, int off)
{
        if (cpu_has_64bit_gp_regs) {
                uasm_i_ld(buf, reg, off, GPR_A1);
        } else {
                uasm_i_lw(buf, reg, off, GPR_A1);
        }
}

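/* Emit a store of 'reg' at offset 'off' in the destination page (GPR_A0). */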
static void build_copy_store(u32 **buf, int reg, int off)
{
        if (cpu_has_64bit_gp_regs) {
                uasm_i_sd(buf, reg, off, GPR_A0);
        } else {
                uasm_i_sw(buf, reg, off, GPR_A0);
        }
}

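/* Emit a source-stream prefetch, at most once per cache line. */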
static inline void build_copy_load_pref(u32 **buf, int off)
{
        if (off & cache_line_mask())
                return;

        if (pref_bias_copy_load)
                _uasm_i_pref(buf, pref_src_mode, pref_bias_copy_load + off, GPR_A1);
}

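/*
 * Emit a destination-stream prefetch, at most once per cache line.  When
 * no store prefetch distance is configured, fall back to a "create dirty
 * exclusive" cache op, with the R4600 hit-cacheop workarounds applied
 * where configured.
 */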
static inline void build_copy_store_pref(u32 **buf, int off)
{
        if (off & cache_line_mask())
                return;

        if (pref_bias_copy_store) {
                _uasm_i_pref(buf, pref_dst_mode, pref_bias_copy_store + off,
                            GPR_A0);
        } else if (cache_line_size == (half_copy_loop_size << 1)) {
                if (cpu_has_cache_cdex_s) {
                        uasm_i_cache(buf, Create_Dirty_Excl_SD, off, GPR_A0);
                } else if (cpu_has_cache_cdex_p) {
                        if (IS_ENABLED(CONFIG_WAR_R4600_V1_HIT_CACHEOP) &&
                            cpu_is_r4600_v1_x()) {
                                uasm_i_nop(buf);
                                uasm_i_nop(buf);
                                uasm_i_nop(buf);
                                uasm_i_nop(buf);
                        }

                        if (IS_ENABLED(CONFIG_WAR_R4600_V2_HIT_CACHEOP) &&
                            cpu_is_r4600_v2_x())
                                uasm_i_lw(buf, GPR_ZERO, GPR_ZERO, GPR_AT);

                        uasm_i_cache(buf, Create_Dirty_Excl_D, off, GPR_A0);
                }
        }
}

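/*
 * Synthesize the copy page handler into the __copy_page_start ..
 * __copy_page_end area using uasm.
 */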
void build_copy_page(void)
{
        int off;
        u32 *buf = &__copy_page_start;
        struct uasm_label *l = labels;
        struct uasm_reloc *r = relocs;
        int i;
        static atomic_t run_once = ATOMIC_INIT(0);

        if (atomic_xchg(&run_once, 1)) {
                return;
        }

        memset(labels, 0, sizeof(labels));
        memset(relocs, 0, sizeof(relocs));

        set_prefetch_parameters();

        /*
         * This algorithm makes the following assumptions:
         *   - All prefetch biases are multiples of 8 words.
         *   - The prefetch biases are less than one page.
         *   - The store prefetch bias isn't greater than the load
         *     prefetch bias.
         */
        BUG_ON(pref_bias_copy_load % (8 * copy_word_size));
        BUG_ON(pref_bias_copy_store % (8 * copy_word_size));
        BUG_ON(PAGE_SIZE < pref_bias_copy_load);
        BUG_ON(pref_bias_copy_store > pref_bias_copy_load);

        off = PAGE_SIZE - pref_bias_copy_load;
        if (off > 0xffff || !pref_bias_copy_load)
                pg_addiu(&buf, GPR_A2, GPR_A0, off);
        else
                uasm_i_ori(&buf, GPR_A2, GPR_A0, off);

        if (IS_ENABLED(CONFIG_WAR_R4600_V2_HIT_CACHEOP) && cpu_is_r4600_v2_x())
                uasm_i_lui(&buf, GPR_AT, uasm_rel_hi(0xa0000000));

        off = cache_line_size ? min(8, pref_bias_copy_load / cache_line_size) *
                                cache_line_size : 0;
        while (off) {
                build_copy_load_pref(&buf, -off);
                off -= cache_line_size;
        }
        off = cache_line_size ? min(8, pref_bias_copy_store / cache_line_size) *
                                cache_line_size : 0;
        while (off) {
                build_copy_store_pref(&buf, -off);
                off -= cache_line_size;
        }
        uasm_l_copy_pref_both(&l, buf);
        do {
                build_copy_load_pref(&buf, off);
                build_copy_load(&buf, GPR_T0, off);
                build_copy_load_pref(&buf, off + copy_word_size);
                build_copy_load(&buf, GPR_T1, off + copy_word_size);
                build_copy_load_pref(&buf, off + 2 * copy_word_size);
                build_copy_load(&buf, GPR_T2, off + 2 * copy_word_size);
                build_copy_load_pref(&buf, off + 3 * copy_word_size);
                build_copy_load(&buf, GPR_T3, off + 3 * copy_word_size);
                build_copy_store_pref(&buf, off);
                build_copy_store(&buf, GPR_T0, off);
                build_copy_store_pref(&buf, off + copy_word_size);
                build_copy_store(&buf, GPR_T1, off + copy_word_size);
                build_copy_store_pref(&buf, off + 2 * copy_word_size);
                build_copy_store(&buf, GPR_T2, off + 2 * copy_word_size);
                build_copy_store_pref(&buf, off + 3 * copy_word_size);
                build_copy_store(&buf, GPR_T3, off + 3 * copy_word_size);
                off += 4 * copy_word_size;
        } while (off < half_copy_loop_size);
        pg_addiu(&buf, GPR_A1, GPR_A1, 2 * off);
        pg_addiu(&buf, GPR_A0, GPR_A0, 2 * off);
        off = -off;
        do {
                build_copy_load_pref(&buf, off);
                build_copy_load(&buf, GPR_T0, off);
                build_copy_load_pref(&buf, off + copy_word_size);
                build_copy_load(&buf, GPR_T1, off + copy_word_size);
                build_copy_load_pref(&buf, off + 2 * copy_word_size);
                build_copy_load(&buf, GPR_T2, off + 2 * copy_word_size);
                build_copy_load_pref(&buf, off + 3 * copy_word_size);
                build_copy_load(&buf, GPR_T3, off + 3 * copy_word_size);
                build_copy_store_pref(&buf, off);
                build_copy_store(&buf, GPR_T0, off);
                build_copy_store_pref(&buf, off + copy_word_size);
                build_copy_store(&buf, GPR_T1, off + copy_word_size);
                build_copy_store_pref(&buf, off + 2 * copy_word_size);
                build_copy_store(&buf, GPR_T2, off + 2 * copy_word_size);
                build_copy_store_pref(&buf, off + 3 * copy_word_size);
                if (off == -(4 * copy_word_size))
                        uasm_il_bne(&buf, &r, GPR_A2, GPR_A0, label_copy_pref_both);
                build_copy_store(&buf, GPR_T3, off + 3 * copy_word_size);
                off += 4 * copy_word_size;
        } while (off < 0);

        if (pref_bias_copy_load - pref_bias_copy_store) {
                pg_addiu(&buf, GPR_A2, GPR_A0,
                         pref_bias_copy_load - pref_bias_copy_store);
                uasm_l_copy_pref_store(&l, buf);
                off = 0;
                do {
                        build_copy_load(&buf, GPR_T0, off);
                        build_copy_load(&buf, GPR_T1, off + copy_word_size);
                        build_copy_load(&buf, GPR_T2, off + 2 * copy_word_size);
                        build_copy_load(&buf, GPR_T3, off + 3 * copy_word_size);
                        build_copy_store_pref(&buf, off);
                        build_copy_store(&buf, GPR_T0, off);
                        build_copy_store_pref(&buf, off + copy_word_size);
                        build_copy_store(&buf, GPR_T1, off + copy_word_size);
                        build_copy_store_pref(&buf, off + 2 * copy_word_size);
                        build_copy_store(&buf, GPR_T2, off + 2 * copy_word_size);
                        build_copy_store_pref(&buf, off + 3 * copy_word_size);
                        build_copy_store(&buf, GPR_T3, off + 3 * copy_word_size);
                        off += 4 * copy_word_size;
                } while (off < half_copy_loop_size);
                pg_addiu(&buf, GPR_A1, GPR_A1, 2 * off);
                pg_addiu(&buf, GPR_A0, GPR_A0, 2 * off);
                off = -off;
                do {
                        build_copy_load(&buf, GPR_T0, off);
                        build_copy_load(&buf, GPR_T1, off + copy_word_size);
                        build_copy_load(&buf, GPR_T2, off + 2 * copy_word_size);
                        build_copy_load(&buf, GPR_T3, off + 3 * copy_word_size);
                        build_copy_store_pref(&buf, off);
                        build_copy_store(&buf, GPR_T0, off);
                        build_copy_store_pref(&buf, off + copy_word_size);
                        build_copy_store(&buf, GPR_T1, off + copy_word_size);
                        build_copy_store_pref(&buf, off + 2 * copy_word_size);
                        build_copy_store(&buf, GPR_T2, off + 2 * copy_word_size);
                        build_copy_store_pref(&buf, off + 3 * copy_word_size);
                        if (off == -(4 * copy_word_size))
                                uasm_il_bne(&buf, &r, GPR_A2, GPR_A0,
                                            label_copy_pref_store);
                        build_copy_store(&buf, GPR_T3, off + 3 * copy_word_size);
                        off += 4 * copy_word_size;
                } while (off < 0);
        }

        if (pref_bias_copy_store) {
                pg_addiu(&buf, GPR_A2, GPR_A0, pref_bias_copy_store);
                uasm_l_copy_nopref(&l, buf);
                off = 0;
                do {
                        build_copy_load(&buf, GPR_T0, off);
                        build_copy_load(&buf, GPR_T1, off + copy_word_size);
                        build_copy_load(&buf, GPR_T2, off + 2 * copy_word_size);
                        build_copy_load(&buf, GPR_T3, off + 3 * copy_word_size);
                        build_copy_store(&buf, GPR_T0, off);
                        build_copy_store(&buf, GPR_T1, off + copy_word_size);
                        build_copy_store(&buf, GPR_T2, off + 2 * copy_word_size);
                        build_copy_store(&buf, GPR_T3, off + 3 * copy_word_size);
                        off += 4 * copy_word_size;
                } while (off < half_copy_loop_size);
                pg_addiu(&buf, GPR_A1, GPR_A1, 2 * off);
                pg_addiu(&buf, GPR_A0, GPR_A0, 2 * off);
                off = -off;
                do {
                        build_copy_load(&buf, GPR_T0, off);
                        build_copy_load(&buf, GPR_T1, off + copy_word_size);
                        build_copy_load(&buf, GPR_T2, off + 2 * copy_word_size);
                        build_copy_load(&buf, GPR_T3, off + 3 * copy_word_size);
                        build_copy_store(&buf, GPR_T0, off);
                        build_copy_store(&buf, GPR_T1, off + copy_word_size);
                        build_copy_store(&buf, GPR_T2, off + 2 * copy_word_size);
                        if (off == -(4 * copy_word_size))
                                uasm_il_bne(&buf, &r, GPR_A2, GPR_A0,
                                            label_copy_nopref);
                        build_copy_store(&buf, GPR_T3, off + 3 * copy_word_size);
                        off += 4 * copy_word_size;
                } while (off < 0);
        }

        uasm_i_jr(&buf, GPR_RA);
        uasm_i_nop(&buf);

        BUG_ON(buf > &__copy_page_end);

        uasm_resolve_relocs(relocs, labels);

        pr_debug("Synthesized copy page handler (%u instructions).\n",
                 (u32)(buf - &__copy_page_start));

        pr_debug("\t.set push\n");
        pr_debug("\t.set noreorder\n");
        for (i = 0; i < (buf - &__copy_page_start); i++)
                pr_debug("\t.word 0x%08x\n", (&__copy_page_start)[i]);
        pr_debug("\t.set pop\n");
}

#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
extern void clear_page_cpu(void *page);
extern void copy_page_cpu(void *to, void *from);

/*
 * Pad descriptors to cacheline, since each is exclusively owned by a
 * particular CPU.
 */
struct dmadscr {
        u64 dscr_a;
        u64 dscr_b;
        u64 pad_a;
        u64 pad_b;
} ____cacheline_aligned_in_smp page_descr[DM_NUM_CHANNELS];

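/*
 * Clear a page using the SB1250 data mover; pages outside KSEG0 fall
 * back to clear_page_cpu().
 */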
void clear_page(void *page)
{
        u64 to_phys = CPHYSADDR((unsigned long)page);
        unsigned int cpu = smp_processor_id();

        /* if the page is not in KSEG0, use old way */
        if ((long)KSEGX((unsigned long)page) != (long)CKSEG0)
                return clear_page_cpu(page);

        page_descr[cpu].dscr_a = to_phys | M_DM_DSCRA_ZERO_MEM |
                                 M_DM_DSCRA_L2C_DEST | M_DM_DSCRA_INTERRUPT;
        page_descr[cpu].dscr_b = V_DM_DSCRB_SRC_LENGTH(PAGE_SIZE);
        __raw_writeq(1, IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_COUNT)));

        /*
         * Don't really want to do it this way, but there's no
         * reliable way to delay completion detection.
         */
        while (!(__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE_DEBUG)))
                 & M_DM_DSCR_BASE_INTERRUPT))
                ;
        __raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE)));
}
EXPORT_SYMBOL(clear_page);

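/*
 * Copy a page using the SB1250 data mover; if either page is outside
 * KSEG0, fall back to copy_page_cpu().
 */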
void copy_page(void *to, void *from)
{
        u64 from_phys = CPHYSADDR((unsigned long)from);
        u64 to_phys = CPHYSADDR((unsigned long)to);
        unsigned int cpu = smp_processor_id();

        /* if any page is not in KSEG0, use old way */
        if ((long)KSEGX((unsigned long)to) != (long)CKSEG0
            || (long)KSEGX((unsigned long)from) != (long)CKSEG0)
                return copy_page_cpu(to, from);

        page_descr[cpu].dscr_a = to_phys | M_DM_DSCRA_L2C_DEST |
                                 M_DM_DSCRA_INTERRUPT;
        page_descr[cpu].dscr_b = from_phys | V_DM_DSCRB_SRC_LENGTH(PAGE_SIZE);
        __raw_writeq(1, IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_COUNT)));

        /*
         * Don't really want to do it this way, but there's no
         * reliable way to delay completion detection.
         */
        while (!(__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE_DEBUG)))
                 & M_DM_DSCR_BASE_INTERRUPT))
                ;
        __raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE)));
}
EXPORT_SYMBOL(copy_page);

#endif /* CONFIG_SIBYTE_DMA_PAGEOPS */