3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
8 * Copyright(c) 2015 Intel Corporation.
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
21 * Copyright(c) 2015 Intel Corporation.
23 * Redistribution and use in source and binary forms, with or without
24 * modification, are permitted provided that the following conditions
27 * - Redistributions of source code must retain the above copyright
28 * notice, this list of conditions and the following disclaimer.
29 * - Redistributions in binary form must reproduce the above copyright
30 * notice, this list of conditions and the following disclaimer in
31 * the documentation and/or other materials provided with the
33 * - Neither the name of Intel Corporation nor the names of its
34 * contributors may be used to endorse or promote products derived
35 * from this software without specific prior written permission.
37 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
38 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
39 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
40 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
41 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
42 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
43 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
44 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
45 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
46 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
47 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
53 /* additive distance between non-SOP and SOP space */
54 #define SOP_DISTANCE (TXE_PIO_SIZE / 2)
/* mask of the offset within a PIO block (block size is a power of two) */
55 #define PIO_BLOCK_MASK (PIO_BLOCK_SIZE-1)
56 /* number of QUADWORDs in a block */
57 #define PIO_BLOCK_QWS (PIO_BLOCK_SIZE/sizeof(u64))
60 * pio_copy - copy data block to MMIO space
61 * @pbuf: a number of blocks allocated within a PIO send context
63 * @from: source, must be 8 byte aligned
64 * @count: number of DWORD (32-bit) quantities to copy from source
66 * Copy data from source to PIO Send Buffer memory, 8 bytes at a time.
67 * Must always write full BLOCK_SIZE-byte blocks. The first block must
68 * be written to the corresponding SOP=1 address.
71 * o pbuf->start always starts on a block boundary
72 * o pbuf can wrap only at a block boundary
/*
 * NOTE(review): interior lines of this function (opening brace, the PBC
 * write, pointer advances, closing braces) are missing from this extract;
 * the code below is preserved exactly as found.
 */
74 void pio_copy(struct hfi1_devdata *dd, struct pio_buf *pbuf, u64 pbc,
75 const void *from, size_t count)
/* dest starts in SOP=1 space; "send" marks the end of the first block */
77 void __iomem *dest = pbuf->start + SOP_DISTANCE;
78 void __iomem *send = dest + PIO_BLOCK_SIZE;
79 void __iomem *dend; /* 8-byte data end */
85 /* calculate where the QWORD data ends - in SOP=1 space */
/* count is in DWORDs; count>>1 is the number of whole QWORDs */
86 dend = dest + ((count>>1) * sizeof(u64));
89 /* all QWORD data is within the SOP block, does *not*
90 reach the end of the SOP block */
93 writeq(*(u64 *)from, dest);
98 * No boundary checks are needed here:
99 * 0. We're not on the SOP block boundary
100 * 1. The possible DWORD dangle will still be within
102 * 2. We cannot wrap except on a block boundary.
105 /* QWORD data extends _to_ or beyond the SOP block */
107 /* write 8-byte SOP chunk data */
108 while (dest < send) {
109 writeq(*(u64 *)from, dest);
113 /* drop out of the SOP range */
114 dest -= SOP_DISTANCE;
115 dend -= SOP_DISTANCE;
118 * If the wrap comes before or matches the data end,
119 * copy until the wrap, then wrap.
121 * If the data ends at the end of the SOP above and
122 * the buffer wraps, then pbuf->end == dend == dest
123 * and nothing will get written, but we will wrap in
124 * case there is a dangling DWORD.
126 if (pbuf->end <= dend) {
127 while (dest < pbuf->end) {
128 writeq(*(u64 *)from, dest);
137 /* write 8-byte non-SOP, non-wrap chunk data */
138 while (dest < dend) {
139 writeq(*(u64 *)from, dest);
144 /* at this point we have wrapped if we are going to wrap */
146 /* write dangling u32, if any */
/* the dangling DWORD is padded out to a full QWORD for the MMIO write */
151 val.val32[0] = *(u32 *)from;
152 writeq(val.val64, dest);
155 /* fill in rest of block, no need to check pbuf->end
156 as we only wrap on a block boundary */
157 while (((unsigned long)dest & PIO_BLOCK_MASK) != 0) {
162 /* finished with this buffer */
/* drop this buffer's reference on the per-CPU allocation count */
163 this_cpu_dec(*pbuf->sc->buffers_allocated);
167 /* USE_SHIFTS is faster in user-space tests on a Xeon X5570 @ 2.93GHz */
171 * Handle carry bytes using shifts and masks.
173 * NOTE: the value of the unused portion of carry is expected to always be zero.
177 * "zero" shift - bit shift used to zero out upper bytes. Input is
178 * the count of LSB bytes to preserve.
/* e.g. zshift(3) == 40: shifting a u64 left then right by 40 keeps 3 LSBs */
180 #define zshift(x) (8 * (8-(x)))
183 * "merge" shift - bit shift used to merge with carry bytes. Input is
184 * the LSB byte count to move beyond.
186 #define mshift(x) (8 * (x))
189 * Read nbytes bytes from "from" and return them in the LSB bytes
190 * of pbuf->carry. Other bytes are zeroed. Any previous value
191 * in pbuf->carry is lost.
194 * o do not read from "from" if nbytes is zero
195 * o from may _not_ be u64 aligned
196 * o nbytes must not span a QW boundary
/* NOTE(review): lines between the prototype and the body are missing here */
198 static inline void read_low_bytes(struct pio_buf *pbuf, const void *from,
204 pbuf->carry.val64 = 0;
206 /* align our pointer */
207 off = (unsigned long)from & 0x7;
208 from = (void *)((unsigned long)from & ~0x7l);
/* one aligned u64 read covers the bytes because nbytes+off <= 8 */
209 pbuf->carry.val64 = ((*(u64 *)from)
210 << zshift(nbytes + off))/* zero upper bytes */
211 >> zshift(nbytes); /* place at bottom */
213 pbuf->carry_bytes = nbytes;
217 * Read nbytes bytes from "from" and put them at the next significant bytes
218 * of pbuf->carry. Unused bytes are zeroed. It is expected that the extra
219 * read does not overfill carry.
222 * o from may _not_ be u64 aligned
223 * o nbytes may span a QW boundary
225 static inline void read_extra_bytes(struct pio_buf *pbuf,
226 const void *from, unsigned int nbytes)
228 unsigned long off = (unsigned long)from & 0x7;
229 unsigned int room, xbytes;
231 /* align our pointer */
232 from = (void *)((unsigned long)from & ~0x7l);
234 /* check count first - don't read anything if count is zero */
/* a loop over u64 chunks presumably follows - interior lines are missing */
236 /* find the number of bytes in this u64 */
237 room = 8 - off; /* this u64 has room for this many bytes */
238 xbytes = min(room, nbytes);
241 * shift down to zero lower bytes, shift up to zero upper
242 * bytes, shift back down to move into place
/* OR the extracted bytes in above the carry_bytes already accumulated */
244 pbuf->carry.val64 |= (((*(u64 *)from)
247 >> zshift(xbytes+pbuf->carry_bytes);
249 pbuf->carry_bytes += xbytes;
256 * Zero extra bytes from the end of pbuf->carry.
259 * o zbytes <= old_bytes
261 static inline void zero_extra_bytes(struct pio_buf *pbuf, unsigned int zbytes)
263 unsigned int remaining;
265 if (zbytes == 0) /* nothing to do */
268 remaining = pbuf->carry_bytes - zbytes; /* remaining bytes */
270 /* NOTE: zshift only guaranteed to work if remaining != 0 */
/* shift up then back down to clear everything above "remaining" bytes */
272 pbuf->carry.val64 = (pbuf->carry.val64 << zshift(remaining))
273 >> zshift(remaining);
/* remaining == 0 case: clear carry entirely (zshift(0) would be UB) */
275 pbuf->carry.val64 = 0;
276 pbuf->carry_bytes = remaining;
280 * Write a quad word using parts of pbuf->carry and the next 8 bytes of src.
281 * Put the unused part of the next 8 bytes of src into the LSB bytes of
282 * pbuf->carry with the upper bytes zeroed.
285 * o result must keep unused bytes zeroed
286 * o src must be u64 aligned
288 static inline void merge_write8(
289 struct pio_buf *pbuf,
/* low part of temp is the old carry; high part is the start of "new" */
296 temp = pbuf->carry.val64 | (new << mshift(pbuf->carry_bytes));
/* the bytes of "new" not written become the next carry (upper bytes zero) */
298 pbuf->carry.val64 = new >> zshift(pbuf->carry_bytes);
302 * Write a quad word using all bytes of carry.
304 static inline void carry8_write8(union mix carry, void __iomem *dest)
306 writeq(carry.val64, dest);
310 * Write a quad word using all the valid bytes of carry. If carry
311 * has zero valid bytes, nothing is written.
312 * Returns 0 on nothing written, non-zero on quad word written.
314 static inline int carry_write8(struct pio_buf *pbuf, void __iomem *dest)
316 if (pbuf->carry_bytes) {
317 /* unused bytes are always kept zeroed, so just write */
318 writeq(pbuf->carry.val64, dest);
325 #else /* USE_SHIFTS */
327 * Handle carry bytes using byte copies.
329 * NOTE: the value of the unused portion of carry is left uninitialized.
333 * Jump copy - no-loop copy for < 8 bytes.
/* NOTE(review): jcopy's body is missing from this extract */
335 static inline void jcopy(u8 *dest, const u8 *src, u32 n)
356 * Read nbytes from "from" and place them in the low bytes
357 * of pbuf->carry. Other bytes are left as-is. Any previous
358 * value in pbuf->carry is lost.
361 * o do not read from "from" if nbytes is zero
362 * o from may _not_ be u64 aligned.
364 static inline void read_low_bytes(struct pio_buf *pbuf, const void *from,
367 jcopy(&pbuf->carry.val8[0], from, nbytes);
368 pbuf->carry_bytes = nbytes;
372 * Read nbytes bytes from "from" and put them at the end of pbuf->carry.
373 * It is expected that the extra read does not overfill carry.
376 * o from may _not_ be u64 aligned
377 * o nbytes may span a QW boundary
379 static inline void read_extra_bytes(struct pio_buf *pbuf,
380 const void *from, unsigned int nbytes)
/* append after the carry_bytes already valid in carry */
382 jcopy(&pbuf->carry.val8[pbuf->carry_bytes], from, nbytes);
383 pbuf->carry_bytes += nbytes;
387 * Zero extra bytes from the end of pbuf->carry.
389 * We do not care about the value of unused bytes in carry, so just
390 * reduce the byte count.
393 * o zbytes <= old_bytes
395 static inline void zero_extra_bytes(struct pio_buf *pbuf, unsigned int zbytes)
397 pbuf->carry_bytes -= zbytes;
401 * Write a quad word using parts of pbuf->carry and the next 8 bytes of src.
402 * Put the unused part of the next 8 bytes of src into the low bytes of
405 static inline void merge_write8(
406 struct pio_buf *pbuf,
/* bytes of src needed to top carry up to a full quad word */
410 u32 remainder = 8 - pbuf->carry_bytes;
412 jcopy(&pbuf->carry.val8[pbuf->carry_bytes], src, remainder);
413 writeq(pbuf->carry.val64, dest);
/* the unwritten tail of src becomes the new carry */
414 jcopy(&pbuf->carry.val8[0], src+remainder, pbuf->carry_bytes);
418 * Write a quad word using all bytes of carry.
420 static inline void carry8_write8(union mix carry, void *dest)
422 writeq(carry.val64, dest);
426 * Write a quad word using all the valid bytes of carry. If carry
427 * has zero valid bytes, nothing is written.
428 * Returns 0 on nothing written, non-zero on quad word written.
430 static inline int carry_write8(struct pio_buf *pbuf, void *dest)
432 if (pbuf->carry_bytes) {
/* pad carry with zeros so a full, defined quad word is written */
435 jcopy(&pbuf->carry.val8[pbuf->carry_bytes], (u8 *)&zero,
436 8 - pbuf->carry_bytes);
437 writeq(pbuf->carry.val64, dest);
443 #endif /* USE_SHIFTS */
446 * Segmented PIO Copy - start
450 * @pbuf: destination buffer
451 * @pbc: the PBC for the PIO buffer
452 * @from: data source, QWORD aligned
453 * @nbytes: bytes to copy
/*
 * NOTE(review): interior lines (braces, the PBC write, pointer advances)
 * are missing from this extract; code below is preserved exactly as found.
 */
455 void seg_pio_copy_start(struct pio_buf *pbuf, u64 pbc,
456 const void *from, size_t nbytes)
/* dest starts in SOP=1 space; "send" marks the end of the first block */
458 void __iomem *dest = pbuf->start + SOP_DISTANCE;
459 void __iomem *send = dest + PIO_BLOCK_SIZE;
460 void __iomem *dend; /* 8-byte data end */
465 /* calculate where the QWORD data ends - in SOP=1 space */
466 dend = dest + ((nbytes>>3) * sizeof(u64));
469 /* all QWORD data is within the SOP block, does *not*
470 reach the end of the SOP block */
472 while (dest < dend) {
473 writeq(*(u64 *)from, dest);
478 * No boundary checks are needed here:
479 * 0. We're not on the SOP block boundary
480 * 1. The possible DWORD dangle will still be within
482 * 2. We cannot wrap except on a block boundary.
485 /* QWORD data extends _to_ or beyond the SOP block */
487 /* write 8-byte SOP chunk data */
488 while (dest < send) {
489 writeq(*(u64 *)from, dest);
493 /* drop out of the SOP range */
494 dest -= SOP_DISTANCE;
495 dend -= SOP_DISTANCE;
498 * If the wrap comes before or matches the data end,
499 * copy until the wrap, then wrap.
501 * If the data ends at the end of the SOP above and
502 * the buffer wraps, then pbuf->end == dend == dest
503 * and nothing will get written, but we will wrap in
504 * case there is a dangling DWORD.
506 if (pbuf->end <= dend) {
507 while (dest < pbuf->end) {
508 writeq(*(u64 *)from, dest);
517 /* write 8-byte non-SOP, non-wrap chunk data */
518 while (dest < dend) {
519 writeq(*(u64 *)from, dest);
524 /* at this point we have wrapped if we are going to wrap */
526 /* ...but it doesn't matter as we're done writing */
528 /* save dangling bytes, if any */
529 read_low_bytes(pbuf, from, nbytes & 0x7);
/* account for the PBC quad word plus the whole QWORDs written */
531 pbuf->qw_written = 1 /*PBC*/ + (nbytes >> 3);
535 * Mid copy helper, "mixed case" - source is 64-bit aligned but carry
536 * bytes are non-zero.
538 * Whole u64s must be written to the chip, so bytes must be manually merged.
540 * @pbuf: destination buffer
541 * @from: data source, is QWORD aligned.
542 * @nbytes: bytes to copy
544 * Must handle nbytes < 8.
/* NOTE(review): interior lines (braces, pointer advances) are missing here */
546 static void mid_copy_mix(struct pio_buf *pbuf, const void *from, size_t nbytes)
548 void __iomem *dest = pbuf->start + (pbuf->qw_written * sizeof(u64));
549 void __iomem *dend; /* 8-byte data end */
/* carry + new data: whole QWORDs to write now, and leftover carry bytes */
550 unsigned long qw_to_write = (pbuf->carry_bytes + nbytes) >> 3;
551 unsigned long bytes_left = (pbuf->carry_bytes + nbytes) & 0x7;
553 /* calculate 8-byte data end */
554 dend = dest + (qw_to_write * sizeof(u64));
556 if (pbuf->qw_written < PIO_BLOCK_QWS) {
558 * Still within SOP block. We don't need to check for
559 * wrap because we are still in the first block and
560 * can only wrap on block boundaries.
562 void __iomem *send; /* SOP end */
565 /* calculate the end of data or end of block, whichever
567 send = pbuf->start + PIO_BLOCK_SIZE;
568 xend = min(send, dend);
570 /* shift up to SOP=1 space */
571 dest += SOP_DISTANCE;
572 xend += SOP_DISTANCE;
574 /* write 8-byte chunk data */
575 while (dest < xend) {
576 merge_write8(pbuf, dest, from);
581 /* shift down to SOP=0 space */
582 dest -= SOP_DISTANCE;
585 * At this point dest could be (either, both, or neither):
591 * If the wrap comes before or matches the data end,
592 * copy until the wrap, then wrap.
594 * If dest is at the wrap, we will fall into the if,
595 * not do the loop, when wrap.
597 * If the data ends at the end of the SOP above and
598 * the buffer wraps, then pbuf->end == dend == dest
599 * and nothing will get written.
601 if (pbuf->end <= dend) {
602 while (dest < pbuf->end) {
603 merge_write8(pbuf, dest, from);
612 /* write 8-byte non-SOP, non-wrap chunk data */
613 while (dest < dend) {
614 merge_write8(pbuf, dest, from);
/* settle the carry to exactly bytes_left valid bytes */
620 if (pbuf->carry_bytes < bytes_left) {
621 /* need to read more */
622 read_extra_bytes(pbuf, from, bytes_left - pbuf->carry_bytes);
624 /* remove invalid bytes */
625 zero_extra_bytes(pbuf, pbuf->carry_bytes - bytes_left);
628 pbuf->qw_written += qw_to_write;
632 * Mid copy helper, "straight case" - source pointer is 64-bit aligned
633 * with no carry bytes.
635 * @pbuf: destination buffer
636 * @from: data source, is QWORD aligned
637 * @nbytes: bytes to copy
639 * Must handle nbytes < 8.
/* NOTE(review): interior lines (braces, pointer advances) are missing here */
641 static void mid_copy_straight(struct pio_buf *pbuf,
642 const void *from, size_t nbytes)
644 void __iomem *dest = pbuf->start + (pbuf->qw_written * sizeof(u64));
645 void __iomem *dend; /* 8-byte data end */
647 /* calculate 8-byte data end */
648 dend = dest + ((nbytes>>3) * sizeof(u64));
650 if (pbuf->qw_written < PIO_BLOCK_QWS) {
652 * Still within SOP block. We don't need to check for
653 * wrap because we are still in the first block and
654 * can only wrap on block boundaries.
656 void __iomem *send; /* SOP end */
659 /* calculate the end of data or end of block, whichever
661 send = pbuf->start + PIO_BLOCK_SIZE;
662 xend = min(send, dend);
664 /* shift up to SOP=1 space */
665 dest += SOP_DISTANCE;
666 xend += SOP_DISTANCE;
668 /* write 8-byte chunk data */
669 while (dest < xend) {
670 writeq(*(u64 *)from, dest);
675 /* shift down to SOP=0 space */
676 dest -= SOP_DISTANCE;
679 * At this point dest could be (either, both, or neither):
685 * If the wrap comes before or matches the data end,
686 * copy until the wrap, then wrap.
688 * If dest is at the wrap, we will fall into the if,
689 * not do the loop, when wrap.
691 * If the data ends at the end of the SOP above and
692 * the buffer wraps, then pbuf->end == dend == dest
693 * and nothing will get written.
695 if (pbuf->end <= dend) {
696 while (dest < pbuf->end) {
697 writeq(*(u64 *)from, dest);
706 /* write 8-byte non-SOP, non-wrap chunk data */
707 while (dest < dend) {
708 writeq(*(u64 *)from, dest);
713 /* we know carry_bytes was zero on entry to this routine */
714 read_low_bytes(pbuf, from, nbytes & 0x7);
716 pbuf->qw_written += nbytes>>3;
720 * Segmented PIO Copy - middle
722 * Must handle any aligned tail and any aligned source with any byte count.
724 * @pbuf: a number of blocks allocated within a PIO send context
726 * @nbytes: number of bytes to copy
/* NOTE(review): interior lines (braces, early returns, pointer advances)
   are missing from this extract; code below is preserved exactly as found. */
728 void seg_pio_copy_mid(struct pio_buf *pbuf, const void *from, size_t nbytes)
730 unsigned long from_align = (unsigned long)from & 0x7;
732 if (pbuf->carry_bytes + nbytes < 8) {
733 /* not enough bytes to fill a QW */
734 read_extra_bytes(pbuf, from, nbytes);
739 /* misaligned source pointer - align it */
740 unsigned long to_align;
742 /* bytes to read to align "from" */
743 to_align = 8 - from_align;
746 * In the advance-to-alignment logic below, we do not need
747 * to check if we are using more than nbytes. This is because
748 * if we are here, we already know that carry+nbytes will
749 * fill at least one QW.
751 if (pbuf->carry_bytes + to_align < 8) {
752 /* not enough align bytes to fill a QW */
753 read_extra_bytes(pbuf, from, to_align);
757 /* bytes to fill carry */
758 unsigned long to_fill = 8 - pbuf->carry_bytes;
759 /* bytes left over to be read */
760 unsigned long extra = to_align - to_fill;
/* top up carry to a full QW first... */
764 read_extra_bytes(pbuf, from, to_fill);
768 /* ...now write carry */
769 dest = pbuf->start + (pbuf->qw_written * sizeof(u64));
772 * The two checks immediately below cannot both be
773 * true, hence the else. If we have wrapped, we
774 * cannot still be within the first block.
775 * Conversely, if we are still in the first block, we
776 * cannot have wrapped. We do the wrap check first
777 * as that is more likely.
779 /* adjust if we've wrapped */
780 if (dest >= pbuf->end)
782 /* jump to SOP range if within the first block */
783 else if (pbuf->qw_written < PIO_BLOCK_QWS)
784 dest += SOP_DISTANCE;
786 carry8_write8(pbuf->carry, dest);
789 /* read any extra bytes to do final alignment */
790 /* this will overwrite anything in pbuf->carry */
791 read_low_bytes(pbuf, from, extra);
796 /* at this point, from is QW aligned */
/* dispatch on whether leftover carry bytes must be merged in */
799 if (pbuf->carry_bytes)
800 mid_copy_mix(pbuf, from, nbytes);
802 mid_copy_straight(pbuf, from, nbytes);
806 * Segmented PIO Copy - end
808 * Write any remainder (in pbuf->carry) and finish writing the whole block.
810 * @pbuf: a number of blocks allocated within a PIO send context
/* NOTE(review): interior lines (braces, the wrap adjustment, the fill
   write) are missing from this extract; code is preserved as found. */
812 void seg_pio_copy_end(struct pio_buf *pbuf)
814 void __iomem *dest = pbuf->start + (pbuf->qw_written * sizeof(u64));
817 * The two checks immediately below cannot both be true, hence the
818 * else. If we have wrapped, we cannot still be within the first
819 * block. Conversely, if we are still in the first block, we
820 * cannot have wrapped. We do the wrap check first as that is
823 /* adjust if we have wrapped */
824 if (dest >= pbuf->end)
826 /* jump to the SOP range if within the first block */
827 else if (pbuf->qw_written < PIO_BLOCK_QWS)
828 dest += SOP_DISTANCE;
830 /* write final bytes, if any */
831 if (carry_write8(pbuf, dest)) {
834 * NOTE: We do not need to recalculate whether dest needs
835 * SOP_DISTANCE or not.
837 * If we are in the first block and the dangle write
838 * keeps us in the same block, dest will need
839 * to retain SOP_DISTANCE in the loop below.
841 * If we are in the first block and the dangle write pushes
842 * us to the next block, then loop below will not run
843 * and dest is not used. Hence we do not need to update
846 * If we are past the first block, then SOP_DISTANCE
847 * was never added, so there is nothing to do.
851 /* fill in rest of block */
852 while (((unsigned long)dest & PIO_BLOCK_MASK) != 0) {
857 /* finished with this buffer */
/* drop this buffer's reference on the per-CPU allocation count */
858 this_cpu_dec(*pbuf->sc->buffers_allocated);