/*
 * Copyright(c) 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the
 *   distribution.
 * - Neither the name of Intel Corporation nor the names of its
 *   contributors may be used to endorse or promote products derived
 *   from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
#include <linux/types.h>
#include <linux/scatterlist.h>
#include <rdma/ib_verbs.h>

#include "dma.h"

#define BAD_DMA_ADDRESS ((u64)0)
54
/*
 * The following functions implement driver specific replacements
 * for the ib_dma_*() functions.
 *
 * These functions return kernel virtual addresses instead of
 * device bus addresses since the driver uses the CPU to copy
 * data instead of using hardware DMA.
 */
63
64static int rvt_mapping_error(struct ib_device *dev, u64 dma_addr)
65{
66 return dma_addr == BAD_DMA_ADDRESS;
67}
68
69static u64 rvt_dma_map_single(struct ib_device *dev, void *cpu_addr,
70 size_t size, enum dma_data_direction direction)
71{
72 if (WARN_ON(!valid_dma_direction(direction)))
73 return BAD_DMA_ADDRESS;
74
75 return (u64)cpu_addr;
76}
77
/* No-op: map_single handed out a kernel virtual address, so there is
 * no device mapping to tear down.
 */
static void rvt_dma_unmap_single(struct ib_device *dev, u64 addr, size_t size,
				 enum dma_data_direction direction)
{
	/* This is a stub, nothing to be done here */
}
83
84static u64 rvt_dma_map_page(struct ib_device *dev, struct page *page,
85 unsigned long offset, size_t size,
86 enum dma_data_direction direction)
87{
88 u64 addr;
89
90 if (WARN_ON(!valid_dma_direction(direction)))
91 return BAD_DMA_ADDRESS;
92
93 if (offset + size > PAGE_SIZE)
94 return BAD_DMA_ADDRESS;
95
96 addr = (u64)page_address(page);
97 if (addr)
98 addr += offset;
99
100 return addr;
101}
102
/* No-op: map_page handed out a kernel virtual address, so there is
 * no device mapping to tear down.
 */
static void rvt_dma_unmap_page(struct ib_device *dev, u64 addr, size_t size,
			       enum dma_data_direction direction)
{
	/* This is a stub, nothing to be done here */
}
108
109static int rvt_map_sg(struct ib_device *dev, struct scatterlist *sgl,
110 int nents, enum dma_data_direction direction)
111{
112 struct scatterlist *sg;
113 u64 addr;
114 int i;
115 int ret = nents;
116
117 if (WARN_ON(!valid_dma_direction(direction)))
118 return 0;
119
120 for_each_sg(sgl, sg, nents, i) {
121 addr = (u64)page_address(sg_page(sg));
122 if (!addr) {
123 ret = 0;
124 break;
125 }
126 sg->dma_address = addr + sg->offset;
127#ifdef CONFIG_NEED_SG_DMA_LENGTH
128 sg->dma_length = sg->length;
129#endif
130 }
131 return ret;
132}
133
/* No-op: map_sg stored kernel virtual addresses, so there is no
 * device mapping to tear down.
 */
static void rvt_unmap_sg(struct ib_device *dev,
			 struct scatterlist *sg, int nents,
			 enum dma_data_direction direction)
{
	/* This is a stub, nothing to be done here */
}
140
/* No-op: the CPU already owns the buffer since no hardware DMA is used. */
static void rvt_sync_single_for_cpu(struct ib_device *dev, u64 addr,
				    size_t size, enum dma_data_direction dir)
{
}
145
/* No-op: no device-visible copy exists, so nothing needs flushing. */
static void rvt_sync_single_for_device(struct ib_device *dev, u64 addr,
				       size_t size,
				       enum dma_data_direction dir)
{
}
151
152static void *rvt_dma_alloc_coherent(struct ib_device *dev, size_t size,
153 u64 *dma_handle, gfp_t flag)
154{
155 struct page *p;
156 void *addr = NULL;
157
158 p = alloc_pages(flag, get_order(size));
159 if (p)
160 addr = page_address(p);
161 if (dma_handle)
162 *dma_handle = (u64)addr;
163 return addr;
164}
165
166static void rvt_dma_free_coherent(struct ib_device *dev, size_t size,
167 void *cpu_addr, u64 dma_handle)
168{
169 free_pages((unsigned long)cpu_addr, get_order(size));
170}
171
172struct ib_dma_mapping_ops rvt_default_dma_mapping_ops = {
173 .mapping_error = rvt_mapping_error,
174 .map_single = rvt_dma_map_single,
175 .unmap_single = rvt_dma_unmap_single,
176 .map_page = rvt_dma_map_page,
177 .unmap_page = rvt_dma_unmap_page,
178 .map_sg = rvt_map_sg,
179 .unmap_sg = rvt_unmap_sg,
180 .sync_single_for_cpu = rvt_sync_single_for_cpu,
181 .sync_single_for_device = rvt_sync_single_for_device,
182 .alloc_coherent = rvt_dma_alloc_coherent,
183 .free_coherent = rvt_dma_free_coherent
184};