Commit | Line | Data |
---|---|---|
7974dd1b | 1 | /* |
43a70661 | 2 | * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved. |
7974dd1b BS |
3 | * |
4 | * Permission is hereby granted, free of charge, to any person obtaining a | |
5 | * copy of this software and associated documentation files (the "Software"), | |
6 | * to deal in the Software without restriction, including without limitation | |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
8 | * and/or sell copies of the Software, and to permit persons to whom the | |
9 | * Software is furnished to do so, subject to the following conditions: | |
10 | * | |
11 | * The above copyright notice and this permission notice shall be included in | |
12 | * all copies or substantial portions of the Software. | |
13 | * | |
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
43a70661 BS |
17 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
18 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | |
19 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | |
20 | * DEALINGS IN THE SOFTWARE. | |
7974dd1b BS |
21 | */ |
22 | #include <core/tegra.h> | |
23 | #ifdef CONFIG_NOUVEAU_PLATFORM_DRIVER | |
24 | #include "priv.h" | |
25 | ||
b59fb482 TR |
26 | #if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU) |
27 | #include <asm/dma-iommu.h> | |
28 | #endif | |
29 | ||
43a70661 BS |
30 | static int |
31 | nvkm_device_tegra_power_up(struct nvkm_device_tegra *tdev) | |
32 | { | |
33 | int ret; | |
34 | ||
e6e1817a AC |
35 | if (tdev->vdd) { |
36 | ret = regulator_enable(tdev->vdd); | |
37 | if (ret) | |
38 | goto err_power; | |
39 | } | |
43a70661 BS |
40 | |
41 | ret = clk_prepare_enable(tdev->clk); | |
42 | if (ret) | |
43 | goto err_clk; | |
34440ed6 AC |
44 | if (tdev->clk_ref) { |
45 | ret = clk_prepare_enable(tdev->clk_ref); | |
46 | if (ret) | |
47 | goto err_clk_ref; | |
48 | } | |
43a70661 BS |
49 | ret = clk_prepare_enable(tdev->clk_pwr); |
50 | if (ret) | |
51 | goto err_clk_pwr; | |
52 | clk_set_rate(tdev->clk_pwr, 204000000); | |
53 | udelay(10); | |
54 | ||
b1df2425 | 55 | if (!tdev->pdev->dev.pm_domain) { |
b0b651ae TR |
56 | reset_control_assert(tdev->rst); |
57 | udelay(10); | |
58 | ||
b1df2425 MP |
59 | ret = tegra_powergate_remove_clamping(TEGRA_POWERGATE_3D); |
60 | if (ret) | |
61 | goto err_clamp; | |
62 | udelay(10); | |
43a70661 | 63 | |
b0b651ae TR |
64 | reset_control_deassert(tdev->rst); |
65 | udelay(10); | |
66 | } | |
43a70661 BS |
67 | |
68 | return 0; | |
69 | ||
70 | err_clamp: | |
71 | clk_disable_unprepare(tdev->clk_pwr); | |
72 | err_clk_pwr: | |
34440ed6 AC |
73 | if (tdev->clk_ref) |
74 | clk_disable_unprepare(tdev->clk_ref); | |
75 | err_clk_ref: | |
43a70661 BS |
76 | clk_disable_unprepare(tdev->clk); |
77 | err_clk: | |
e6e1817a AC |
78 | if (tdev->vdd) |
79 | regulator_disable(tdev->vdd); | |
43a70661 BS |
80 | err_power: |
81 | return ret; | |
82 | } | |
83 | ||
/*
 * Power down the GPU: disable the clocks in reverse of the power-up order,
 * then the VDD regulator (if present).
 *
 * Returns 0 on success, or the error from regulator_disable(); the clock
 * disables themselves cannot fail.
 */
static int
nvkm_device_tegra_power_down(struct nvkm_device_tegra *tdev)
{
	int ret;

	clk_disable_unprepare(tdev->clk_pwr);
	if (tdev->clk_ref)
		clk_disable_unprepare(tdev->clk_ref);
	clk_disable_unprepare(tdev->clk);
	/* Let the clocks settle before cutting power. */
	udelay(10);

	if (tdev->vdd) {
		ret = regulator_disable(tdev->vdd);
		if (ret)
			return ret;
	}

	return 0;
}
103 | ||
/*
 * Probe and attach the GPU's IOMMU, if one is available and usable.
 *
 * Detaches any ARM DMA-API mapping first (nouveau manages the IOMMU
 * explicitly), then allocates a domain, picks a page shift compatible
 * with PAGE_SIZE, attaches the device, and initializes the IOVA
 * allocator (tdev->iommu.mm) sized by func->iommu_bit.
 *
 * Failure is not fatal: on any error the domain is torn down, the iommu
 * state is zeroed, and the device falls back to non-IOMMU operation.
 */
static void
nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev)
{
#if IS_ENABLED(CONFIG_IOMMU_API)
	struct device *dev = &tdev->pdev->dev;
	unsigned long pgsize_bitmap;
	int ret;

#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
	/* The ARM DMA layer may have attached its own mapping; drop it so
	 * we can manage the IOMMU domain ourselves. */
	if (dev->archdata.mapping) {
		struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);

		arm_iommu_detach_device(dev);
		arm_iommu_release_mapping(mapping);
	}
#endif

	/* iommu_bit == 0 means this SoC variant does not use the IOMMU. */
	if (!tdev->func->iommu_bit)
		return;

	mutex_init(&tdev->iommu.mutex);

	if (device_iommu_mapped(dev)) {
		tdev->iommu.domain = iommu_domain_alloc(&platform_bus_type);
		if (!tdev->iommu.domain)
			goto error;

		/*
		 * An IOMMU is only usable if it supports page sizes smaller
		 * or equal to the system's PAGE_SIZE, with a preference if
		 * both are equal.
		 */
		pgsize_bitmap = tdev->iommu.domain->pgsize_bitmap;
		if (pgsize_bitmap & PAGE_SIZE) {
			tdev->iommu.pgshift = PAGE_SHIFT;
		} else {
			/* Largest supported page size below PAGE_SIZE;
			 * fls() is 1-based, hence the -1 below. */
			tdev->iommu.pgshift = fls(pgsize_bitmap & ~PAGE_MASK);
			if (tdev->iommu.pgshift == 0) {
				dev_warn(dev, "unsupported IOMMU page size\n");
				goto free_domain;
			}
			tdev->iommu.pgshift -= 1;
		}

		ret = iommu_attach_device(tdev->iommu.domain, dev);
		if (ret)
			goto free_domain;

		/* IOVA allocator covering the whole GPU-addressable range,
		 * in units of IOMMU pages. */
		ret = nvkm_mm_init(&tdev->iommu.mm, 0, 0,
				   (1ULL << tdev->func->iommu_bit) >>
				   tdev->iommu.pgshift, 1);
		if (ret)
			goto detach_device;
	}

	return;

detach_device:
	iommu_detach_device(tdev->iommu.domain, dev);

free_domain:
	iommu_domain_free(tdev->iommu.domain);

error:
	tdev->iommu.domain = NULL;
	tdev->iommu.pgshift = 0;
	dev_err(dev, "cannot initialize IOMMU MM\n");
#endif
}
173 | ||
174 | static void | |
175 | nvkm_device_tegra_remove_iommu(struct nvkm_device_tegra *tdev) | |
176 | { | |
177 | #if IS_ENABLED(CONFIG_IOMMU_API) | |
178 | if (tdev->iommu.domain) { | |
179 | nvkm_mm_fini(&tdev->iommu.mm); | |
180 | iommu_detach_device(tdev->iommu.domain, tdev->device.dev); | |
181 | iommu_domain_free(tdev->iommu.domain); | |
182 | } | |
183 | #endif | |
184 | } | |
185 | ||
7974dd1b | 186 | static struct nvkm_device_tegra * |
7e8820fe | 187 | nvkm_device_tegra(struct nvkm_device *device) |
7974dd1b | 188 | { |
7e8820fe BS |
189 | return container_of(device, struct nvkm_device_tegra, device); |
190 | } | |
191 | ||
192 | static struct resource * | |
193 | nvkm_device_tegra_resource(struct nvkm_device *device, unsigned bar) | |
194 | { | |
195 | struct nvkm_device_tegra *tdev = nvkm_device_tegra(device); | |
196 | return platform_get_resource(tdev->pdev, IORESOURCE_MEM, bar); | |
197 | } | |
198 | ||
199 | static resource_size_t | |
200 | nvkm_device_tegra_resource_addr(struct nvkm_device *device, unsigned bar) | |
201 | { | |
202 | struct resource *res = nvkm_device_tegra_resource(device, bar); | |
203 | return res ? res->start : 0; | |
204 | } | |
205 | ||
206 | static resource_size_t | |
207 | nvkm_device_tegra_resource_size(struct nvkm_device *device, unsigned bar) | |
208 | { | |
209 | struct resource *res = nvkm_device_tegra_resource(device, bar); | |
210 | return res ? resource_size(res) : 0; | |
7974dd1b BS |
211 | } |
212 | ||
2b700825 BS |
213 | static irqreturn_t |
214 | nvkm_device_tegra_intr(int irq, void *arg) | |
215 | { | |
216 | struct nvkm_device_tegra *tdev = arg; | |
d3981190 | 217 | struct nvkm_device *device = &tdev->device; |
2b700825 | 218 | bool handled = false; |
d3981190 BS |
219 | nvkm_mc_intr_unarm(device); |
220 | nvkm_mc_intr(device, &handled); | |
221 | nvkm_mc_intr_rearm(device); | |
2b700825 BS |
222 | return handled ? IRQ_HANDLED : IRQ_NONE; |
223 | } | |
224 | ||
225 | static void | |
226 | nvkm_device_tegra_fini(struct nvkm_device *device, bool suspend) | |
227 | { | |
228 | struct nvkm_device_tegra *tdev = nvkm_device_tegra(device); | |
229 | if (tdev->irq) { | |
230 | free_irq(tdev->irq, tdev); | |
231 | tdev->irq = 0; | |
f5a5b523 | 232 | } |
2b700825 BS |
233 | } |
234 | ||
235 | static int | |
236 | nvkm_device_tegra_init(struct nvkm_device *device) | |
237 | { | |
238 | struct nvkm_device_tegra *tdev = nvkm_device_tegra(device); | |
239 | int irq, ret; | |
240 | ||
241 | irq = platform_get_irq_byname(tdev->pdev, "stall"); | |
242 | if (irq < 0) | |
243 | return irq; | |
244 | ||
245 | ret = request_irq(irq, nvkm_device_tegra_intr, | |
246 | IRQF_SHARED, "nvkm", tdev); | |
247 | if (ret) | |
248 | return ret; | |
249 | ||
250 | tdev->irq = irq; | |
251 | return 0; | |
252 | } | |
253 | ||
43a70661 BS |
/*
 * Device destructor: undo the power-up and IOMMU setup performed in
 * nvkm_device_tegra_new(), then hand the allocation back to the core
 * (which frees the returned pointer).
 */
static void *
nvkm_device_tegra_dtor(struct nvkm_device *device)
{
	struct nvkm_device_tegra *tdev = nvkm_device_tegra(device);

	nvkm_device_tegra_power_down(tdev);
	nvkm_device_tegra_remove_iommu(tdev);

	return tdev;
}
262 | ||
7974dd1b BS |
/*
 * Platform (Tegra) backend hooks for the common nvkm_device core.
 * cpu_coherent = false: CPU-side mappings of GPU memory are not
 * cache-coherent on this bus.
 */
static const struct nvkm_device_func
nvkm_device_tegra_func = {
	.tegra = nvkm_device_tegra,
	.dtor = nvkm_device_tegra_dtor,
	.init = nvkm_device_tegra_init,
	.fini = nvkm_device_tegra_fini,
	.resource_addr = nvkm_device_tegra_resource_addr,
	.resource_size = nvkm_device_tegra_resource_size,
	.cpu_coherent = false,
};
273 | ||
/*
 * Create an nvkm device for a Tegra GPU platform device.
 *
 * Acquires (devm-managed) regulator, reset control and clocks, sets the
 * DMA mask from the SoC's iommu_bit, probes the IOMMU, powers the GPU up
 * and finally constructs the common nvkm device. On success *pdevice
 * points at the embedded nvkm_device; on failure everything is unwound
 * via the goto ladder and a negative errno is returned.
 */
int
nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
		      struct platform_device *pdev,
		      const char *cfg, const char *dbg,
		      bool detect, bool mmio, u64 subdev_mask,
		      struct nvkm_device **pdevice)
{
	struct nvkm_device_tegra *tdev;
	unsigned long rate;
	int ret;

	/* kzalloc: all optional handles (vdd, clk_ref, ...) start NULL. */
	if (!(tdev = kzalloc(sizeof(*tdev), GFP_KERNEL)))
		return -ENOMEM;

	tdev->func = func;
	tdev->pdev = pdev;

	if (func->require_vdd) {
		tdev->vdd = devm_regulator_get(&pdev->dev, "vdd");
		if (IS_ERR(tdev->vdd)) {
			ret = PTR_ERR(tdev->vdd);
			goto free;
		}
	}

	tdev->rst = devm_reset_control_get(&pdev->dev, "gpu");
	if (IS_ERR(tdev->rst)) {
		ret = PTR_ERR(tdev->rst);
		goto free;
	}

	tdev->clk = devm_clk_get(&pdev->dev, "gpu");
	if (IS_ERR(tdev->clk)) {
		ret = PTR_ERR(tdev->clk);
		goto free;
	}

	/* If the GPU clock has no rate yet, ask for the maximum the clock
	 * tree allows (clk_set_rate rounds ULONG_MAX down). */
	rate = clk_get_rate(tdev->clk);
	if (rate == 0) {
		ret = clk_set_rate(tdev->clk, ULONG_MAX);
		if (ret < 0)
			goto free;

		rate = clk_get_rate(tdev->clk);

		dev_dbg(&pdev->dev, "GPU clock set to %lu\n", rate);
	}

	/* The IS_ERR() check runs even when require_ref_clk is unset; that
	 * is safe because clk_ref stays NULL then and IS_ERR(NULL) is
	 * false. */
	if (func->require_ref_clk)
		tdev->clk_ref = devm_clk_get(&pdev->dev, "ref");
	if (IS_ERR(tdev->clk_ref)) {
		ret = PTR_ERR(tdev->clk_ref);
		goto free;
	}

	tdev->clk_pwr = devm_clk_get(&pdev->dev, "pwr");
	if (IS_ERR(tdev->clk_pwr)) {
		ret = PTR_ERR(tdev->clk_pwr);
		goto free;
	}

	/*
	 * The IOMMU bit defines the upper limit of the GPU-addressable space.
	 */
	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(tdev->func->iommu_bit));
	if (ret)
		goto free;

	/* Best effort — a failed IOMMU probe leaves the device usable. */
	nvkm_device_tegra_probe_iommu(tdev);

	ret = nvkm_device_tegra_power_up(tdev);
	if (ret)
		goto remove;

	tdev->gpu_speedo = tegra_sku_info.gpu_speedo_value;
	tdev->gpu_speedo_id = tegra_sku_info.gpu_speedo_id;
	ret = nvkm_device_ctor(&nvkm_device_tegra_func, NULL, &pdev->dev,
			       NVKM_DEVICE_TEGRA, pdev->id, NULL,
			       cfg, dbg, detect, mmio, subdev_mask,
			       &tdev->device);
	if (ret)
		goto powerdown;

	*pdevice = &tdev->device;

	return 0;

powerdown:
	nvkm_device_tegra_power_down(tdev);
remove:
	nvkm_device_tegra_remove_iommu(tdev);
free:
	kfree(tdev);
	return ret;
}
369 | #else | |
/*
 * Stub used when CONFIG_NOUVEAU_PLATFORM_DRIVER is disabled: Tegra
 * platform devices are not supported in this build, so creation always
 * fails with -ENOSYS.
 */
int
nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
		      struct platform_device *pdev,
		      const char *cfg, const char *dbg,
		      bool detect, bool mmio, u64 subdev_mask,
		      struct nvkm_device **pdevice)
{
	return -ENOSYS;
}
379 | #endif |