/*
 * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
22 | #include <core/tegra.h> | |
23 | #ifdef CONFIG_NOUVEAU_PLATFORM_DRIVER | |
24 | #include "priv.h" | |
25 | ||
43a70661 BS |
26 | static int |
27 | nvkm_device_tegra_power_up(struct nvkm_device_tegra *tdev) | |
28 | { | |
29 | int ret; | |
30 | ||
e6e1817a AC |
31 | if (tdev->vdd) { |
32 | ret = regulator_enable(tdev->vdd); | |
33 | if (ret) | |
34 | goto err_power; | |
35 | } | |
43a70661 BS |
36 | |
37 | ret = clk_prepare_enable(tdev->clk); | |
38 | if (ret) | |
39 | goto err_clk; | |
34440ed6 AC |
40 | if (tdev->clk_ref) { |
41 | ret = clk_prepare_enable(tdev->clk_ref); | |
42 | if (ret) | |
43 | goto err_clk_ref; | |
44 | } | |
43a70661 BS |
45 | ret = clk_prepare_enable(tdev->clk_pwr); |
46 | if (ret) | |
47 | goto err_clk_pwr; | |
48 | clk_set_rate(tdev->clk_pwr, 204000000); | |
49 | udelay(10); | |
50 | ||
51 | reset_control_assert(tdev->rst); | |
52 | udelay(10); | |
53 | ||
b1df2425 MP |
54 | if (!tdev->pdev->dev.pm_domain) { |
55 | ret = tegra_powergate_remove_clamping(TEGRA_POWERGATE_3D); | |
56 | if (ret) | |
57 | goto err_clamp; | |
58 | udelay(10); | |
59 | } | |
43a70661 BS |
60 | |
61 | reset_control_deassert(tdev->rst); | |
62 | udelay(10); | |
63 | ||
64 | return 0; | |
65 | ||
66 | err_clamp: | |
67 | clk_disable_unprepare(tdev->clk_pwr); | |
68 | err_clk_pwr: | |
34440ed6 AC |
69 | if (tdev->clk_ref) |
70 | clk_disable_unprepare(tdev->clk_ref); | |
71 | err_clk_ref: | |
43a70661 BS |
72 | clk_disable_unprepare(tdev->clk); |
73 | err_clk: | |
e6e1817a AC |
74 | if (tdev->vdd) |
75 | regulator_disable(tdev->vdd); | |
43a70661 BS |
76 | err_power: |
77 | return ret; | |
78 | } | |
79 | ||
80 | static int | |
81 | nvkm_device_tegra_power_down(struct nvkm_device_tegra *tdev) | |
82 | { | |
e6e1817a AC |
83 | int ret; |
84 | ||
43a70661 | 85 | clk_disable_unprepare(tdev->clk_pwr); |
34440ed6 AC |
86 | if (tdev->clk_ref) |
87 | clk_disable_unprepare(tdev->clk_ref); | |
43a70661 BS |
88 | clk_disable_unprepare(tdev->clk); |
89 | udelay(10); | |
90 | ||
e6e1817a AC |
91 | if (tdev->vdd) { |
92 | ret = regulator_disable(tdev->vdd); | |
93 | if (ret) | |
94 | return ret; | |
95 | } | |
96 | ||
97 | return 0; | |
43a70661 BS |
98 | } |
99 | ||
100 | static void | |
101 | nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev) | |
102 | { | |
103 | #if IS_ENABLED(CONFIG_IOMMU_API) | |
104 | struct device *dev = &tdev->pdev->dev; | |
105 | unsigned long pgsize_bitmap; | |
106 | int ret; | |
107 | ||
e396ecd1 AC |
108 | if (!tdev->func->iommu_bit) |
109 | return; | |
110 | ||
43a70661 BS |
111 | mutex_init(&tdev->iommu.mutex); |
112 | ||
113 | if (iommu_present(&platform_bus_type)) { | |
114 | tdev->iommu.domain = iommu_domain_alloc(&platform_bus_type); | |
91cf301f | 115 | if (!tdev->iommu.domain) |
43a70661 BS |
116 | goto error; |
117 | ||
118 | /* | |
119 | * A IOMMU is only usable if it supports page sizes smaller | |
120 | * or equal to the system's PAGE_SIZE, with a preference if | |
121 | * both are equal. | |
122 | */ | |
123 | pgsize_bitmap = tdev->iommu.domain->ops->pgsize_bitmap; | |
124 | if (pgsize_bitmap & PAGE_SIZE) { | |
125 | tdev->iommu.pgshift = PAGE_SHIFT; | |
126 | } else { | |
127 | tdev->iommu.pgshift = fls(pgsize_bitmap & ~PAGE_MASK); | |
128 | if (tdev->iommu.pgshift == 0) { | |
129 | dev_warn(dev, "unsupported IOMMU page size\n"); | |
130 | goto free_domain; | |
131 | } | |
132 | tdev->iommu.pgshift -= 1; | |
133 | } | |
134 | ||
135 | ret = iommu_attach_device(tdev->iommu.domain, dev); | |
136 | if (ret) | |
137 | goto free_domain; | |
138 | ||
139 | ret = nvkm_mm_init(&tdev->iommu.mm, 0, | |
e396ecd1 AC |
140 | (1ULL << tdev->func->iommu_bit) >> |
141 | tdev->iommu.pgshift, 1); | |
43a70661 BS |
142 | if (ret) |
143 | goto detach_device; | |
144 | } | |
145 | ||
146 | return; | |
147 | ||
148 | detach_device: | |
149 | iommu_detach_device(tdev->iommu.domain, dev); | |
150 | ||
151 | free_domain: | |
152 | iommu_domain_free(tdev->iommu.domain); | |
153 | ||
154 | error: | |
155 | tdev->iommu.domain = NULL; | |
156 | tdev->iommu.pgshift = 0; | |
157 | dev_err(dev, "cannot initialize IOMMU MM\n"); | |
158 | #endif | |
159 | } | |
160 | ||
161 | static void | |
162 | nvkm_device_tegra_remove_iommu(struct nvkm_device_tegra *tdev) | |
163 | { | |
164 | #if IS_ENABLED(CONFIG_IOMMU_API) | |
165 | if (tdev->iommu.domain) { | |
166 | nvkm_mm_fini(&tdev->iommu.mm); | |
167 | iommu_detach_device(tdev->iommu.domain, tdev->device.dev); | |
168 | iommu_domain_free(tdev->iommu.domain); | |
169 | } | |
170 | #endif | |
171 | } | |
172 | ||
7974dd1b | 173 | static struct nvkm_device_tegra * |
7e8820fe | 174 | nvkm_device_tegra(struct nvkm_device *device) |
7974dd1b | 175 | { |
7e8820fe BS |
176 | return container_of(device, struct nvkm_device_tegra, device); |
177 | } | |
178 | ||
179 | static struct resource * | |
180 | nvkm_device_tegra_resource(struct nvkm_device *device, unsigned bar) | |
181 | { | |
182 | struct nvkm_device_tegra *tdev = nvkm_device_tegra(device); | |
183 | return platform_get_resource(tdev->pdev, IORESOURCE_MEM, bar); | |
184 | } | |
185 | ||
186 | static resource_size_t | |
187 | nvkm_device_tegra_resource_addr(struct nvkm_device *device, unsigned bar) | |
188 | { | |
189 | struct resource *res = nvkm_device_tegra_resource(device, bar); | |
190 | return res ? res->start : 0; | |
191 | } | |
192 | ||
193 | static resource_size_t | |
194 | nvkm_device_tegra_resource_size(struct nvkm_device *device, unsigned bar) | |
195 | { | |
196 | struct resource *res = nvkm_device_tegra_resource(device, bar); | |
197 | return res ? resource_size(res) : 0; | |
7974dd1b BS |
198 | } |
199 | ||
2b700825 BS |
200 | static irqreturn_t |
201 | nvkm_device_tegra_intr(int irq, void *arg) | |
202 | { | |
203 | struct nvkm_device_tegra *tdev = arg; | |
d3981190 | 204 | struct nvkm_device *device = &tdev->device; |
2b700825 | 205 | bool handled = false; |
d3981190 BS |
206 | nvkm_mc_intr_unarm(device); |
207 | nvkm_mc_intr(device, &handled); | |
208 | nvkm_mc_intr_rearm(device); | |
2b700825 BS |
209 | return handled ? IRQ_HANDLED : IRQ_NONE; |
210 | } | |
211 | ||
212 | static void | |
213 | nvkm_device_tegra_fini(struct nvkm_device *device, bool suspend) | |
214 | { | |
215 | struct nvkm_device_tegra *tdev = nvkm_device_tegra(device); | |
216 | if (tdev->irq) { | |
217 | free_irq(tdev->irq, tdev); | |
218 | tdev->irq = 0; | |
219 | }; | |
220 | } | |
221 | ||
222 | static int | |
223 | nvkm_device_tegra_init(struct nvkm_device *device) | |
224 | { | |
225 | struct nvkm_device_tegra *tdev = nvkm_device_tegra(device); | |
226 | int irq, ret; | |
227 | ||
228 | irq = platform_get_irq_byname(tdev->pdev, "stall"); | |
229 | if (irq < 0) | |
230 | return irq; | |
231 | ||
232 | ret = request_irq(irq, nvkm_device_tegra_intr, | |
233 | IRQF_SHARED, "nvkm", tdev); | |
234 | if (ret) | |
235 | return ret; | |
236 | ||
237 | tdev->irq = irq; | |
238 | return 0; | |
239 | } | |
240 | ||
43a70661 BS |
241 | static void * |
242 | nvkm_device_tegra_dtor(struct nvkm_device *device) | |
243 | { | |
244 | struct nvkm_device_tegra *tdev = nvkm_device_tegra(device); | |
245 | nvkm_device_tegra_power_down(tdev); | |
246 | nvkm_device_tegra_remove_iommu(tdev); | |
247 | return tdev; | |
248 | } | |
249 | ||
7974dd1b BS |
250 | static const struct nvkm_device_func |
251 | nvkm_device_tegra_func = { | |
252 | .tegra = nvkm_device_tegra, | |
43a70661 | 253 | .dtor = nvkm_device_tegra_dtor, |
2b700825 BS |
254 | .init = nvkm_device_tegra_init, |
255 | .fini = nvkm_device_tegra_fini, | |
7e8820fe BS |
256 | .resource_addr = nvkm_device_tegra_resource_addr, |
257 | .resource_size = nvkm_device_tegra_resource_size, | |
bad3d80f | 258 | .cpu_coherent = false, |
7974dd1b BS |
259 | }; |
260 | ||
261 | int | |
e396ecd1 AC |
262 | nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func, |
263 | struct platform_device *pdev, | |
7974dd1b BS |
264 | const char *cfg, const char *dbg, |
265 | bool detect, bool mmio, u64 subdev_mask, | |
266 | struct nvkm_device **pdevice) | |
267 | { | |
268 | struct nvkm_device_tegra *tdev; | |
43a70661 | 269 | int ret; |
7974dd1b BS |
270 | |
271 | if (!(tdev = kzalloc(sizeof(*tdev), GFP_KERNEL))) | |
272 | return -ENOMEM; | |
870571a5 | 273 | |
e396ecd1 | 274 | tdev->func = func; |
7974dd1b BS |
275 | tdev->pdev = pdev; |
276 | ||
e6e1817a AC |
277 | if (func->require_vdd) { |
278 | tdev->vdd = devm_regulator_get(&pdev->dev, "vdd"); | |
279 | if (IS_ERR(tdev->vdd)) { | |
280 | ret = PTR_ERR(tdev->vdd); | |
281 | goto free; | |
282 | } | |
870571a5 | 283 | } |
43a70661 BS |
284 | |
285 | tdev->rst = devm_reset_control_get(&pdev->dev, "gpu"); | |
870571a5 TR |
286 | if (IS_ERR(tdev->rst)) { |
287 | ret = PTR_ERR(tdev->rst); | |
288 | goto free; | |
289 | } | |
43a70661 BS |
290 | |
291 | tdev->clk = devm_clk_get(&pdev->dev, "gpu"); | |
870571a5 TR |
292 | if (IS_ERR(tdev->clk)) { |
293 | ret = PTR_ERR(tdev->clk); | |
294 | goto free; | |
295 | } | |
43a70661 | 296 | |
34440ed6 AC |
297 | if (func->require_ref_clk) |
298 | tdev->clk_ref = devm_clk_get(&pdev->dev, "ref"); | |
299 | if (IS_ERR(tdev->clk_ref)) { | |
300 | ret = PTR_ERR(tdev->clk_ref); | |
301 | goto free; | |
302 | } | |
303 | ||
43a70661 | 304 | tdev->clk_pwr = devm_clk_get(&pdev->dev, "pwr"); |
870571a5 TR |
305 | if (IS_ERR(tdev->clk_pwr)) { |
306 | ret = PTR_ERR(tdev->clk_pwr); | |
307 | goto free; | |
308 | } | |
43a70661 | 309 | |
9d0394c6 AC |
310 | /** |
311 | * The IOMMU bit defines the upper limit of the GPU-addressable space. | |
312 | * This will be refined in nouveau_ttm_init but we need to do it early | |
313 | * for instmem to behave properly | |
314 | */ | |
315 | ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(tdev->func->iommu_bit)); | |
316 | if (ret) | |
317 | goto free; | |
318 | ||
43a70661 BS |
319 | nvkm_device_tegra_probe_iommu(tdev); |
320 | ||
321 | ret = nvkm_device_tegra_power_up(tdev); | |
322 | if (ret) | |
870571a5 | 323 | goto remove; |
43a70661 BS |
324 | |
325 | tdev->gpu_speedo = tegra_sku_info.gpu_speedo_value; | |
d2680907 | 326 | tdev->gpu_speedo_id = tegra_sku_info.gpu_speedo_id; |
43a70661 BS |
327 | ret = nvkm_device_ctor(&nvkm_device_tegra_func, NULL, &pdev->dev, |
328 | NVKM_DEVICE_TEGRA, pdev->id, NULL, | |
329 | cfg, dbg, detect, mmio, subdev_mask, | |
330 | &tdev->device); | |
331 | if (ret) | |
870571a5 TR |
332 | goto powerdown; |
333 | ||
334 | *pdevice = &tdev->device; | |
43a70661 BS |
335 | |
336 | return 0; | |
870571a5 TR |
337 | |
338 | powerdown: | |
339 | nvkm_device_tegra_power_down(tdev); | |
340 | remove: | |
341 | nvkm_device_tegra_remove_iommu(tdev); | |
342 | free: | |
343 | kfree(tdev); | |
344 | return ret; | |
7974dd1b BS |
345 | } |
346 | #else | |
347 | int | |
e396ecd1 AC |
348 | nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func, |
349 | struct platform_device *pdev, | |
7974dd1b BS |
350 | const char *cfg, const char *dbg, |
351 | bool detect, bool mmio, u64 subdev_mask, | |
352 | struct nvkm_device **pdevice) | |
353 | { | |
354 | return -ENOSYS; | |
355 | } | |
356 | #endif |