/*
 * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>

#include <soc/tegra/fuse.h>
#include <soc/tegra/pmc.h>

#include "nouveau_drm.h"
#include "nouveau_platform.h"
37 static int nouveau_platform_power_up(struct nouveau_platform_gpu *gpu)
41 err = regulator_enable(gpu->vdd);
45 err = clk_prepare_enable(gpu->clk);
48 err = clk_prepare_enable(gpu->clk_pwr);
51 clk_set_rate(gpu->clk_pwr, 204000000);
54 reset_control_assert(gpu->rst);
57 err = tegra_powergate_remove_clamping(TEGRA_POWERGATE_3D);
62 reset_control_deassert(gpu->rst);
68 clk_disable_unprepare(gpu->clk_pwr);
70 clk_disable_unprepare(gpu->clk);
72 regulator_disable(gpu->vdd);
77 static int nouveau_platform_power_down(struct nouveau_platform_gpu *gpu)
81 reset_control_assert(gpu->rst);
84 clk_disable_unprepare(gpu->clk_pwr);
85 clk_disable_unprepare(gpu->clk);
88 err = regulator_disable(gpu->vdd);
95 static void nouveau_platform_probe_iommu(struct device *dev,
96 struct nouveau_platform_gpu *gpu)
99 unsigned long pgsize_bitmap;
101 mutex_init(&gpu->iommu.mutex);
103 if (iommu_present(&platform_bus_type)) {
104 gpu->iommu.domain = iommu_domain_alloc(&platform_bus_type);
105 if (IS_ERR(gpu->iommu.domain))
109 * A IOMMU is only usable if it supports page sizes smaller
110 * or equal to the system's PAGE_SIZE, with a preference if
113 pgsize_bitmap = gpu->iommu.domain->ops->pgsize_bitmap;
114 if (pgsize_bitmap & PAGE_SIZE) {
115 gpu->iommu.pgshift = PAGE_SHIFT;
117 gpu->iommu.pgshift = fls(pgsize_bitmap & ~PAGE_MASK);
118 if (gpu->iommu.pgshift == 0) {
119 dev_warn(dev, "unsupported IOMMU page size\n");
122 gpu->iommu.pgshift -= 1;
125 err = iommu_attach_device(gpu->iommu.domain, dev);
129 err = nvkm_mm_init(&gpu->iommu._mm, 0,
130 (1ULL << 40) >> gpu->iommu.pgshift, 1);
134 gpu->iommu.mm = &gpu->iommu._mm;
140 iommu_detach_device(gpu->iommu.domain, dev);
143 iommu_domain_free(gpu->iommu.domain);
146 gpu->iommu.domain = NULL;
147 gpu->iommu.pgshift = 0;
148 dev_err(dev, "cannot initialize IOMMU MM\n");
151 static void nouveau_platform_remove_iommu(struct device *dev,
152 struct nouveau_platform_gpu *gpu)
154 if (gpu->iommu.domain) {
155 nvkm_mm_fini(&gpu->iommu._mm);
156 iommu_detach_device(gpu->iommu.domain, dev);
157 iommu_domain_free(gpu->iommu.domain);
161 static int nouveau_platform_probe(struct platform_device *pdev)
163 struct nouveau_platform_gpu *gpu;
164 struct nouveau_platform_device *device;
165 struct drm_device *drm;
168 gpu = devm_kzalloc(&pdev->dev, sizeof(*gpu), GFP_KERNEL);
172 gpu->vdd = devm_regulator_get(&pdev->dev, "vdd");
173 if (IS_ERR(gpu->vdd))
174 return PTR_ERR(gpu->vdd);
176 gpu->rst = devm_reset_control_get(&pdev->dev, "gpu");
177 if (IS_ERR(gpu->rst))
178 return PTR_ERR(gpu->rst);
180 gpu->clk = devm_clk_get(&pdev->dev, "gpu");
181 if (IS_ERR(gpu->clk))
182 return PTR_ERR(gpu->clk);
184 gpu->clk_pwr = devm_clk_get(&pdev->dev, "pwr");
185 if (IS_ERR(gpu->clk_pwr))
186 return PTR_ERR(gpu->clk_pwr);
188 nouveau_platform_probe_iommu(&pdev->dev, gpu);
190 err = nouveau_platform_power_up(gpu);
194 drm = nouveau_platform_device_create(pdev, &device);
201 device->gpu_speedo = tegra_sku_info.gpu_speedo_value;
203 err = drm_dev_register(drm, 0);
213 nouveau_platform_power_down(gpu);
214 nouveau_platform_remove_iommu(&pdev->dev, gpu);
219 static int nouveau_platform_remove(struct platform_device *pdev)
221 struct drm_device *drm_dev = platform_get_drvdata(pdev);
222 struct nouveau_drm *drm = nouveau_drm(drm_dev);
223 struct nvkm_device *device = nvxx_device(&drm->device);
224 struct nouveau_platform_gpu *gpu = nv_device_to_platform(device)->gpu;
227 nouveau_drm_device_remove(drm_dev);
229 err = nouveau_platform_power_down(gpu);
231 nouveau_platform_remove_iommu(&pdev->dev, gpu);
236 #if IS_ENABLED(CONFIG_OF)
237 static const struct of_device_id nouveau_platform_match[] = {
238 { .compatible = "nvidia,gk20a" },
242 MODULE_DEVICE_TABLE(of, nouveau_platform_match);
245 struct platform_driver nouveau_platform_driver = {
248 .of_match_table = of_match_ptr(nouveau_platform_match),
250 .probe = nouveau_platform_probe,
251 .remove = nouveau_platform_remove,