/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "msm_gpu.h"
#include "msm_gem.h"

/*
 * Power Management:
 */

#ifdef CONFIG_MSM_BUS_SCALING
#include <mach/board.h>
#include <mach/kgsl.h>
static void bs_init(struct msm_gpu *gpu, struct platform_device *pdev)
{
	struct drm_device *dev = gpu->dev;
	struct kgsl_device_platform_data *pdata = pdev->dev.platform_data;

	if (!pdata) {
		dev_err(dev->dev, "could not find gpu pdata\n");
		return;
	}

	if (pdata->bus_scale_table) {
		gpu->bsc = msm_bus_scale_register_client(pdata->bus_scale_table);
		DBG("bus scale client: %08x", gpu->bsc);
	}
}

static void bs_fini(struct msm_gpu *gpu)
{
	if (gpu->bsc) {
		msm_bus_scale_unregister_client(gpu->bsc);
		gpu->bsc = 0;
	}
}

static void bs_set(struct msm_gpu *gpu, int idx)
{
	if (gpu->bsc) {
		DBG("set bus scaling: %d", idx);
		msm_bus_scale_client_update_request(gpu->bsc, idx);
	}
}
#else
static void bs_init(struct msm_gpu *gpu, struct platform_device *pdev) {}
static void bs_fini(struct msm_gpu *gpu) {}
static void bs_set(struct msm_gpu *gpu, int idx) {}
#endif

static int enable_pwrrail(struct msm_gpu *gpu)
{
	struct drm_device *dev = gpu->dev;
	int ret = 0;

	if (gpu->gpu_reg) {
		ret = regulator_enable(gpu->gpu_reg);
		if (ret) {
			dev_err(dev->dev, "failed to enable 'gpu_reg': %d\n", ret);
			return ret;
		}
	}

	if (gpu->gpu_cx) {
		ret = regulator_enable(gpu->gpu_cx);
		if (ret) {
			dev_err(dev->dev, "failed to enable 'gpu_cx': %d\n", ret);
			return ret;
		}
	}

	return 0;
}

static int disable_pwrrail(struct msm_gpu *gpu)
{
	if (gpu->gpu_cx)
		regulator_disable(gpu->gpu_cx);
	if (gpu->gpu_reg)
		regulator_disable(gpu->gpu_reg);
	return 0;
}
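
/* The grp_clks[] loops below run top-down and skip index 0 (src_clk),
 * mirroring the downstream kgsl power control.  All clocks are prepared
 * first, the rate is set on the lowest-indexed valid clock (the last one
 * assigned to rate_clk), and only then is everything enabled, so the GPU
 * never runs at an unconfigured rate.
 */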
static int enable_clk(struct msm_gpu *gpu)
{
	struct clk *rate_clk = NULL;
	int i;

	/* NOTE: kgsl_pwrctrl_clk() ignores grp_clks[0].. */
	for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--) {
		if (gpu->grp_clks[i]) {
			clk_prepare(gpu->grp_clks[i]);
			rate_clk = gpu->grp_clks[i];
		}
	}

	if (rate_clk && gpu->fast_rate)
		clk_set_rate(rate_clk, gpu->fast_rate);

	for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--)
		if (gpu->grp_clks[i])
			clk_enable(gpu->grp_clks[i]);

	return 0;
}

static int disable_clk(struct msm_gpu *gpu)
{
	struct clk *rate_clk = NULL;
	int i;

	/* NOTE: kgsl_pwrctrl_clk() ignores grp_clks[0].. */
	for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--) {
		if (gpu->grp_clks[i]) {
			clk_disable(gpu->grp_clks[i]);
			rate_clk = gpu->grp_clks[i];
		}
	}

	if (rate_clk && gpu->slow_rate)
		clk_set_rate(rate_clk, gpu->slow_rate);

	for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--)
		if (gpu->grp_clks[i])
			clk_unprepare(gpu->grp_clks[i]);

	return 0;
}

static int enable_axi(struct msm_gpu *gpu)
{
	if (gpu->ebi1_clk)
		clk_prepare_enable(gpu->ebi1_clk);
	if (gpu->bus_freq)
		bs_set(gpu, gpu->bus_freq);
	return 0;
}

static int disable_axi(struct msm_gpu *gpu)
{
	if (gpu->ebi1_clk)
		clk_disable_unprepare(gpu->ebi1_clk);
	if (gpu->bus_freq)
		bs_set(gpu, 0);
	return 0;
}
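
/* Power-up sequence: rails first, then core clocks, then the AXI/bus
 * path; msm_gpu_pm_suspend() tears things down in the reverse order.
 */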
int msm_gpu_pm_resume(struct msm_gpu *gpu)
{
	int ret;

	DBG("%s", gpu->name);

	ret = enable_pwrrail(gpu);
	if (ret)
		return ret;

	ret = enable_clk(gpu);
	if (ret)
		return ret;

	ret = enable_axi(gpu);
	if (ret)
		return ret;

	return 0;
}

int msm_gpu_pm_suspend(struct msm_gpu *gpu)
{
	int ret;

	DBG("%s", gpu->name);

	ret = disable_axi(gpu);
	if (ret)
		return ret;

	ret = disable_clk(gpu);
	if (ret)
		return ret;

	ret = disable_pwrrail(gpu);
	if (ret)
		return ret;

	return 0;
}

/*
 * Cmdstream submission/retirement:
 */
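
/* The active_list is kept in submission order, and fences are allocated
 * monotonically under dev->struct_mutex (see msm_gpu_submit() below), so
 * retirement can walk from the head of the list and stop at the first
 * object whose fence the GPU has not yet passed.
 */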
static void retire_worker(struct work_struct *work)
{
	struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work);
	struct drm_device *dev = gpu->dev;
	uint32_t fence = gpu->funcs->last_fence(gpu);

	mutex_lock(&dev->struct_mutex);

	while (!list_empty(&gpu->active_list)) {
		struct msm_gem_object *obj;

		obj = list_first_entry(&gpu->active_list,
				struct msm_gem_object, mm_list);

		if (obj->fence <= fence) {
			/* move to inactive: */
			msm_gem_move_to_inactive(&obj->base);
			msm_gem_put_iova(&obj->base, gpu->id);
			drm_gem_object_unreference(&obj->base);
		} else {
			break;
		}
	}

	msm_update_fence(gpu->dev, fence);

	mutex_unlock(&dev->struct_mutex);
}

/* call from irq handler to schedule work to retire bo's */
void msm_gpu_retire(struct msm_gpu *gpu)
{
	struct msm_drm_private *priv = gpu->dev->dev_private;
	queue_work(priv->wq, &gpu->retire_work);
}
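
/* Each buffer that enters the active list below holds an extra GEM
 * reference and iova pin; retire_worker() is the matching drop point.
 */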
/* add bo's to gpu's ring, and kick gpu: */
int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
		struct msm_file_private *ctx)
{
	struct drm_device *dev = gpu->dev;
	struct msm_drm_private *priv = dev->dev_private;
	int i, ret;

	mutex_lock(&dev->struct_mutex);

	submit->fence = ++priv->next_fence;

	ret = gpu->funcs->submit(gpu, submit, ctx);
	priv->lastctx = ctx;

	for (i = 0; i < submit->nr_bos; i++) {
		struct msm_gem_object *msm_obj = submit->bos[i].obj;

		/* can't happen yet.. but when we add 2d support we'll have
		 * to deal w/ cross-ring synchronization:
		 */
		WARN_ON(is_active(msm_obj) && (msm_obj->gpu != gpu));

		if (!is_active(msm_obj)) {
			uint32_t iova;

			/* ring takes a reference to the bo and iova: */
			drm_gem_object_reference(&msm_obj->base);
			msm_gem_get_iova_locked(&msm_obj->base,
					submit->gpu->id, &iova);
		}

		msm_gem_move_to_active(&msm_obj->base, gpu, submit->fence);
	}
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

/*
 * Init/Cleanup:
 */

static irqreturn_t irq_handler(int irq, void *data)
{
	struct msm_gpu *gpu = data;
	return gpu->funcs->irq(gpu);
}

static const char *clk_names[] = {
		"src_clk", "core_clk", "iface_clk", "mem_clk", "mem_iface_clk",
};
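
/* Typical usage, roughly as in the a3xx/adreno backend (the resource
 * names are per-SoC and shown here only as an illustration):
 *
 *	ret = msm_gpu_init(drm, pdev, &gpu->base, &funcs, "a3xx",
 *			"kgsl_3d0_reg_memory", "kgsl_3d0_irq", RB_SIZE);
 *
 * ioname/irqname select the mem resource and IRQ by name from the
 * platform device, and ringsz is passed through to msm_ringbuffer_new().
 */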
int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
		struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
		const char *name, const char *ioname, const char *irqname, int ringsz)
{
	int i, ret;

	gpu->dev = drm;
	gpu->funcs = funcs;
	gpu->name = name;

	INIT_LIST_HEAD(&gpu->active_list);
	INIT_WORK(&gpu->retire_work, retire_worker);

	BUG_ON(ARRAY_SIZE(clk_names) != ARRAY_SIZE(gpu->grp_clks));

	/* Map registers: */
	gpu->mmio = msm_ioremap(pdev, ioname, name);
	if (IS_ERR(gpu->mmio)) {
		ret = PTR_ERR(gpu->mmio);
		goto fail;
	}

	/* Get Interrupt: */
	gpu->irq = platform_get_irq_byname(pdev, irqname);
	if (gpu->irq < 0) {
		ret = gpu->irq;
		dev_err(drm->dev, "failed to get irq: %d\n", ret);
		goto fail;
	}

	ret = devm_request_irq(&pdev->dev, gpu->irq, irq_handler,
			IRQF_TRIGGER_HIGH, gpu->name, gpu);
	if (ret) {
		dev_err(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret);
		goto fail;
	}

	/* Acquire clocks (a missing clock is not fatal, enable/disable
	 * simply skip NULL entries):
	 */
	for (i = 0; i < ARRAY_SIZE(clk_names); i++) {
		gpu->grp_clks[i] = devm_clk_get(&pdev->dev, clk_names[i]);
		DBG("grp_clks[%s]: %p", clk_names[i], gpu->grp_clks[i]);
		if (IS_ERR(gpu->grp_clks[i]))
			gpu->grp_clks[i] = NULL;
	}

	gpu->ebi1_clk = devm_clk_get(&pdev->dev, "bus_clk");
	DBG("ebi1_clk: %p", gpu->ebi1_clk);
	if (IS_ERR(gpu->ebi1_clk))
		gpu->ebi1_clk = NULL;

	/* Acquire regulators: */
	gpu->gpu_reg = devm_regulator_get(&pdev->dev, "vdd");
	DBG("gpu_reg: %p", gpu->gpu_reg);
	if (IS_ERR(gpu->gpu_reg))
		gpu->gpu_reg = NULL;

	gpu->gpu_cx = devm_regulator_get(&pdev->dev, "vddcx");
	DBG("gpu_cx: %p", gpu->gpu_cx);
	if (IS_ERR(gpu->gpu_cx))
		gpu->gpu_cx = NULL;

	/* Setup IOMMU.. eventually we will (I think) do this once per context
	 * and have separate page tables per context.  For now, to keep things
	 * simple and to get something working, just use a single address space:
	 */
	gpu->iommu = iommu_domain_alloc(&platform_bus_type);
	if (!gpu->iommu) {
		dev_err(drm->dev, "failed to allocate IOMMU\n");
		ret = -ENOMEM;
		goto fail;
	}
	gpu->id = msm_register_iommu(drm, gpu->iommu);

	/* Create ringbuffer: */
	gpu->rb = msm_ringbuffer_new(gpu, ringsz);
	if (IS_ERR(gpu->rb)) {
		ret = PTR_ERR(gpu->rb);
		gpu->rb = NULL;
		dev_err(drm->dev, "could not create ringbuffer: %d\n", ret);
		goto fail;
	}

	ret = msm_gem_get_iova_locked(gpu->rb->bo, gpu->id, &gpu->rb_iova);
	if (ret) {
		gpu->rb_iova = 0;
		dev_err(drm->dev, "could not map ringbuffer: %d\n", ret);
		goto fail;
	}

	bs_init(gpu, pdev);

	return 0;

fail:
	return ret;
}
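
/* Teardown assumes the GPU is already idle: active_list must be empty
 * by the time this runs (hence the WARN_ON below), since cleanup does
 * not wait for in-flight submits to retire.
 */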
void msm_gpu_cleanup(struct msm_gpu *gpu)
{
	DBG("%s", gpu->name);

	WARN_ON(!list_empty(&gpu->active_list));

	bs_fini(gpu);

	if (gpu->rb) {
		if (gpu->rb_iova)
			msm_gem_put_iova(gpu->rb->bo, gpu->id);
		msm_ringbuffer_destroy(gpu->rb);
	}

	if (gpu->iommu)
		iommu_domain_free(gpu->iommu);
}