1 /* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
2 /* vi: set ts=8 sw=8 sts=8: */
3 /*************************************************************************/ /*!
5 @Codingstyle LinuxKernel
6 @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
7 @License Dual MIT/GPLv2
9 The contents of this file are subject to the MIT license as set out below.
11 Permission is hereby granted, free of charge, to any person obtaining a copy
12 of this software and associated documentation files (the "Software"), to deal
13 in the Software without restriction, including without limitation the rights
14 to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
15 copies of the Software, and to permit persons to whom the Software is
16 furnished to do so, subject to the following conditions:
18 The above copyright notice and this permission notice shall be included in
19 all copies or substantial portions of the Software.
21 Alternatively, the contents of this file may be used under the terms of
22 the GNU General Public License Version 2 ("GPL") in which case the provisions
23 of GPL are applicable instead of those above.
25 If you wish to allow use of your version of this file only under the terms of
26 GPL, and not to allow others to use your version of this file under the terms
27 of the MIT license, indicate your decision by deleting the provisions above
28 and replace them with the notice and other provisions required by GPL as set
29 out in the file called "GPL-COPYING" included in this distribution. If you do
30 not delete the provisions above, a recipient may use your version of this file
31 under the terms of either the MIT license or GPL.
33 This License is also included in this distribution in the file called
36 EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
37 PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
38 BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
39 PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
40 COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
41 IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
42 CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
43 */ /**************************************************************************/
46 /* This is an example ADF display driver for the testchip's PDP output */
49 /* #define SUPPORT_ADF_PDP_FBDEV */
51 #include <linux/module.h>
52 #include <linux/device.h>
53 #include <linux/pci.h>
54 #include <linux/interrupt.h>
55 #include <linux/delay.h>
56 #include <linux/platform_device.h>
57 #include <linux/wait.h>
59 #include <drm/drm_fourcc.h>
61 #include <video/adf.h>
62 #include <video/adf_client.h>
64 #ifdef SUPPORT_ADF_PDP_FBDEV
65 #include <video/adf_fbdev.h>
68 #include PVR_ANDROID_ION_HEADER
70 /* for sync_fence_put */
71 #include PVR_ANDROID_SYNC_HEADER
73 #include "apollo_drv.h"
74 #include "adf_common.h"
75 #include "debugfs_dma_buf.h"
78 #include "tcf_rgbpdp_regs.h"
81 #include "pvrmodule.h"
83 #define DRV_NAME APOLLO_DEVICE_NAME_PDP
86 #define ADF_PDP_WIDTH 1280
89 #ifndef ADF_PDP_HEIGHT
90 #define ADF_PDP_HEIGHT 720
93 MODULE_DESCRIPTION("APOLLO PDP display driver");
95 static int pdp_display_width = ADF_PDP_WIDTH;
96 static int pdp_display_height = ADF_PDP_HEIGHT;
97 module_param(pdp_display_width, int, S_IRUSR | S_IRGRP | S_IROTH);
98 MODULE_PARM_DESC(pdp_display_width, "PDP display width");
99 module_param(pdp_display_height, int, S_IRUSR | S_IRGRP | S_IROTH);
100 MODULE_PARM_DESC(pdp_display_height, "PDP display height");
102 static DEFINE_SPINLOCK(gFlipLock);
104 struct pdp_timing_data {
124 static const struct pdp_timing_data pdp_supported_modes[] = {
129 .h_active_start = 144,
130 .h_left_border = 144,
131 .h_right_border = 784,
132 .h_front_porch = 784,
137 .v_active_start = 16,
139 .v_bottom_border = 496,
140 .v_front_porch = 496,
143 .clock_freq = 23856000,
149 .h_active_start = 192,
150 .h_left_border = 192,
151 .h_right_border = 992,
152 .h_front_porch = 992,
157 .v_active_start = 20,
159 .v_bottom_border = 620,
160 .v_front_porch = 620,
163 .clock_freq = 38154000,
169 .h_active_start = 264,
170 .h_left_border = 264,
171 .h_right_border = 1288,
172 .h_front_porch = 1288,
177 .v_active_start = 26,
179 .v_bottom_border = 794,
180 .v_front_porch = 794,
183 .clock_freq = 64108000,
189 .h_active_start = 328,
190 .h_left_border = 328,
191 .h_right_border = 1608,
192 .h_front_porch = 1608,
197 .v_active_start = 24,
199 .v_bottom_border = 744,
200 .v_front_porch = 744,
203 .clock_freq = 74380000,
209 .h_active_start = 336,
210 .h_left_border = 336,
211 .h_right_border = 1616,
212 .h_front_porch = 1616,
217 .v_active_start = 26,
219 .v_bottom_border = 794,
220 .v_front_porch = 794,
223 .clock_freq = 80136000,
229 .h_active_start = 336,
230 .h_left_border = 336,
231 .h_right_border = 1616,
232 .h_front_porch = 1616,
237 .v_active_start = 27,
239 .v_bottom_border = 827,
240 .v_front_porch = 827,
243 .clock_freq = 83462000,
249 .h_active_start = 352,
250 .h_left_border = 352,
251 .h_right_border = 1632,
252 .h_front_porch = 1632,
257 .v_active_start = 34,
259 .v_bottom_border = 1058,
260 .v_front_porch = 1058,
263 .clock_freq = 108780000,
269 struct adf_pdp_device {
270 struct ion_client *ion_client;
272 struct adf_device adf_device;
273 struct adf_interface adf_interface;
274 struct adf_overlay_engine adf_overlay;
275 #ifdef SUPPORT_ADF_PDP_FBDEV
276 struct adf_fbdev adf_fbdev;
279 struct platform_device *pdev;
281 struct apollo_pdp_platform_data *pdata;
284 resource_size_t regs_size;
286 void __iomem *pll_regs;
287 resource_size_t pll_regs_size;
289 struct drm_mode_modeinfo *supported_modes;
290 int num_supported_modes;
292 const struct pdp_timing_data *current_timings;
296 atomic_t num_validates;
299 atomic_t vsync_triggered;
300 wait_queue_head_t vsync_wait_queue;
301 atomic_t requested_vsync_state;
302 atomic_t vsync_state;
304 /* This is set when the last client has released this device, causing
305 * all outstanding posts to be ignored
316 static const u32 pdp_supported_formats[] = {
319 #define NUM_SUPPORTED_FORMATS 1
321 static const struct {
325 } pdp_format_table[] = {
326 { DRM_FORMAT_BGRA8888, 4, DCPDP_STR1SURF_FORMAT_ARGB8888 },
330 static int pdp_mode_count(struct adf_pdp_device *pdp)
334 while (pdp_supported_modes[i].h_display)
339 static int pdp_mode_id(struct adf_pdp_device *pdp, u32 height, u32 width)
343 for (i = 0; pdp_supported_modes[i].h_display; i++) {
344 const struct pdp_timing_data *tdata = &pdp_supported_modes[i];
346 if (tdata->h_display == width && tdata->v_display == height)
349 dev_err(&pdp->pdev->dev, "Failed to find matching mode for %dx%d\n",
354 static const struct pdp_timing_data *pdp_timing_data(
355 struct adf_pdp_device *pdp, int mode_id)
357 if (mode_id >= pdp_mode_count(pdp) || mode_id < 0)
359 return &pdp_supported_modes[mode_id];
362 static void pdp_mode_to_drm_mode(struct adf_pdp_device *pdp, int mode_id,
363 struct drm_mode_modeinfo *drm_mode)
365 const struct pdp_timing_data *pdp_mode = pdp_timing_data(pdp, mode_id);
367 BUG_ON(pdp_mode == NULL);
368 memset(drm_mode, 0, sizeof(*drm_mode));
370 drm_mode->hdisplay = pdp_mode->h_display;
371 drm_mode->vdisplay = pdp_mode->v_display;
372 drm_mode->vrefresh = pdp_mode->v_refresh;
374 adf_modeinfo_set_name(drm_mode);
377 static u32 pdp_read_reg(struct adf_pdp_device *pdp, resource_size_t reg_offset)
379 BUG_ON(reg_offset > pdp->regs_size-4);
380 return ioread32(pdp->regs + reg_offset);
383 static void pdp_write_reg(struct adf_pdp_device *pdp,
384 resource_size_t reg_offset, u32 reg_value)
386 BUG_ON(reg_offset > pdp->regs_size-4);
387 iowrite32(reg_value, pdp->regs + reg_offset);
390 static void pll_write_reg(struct adf_pdp_device *pdp,
391 resource_size_t reg_offset, u32 reg_value)
393 BUG_ON(reg_offset < TCF_PLL_PLL_PDP_CLK0);
394 BUG_ON(reg_offset > pdp->pll_regs_size + TCF_PLL_PLL_PDP_CLK0 - 4);
395 iowrite32(reg_value, pdp->pll_regs +
396 reg_offset - TCF_PLL_PLL_PDP_CLK0);
399 static void pdp_devres_release(struct device *dev, void *res)
401 /* No extra cleanup needed */
404 static u32 pdp_format_bpp(u32 drm_format)
408 for (i = 0; pdp_format_table[i].drm_format != 0; i++) {
409 if (pdp_format_table[i].drm_format == drm_format)
410 return pdp_format_table[i].bytes_per_pixel;
412 WARN(1, "Unsupported drm format");
416 static u32 pdp_format(u32 drm_format)
420 for (i = 0; pdp_format_table[i].drm_format != 0; i++) {
421 if (pdp_format_table[i].drm_format == drm_format)
422 return pdp_format_table[i].pixfmt_word;
424 WARN(1, "Unsupported drm format");
428 static void pdp_enable_scanout(struct adf_pdp_device *pdp)
431 /* Turn on scanout */
432 reg_value = pdp_read_reg(pdp, TCF_RGBPDP_PVR_TCF_RGBPDP_STR1ADDRCTRL);
433 reg_value &= ~(STR1STREN_MASK);
434 reg_value |= 0x1 << STR1STREN_SHIFT;
435 pdp_write_reg(pdp, TCF_RGBPDP_PVR_TCF_RGBPDP_STR1ADDRCTRL, reg_value);
438 static void pdp_disable_scanout(struct adf_pdp_device *pdp)
440 pdp_write_reg(pdp, TCF_RGBPDP_PVR_TCF_RGBPDP_STR1ADDRCTRL, 0);
443 static bool pdp_vsync_triggered(struct adf_pdp_device *pdp)
445 return atomic_read(&pdp->vsync_triggered) == 1;
448 static void pdp_enable_vsync(struct adf_pdp_device *pdp)
450 u32 reg_value = pdp_read_reg(pdp, TCF_RGBPDP_PVR_TCF_RGBPDP_INTENAB);
452 reg_value |= (0x1 << INTEN_VBLNK1_SHIFT);
453 pdp_write_reg(pdp, TCF_RGBPDP_PVR_TCF_RGBPDP_INTENAB, reg_value);
456 static void pdp_disable_vsync(struct adf_pdp_device *pdp)
458 u32 reg_value = pdp_read_reg(pdp, TCF_RGBPDP_PVR_TCF_RGBPDP_INTENAB);
460 reg_value &= ~(0x1 << INTEN_VBLNK1_SHIFT);
461 pdp_write_reg(pdp, TCF_RGBPDP_PVR_TCF_RGBPDP_INTENAB, reg_value);
464 static void pdp_enable_interrupt(struct adf_pdp_device *pdp)
467 err = apollo_enable_interrupt(pdp->pdev->dev.parent,
468 APOLLO_INTERRUPT_PDP);
470 dev_err(&pdp->pdev->dev,
471 "apollo_enable_interrupt failed (%d)\n", err);
475 static void pdp_disable_interrupt(struct adf_pdp_device *pdp)
478 err = apollo_disable_interrupt(pdp->pdev->dev.parent,
479 APOLLO_INTERRUPT_PDP);
481 dev_err(&pdp->pdev->dev,
482 "apollo_disable_interrupt failed (%d)\n", err);
486 static void pdp_post(struct adf_device *adf_dev, struct adf_post *cfg,
489 int num_validates_snapshot = *(int *)driver_state;
494 /* Set vsync wait timeout to 4x expected vsync */
495 struct adf_pdp_device *pdp = devres_find(adf_dev->dev,
496 pdp_devres_release, NULL, NULL);
498 msecs_to_jiffies((1000 / pdp->current_timings->v_refresh) * 4);
500 /* Null-flip handling, used to push buffers off screen during an error
501 * state to stop them blocking subsequent rendering
503 if (cfg->n_bufs == 0 || atomic_read(&pdp->released) == 1)
504 goto out_update_num_posts;
506 WARN_ON(cfg->n_bufs != 1);
507 WARN_ON(cfg->mappings->sg_tables[0]->nents != 1);
509 spin_lock_irqsave(&gFlipLock, flags);
511 buf_addr = sg_phys(cfg->mappings->sg_tables[0]->sgl);
512 /* Convert the cpu address to a device address */
513 buf_addr -= pdp->pdata->memory_base;
515 debugfs_dma_buf_set(cfg->bufs[0].dma_bufs[0]);
517 /* Set surface register w/height, width & format */
518 reg_value = (cfg->bufs[0].w-1) << STR1WIDTH_SHIFT;
519 reg_value |= (cfg->bufs[0].h-1) << STR1HEIGHT_SHIFT;
520 reg_value |= pdp_format(cfg->bufs[0].format) << STR1PIXFMT_SHIFT;
521 pdp->flip_registers.str1surf = reg_value;
523 /* Set stride register */
524 reg_value = (cfg->bufs[0].pitch[0] >> DCPDP_STR1POSN_STRIDE_SHIFT)-1;
525 pdp->flip_registers.str1posn = reg_value;
527 /* Set surface address without resetting any other bits in the
530 reg_value = pdp_read_reg(pdp, TCF_RGBPDP_PVR_TCF_RGBPDP_STR1ADDRCTRL);
531 reg_value &= ~(STR1BASE_MASK);
532 reg_value |= (buf_addr >> DCPDP_STR1ADDRCTRL_BASE_ADDR_SHIFT)
535 pdp->flip_registers.str1addrctrl = reg_value;
536 atomic_set(&pdp->vsync_triggered, 0);
538 spin_unlock_irqrestore(&gFlipLock, flags);
540 /* Wait until the buffer is on-screen, so we know the previous buffer
541 * has been retired and off-screen.
543 * If vsync was already off when this post was serviced, we need to
544 * enable the vsync again briefly so the register updates we shadowed
545 * above get applied and we don't signal the fence prematurely. One
546 * vsync afterwards, we'll disable the vsync again.
548 if (!atomic_xchg(&pdp->vsync_state, 1))
549 pdp_enable_vsync(pdp);
551 if (wait_event_timeout(pdp->vsync_wait_queue,
552 pdp_vsync_triggered(pdp), timeout) == 0) {
553 dev_err(&pdp->pdev->dev, "Post VSync wait timeout");
554 /* Undefined behaviour if this times out */
556 out_update_num_posts:
557 pdp->num_posts = num_validates_snapshot;
560 static bool pdp_supports_event(struct adf_obj *obj, enum adf_event_type type)
563 case ADF_OBJ_INTERFACE:
566 case ADF_EVENT_VSYNC:
577 static void pdp_irq_handler(void *data)
579 struct adf_pdp_device *pdp = data;
581 u32 int_status = pdp_read_reg(pdp, TCF_RGBPDP_PVR_TCF_RGBPDP_INTSTAT);
583 if (int_status & INTS_VBLNK1_MASK)
584 pdp_write_reg(pdp, TCF_RGBPDP_PVR_TCF_RGBPDP_INTCLEAR,
585 (0x1 << INTCLR_VBLNK1_SHIFT));
587 spin_lock_irqsave(&gFlipLock, flags);
589 /* If we're idle, and a vsync disable was requested, do it now.
590 * This code assumes that the HWC will always re-enable vsync
591 * explicitly before posting new configurations.
593 if (atomic_read(&pdp->num_validates) == pdp->num_posts) {
594 if (!atomic_read(&pdp->requested_vsync_state)) {
595 pdp_disable_vsync(pdp);
596 atomic_set(&pdp->vsync_state, 0);
600 if (int_status & INTS_VBLNK1_MASK) {
601 /* Write the registers for the next buffer to display */
602 pdp_write_reg(pdp, TCF_RGBPDP_PVR_TCF_RGBPDP_STR1SURF,
603 pdp->flip_registers.str1surf);
604 pdp_write_reg(pdp, TCF_RGBPDP_PVR_PDP_STR1POSN,
605 pdp->flip_registers.str1posn);
606 pdp_write_reg(pdp, TCF_RGBPDP_PVR_TCF_RGBPDP_STR1ADDRCTRL,
607 pdp->flip_registers.str1addrctrl);
608 pdp_enable_scanout(pdp);
610 adf_vsync_notify(&pdp->adf_interface, ktime_get());
611 atomic_set(&pdp->vsync_triggered, 1);
612 wake_up(&pdp->vsync_wait_queue);
615 spin_unlock_irqrestore(&gFlipLock, flags);
618 static void pdp_set_event(struct adf_obj *obj, enum adf_event_type type,
621 struct adf_pdp_device *pdp;
625 case ADF_EVENT_VSYNC:
627 pdp = devres_find(obj->parent->dev, pdp_devres_release,
629 atomic_set(&pdp->requested_vsync_state, enabled);
631 old = atomic_xchg(&pdp->vsync_state, enabled);
633 pdp_enable_vsync(pdp);
642 static void pdp_set_clocks(struct adf_pdp_device *pdp, u32 clock_freq_hz)
644 u32 clock_freq_mhz = (clock_freq_hz + 500000) / 1000000;
646 pll_write_reg(pdp, TCF_PLL_PLL_PDP_CLK0, clock_freq_mhz);
647 if (clock_freq_mhz >= 50)
648 pll_write_reg(pdp, TCF_PLL_PLL_PDP_CLK1TO5, 0);
650 pll_write_reg(pdp, TCF_PLL_PLL_PDP_CLK1TO5, 0x3);
652 pll_write_reg(pdp, TCF_PLL_PLL_PDP_DRP_GO, 1);
654 pll_write_reg(pdp, TCF_PLL_PLL_PDP_DRP_GO, 0);
657 static int pdp_modeset(struct adf_interface *intf,
658 struct drm_mode_modeinfo *mode)
662 struct adf_pdp_device *pdp = devres_find(intf->base.parent->dev,
663 pdp_devres_release, NULL, NULL);
664 int mode_id = pdp_mode_id(pdp, mode->vdisplay, mode->hdisplay);
665 const struct pdp_timing_data *tdata = pdp_timing_data(pdp, mode_id);
669 dev_err(&pdp->pdev->dev, "Failed to find mode for %ux%u\n",
670 mode->hdisplay, mode->vdisplay);
674 /* Disable scanout */
675 pdp_disable_scanout(pdp);
676 /* Disable sync gen */
677 reg_value = pdp_read_reg(pdp, TCF_RGBPDP_PVR_TCF_RGBPDP_SYNCCTRL);
678 reg_value &= ~(SYNCACTIVE_MASK);
679 pdp_write_reg(pdp, TCF_RGBPDP_PVR_TCF_RGBPDP_SYNCCTRL, reg_value);
681 pdp_set_clocks(pdp, tdata->clock_freq);
683 if (pdp_read_reg(pdp, TCF_RGBPDP_PVR_TCF_RGBPDP_STRCTRL)
685 /* Buffer request threshold */
686 pdp_write_reg(pdp, TCF_RGBPDP_PVR_TCF_RGBPDP_STRCTRL,
691 pdp_write_reg(pdp, TCF_RGBPDP_PVR_TCF_RGBPDP_BORDCOL, 0x00005544);
694 pdp_write_reg(pdp, TCF_RGBPDP_PVR_TCF_RGBPDP_UPDCTRL, 0);
697 reg_value = tdata->h_back_porch << HBPS_SHIFT;
698 reg_value |= tdata->h_total << HT_SHIFT;
699 pdp_write_reg(pdp, TCF_RGBPDP_PVR_TCF_RGBPDP_HSYNC1, reg_value);
701 reg_value = tdata->h_active_start << HAS_SHIFT;
702 reg_value |= tdata->h_left_border << HLBS_SHIFT;
703 pdp_write_reg(pdp, TCF_RGBPDP_PVR_TCF_RGBPDP_HSYNC2, reg_value);
705 reg_value = tdata->h_front_porch << HFPS_SHIFT;
706 reg_value |= tdata->h_right_border << HRBS_SHIFT;
707 pdp_write_reg(pdp, TCF_RGBPDP_PVR_TCF_RGBPDP_HSYNC3, reg_value);
710 reg_value = tdata->v_back_porch << VBPS_SHIFT;
711 reg_value |= tdata->v_total << VT_SHIFT;
712 pdp_write_reg(pdp, TCF_RGBPDP_PVR_TCF_RGBPDP_VSYNC1, reg_value);
714 reg_value = tdata->v_active_start << VAS_SHIFT;
715 reg_value |= tdata->v_top_border << VTBS_SHIFT;
716 pdp_write_reg(pdp, TCF_RGBPDP_PVR_TCF_RGBPDP_VSYNC2, reg_value);
718 reg_value = tdata->v_front_porch << VFPS_SHIFT;
719 reg_value |= tdata->v_bottom_border << VBBS_SHIFT;
720 pdp_write_reg(pdp, TCF_RGBPDP_PVR_TCF_RGBPDP_VSYNC3, reg_value);
722 /* Horizontal data enable */
723 reg_value = tdata->h_active_start << HDES_SHIFT;
724 reg_value |= tdata->h_front_porch << HDEF_SHIFT;
725 pdp_write_reg(pdp, TCF_RGBPDP_PVR_TCF_RGBPDP_HDECTRL, reg_value);
727 /* Vertical data enable */
728 reg_value = tdata->v_active_start << VDES_SHIFT;
729 reg_value |= tdata->v_front_porch << VDEF_SHIFT;
730 pdp_write_reg(pdp, TCF_RGBPDP_PVR_TCF_RGBPDP_VDECTRL, reg_value);
732 /* Vertical event start and vertical fetch start */
733 reg_value = tdata->v_back_porch << VFETCH_SHIFT;
734 reg_value |= tdata->v_front_porch << VEVENT_SHIFT;
735 pdp_write_reg(pdp, TCF_RGBPDP_PVR_TCF_RGBPDP_VEVENT, reg_value);
737 /* Enable sync gen last and set up polarities of sync/blank */
738 reg_value = 0x1 << SYNCACTIVE_SHIFT;
739 reg_value |= 0x1 << FIELDPOL_SHIFT;
740 reg_value |= 0x1 << BLNKPOL_SHIFT;
741 reg_value |= 0x1 << VSPOL_SHIFT;
742 reg_value |= 0x1 << HSPOL_SHIFT;
743 pdp_write_reg(pdp, TCF_RGBPDP_PVR_TCF_RGBPDP_SYNCCTRL, reg_value);
745 intf->current_mode = *mode;
746 pdp->current_timings = tdata;
752 static int pdp_blank(struct adf_interface *intf,
756 struct adf_pdp_device *pdp = devres_find(intf->base.parent->dev,
757 pdp_devres_release, NULL, NULL);
759 if (state != DRM_MODE_DPMS_OFF &&
760 state != DRM_MODE_DPMS_ON)
763 reg_value = pdp_read_reg(pdp, TCF_RGBPDP_PVR_TCF_RGBPDP_SYNCCTRL);
765 case DRM_MODE_DPMS_OFF:
766 reg_value |= 0x1 << POWERDN_SHIFT;
768 case DRM_MODE_DPMS_ON:
769 reg_value &= ~(POWERDN_MASK);
772 pdp_write_reg(pdp, TCF_RGBPDP_PVR_TCF_RGBPDP_SYNCCTRL, reg_value);
777 static int pdp_alloc_simple_buffer(struct adf_interface *intf, u16 w, u16 h,
778 u32 format, struct dma_buf **dma_buf, u32 *offset, u32 *pitch)
780 struct adf_pdp_device *pdp = devres_find(intf->base.parent->dev,
781 pdp_devres_release, NULL, NULL);
783 u32 size = w * h * pdp_format_bpp(format);
784 struct ion_handle *hdl = ion_alloc(pdp->ion_client, size, 0,
785 (1 << pdp->pdata->ion_heap_id), 0);
788 dev_err(&pdp->pdev->dev, "ion_alloc failed (%d)\n", err);
791 *dma_buf = ion_share_dma_buf(pdp->ion_client, hdl);
792 if (IS_ERR(*dma_buf)) {
794 dev_err(&pdp->pdev->dev,
795 "ion_share_dma_buf failed (%d)\n", err);
796 goto err_free_buffer;
798 *pitch = w * pdp_format_bpp(format);
801 ion_free(pdp->ion_client, hdl);
806 static int pdp_describe_simple_post(struct adf_interface *intf,
807 struct adf_buffer *fb, void *data, size_t *size)
809 struct adf_post_ext *post_ext = data;
812 struct drm_clip_rect full_screen = {
814 .y2 = ADF_PDP_HEIGHT,
817 /* NOTE: an upstream ADF bug means we can't test *size instead */
818 BUG_ON(sizeof(struct adf_post_ext) +
819 1 * sizeof(struct adf_buffer_config_ext)
820 > ADF_MAX_CUSTOM_DATA_SIZE);
822 *size = sizeof(struct adf_post_ext) +
823 1 * sizeof(struct adf_buffer_config_ext);
825 post_ext->post_id = ++post_id;
827 post_ext->bufs_ext[0].crop = full_screen;
828 post_ext->bufs_ext[0].display = full_screen;
829 post_ext->bufs_ext[0].transform = ADF_BUFFER_TRANSFORM_NONE_EXT;
830 post_ext->bufs_ext[0].blend_type = ADF_BUFFER_BLENDING_PREMULT_EXT;
831 post_ext->bufs_ext[0].plane_alpha = 0xff;
837 adf_pdp_open(struct adf_obj *obj, struct inode *inode, struct file *file)
839 struct adf_device *dev =
840 (struct adf_device *)obj->parent;
841 struct adf_pdp_device *pdp = devres_find(dev->dev,
842 pdp_devres_release, NULL, NULL);
843 atomic_inc(&pdp->refcount);
844 atomic_set(&pdp->released, 0);
849 adf_pdp_release(struct adf_obj *obj, struct inode *inode, struct file *file)
851 struct adf_device *dev =
852 (struct adf_device *)obj->parent;
853 struct adf_pdp_device *pdp = devres_find(dev->dev,
854 pdp_devres_release, NULL, NULL);
855 struct sync_fence *release_fence;
857 if (atomic_dec_return(&pdp->refcount))
860 /* Make sure we have no outstanding posts waiting */
861 atomic_set(&pdp->released, 1);
862 atomic_set(&pdp->requested_vsync_state, 0);
863 atomic_set(&pdp->vsync_triggered, 1);
864 wake_up_all(&pdp->vsync_wait_queue);
865 /* This special "null" flip works around a problem with ADF
866 * which leaves buffers pinned by the display engine even
867 * after all ADF clients have closed.
869 * The "null" flip is pipelined like any other. The user won't
870 * be able to unload this module until it has been posted.
872 release_fence = adf_device_post(dev, NULL, 0, NULL, 0, NULL, 0);
873 if (IS_ERR_OR_NULL(release_fence)) {
875 "Failed to queue null flip command (err=%d).\n",
876 (int)PTR_ERR(release_fence));
880 sync_fence_put(release_fence);
883 static int pdp_validate(struct adf_device *dev, struct adf_post *cfg,
886 struct adf_pdp_device *pdp = devres_find(dev->dev,
887 pdp_devres_release, NULL, NULL);
888 int err = adf_img_validate_simple(dev, cfg, driver_state);
890 if (err == 0 && cfg->mappings) {
891 /* We store a snapshot of num_validates in driver_state at the
892 * time validate was called, which will be passed to the post
893 * function. This snapshot is copied into (i.e. overwrites)
894 * num_posts, rather then simply incrementing num_posts, to
895 * handle cases e.g. during fence timeouts where validates
896 * are called without corresponding posts.
898 int *validates = kmalloc(sizeof(*validates), GFP_KERNEL);
899 *validates = atomic_inc_return(&pdp->num_validates);
900 *driver_state = validates;
902 *driver_state = NULL;
907 static void pdp_state_free(struct adf_device *dev, void *driver_state)
912 static struct adf_device_ops adf_pdp_device_ops = {
913 .owner = THIS_MODULE,
915 .open = adf_pdp_open,
916 .release = adf_pdp_release,
917 .ioctl = adf_img_ioctl,
919 .state_free = pdp_state_free,
920 .validate = pdp_validate,
924 static struct adf_interface_ops adf_pdp_interface_ops = {
926 .supports_event = pdp_supports_event,
927 .set_event = pdp_set_event,
929 .modeset = pdp_modeset,
931 .alloc_simple_buffer = pdp_alloc_simple_buffer,
932 .describe_simple_post = pdp_describe_simple_post,
935 static struct adf_overlay_engine_ops adf_pdp_overlay_ops = {
936 .supported_formats = &pdp_supported_formats[0],
937 .n_supported_formats = NUM_SUPPORTED_FORMATS,
940 #ifdef SUPPORT_ADF_PDP_FBDEV
941 static struct fb_ops adf_pdp_fb_ops = {
942 .owner = THIS_MODULE,
943 .fb_open = adf_fbdev_open,
944 .fb_release = adf_fbdev_release,
945 .fb_check_var = adf_fbdev_check_var,
946 .fb_set_par = adf_fbdev_set_par,
947 .fb_blank = adf_fbdev_blank,
948 .fb_pan_display = adf_fbdev_pan_display,
949 .fb_fillrect = cfb_fillrect,
950 .fb_copyarea = cfb_copyarea,
951 .fb_imageblit = cfb_imageblit,
952 .fb_mmap = adf_fbdev_mmap,
956 static int adf_pdp_probe_device(struct platform_device *pdev)
958 struct adf_pdp_device *pdp;
960 int i, default_mode_id;
961 struct resource *registers;
962 struct pci_dev *pci_dev = to_pci_dev(pdev->dev.parent);
963 struct apollo_pdp_platform_data *pdata = pdev->dev.platform_data;
965 pdp = devres_alloc(pdp_devres_release, sizeof(*pdp),
971 devres_add(&pdev->dev, pdp);
976 err = pci_enable_device(pci_dev);
979 "Failed to enable PDP pci device (%d)\n", err);
983 atomic_set(&pdp->refcount, 0);
984 atomic_set(&pdp->num_validates, 0);
987 pdp->ion_client = ion_client_create(pdata->ion_device, "adf_pdp");
988 if (IS_ERR(pdp->ion_client)) {
989 err = PTR_ERR(pdp->ion_client);
991 "Failed to create PDP ION client (%d)\n", err);
992 goto err_disable_pci;
995 registers = platform_get_resource_byname(pdev,
998 pdp->regs = devm_ioremap_resource(&pdev->dev, registers);
999 if (IS_ERR(pdp->regs)) {
1000 err = PTR_ERR(pdp->regs);
1001 dev_err(&pdev->dev, "Failed to map PDP registers (%d)\n", err);
1002 goto err_destroy_ion_client;
1004 pdp->regs_size = resource_size(registers);
1006 registers = platform_get_resource_byname(pdev,
1009 pdp->pll_regs = devm_ioremap_resource(&pdev->dev, registers);
1010 if (IS_ERR(pdp->pll_regs)) {
1011 err = PTR_ERR(pdp->pll_regs);
1012 dev_err(&pdev->dev, "Failed to map PDP registers (%d)\n", err);
1013 goto err_destroy_ion_client;
1015 pdp->pll_regs_size = resource_size(registers);
1017 err = adf_device_init(&pdp->adf_device, &pdp->pdev->dev,
1018 &adf_pdp_device_ops, "pdp_device");
1020 dev_err(&pdev->dev, "Failed to init ADF device (%d)\n", err);
1021 goto err_destroy_ion_client;
1024 err = adf_interface_init(&pdp->adf_interface, &pdp->adf_device,
1025 ADF_INTF_DVI, 0, ADF_INTF_FLAG_PRIMARY, &adf_pdp_interface_ops,
1028 dev_err(&pdev->dev, "Failed to init ADF interface (%d)\n", err);
1029 goto err_destroy_adf_device;
1032 err = adf_overlay_engine_init(&pdp->adf_overlay, &pdp->adf_device,
1033 &adf_pdp_overlay_ops, "pdp_overlay");
1035 dev_err(&pdev->dev, "Failed to init ADF overlay (%d)\n", err);
1036 goto err_destroy_adf_interface;
1039 err = adf_attachment_allow(&pdp->adf_device, &pdp->adf_overlay,
1040 &pdp->adf_interface);
1042 dev_err(&pdev->dev, "Failed to attach overlay (%d)\n", err);
1043 goto err_destroy_adf_overlay;
1046 pdp->num_supported_modes = pdp_mode_count(pdp);
1047 pdp->supported_modes = kzalloc(sizeof(*pdp->supported_modes)
1048 * pdp->num_supported_modes, GFP_KERNEL);
1050 if (!pdp->supported_modes) {
1051 dev_err(&pdev->dev, "Failed to allocate supported modeinfo structs\n");
1053 goto err_destroy_adf_overlay;
1056 for (i = 0; i < pdp->num_supported_modes; i++)
1057 pdp_mode_to_drm_mode(pdp, i, &pdp->supported_modes[i]);
1059 default_mode_id = pdp_mode_id(pdp, pdp_display_height,
1061 if (default_mode_id == -1) {
1062 default_mode_id = 0;
1063 dev_err(&pdev->dev, "No modeline found for requested display size (%dx%d)\n",
1064 pdp_display_width, pdp_display_height);
1067 /* Initial modeset... */
1068 err = pdp_modeset(&pdp->adf_interface,
1069 &pdp->supported_modes[default_mode_id]);
1071 dev_err(&pdev->dev, "Initial modeset failed (%d)\n", err);
1072 goto err_destroy_modelist;
1075 err = adf_hotplug_notify_connected(&pdp->adf_interface,
1076 pdp->supported_modes, pdp->num_supported_modes);
1078 dev_err(&pdev->dev, "Initial hotplug notify failed (%d)\n",
1080 goto err_destroy_modelist;
1082 err = apollo_set_interrupt_handler(pdp->pdev->dev.parent,
1083 APOLLO_INTERRUPT_PDP,
1084 pdp_irq_handler, pdp);
1086 dev_err(&pdev->dev, "Failed to set interrupt handler (%d)\n",
1088 goto err_destroy_modelist;
1090 #ifdef SUPPORT_ADF_PDP_FBDEV
1091 err = adf_fbdev_init(&pdp->adf_fbdev, &pdp->adf_interface,
1092 &pdp->adf_overlay, pdp_display_width,
1093 pdp_display_height, DRM_FORMAT_BGRA8888,
1094 &adf_pdp_fb_ops, "adf_pdp_fb");
1096 dev_err(&pdev->dev, "Failed to init ADF fbdev (%d)\n", err);
1097 goto err_destroy_modelist;
1101 init_waitqueue_head(&pdp->vsync_wait_queue);
1102 atomic_set(&pdp->requested_vsync_state, 0);
1103 atomic_set(&pdp->vsync_state, 0);
1105 if (debugfs_dma_buf_init("pdp_raw"))
1106 dev_err(&pdev->dev, "Failed to create debug fs file for raw access\n");
1108 pdp_enable_interrupt(pdp);
1111 err_destroy_modelist:
1112 kfree(pdp->supported_modes);
1113 err_destroy_adf_overlay:
1114 adf_overlay_engine_destroy(&pdp->adf_overlay);
1115 err_destroy_adf_interface:
1116 adf_interface_destroy(&pdp->adf_interface);
1117 err_destroy_adf_device:
1118 adf_device_destroy(&pdp->adf_device);
1119 err_destroy_ion_client:
1120 ion_client_destroy(pdp->ion_client);
1122 pci_disable_device(pci_dev);
1124 dev_err(&pdev->dev, "Failed to initialise PDP device\n");
1128 static int adf_pdp_remove_device(struct platform_device *pdev)
1131 struct pci_dev *pci_dev = to_pci_dev(pdev->dev.parent);
1132 struct adf_pdp_device *pdp = devres_find(&pdev->dev, pdp_devres_release,
1135 debugfs_dma_buf_deinit();
1137 pdp_disable_scanout(pdp);
1139 pdp_disable_vsync(pdp);
1140 pdp_disable_interrupt(pdp);
1141 apollo_set_interrupt_handler(pdp->pdev->dev.parent,
1142 APOLLO_INTERRUPT_PDP,
1144 /* Disable scanout */
1145 pdp_write_reg(pdp, TCF_RGBPDP_PVR_TCF_RGBPDP_STR1ADDRCTRL, 0);
1146 kfree(pdp->supported_modes);
1147 #ifdef SUPPORT_ADF_PDP_FBDEV
1148 adf_fbdev_destroy(&pdp->adf_fbdev);
1150 adf_overlay_engine_destroy(&pdp->adf_overlay);
1151 adf_interface_destroy(&pdp->adf_interface);
1152 adf_device_destroy(&pdp->adf_device);
1153 ion_client_destroy(pdp->ion_client);
1154 pci_disable_device(pci_dev);
1158 static void adf_pdp_shutdown_device(struct platform_device *pdev)
1160 /* No cleanup needed, all done in remove_device */
1163 static struct platform_device_id pdp_platform_device_id_table[] = {
1164 { .name = APOLLO_DEVICE_NAME_PDP, .driver_data = 0 },
1168 static struct platform_driver pdp_platform_driver = {
1169 .probe = adf_pdp_probe_device,
1170 .remove = adf_pdp_remove_device,
1171 .shutdown = adf_pdp_shutdown_device,
1175 .id_table = pdp_platform_device_id_table,
1178 static int __init adf_pdp_init(void)
1180 return platform_driver_register(&pdp_platform_driver);
1183 static void __exit adf_pdp_exit(void)
1185 platform_driver_unregister(&pdp_platform_driver);
1188 module_init(adf_pdp_init);
1189 module_exit(adf_pdp_exit);