1 /* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
2 /* vi: set ts=8 sw=8 sts=8: */
3 /*************************************************************************/ /*!
5 @Codingstyle LinuxKernel
6 @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
7 @License Dual MIT/GPLv2
9 The contents of this file are subject to the MIT license as set out below.
11 Permission is hereby granted, free of charge, to any person obtaining a copy
12 of this software and associated documentation files (the "Software"), to deal
13 in the Software without restriction, including without limitation the rights
14 to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
15 copies of the Software, and to permit persons to whom the Software is
16 furnished to do so, subject to the following conditions:
18 The above copyright notice and this permission notice shall be included in
19 all copies or substantial portions of the Software.
21 Alternatively, the contents of this file may be used under the terms of
22 the GNU General Public License Version 2 ("GPL") in which case the provisions
23 of GPL are applicable instead of those above.
25 If you wish to allow use of your version of this file only under the terms of
26 GPL, and not to allow others to use your version of this file under the terms
27 of the MIT license, indicate your decision by deleting the provisions above
28 and replace them with the notice and other provisions required by GPL as set
29 out in the file called "GPL-COPYING" included in this distribution. If you do
30 not delete the provisions above, a recipient may use your version of this file
31 under the terms of either the MIT license or GPL.
33 This License is also included in this distribution in the file called
36 EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
37 PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
38 BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
39 PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
40 COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
41 IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
42 CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
43 */ /**************************************************************************/
46 * This is an example ADF display driver for the testchip's 5 PDP with fbdc
50 #include <linux/module.h>
51 #include <linux/device.h>
52 #include <linux/pci.h>
53 #include <linux/interrupt.h>
54 #include <linux/delay.h>
55 #include <linux/platform_device.h>
56 #include <linux/wait.h>
58 #include <drm/drm_fourcc.h>
60 #include <video/adf.h>
61 #include <video/adf_client.h>
63 #include PVR_ANDROID_ION_HEADER
65 /* for sync_fence_put */
66 #include PVR_ANDROID_SYNC_HEADER
68 #include "apollo_drv.h"
69 #include "adf_common.h"
70 #include "debugfs_dma_buf.h"
72 #include "pvrmodule.h"
74 #include "pdp_tc5_regs.h"
75 #include "pdp_tc5_fbdc_regs.h"
/* Driver name and default display geometry. ADF_PDP_HEIGHT is guarded so a
 * build system may override it; presumably ADF_PDP_WIDTH has a matching guard
 * not visible here — TODO confirm against the full source. */
77 #define DRV_NAME APOLLO_DEVICE_NAME_PDP
80 #define ADF_PDP_WIDTH 1280
83 #ifndef ADF_PDP_HEIGHT
84 #define ADF_PDP_HEIGHT 720
/* Vendor fourcc for the FBDC-compressed BGRA 16x4-tile scanout format. */
87 #define DRM_FORMAT_BGRA8888_DIRECT_16x4 fourcc_code('I', 'M', 'G', '0')
89 MODULE_DESCRIPTION("APOLLO TC5 PDP display driver");
/* Requested mode, read-only module parameters (0444); matched against
 * pdp_supported_modes at probe time. */
91 static int pdp_display_width = ADF_PDP_WIDTH;
92 static int pdp_display_height = ADF_PDP_HEIGHT;
93 module_param(pdp_display_width, int, S_IRUSR | S_IRGRP | S_IROTH);
94 MODULE_PARM_DESC(pdp_display_width, "PDP display width");
95 module_param(pdp_display_height, int, S_IRUSR | S_IRGRP | S_IROTH);
96 MODULE_PARM_DESC(pdp_display_height, "PDP display height");
/* Serializes flip/IRQ state (taken in pdp_post and pdp_irq_handler). */
98 static DEFINE_SPINLOCK(gFlipLock);
/* Timing description for one display mode (h_*/v_* sync, border, porch and
 * display fields; full member list not visible in this extract). */
100 struct pdp_timing_data {
/* Mode table; terminated by an entry with h_display == 0 (see
 * pdp_mode_count). Only a 1280x720 entry appears to be present —
 * TODO confirm against the full source. */
119 static const struct pdp_timing_data pdp_supported_modes[] = {
124 .h_active_start = 260,
125 .h_left_border = 260,
126 .h_right_border = 1540,
127 .h_front_porch = 1540,
132 .v_active_start = 25,
134 .v_bottom_border = 745,
135 .v_front_porch = 745,
/* Per-device state, allocated with devres_alloc() in probe and looked up
 * everywhere else via devres_find(..., pdp_devres_release, ...). */
142 struct adf_pdp_device {
143 struct ion_client *ion_client;
145 struct adf_device adf_device;
146 struct adf_interface adf_interface;
147 struct adf_overlay_engine adf_overlay;
149 struct platform_device *pdev;
151 struct apollo_pdp_platform_data *pdata;
/* Three MMIO windows: PDP core registers, FBDC interface, ADV7511 i2c. */
154 resource_size_t regs_size;
156 void __iomem *fbdc_regs;
157 resource_size_t fbdc_regs_size;
159 void __iomem *i2c_regs;
160 resource_size_t i2c_regs_size;
162 struct drm_mode_modeinfo *supported_modes;
163 int num_supported_modes;
/* num_validates counts validate() calls; compared against num_posts in the
 * IRQ handler to decide when the pipeline is idle. */
167 atomic_t num_validates;
/* vsync handshake between pdp_post and pdp_irq_handler. */
170 atomic_t vsync_triggered;
171 wait_queue_head_t vsync_wait_queue;
172 atomic_t requested_vsync_state;
173 atomic_t vsync_state;
/* Mode applied by the last successful pdp_modeset(). */
175 const struct pdp_timing_data *current_timings;
176 u32 current_drm_format;
/* The only scanout format advertised to ADF: FBDC-compressed BGRA8888. */
181 static const u32 pdp_supported_formats[] = {
182 DRM_FORMAT_BGRA8888_DIRECT_16x4,
184 #define NUM_SUPPORTED_FORMATS 1
/* drm fourcc -> {bytes per pixel, GRPH1PIXFMT register value}; table is
 * zero-terminated on drm_format (see the lookup loops below). */
186 static const struct {
190 } pdp_format_table[] = {
191 /* 01000b / 8h 8-bit alpha + 24-bit rgb888 [RGBA] */
192 { DRM_FORMAT_BGRA8888_DIRECT_16x4, 4, 0x8 },
/* Count entries in pdp_supported_modes by walking until the h_display == 0
 * terminator. */
196 static int pdp_mode_count(struct adf_pdp_device *pdp)
200 while (pdp_supported_modes[i].h_display)
/* Find the mode-table index matching width x height; logs and (presumably)
 * returns -1 on no match — the failure return is not visible here, but the
 * probe path treats -1 as "not found". Note the argument order is
 * (height, width). */
205 static int pdp_mode_id(struct adf_pdp_device *pdp, u32 height, u32 width)
209 for (i = 0; pdp_supported_modes[i].h_display; i++) {
210 const struct pdp_timing_data *tdata = &pdp_supported_modes[i];
212 if (tdata->h_display == width && tdata->v_display == height)
215 dev_err(&pdp->pdev->dev, "Failed to find matching mode for %dx%d\n",
/* Bounds-checked access to the mode table; out-of-range ids presumably yield
 * NULL (return not visible) — callers BUG_ON/err-check the result. */
220 static const struct pdp_timing_data *pdp_timing_data(
221 struct adf_pdp_device *pdp, int mode_id)
223 if (mode_id >= pdp_mode_count(pdp) || mode_id < 0)
225 return &pdp_supported_modes[mode_id];
/* Translate a mode-table entry into a zeroed drm_mode_modeinfo (only
 * hdisplay/vdisplay/vrefresh are filled) and let ADF derive its name.
 * BUG_ON: callers must pass a valid mode_id. */
228 static void pdp_mode_to_drm_mode(struct adf_pdp_device *pdp, int mode_id,
229 struct drm_mode_modeinfo *drm_mode)
231 const struct pdp_timing_data *pdp_mode;
233 pdp_mode = pdp_timing_data(pdp, mode_id);
234 BUG_ON(pdp_mode == NULL);
236 memset(drm_mode, 0, sizeof(*drm_mode));
238 drm_mode->hdisplay = pdp_mode->h_display;
239 drm_mode->vdisplay = pdp_mode->v_display;
240 drm_mode->vrefresh = pdp_mode->v_refresh;
242 adf_modeinfo_set_name(drm_mode);
/* 32-bit MMIO read from the PDP core register window, with a bounds check
 * against the mapped size (reg_offset must leave room for 4 bytes). */
245 static u32 pdp_read_reg(struct adf_pdp_device *pdp, resource_size_t reg_offset)
247 BUG_ON(reg_offset > pdp->regs_size-4);
248 return ioread32(pdp->regs + reg_offset);
/* 32-bit MMIO write to the PDP core register window; bounds-checked like
 * pdp_read_reg. */
251 static void pdp_write_reg(struct adf_pdp_device *pdp,
252 resource_size_t reg_offset, u32 reg_value)
254 BUG_ON(reg_offset > pdp->regs_size-4);
255 iowrite32(reg_value, pdp->regs + reg_offset);
/* 32-bit MMIO write to the FBDC interface register window. */
258 static void pdp_write_fbdc_reg(struct adf_pdp_device *pdp,
259 resource_size_t reg_offset, u32 reg_value)
261 BUG_ON(reg_offset > pdp->fbdc_regs_size-4);
262 iowrite32(reg_value, pdp->fbdc_regs + reg_offset);
/* Poll budget for the i2c busy flag (iteration count, not a time unit). */
265 #define I2C_TIMEOUT 10000
/* Write one register of the ADV7511 (slave 0x7a = write address) via the
 * memory-mapped i2c master at pdp->i2c_regs. The offsets (0x04 slave addr,
 * 0x08 reg, 0x0c data, 0x14 go, 0x18 busy) are assumed from usage —
 * TODO confirm against the i2c master datasheet. Busy-waits; on timeout it
 * only logs, the write is not retried. */
267 static void pdp_write_i2c(struct adf_pdp_device *pdp, u32 reg_addr, u32 data)
271 iowrite32(0x7a, pdp->i2c_regs + 0x04);
272 iowrite32(reg_addr, pdp->i2c_regs + 0x08);
273 iowrite32(data, pdp->i2c_regs + 0x0c);
274 iowrite32(0x1, pdp->i2c_regs + 0x14);
276 for (i = 0; i < I2C_TIMEOUT; i++) {
277 if (ioread32(pdp->i2c_regs + 0x18) == 0)
281 if (i == I2C_TIMEOUT)
282 dev_err(&pdp->pdev->dev, "i2c write timeout\n");
/* Read one ADV7511 register (slave 0x7b = read address). Same polling scheme
 * as pdp_write_i2c; on timeout it logs and presumably returns an error value
 * (not visible here) before the final data read at offset 0x10. */
285 static u32 pdp_read_i2c(struct adf_pdp_device *pdp, u32 reg_addr)
289 iowrite32(0x7b, pdp->i2c_regs + 0x04);
290 iowrite32(reg_addr, pdp->i2c_regs + 0x08);
291 iowrite32(0x1, pdp->i2c_regs + 0x14);
293 for (i = 0; i < I2C_TIMEOUT; i++) {
294 if (ioread32(pdp->i2c_regs + 0x18) == 0)
298 if (i == I2C_TIMEOUT) {
299 dev_err(&pdp->pdev->dev, "i2c read timeout\n");
302 return ioread32(pdp->i2c_regs + 0x10);
/* devres release callback; intentionally empty — it exists mainly so its
 * address can key devres_find() lookups of the adf_pdp_device. */
305 static void pdp_devres_release(struct device *dev, void *res)
307 /* No extra cleanup needed */
/* Bytes-per-pixel lookup in pdp_format_table; WARNs (and presumably returns
 * 0 — return not visible) for unknown formats. */
310 static u32 pdp_format_bpp(u32 drm_format)
314 for (i = 0; pdp_format_table[i].drm_format != 0; i++) {
315 if (pdp_format_table[i].drm_format == drm_format)
316 return pdp_format_table[i].bytes_per_pixel;
318 WARN(1, "Unsupported drm format");
/* GRPH1PIXFMT register value for a drm fourcc; same table walk and WARN
 * behaviour as pdp_format_bpp. */
322 static u32 pdp_format(u32 drm_format)
326 for (i = 0; pdp_format_table[i].drm_format != 0; i++) {
327 if (pdp_format_table[i].drm_format == drm_format)
328 return pdp_format_table[i].pixfmt_word;
330 WARN(1, "Unsupported drm format");
/* Point the FBDC decompressor at base_addr (a card-relative address — the
 * caller subtracts pdata->memory_base) and set GRPH1STREN to start
 * scanning out. */
334 static void pdp_enable_scanout(struct adf_pdp_device *pdp, u32 base_addr)
338 /* Set the base address to the fbdc module */
339 pdp_write_fbdc_reg(pdp, PVR5__PDP_FBDC_INTRFC_BASE_ADDRESS,
341 /* Turn on scanout */
342 reg_value = pdp_read_reg(pdp, PVR5__PDP_PVR_PDP_GRPH1CTRL);
343 reg_value &= ~(PVR5__GRPH1STREN_MASK);
344 reg_value |= 0x1 << PVR5__GRPH1STREN_SHIFT;
345 pdp_write_reg(pdp, PVR5__PDP_PVR_PDP_GRPH1CTRL, reg_value);
/* Inverse of pdp_enable_scanout: clear GRPH1STREN, then zero the FBDC base
 * address so no stale buffer stays pinned by the display engine. */
348 static void pdp_disable_scanout(struct adf_pdp_device *pdp)
352 /* Turn off scanout */
353 reg_value = pdp_read_reg(pdp, PVR5__PDP_PVR_PDP_GRPH1CTRL);
354 reg_value &= ~(PVR5__GRPH1STREN_MASK);
355 pdp_write_reg(pdp, PVR5__PDP_PVR_PDP_GRPH1CTRL, reg_value);
356 /* Reset the base address in the fbdc module */
357 pdp_write_fbdc_reg(pdp, PVR5__PDP_FBDC_INTRFC_BASE_ADDRESS,
/* wait_event_timeout() predicate: true once the IRQ handler has marked a
 * vblank (vsync_triggered set to 1). */
361 static bool pdp_vsync_triggered(struct adf_pdp_device *pdp)
363 return atomic_read(&pdp->vsync_triggered) == 1;
/* Unmask the VBLNK0 interrupt in the PDP and enable the TC5 PDP line at the
 * Apollo interrupt controller; failures are logged only. */
366 static void pdp_enable_ints(struct adf_pdp_device *pdp)
371 reg_value = pdp_read_reg(pdp, PVR5__PDP_PVR_PDP_INTENAB);
372 reg_value &= ~(PVR5__INTEN_VBLNK0_MASK);
373 reg_value |= 0x1 << PVR5__INTEN_VBLNK0_SHIFT;
374 pdp_write_reg(pdp, PVR5__PDP_PVR_PDP_INTENAB, reg_value);
376 err = apollo_enable_interrupt(pdp->pdev->dev.parent,
377 APOLLO_INTERRUPT_TC5_PDP);
379 dev_err(&pdp->pdev->dev,
380 "apollo_enable_interrupt failed (%d)\n", err);
/* Mirror of pdp_enable_ints: mask VBLNK0 and disable the Apollo interrupt
 * line; failures are logged only. */
384 static void pdp_disable_ints(struct adf_pdp_device *pdp)
389 reg_value = pdp_read_reg(pdp, PVR5__PDP_PVR_PDP_INTENAB);
390 reg_value &= ~(PVR5__INTEN_VBLNK0_MASK);
391 pdp_write_reg(pdp, PVR5__PDP_PVR_PDP_INTENAB, reg_value);
393 err = apollo_disable_interrupt(pdp->pdev->dev.parent,
394 APOLLO_INTERRUPT_TC5_PDP);
396 dev_err(&pdp->pdev->dev,
397 "apollo_disable_interrupt failed (%d)\n", err);
401 static void pdp_post(struct adf_device *adf_dev, struct adf_post *cfg,
404 int num_validates_snapshot = *(int *)driver_state;
407 /* Set vsync wait timeout to 4x expected vsync */
408 struct adf_pdp_device *pdp = devres_find(adf_dev->dev,
409 pdp_devres_release, NULL, NULL);
411 msecs_to_jiffies((1000 / pdp->current_timings->v_refresh) * 4);
413 /* Null-flip handling, used to push buffers off screen during an error
414 * state to stop them blocking subsequent rendering */
415 if (cfg->n_bufs == 0) {
416 pdp_disable_scanout(pdp);
420 /* We don't support changing the configuration on the fly */
421 if (pdp->current_timings->h_display != cfg->bufs[0].w ||
422 pdp->current_timings->v_display != cfg->bufs[0].h ||
423 pdp->current_drm_format != cfg->bufs[0].format) {
424 dev_err(&pdp->pdev->dev, "Unsupported configuration on post\n");
428 WARN_ON(cfg->n_bufs != 1);
429 WARN_ON(cfg->mappings->sg_tables[0]->nents != 1);
431 spin_lock_irqsave(&gFlipLock, flags);
433 debugfs_dma_buf_set(cfg->bufs[0].dma_bufs[0]);
435 /* Set surface address and enable the scanouts */
436 pdp_enable_scanout(pdp, sg_phys(cfg->mappings->sg_tables[0]->sgl) -
437 pdp->pdata->memory_base);
439 atomic_set(&pdp->vsync_triggered, 0);
441 spin_unlock_irqrestore(&gFlipLock, flags);
443 /* Wait until the buffer is on-screen, so we know the previous buffer
444 * has been retired and off-screen.
446 * If vsync was already off when this post was serviced, we need to
447 * enable the vsync again briefly so the register updates we shadowed
448 * above get applied and we don't signal the fence prematurely. One
449 * vsync afterwards, we'll disable the vsync again.
451 if (!atomic_xchg(&pdp->vsync_state, 1))
452 pdp_enable_ints(pdp);
454 if (wait_event_timeout(pdp->vsync_wait_queue,
455 pdp_vsync_triggered(pdp), timeout) == 0) {
456 dev_err(&pdp->pdev->dev, "Post VSync wait timeout");
457 /* Undefined behaviour if this times out */
460 pdp->num_posts = num_validates_snapshot;
/* ADF .supports_event hook: only the interface object's VSYNC event appears
 * to be supported (full switch body not visible in this extract). */
463 static bool pdp_supports_event(struct adf_obj *obj, enum adf_event_type type)
466 case ADF_OBJ_INTERFACE:
469 case ADF_EVENT_VSYNC:
/* Apollo interrupt callback for the TC5 PDP line: deferred-disables vsync
 * when idle, and on VBLNK0 notifies ADF and wakes any poster blocked in
 * pdp_post. Runs under gFlipLock against pdp_post. */
480 static void pdp_irq_handler(void *data)
482 struct adf_pdp_device *pdp = data;
486 int_status = pdp_read_reg(pdp, PVR5__PDP_PVR_PDP_INTSTAT);
488 spin_lock_irqsave(&gFlipLock, flags);
490 /* If we're idle, and a vsync disable was requested, do it now.
491 * This code assumes that the HWC will always re-enable vsync
492 * explicitly before posting new configurations.
494 if (atomic_read(&pdp->num_validates) == pdp->num_posts) {
495 if (!atomic_read(&pdp->requested_vsync_state)) {
496 pdp_disable_ints(pdp);
497 atomic_set(&pdp->vsync_state, 0);
501 if ((int_status & PVR5__INTS_VBLNK0_MASK)) {
502 /* Notify the framework of the just occurred vblank */
503 adf_vsync_notify(&pdp->adf_interface, ktime_get());
504 atomic_set(&pdp->vsync_triggered, 1);
505 wake_up(&pdp->vsync_wait_queue);
508 spin_unlock_irqrestore(&gFlipLock, flags);
/* ADF .set_event hook: record the HWC's requested vsync state; enabling
 * takes effect immediately (pdp_enable_ints), disabling is deferred to the
 * IRQ handler once the pipeline is idle. */
511 static void pdp_set_event(struct adf_obj *obj, enum adf_event_type type,
514 struct adf_pdp_device *pdp;
518 case ADF_EVENT_VSYNC:
520 pdp = devres_find(obj->parent->dev, pdp_devres_release,
522 atomic_set(&pdp->requested_vsync_state, enabled);
524 old = atomic_xchg(&pdp->vsync_state, enabled);
526 pdp_enable_ints(pdp);
/* Power up the ADV7511 HDMI transmitter by writing its power register (0x41)
 * repeatedly until it reads back 0x10. NOTE(review): the success message is
 * emitted with dev_err — looks like deliberate always-on tracing on this
 * bring-up driver rather than an error. */
535 static int pdp_unblank_hdmi(struct adf_pdp_device *pdp)
540 /* Powering up the ADV7511 sometimes doesn't come up immediately, so
541 * give multiple power ons.
543 for (i = 0; i < 6; i++) {
544 pdp_write_i2c(pdp, 0x41, 0x10);
548 reg_value = pdp_read_i2c(pdp, 0x41);
549 if (reg_value == 0x10) {
550 dev_err(&pdp->pdev->dev, "i2c: ADV7511 powered up\n");
552 dev_err(&pdp->pdev->dev, "i2c: Failed to power up ADV7511\n");
/* Put the ADV7511 into power-down (0x50 to register 0x41). */
559 static void pdp_blank_hdmi(struct adf_pdp_device *pdp)
561 pdp_write_i2c(pdp, 0x41, 0x50);
/* Bring-up sequence for the ADV7511 HDMI transmitter: program the i2c master
 * clock divider, sanity-check two chip ID registers, wait for hot-plug /
 * monitor sense, power the chip up, then write the vendor-documented fixed
 * register set and the 720p60 video configuration. All failure paths only
 * log (dev_err is also used for progress tracing throughout). Register
 * values follow the ADV7511 programming guide — TODO confirm against it. */
564 static void pdp_enable_hdmi(struct adf_pdp_device *pdp)
570 Assuming i2c_master clock is at 50 MHz */
571 iowrite32(0x18, pdp->i2c_regs);
573 reg_value = pdp_read_i2c(pdp, 0xf5);
574 if (reg_value != 0x75) {
575 dev_err(&pdp->pdev->dev, "i2c: 1st register read failed: %x\n",
580 reg_value = pdp_read_i2c(pdp, 0xf6);
581 if (reg_value != 0x11) {
582 dev_err(&pdp->pdev->dev, "i2c: 2nd register read failed: %x\n",
587 /* Check the HPD and Monitor Sense */
588 for (i = 0; i < 50; i++) {
589 reg_value = pdp_read_i2c(pdp, 0x42);
590 if (reg_value == 0x70) {
591 dev_err(&pdp->pdev->dev, "i2c: Hot Plug and Monitor Sense detected ...\n");
593 } else if (reg_value == 0x50) {
594 dev_err(&pdp->pdev->dev, "i2c: Only Hot Plug detected ...\n");
595 } else if (reg_value == 0x03) {
596 dev_err(&pdp->pdev->dev, "i2c: Only Monitor Sense detected ...\n");
600 if (pdp_unblank_hdmi(pdp))
603 /* Writing the fixed registers */
604 pdp_write_i2c(pdp, 0x98, 0x03);
605 pdp_write_i2c(pdp, 0x9a, 0xe0);
606 pdp_write_i2c(pdp, 0x9c, 0x30);
607 pdp_write_i2c(pdp, 0x9d, 0x61);
608 pdp_write_i2c(pdp, 0xa2, 0xa4);
609 pdp_write_i2c(pdp, 0xa3, 0xa4);
610 pdp_write_i2c(pdp, 0xe0, 0xd0);
611 pdp_write_i2c(pdp, 0xf9, 0x00);
613 /* Starting video input */
615 pdp_write_i2c(pdp, 0x0c, 0x80);
617 /* Select input video format */
618 pdp_write_i2c(pdp, 0x15, 0x10);
620 /* Select Colour Depth and output format */
621 pdp_write_i2c(pdp, 0x16, 0x30);
623 /* Select Aspect Ratio */
624 pdp_write_i2c(pdp, 0x17, 0x02);
627 pdp_write_i2c(pdp, 0x48, 0x00);
628 pdp_write_i2c(pdp, 0x55, 0x12);
630 /* Select Picture Aspect Ratio */
631 pdp_write_i2c(pdp, 0x56, 0x28);
634 pdp_write_i2c(pdp, 0x40, 0x80);
637 pdp_write_i2c(pdp, 0x4c, 0x04);
639 /* Select HDMI Mode */
640 pdp_write_i2c(pdp, 0xaf, 0x16);
642 /* Set VIC to Receiver */
643 pdp_write_i2c(pdp, 0x3d, 0x04);
/* Poll the detected-VIC register until the receiver reports 720p60 16:9. */
645 for (i = 0; i < 50; i++) {
646 reg_value = pdp_read_i2c(pdp, 0x3e);
647 if (reg_value == 0x10) {
648 dev_err(&pdp->pdev->dev, "i2c: VIC detected as 720P, 60 Hz, 16:9...\n");
654 dev_err(&pdp->pdev->dev, "i2c: Desired VIC not detected\n");
656 /* Write to PD register again */
657 pdp_write_i2c(pdp, 0x41, 0x10);
/* ADF .modeset hook: fully (re)program the PDP for the requested mode —
 * timing registers, plane geometry/format, FBDC tile configuration — then
 * re-enable sync generation and kick the HDMI transmitter. Register writes
 * are applied instantly (double buffering bypassed) while the display is in
 * power-down, then switched back to vblank-latched updates at the end. */
663 static int pdp_modeset(struct adf_interface *intf,
664 struct drm_mode_modeinfo *mode)
666 const struct pdp_timing_data *tdata;
667 struct adf_pdp_device *pdp;
668 int mode_id, err = 0;
671 pdp = devres_find(intf->base.parent->dev, pdp_devres_release,
673 mode_id = pdp_mode_id(pdp, mode->vdisplay, mode->hdisplay);
674 tdata = pdp_timing_data(pdp, mode_id);
677 dev_err(&pdp->pdev->dev, "Failed to find mode for %ux%u\n",
678 mode->hdisplay, mode->vdisplay);
683 /* Make sure all the following register writes are applied instantly */
684 reg_value = 0x1 << PVR5__BYPASS_DOUBLE_BUFFERING_SHIFT;
685 reg_value |= 0x1 << PVR5__REGISTERS_VALID_SHIFT;
686 pdp_write_reg(pdp, PVR5__PDP_PVR_PDP_REGISTER_UPDATE_CTRL, reg_value);
688 /* Power down mode */
689 reg_value = 0x1 << PVR5__POWERDN_SHIFT;
690 pdp_write_reg(pdp, PVR5__PDP_PVR_PDP_SYNCCTRL, reg_value);
692 /* Background color (green) */
693 reg_value = 0x0099FF66;
694 pdp_write_reg(pdp, PVR5__PDP_PVR_PDP_BGNDCOL, reg_value);
696 /* Set alpha blend mode to global alpha blending (10b / 2h) and
697 * disable everything else.
699 reg_value = 0x2 << PVR5__GRPH1BLEND_SHIFT;
700 pdp_write_reg(pdp, PVR5__PDP_PVR_PDP_GRPH1CTRL, reg_value);
/* Fully opaque global alpha. */
703 reg_value = 0xff << PVR5__GRPH1GALPHA_SHIFT;
704 pdp_write_reg(pdp, PVR5__PDP_PVR_PDP_GRPH1BLND, reg_value);
706 /* Reset base addr of the non-FBCDC part. This is not used. */
707 pdp_write_reg(pdp, PVR5__PDP_PVR_PDP_GRPH1_BASEADDR, 0);
709 /* Graphics video pixel format:
710 * 01000b / 8h 8-bit alpha + 24-bit rgb888 [RGBA].
712 pdp->current_drm_format = DRM_FORMAT_BGRA8888_DIRECT_16x4;
713 reg_value = pdp_format(pdp->current_drm_format)
714 << PVR5__GRPH1PIXFMT_SHIFT;
715 pdp_write_reg(pdp, PVR5__PDP_PVR_PDP_GRPH1SURF, reg_value);
717 /* Reset position of the plane */
718 reg_value = 0 << PVR5__GRPH1XSTART_SHIFT;
719 reg_value |= 0 << PVR5__GRPH1YSTART_SHIFT;
720 pdp_write_reg(pdp, PVR5__PDP_PVR_PDP_GRPH1POSN, reg_value);
722 /* Stride of surface in 16byte words - 1 */
/* NOTE: 4 = bytes per pixel of the single supported format. */
723 reg_value = (tdata->h_display * 4 / 16 - 1) << PVR5__GRPH1STRIDE_SHIFT;
724 pdp_write_reg(pdp, PVR5__PDP_PVR_PDP_GRPH1STRIDE, reg_value);
727 * Width of surface in pixels - 1
728 * Height of surface in lines - 1 */
729 reg_value = (tdata->h_display - 1) << PVR5__GRPH1WIDTH_SHIFT;
730 reg_value |= (tdata->v_display - 1) << PVR5__GRPH1HEIGHT_SHIFT;
731 pdp_write_reg(pdp, PVR5__PDP_PVR_PDP_GRPH1SIZE, reg_value);
/* Horizontal sync timings. */
734 reg_value = tdata->h_back_porch << PVR5__HBPS_SHIFT;
735 reg_value |= tdata->h_total << PVR5__HT_SHIFT;
736 pdp_write_reg(pdp, PVR5__PDP_PVR_PDP_HSYNC1, reg_value);
737 reg_value = tdata->h_active_start << PVR5__HAS_SHIFT;
738 reg_value |= tdata->h_left_border << PVR5__HLBS_SHIFT;
739 pdp_write_reg(pdp, PVR5__PDP_PVR_PDP_HSYNC2, reg_value);
740 reg_value = tdata->h_front_porch << PVR5__HFPS_SHIFT;
741 reg_value |= tdata->h_right_border << PVR5__HRBS_SHIFT;
742 pdp_write_reg(pdp, PVR5__PDP_PVR_PDP_HSYNC3, reg_value);
/* Vertical sync timings. */
745 reg_value = tdata->v_back_porch << PVR5__VBPS_SHIFT;
746 reg_value |= tdata->v_total << PVR5__VT_SHIFT;
747 pdp_write_reg(pdp, PVR5__PDP_PVR_PDP_VSYNC1, reg_value);
748 reg_value = tdata->v_active_start << PVR5__VAS_SHIFT;
749 reg_value |= tdata->v_top_border << PVR5__VTBS_SHIFT;
750 pdp_write_reg(pdp, PVR5__PDP_PVR_PDP_VSYNC2, reg_value);
751 reg_value = tdata->v_front_porch << PVR5__VFPS_SHIFT;
752 reg_value |= tdata->v_bottom_border << PVR5__VBBS_SHIFT;
753 pdp_write_reg(pdp, PVR5__PDP_PVR_PDP_VSYNC3, reg_value);
755 /* Horizontal data enable */
756 reg_value = tdata->h_left_border << PVR5__HDES_SHIFT;
757 reg_value |= tdata->h_front_porch << PVR5__HDEF_SHIFT;
758 pdp_write_reg(pdp, PVR5__PDP_PVR_PDP_HDECTRL, reg_value);
760 /* Vertical data enable */
761 reg_value = tdata->v_top_border << PVR5__VDES_SHIFT;
762 reg_value |= tdata->v_front_porch << PVR5__VDEF_SHIFT;
763 pdp_write_reg(pdp, PVR5__PDP_PVR_PDP_VDECTRL, reg_value);
765 /* Vertical event start and vertical fetch start */
766 reg_value = tdata->v_back_porch << PVR5__VFETCH_SHIFT;
767 reg_value |= tdata->v_bottom_border << PVR5__VEVENT_SHIFT;
768 pdp_write_reg(pdp, PVR5__PDP_PVR_PDP_VEVENT, reg_value);
770 /* Now enable the fbdc module (direct_16x4) */
771 /* Set the number of tiles per plane */
/* 16x4-pixel tiles, hence the /(16*4). */
772 pdp_write_fbdc_reg(pdp, PVR5__PDP_FBDC_INTRFC_NUM_TILES,
773 (tdata->h_display * tdata->v_display) / (16 * 4));
774 /* Set the number of the tile per line */
775 pdp_write_fbdc_reg(pdp, PVR5__PDP_FBDC_INTRFC_PER_LINE,
776 tdata->h_display / 16);
777 /* Set the color format */
778 pdp_write_fbdc_reg(pdp, PVR5__PDP_FBDC_INTRFC_PIXEL_FORMAT, 0xc);
779 /* Reset base address */
780 pdp_write_fbdc_reg(pdp, PVR5__PDP_FBDC_INTRFC_BASE_ADDRESS, 0x0);
781 /* Set invalidate request */
/* FBDC invalidate follows the current VSPOL polarity. */
782 reg_value = pdp_read_reg(pdp, PVR5__PDP_PVR_PDP_SYNCCTRL);
783 if ((reg_value & PVR5__VSPOL_MASK) >> PVR5__VSPOL_SHIFT == 0x1) {
784 pdp_write_fbdc_reg(pdp,
785 PVR5__PDP_FBDC_INTRFC_INVALIDATE_REQUEST, 0x1);
787 pdp_write_fbdc_reg(pdp,
788 PVR5__PDP_FBDC_INTRFC_INVALIDATE_REQUEST, 0x0);
791 /* Enable vsync again */
792 reg_value = pdp_read_reg(pdp, PVR5__PDP_PVR_PDP_SYNCCTRL);
793 reg_value &= ~(PVR5__SYNCACTIVE_MASK);
794 reg_value |= 0x1 << PVR5__SYNCACTIVE_SHIFT;
795 reg_value &= ~(PVR5__BLNKPOL_MASK);
796 reg_value |= 0x1 << PVR5__BLNKPOL_SHIFT;
797 pdp_write_reg(pdp, PVR5__PDP_PVR_PDP_SYNCCTRL, reg_value);
/* Switch back to vblank-latched register updates. */
800 reg_value = 0x1 << PVR5__USE_VBLANK_SHIFT;
801 reg_value |= 0x1 << PVR5__REGISTERS_VALID_SHIFT;
802 pdp_write_reg(pdp, PVR5__PDP_PVR_PDP_REGISTER_UPDATE_CTRL, reg_value);
804 intf->current_mode = *mode;
805 pdp->current_timings = tdata;
807 pdp_enable_hdmi(pdp);
/* ADF .blank hook: toggle the POWERDN bit for DPMS on/off; other DPMS states
 * are rejected. The HDMI blank/unblank calls are deliberately commented
 * out — presumably left for debugging; TODO confirm intent. */
813 static int pdp_blank(struct adf_interface *intf,
816 struct adf_pdp_device *pdp;
819 pdp = devres_find(intf->base.parent->dev, pdp_devres_release,
822 if (state != DRM_MODE_DPMS_OFF && state != DRM_MODE_DPMS_ON)
825 reg_value = pdp_read_reg(pdp, PVR5__PDP_PVR_PDP_SYNCCTRL);
827 case DRM_MODE_DPMS_OFF:
828 reg_value &= ~(PVR5__POWERDN_MASK);
829 reg_value |= 0x1 << PVR5__POWERDN_SHIFT;
830 /* pdp_blank_hdmi(pdp);*/
832 case DRM_MODE_DPMS_ON:
833 reg_value &= ~(PVR5__POWERDN_MASK);
834 /* pdp_unblank_hdmi(pdp);*/
837 pdp_write_reg(pdp, PVR5__PDP_PVR_PDP_SYNCCTRL, reg_value);
/* ADF .alloc_simple_buffer hook: allocate a w*h*bpp ion buffer from the
 * platform-configured heap and export it as a dma-buf. The tight packed
 * pitch (w * bpp) is returned; offset handling is not visible in this
 * extract. Cleanup on export failure frees the ion handle. */
842 static int pdp_alloc_simple_buffer(struct adf_interface *intf, u16 w, u16 h,
843 u32 format, struct dma_buf **dma_buf, u32 *offset, u32 *pitch)
845 u32 size = w * h * pdp_format_bpp(format);
846 struct adf_pdp_device *pdp;
847 struct ion_handle *hdl;
850 pdp = devres_find(intf->base.parent->dev, pdp_devres_release,
852 hdl = ion_alloc(pdp->ion_client, size, 0,
853 (1 << pdp->pdata->ion_heap_id), 0);
856 dev_err(&pdp->pdev->dev, "ion_alloc failed (%d)\n", err);
859 *dma_buf = ion_share_dma_buf(pdp->ion_client, hdl);
860 if (IS_ERR(*dma_buf)) {
862 dev_err(&pdp->pdev->dev,
863 "ion_share_dma_buf failed (%d)\n", err);
864 goto err_free_buffer;
866 *pitch = w * pdp_format_bpp(format);
869 ion_free(pdp->ion_client, hdl);
/* ADF .describe_simple_post hook: fill the custom post data with a single
 * full-screen, non-transformed, premultiplied-alpha buffer description.
 * NOTE(review): full_screen uses the compile-time ADF_PDP_WIDTH/HEIGHT, not
 * the runtime module parameters — confirm this is intended. */
874 static int pdp_describe_simple_post(struct adf_interface *intf,
875 struct adf_buffer *fb, void *data, size_t *size)
877 struct adf_post_ext *post_ext = data;
880 struct drm_clip_rect full_screen = {
882 .y2 = ADF_PDP_HEIGHT,
885 /* NOTE: an upstream ADF bug means we can't test *size instead */
886 BUG_ON(ADF_MAX_CUSTOM_DATA_SIZE < sizeof(struct adf_post_ext) +
887 1 * sizeof(struct adf_buffer_config_ext));
889 *size = sizeof(struct adf_post_ext) +
890 1 * sizeof(struct adf_buffer_config_ext);
/* post_id is presumably a function-static counter — not visible here. */
892 post_ext->post_id = ++post_id;
894 post_ext->bufs_ext[0].crop = full_screen;
895 post_ext->bufs_ext[0].display = full_screen;
896 post_ext->bufs_ext[0].transform = ADF_BUFFER_TRANSFORM_NONE_EXT;
897 post_ext->bufs_ext[0].blend_type = ADF_BUFFER_BLENDING_PREMULT_EXT;
898 post_ext->bufs_ext[0].plane_alpha = 0xff;
/* ADF .open hook: track client count so release can detect last-close. */
904 adf_pdp_open(struct adf_obj *obj, struct inode *inode, struct file *file)
906 struct adf_device *dev = (struct adf_device *)obj->parent;
907 struct adf_pdp_device *pdp;
909 pdp = devres_find(dev->dev, pdp_devres_release, NULL, NULL);
911 atomic_inc(&pdp->refcount);
/* ADF .release hook: on last close, unblock any waiter stuck in pdp_post and
 * queue a "null" flip so the display engine drops its buffer references
 * (works around ADF keeping buffers pinned after all clients close). */
916 adf_pdp_release(struct adf_obj *obj, struct inode *inode, struct file *file)
918 struct adf_device *dev = (struct adf_device *)obj->parent;
919 struct sync_fence *release_fence;
920 struct adf_pdp_device *pdp;
922 pdp = devres_find(dev->dev, pdp_devres_release, NULL, NULL);
/* Not the last client yet: nothing to do. */
924 if (atomic_dec_return(&pdp->refcount))
927 /* Make sure we have no outstanding posts waiting */
928 atomic_set(&pdp->vsync_triggered, 1);
929 wake_up_all(&pdp->vsync_wait_queue);
930 /* This special "null" flip works around a problem with ADF
931 * which leaves buffers pinned by the display engine even
932 * after all ADF clients have closed.
934 * The "null" flip is pipelined like any other. The user won't
935 * be able to unload this module until it has been posted.
937 release_fence = adf_device_post(dev, NULL, 0, NULL, 0, NULL, 0);
938 if (IS_ERR_OR_NULL(release_fence)) {
940 "Failed to queue null flip command (err=%d).\n",
941 (int)PTR_ERR(release_fence));
/* We only needed the flip queued; drop our reference to its fence. */
945 sync_fence_put(release_fence);
/* ADF custom-format validator: accept a buffer iff its format appears in
 * pdp_format_table (return values not visible in this extract). */
948 static int adf_img_validate_custom_format(struct adf_device *dev,
949 struct adf_buffer *buf)
953 for (i = 0; pdp_format_table[i].drm_format != 0; i++) {
954 if (pdp_format_table[i].drm_format == buf->format)
/* ADF .validate hook: run the generic simple validation, then snapshot the
 * incremented num_validates counter into driver_state for pdp_post.
 * NOTE(review): the kmalloc return value is dereferenced without a NULL
 * check — would oops under memory pressure; worth fixing in the full
 * source. */
960 static int pdp_validate(struct adf_device *dev, struct adf_post *cfg,
963 struct adf_pdp_device *pdp;
966 pdp = devres_find(dev->dev, pdp_devres_release, NULL, NULL);
968 err = adf_img_validate_simple(dev, cfg, driver_state);
969 if (err == 0 && cfg->mappings) {
970 /* We store a snapshot of num_validates in driver_state at the
971 * time validate was called, which will be passed to the post
972 * function. This snapshot is copied into (i.e. overwrites)
973 * num_posts, rather then simply incrementing num_posts, to
974 * handle cases e.g. during fence timeouts where validates
975 * are called without corresponding posts.
977 int *validates = kmalloc(sizeof(*validates), GFP_KERNEL);
978 *validates = atomic_inc_return(&pdp->num_validates);
979 *driver_state = validates;
981 *driver_state = NULL;
/* ADF .state_free hook: presumably kfree()s the validate snapshot allocated
 * in pdp_validate (body not visible in this extract). */
986 static void pdp_state_free(struct adf_device *dev, void *driver_state)
/* ADF device operations: lifecycle (open/release), generic IMG ioctl, and
 * the validate/post/state_free pipeline defined above. */
991 static struct adf_device_ops adf_pdp_device_ops = {
992 .owner = THIS_MODULE,
994 .open = adf_pdp_open,
995 .release = adf_pdp_release,
996 .ioctl = adf_img_ioctl,
998 .validate_custom_format = adf_img_validate_custom_format,
999 .validate = pdp_validate,
1001 .state_free = pdp_state_free,
/* ADF interface operations: vsync event plumbing, modeset, and simple-buffer
 * allocation/description helpers. */
1004 static struct adf_interface_ops adf_pdp_interface_ops = {
1006 .supports_event = pdp_supports_event,
1007 .set_event = pdp_set_event,
1009 .modeset = pdp_modeset,
1011 .alloc_simple_buffer = pdp_alloc_simple_buffer,
1012 .describe_simple_post = pdp_describe_simple_post,
/* Overlay engine ops: advertise the single supported (FBDC BGRA) format. */
1015 static struct adf_overlay_engine_ops adf_pdp_overlay_ops = {
1016 .supported_formats = &pdp_supported_formats[0],
1017 .n_supported_formats = NUM_SUPPORTED_FORMATS,
/* Platform-driver probe: devres-allocate the device state, enable the parent
 * PCI device, create the ion client, map the three register windows, init
 * the ADF device/interface/overlay triple, build the mode list, perform the
 * initial modeset, hook the Apollo interrupt, and expose the debugfs raw
 * dump. Error paths unwind in reverse via the labelled cleanup chain. */
1020 static int adf_pdp_probe_device(struct platform_device *pdev)
1022 struct apollo_pdp_platform_data *pdata = pdev->dev.platform_data;
1023 struct pci_dev *pci_dev = to_pci_dev(pdev->dev.parent);
1024 int err = 0, i, default_mode_id;
1025 struct adf_pdp_device *pdp;
1026 struct resource *registers;
1027 u32 core_id, core_rev;
/* devres ties pdp's lifetime to the device; pdp_devres_release is the key
 * used by every later devres_find() lookup. */
1029 pdp = devres_alloc(pdp_devres_release, sizeof(*pdp), GFP_KERNEL);
1034 devres_add(&pdev->dev, pdp);
1039 err = pci_enable_device(pci_dev);
1042 "Failed to enable PDP pci device (%d)\n", err);
1046 atomic_set(&pdp->refcount, 0);
1047 atomic_set(&pdp->num_validates, 0);
1050 pdp->ion_client = ion_client_create(pdata->ion_device, "adf_pdp");
1051 if (IS_ERR(pdp->ion_client)) {
1052 err = PTR_ERR(pdp->ion_client);
1054 "Failed to create PDP ION client (%d)\n", err);
1055 goto err_disable_pci;
/* Map the PDP core, FBDC and ADV7511 i2c register windows. */
1058 registers = platform_get_resource_byname(pdev,
1061 pdp->regs = devm_ioremap_resource(&pdev->dev, registers);
1062 if (IS_ERR(pdp->regs)) {
1063 err = PTR_ERR(pdp->regs);
1064 dev_err(&pdev->dev, "Failed to map PDP registers (%d)\n", err);
1065 goto err_destroy_ion_client;
1067 pdp->regs_size = resource_size(registers);
1069 registers = platform_get_resource_byname(pdev,
1071 "tc5-pdp2-fbdc-regs");
1072 pdp->fbdc_regs = devm_ioremap_resource(&pdev->dev, registers);
1073 if (IS_ERR(pdp->fbdc_regs)) {
1074 err = PTR_ERR(pdp->fbdc_regs);
1075 dev_err(&pdev->dev, "Failed to map PDP fbdc registers (%d)\n",
1077 goto err_destroy_ion_client;
1079 pdp->fbdc_regs_size = resource_size(registers);
1081 registers = platform_get_resource_byname(pdev,
1083 "tc5-adv5711-regs");
1084 pdp->i2c_regs = devm_ioremap_resource(&pdev->dev, registers);
1085 if (IS_ERR(pdp->i2c_regs)) {
1086 err = PTR_ERR(pdp->i2c_regs);
1087 dev_err(&pdev->dev, "Failed to map ADV5711 i2c registers (%d)\n",
1089 goto err_destroy_ion_client;
1091 pdp->i2c_regs_size = resource_size(registers);
1093 core_id = pdp_read_reg(pdp, PVR5__PDP_PVR_PDP_CORE_ID);
1094 core_rev = pdp_read_reg(pdp, PVR5__PDP_PVR_PDP_CORE_REV);
/* Identification banner (dev_err used as always-on tracing here). */
1096 dev_err(&pdev->dev, "pdp2 core id/rev: %d.%d.%d/%d.%d.%d\n",
1097 (core_id & PVR5__GROUP_ID_MASK) >> PVR5__GROUP_ID_SHIFT,
1098 (core_id & PVR5__CORE_ID_MASK) >> PVR5__CORE_ID_SHIFT,
1099 (core_id & PVR5__CONFIG_ID_MASK) >> PVR5__CONFIG_ID_SHIFT,
1100 (core_rev & PVR5__MAJOR_REV_MASK) >> PVR5__MAJOR_REV_SHIFT,
1101 (core_rev & PVR5__MINOR_REV_MASK) >> PVR5__MINOR_REV_SHIFT,
1102 (core_rev & PVR5__MAINT_REV_MASK) >> PVR5__MAINT_REV_SHIFT);
1105 err = adf_device_init(&pdp->adf_device, &pdp->pdev->dev,
1106 &adf_pdp_device_ops, "pdp_device");
1108 dev_err(&pdev->dev, "Failed to init ADF device (%d)\n", err);
1109 goto err_destroy_ion_client;
1112 err = adf_interface_init(&pdp->adf_interface, &pdp->adf_device,
1113 ADF_INTF_DVI, 0, ADF_INTF_FLAG_PRIMARY, &adf_pdp_interface_ops,
1116 dev_err(&pdev->dev, "Failed to init ADF interface (%d)\n", err);
1117 goto err_destroy_adf_device;
1120 err = adf_overlay_engine_init(&pdp->adf_overlay, &pdp->adf_device,
1121 &adf_pdp_overlay_ops, "pdp_overlay");
1123 dev_err(&pdev->dev, "Failed to init ADF overlay (%d)\n", err);
1124 goto err_destroy_adf_interface;
1127 err = adf_attachment_allow(&pdp->adf_device, &pdp->adf_overlay,
1128 &pdp->adf_interface);
1130 dev_err(&pdev->dev, "Failed to attach overlay (%d)\n", err);
1131 goto err_destroy_adf_overlay;
/* Build the drm_mode_modeinfo list mirrored from pdp_supported_modes. */
1134 pdp->num_supported_modes = pdp_mode_count(pdp);
1135 pdp->supported_modes = kzalloc(sizeof(*pdp->supported_modes)
1136 * pdp->num_supported_modes, GFP_KERNEL);
1138 if (!pdp->supported_modes) {
1139 dev_err(&pdev->dev, "Failed to allocate supported modeinfo structs\n");
1141 goto err_destroy_adf_overlay;
1144 for (i = 0; i < pdp->num_supported_modes; i++)
1145 pdp_mode_to_drm_mode(pdp, i, &pdp->supported_modes[i]);
/* Fall back to mode 0 if the module-parameter geometry is unsupported. */
1147 default_mode_id = pdp_mode_id(pdp, pdp_display_height,
1149 if (default_mode_id == -1) {
1150 default_mode_id = 0;
1151 dev_err(&pdev->dev, "No modeline found for requested display size (%dx%d)\n",
1152 pdp_display_width, pdp_display_height);
1155 /* Initial modeset... */
1156 err = pdp_modeset(&pdp->adf_interface,
1157 &pdp->supported_modes[default_mode_id]);
1159 dev_err(&pdev->dev, "Initial modeset failed (%d)\n", err);
1160 goto err_destroy_modelist;
1163 err = adf_hotplug_notify_connected(&pdp->adf_interface,
1164 pdp->supported_modes, pdp->num_supported_modes);
1166 dev_err(&pdev->dev, "Initial hotplug notify failed (%d)\n",
1168 goto err_destroy_modelist;
1170 err = apollo_set_interrupt_handler(pdp->pdev->dev.parent,
1171 APOLLO_INTERRUPT_TC5_PDP,
1172 pdp_irq_handler, pdp);
1174 dev_err(&pdev->dev, "Failed to set interrupt handler (%d)\n",
1176 goto err_destroy_modelist;
1179 init_waitqueue_head(&pdp->vsync_wait_queue);
1180 atomic_set(&pdp->requested_vsync_state, 0);
1181 atomic_set(&pdp->vsync_state, 0);
/* Best-effort debugfs node; failure is non-fatal. */
1183 if (debugfs_dma_buf_init("pdp_raw"))
1184 dev_err(&pdev->dev, "Failed to create debug fs file for raw access\n");
/* Unwind chain — strictly the reverse of the setup order above. */
1187 err_destroy_modelist:
1188 kfree(pdp->supported_modes);
1189 err_destroy_adf_overlay:
1190 adf_overlay_engine_destroy(&pdp->adf_overlay);
1191 err_destroy_adf_interface:
1192 adf_interface_destroy(&pdp->adf_interface);
1193 err_destroy_adf_device:
1194 adf_device_destroy(&pdp->adf_device);
1195 err_destroy_ion_client:
1196 ion_client_destroy(pdp->ion_client);
1198 pci_disable_device(pci_dev);
1200 dev_err(&pdev->dev, "Failed to initialise PDP device\n");
/* Platform-driver remove: tear everything down in reverse probe order —
 * debugfs, scanout, interrupts, HDMI, mode list, ADF objects, ion client,
 * PCI. */
1204 static int adf_pdp_remove_device(struct platform_device *pdev)
1206 struct pci_dev *pci_dev = to_pci_dev(pdev->dev.parent);
1207 struct adf_pdp_device *pdp;
1210 pdp = devres_find(&pdev->dev, pdp_devres_release, NULL, NULL);
1212 debugfs_dma_buf_deinit();
1214 /* Disable scanout */
1215 pdp_disable_scanout(pdp);
1216 pdp_disable_ints(pdp);
/* Detach our IRQ callback (handler argument presumably NULLed on the
 * elided continuation line). */
1217 apollo_set_interrupt_handler(pdp->pdev->dev.parent,
1218 APOLLO_INTERRUPT_TC5_PDP,
1221 pdp_blank_hdmi(pdp);
1222 kfree(pdp->supported_modes);
1223 adf_overlay_engine_destroy(&pdp->adf_overlay);
1224 adf_interface_destroy(&pdp->adf_interface);
1225 adf_device_destroy(&pdp->adf_device);
1226 ion_client_destroy(pdp->ion_client);
1227 pci_disable_device(pci_dev);
/* Platform-driver shutdown: intentionally empty (remove handles cleanup). */
1231 static void adf_pdp_shutdown_device(struct platform_device *pdev)
1233 /* No cleanup needed, all done in remove_device */
/* Platform-device id table matching the Apollo-registered PDP device. */
1236 static struct platform_device_id pdp_platform_device_id_table[] = {
1237 { .name = APOLLO_DEVICE_NAME_PDP, .driver_data = 0 },
/* Driver registration glue tying the probe/remove/shutdown hooks together. */
1241 static struct platform_driver pdp_platform_driver = {
1242 .probe = adf_pdp_probe_device,
1243 .remove = adf_pdp_remove_device,
1244 .shutdown = adf_pdp_shutdown_device,
1248 .id_table = pdp_platform_device_id_table,
/* Module entry point: register the platform driver. */
1251 static int __init adf_pdp_init(void)
1253 return platform_driver_register(&pdp_platform_driver);
/* Module exit point: unregister the platform driver. */
1256 static void __exit adf_pdp_exit(void)
1258 platform_driver_unregister(&pdp_platform_driver);
1261 module_init(adf_pdp_init);
1262 module_exit(adf_pdp_exit);