1 /* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
2 /* vi: set ts=8 sw=8 sts=8: */
3 /*************************************************************************/ /*!
5 @Codingstyle LinuxKernel
6 @Copyright Copyright (c) Imagination Technologies Ltd. All Rights Reserved
7 @License Dual MIT/GPLv2
9 The contents of this file are subject to the MIT license as set out below.
11 Permission is hereby granted, free of charge, to any person obtaining a copy
12 of this software and associated documentation files (the "Software"), to deal
13 in the Software without restriction, including without limitation the rights
14 to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
15 copies of the Software, and to permit persons to whom the Software is
16 furnished to do so, subject to the following conditions:
18 The above copyright notice and this permission notice shall be included in
19 all copies or substantial portions of the Software.
21 Alternatively, the contents of this file may be used under the terms of
22 the GNU General Public License Version 2 ("GPL") in which case the provisions
23 of GPL are applicable instead of those above.
25 If you wish to allow use of your version of this file only under the terms of
26 GPL, and not to allow others to use your version of this file under the terms
27 of the MIT license, indicate your decision by deleting the provisions above
28 and replace them with the notice and other provisions required by GPL as set
29 out in the file called "GPL-COPYING" included in this distribution. If you do
30 not delete the provisions above, a recipient may use your version of this file
31 under the terms of either the MIT license or GPL.
33 This License is also included in this distribution in the file called
36 EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
37 PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
38 BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
39 PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
40 COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
41 IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
42 CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
43 */ /**************************************************************************/
45 #include <linux/version.h>
46 #include <linux/console.h>
47 #include <linux/dma-buf.h>
48 #include <linux/uaccess.h>
49 #include <linux/module.h>
52 #include <drm/drm_fourcc.h>
54 #include <video/adf.h>
55 #include <video/adf_fbdev.h>
56 #include <video/adf_client.h>
58 #include <adf/adf_ext.h>
60 /* for sync_fence_put */
61 #include PVR_ANDROID_SYNC_HEADER
63 #include "adf_common.h"
66 #error adf_fbdev needs Linux framebuffer support. Enable it in your kernel.
69 MODULE_AUTHOR("Imagination Technologies Ltd. <gpl-support@imgtec.com>");
70 MODULE_LICENSE("Dual MIT/GPL");
/* NOTE: This is just an example of how to use adf. You should NOT use this
 * module in a production environment. It is meaningless to layer adf
 * on top of fbdev, as adf is more flexible than fbdev and adf itself
 * provides fbdev emulation. Do not use this implementation generally!
 */
78 #define DRVNAME "adf_fbdev"
80 #define FALLBACK_REFRESH_RATE 60
81 #define FALLBACK_DPI 160
/* Number of flip buffers carved out of the fbdev memory region. May be
 * overridden at build time via ADF_FBDEV_NUM_PREFERRED_BUFFERS.
 */
#if defined(ADF_FBDEV_NUM_PREFERRED_BUFFERS)
#define NUM_PREFERRED_BUFFERS	ADF_FBDEV_NUM_PREFERRED_BUFFERS
#else
#define NUM_PREFERRED_BUFFERS	3
#endif
89 struct adf_fbdev_dmabuf {
90 struct sg_table sg_table;
95 /* Used for cleanup of dmabuf private data */
96 spinlock_t *alloc_lock;
101 struct adf_fbdev_device {
102 struct adf_device base;
103 struct fb_info *fb_info;
107 struct adf_fbdev_interface {
108 struct adf_interface base;
109 struct drm_mode_modeinfo fb_mode;
110 u16 width_mm, height_mm;
111 struct fb_info *fb_info;
112 spinlock_t alloc_lock;
/* SIMPLE BUFFER MANAGER *****************************************************/

/* Handle alloc/free from the fbdev carveout (fix.smem_start -> fix.smem_size)
 * region. This simple allocator sets a bit in the alloc_mask when a buffer is
 * owned by dmabuf. When the dmabuf ->release() is called, the alloc_mask bit
 * is cleared and the adf_fbdev_dmabuf object is freed.
 *
 * Since dmabuf relies on sg_table/scatterlists, and hence struct page*, this
 * code may have problems if your framebuffer uses memory that is not in the
 * kernel's page tables.
 */
128 static struct adf_fbdev_dmabuf *
129 adf_fbdev_alloc_buffer(struct adf_fbdev_interface *interface)
131 struct adf_fbdev_dmabuf *fbdev_dmabuf;
132 struct scatterlist *sg;
139 spin_lock(&interface->alloc_lock);
141 for (id = 0; id < NUM_PREFERRED_BUFFERS; id++) {
142 if (!(interface->alloc_mask & (1UL << id))) {
143 interface->alloc_mask |= (1UL << id);
148 spin_unlock(&interface->alloc_lock);
150 if (id == NUM_PREFERRED_BUFFERS)
151 return ERR_PTR(-ENOMEM);
153 unitary_size = interface->fb_info->fix.line_length *
154 interface->fb_info->var.yres;
156 /* PAGE_SIZE alignment has been checked already, do NOT allow it
157 * through here. We are about to allocate an sg_list.
159 BUG_ON((unitary_size % PAGE_SIZE) != 0);
161 fbdev_dmabuf = kmalloc(sizeof(*fbdev_dmabuf), GFP_KERNEL);
163 return ERR_PTR(-ENOMEM);
165 err = sg_alloc_table(&fbdev_dmabuf->sg_table, unitary_size / PAGE_SIZE,
172 /* Increment the reference count of this module as long as the
173 * adb_fbdev_dmabuf object exists. This prevents this module from
174 * being unloaded if the buffer is passed around by dmabuf.
176 if (!try_module_get(THIS_MODULE)) {
177 pr_err("try_module_get(THIS_MODULE) failed");
179 return ERR_PTR(-EFAULT);
182 fbdev_dmabuf->offset = id * unitary_size;
183 fbdev_dmabuf->length = unitary_size;
184 fbdev_dmabuf->vaddr = interface->fb_info->screen_base +
185 fbdev_dmabuf->offset;
187 for_each_sg(fbdev_dmabuf->sg_table.sgl, sg,
188 fbdev_dmabuf->sg_table.nents, i) {
189 page = vmalloc_to_page(fbdev_dmabuf->vaddr + offset);
191 pr_err("Failed to map fbdev vaddr to pages\n");
193 return ERR_PTR(-EFAULT);
195 sg_set_page(sg, page, PAGE_SIZE, 0);
198 /* Shadow what ion is doing currently to ensure sg_dma_address()
199 * is valid. This is not strictly correct as the dma address
200 * should only be valid after mapping (ownership changed), and
201 * we haven't mapped the scatter list yet.
203 sg_dma_address(sg) = sg_phys(sg);
206 fbdev_dmabuf->alloc_mask = &interface->alloc_mask;
207 fbdev_dmabuf->alloc_lock = &interface->alloc_lock;
208 fbdev_dmabuf->id = id;
213 static void adf_fbdev_free_buffer(struct adf_fbdev_dmabuf *fbdev_dmabuf)
217 spin_lock_irqsave(fbdev_dmabuf->alloc_lock, flags);
218 (*fbdev_dmabuf->alloc_mask) &= ~(1UL << fbdev_dmabuf->id);
219 spin_unlock_irqrestore(fbdev_dmabuf->alloc_lock, flags);
221 sg_free_table(&fbdev_dmabuf->sg_table);
224 module_put(THIS_MODULE);
227 /* DMA BUF LAYER *************************************************************/
229 static struct sg_table *
230 adf_fbdev_d_map_dma_buf(struct dma_buf_attachment *attachment,
231 enum dma_data_direction direction)
233 struct adf_fbdev_dmabuf *fbdev_dmabuf = attachment->dmabuf->priv;
235 return &fbdev_dmabuf->sg_table;
238 static void adf_fbdev_d_unmap_dma_buf(struct dma_buf_attachment *attachment,
239 struct sg_table *table,
240 enum dma_data_direction direction)
245 static int adf_fbdev_d_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
247 struct adf_fbdev_dmabuf *fbdev_dmabuf = dmabuf->priv;
248 unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
249 unsigned long addr = vma->vm_start;
250 unsigned long remainder, len;
251 struct scatterlist *sg;
255 for_each_sg(fbdev_dmabuf->sg_table.sgl, sg,
256 fbdev_dmabuf->sg_table.nents, i) {
259 pr_err("Failed to retrieve pages\n");
262 remainder = vma->vm_end - addr;
263 len = sg_dma_len(sg);
264 if (offset >= sg_dma_len(sg)) {
265 offset -= sg_dma_len(sg);
268 page += offset / PAGE_SIZE;
269 len = sg_dma_len(sg) - offset;
272 len = min(len, remainder);
273 remap_pfn_range(vma, addr, page_to_pfn(page), len,
276 if (addr >= vma->vm_end)
283 static void adf_fbdev_d_release(struct dma_buf *dmabuf)
285 adf_fbdev_free_buffer(dmabuf->priv);
288 #if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)) && \
289 !defined(CHROMIUMOS_WORKAROUNDS_KERNEL318)
292 adf_fbdev_d_begin_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len,
293 enum dma_data_direction dir)
295 struct adf_fbdev_dmabuf *fbdev_dmabuf = dmabuf->priv;
297 if (start + len > fbdev_dmabuf->length)
302 static void adf_fbdev_d_end_cpu_access(struct dma_buf *dmabuf, size_t start,
303 size_t len, enum dma_data_direction dir)
305 /* Framebuffer memory is cache coherent. No-op. */
308 #endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)) &&
309 !defined(CHROMIUMOS_WORKAROUNDS_KERNEL318) */
312 adf_fbdev_d_kmap(struct dma_buf *dmabuf, unsigned long page_offset)
314 struct adf_fbdev_dmabuf *fbdev_dmabuf = dmabuf->priv;
317 if (page_offset * PAGE_SIZE >= fbdev_dmabuf->length)
318 return ERR_PTR(-EINVAL);
319 vaddr = fbdev_dmabuf->vaddr + page_offset * PAGE_SIZE;
/* dma_buf ->kunmap()/->kunmap_atomic(): nothing to tear down — kmap only
 * returned a pointer into the persistent screen_base mapping.
 */
static void
adf_fbdev_d_kunmap(struct dma_buf *dmabuf, unsigned long page_offset,
		   void *ptr)
{
	/* No-op */
}
330 static void *adf_fbdev_d_vmap(struct dma_buf *dmabuf)
332 struct adf_fbdev_dmabuf *fbdev_dmabuf = dmabuf->priv;
334 return fbdev_dmabuf->vaddr;
/* dma_buf ->vunmap(): nothing to undo for the persistent mapping. */
static void adf_fbdev_d_vunmap(struct dma_buf *dmabuf, void *vaddr)
{
	/* No-op */
}
342 static const struct dma_buf_ops adf_fbdev_dma_buf_ops = {
343 .map_dma_buf = adf_fbdev_d_map_dma_buf,
344 .unmap_dma_buf = adf_fbdev_d_unmap_dma_buf,
345 .mmap = adf_fbdev_d_mmap,
346 .release = adf_fbdev_d_release,
347 #if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)) && \
348 !defined(CHROMIUMOS_WORKAROUNDS_KERNEL318)
349 .begin_cpu_access = adf_fbdev_d_begin_cpu_access,
350 .end_cpu_access = adf_fbdev_d_end_cpu_access,
352 .kmap_atomic = adf_fbdev_d_kmap,
353 .kunmap_atomic = adf_fbdev_d_kunmap,
354 .kmap = adf_fbdev_d_kmap,
355 .kunmap = adf_fbdev_d_kunmap,
356 .vmap = adf_fbdev_d_vmap,
357 .vunmap = adf_fbdev_d_vunmap,
360 /* ADF LAYER *****************************************************************/
362 static u32 adf_fbdev_supported_format;
364 static int adf_fbdev_validate(struct adf_device *dev, struct adf_post *cfg,
367 int err = adf_img_validate_simple(dev, cfg, driver_state);
369 if (cfg->n_bufs == 0 || err != 0)
372 /* Everything checked out in the generic validation, but we
373 * additionally want to check that the dmabuf came from the
374 * adf_fbdev module, which the generic code can't check.
376 if (cfg->bufs[0].dma_bufs[0]->ops != &adf_fbdev_dma_buf_ops)
382 static void adf_fbdev_post(struct adf_device *dev, struct adf_post *cfg,
385 struct adf_fbdev_device *device = (struct adf_fbdev_device *)dev;
386 struct fb_var_screeninfo new_var = device->fb_info->var;
387 struct adf_fbdev_dmabuf *fbdev_dmabuf;
388 struct adf_buffer *buffer;
391 /* "Null" flip handling */
392 if (cfg->n_bufs == 0)
395 if (!lock_fb_info(device->fb_info)) {
396 pr_err("Failed to lock fb_info structure.\n");
402 buffer = &cfg->bufs[0];
403 fbdev_dmabuf = buffer->dma_bufs[0]->priv;
404 new_var.yoffset = new_var.yres * fbdev_dmabuf->id;
406 /* If we're supposed to be able to flip, but the yres_virtual has been
407 * changed to an unsupported (smaller) value, we need to change it back
408 * (this is a workaround for some Linux fbdev drivers that seem to lose
409 * any modifications to yres_virtual after a blank.)
411 if (new_var.yres_virtual < new_var.yres * NUM_PREFERRED_BUFFERS) {
412 new_var.activate = FB_ACTIVATE_NOW;
413 new_var.yres_virtual = new_var.yres * NUM_PREFERRED_BUFFERS;
415 err = fb_set_var(device->fb_info, &new_var);
417 pr_err("fb_set_var failed (err=%d)\n", err);
419 err = fb_pan_display(device->fb_info, &new_var);
421 pr_err("fb_pan_display failed (err=%d)\n", err);
426 unlock_fb_info(device->fb_info);
430 adf_fbdev_open2(struct adf_obj *obj, struct inode *inode, struct file *file)
432 struct adf_fbdev_device *dev =
433 (struct adf_fbdev_device *)obj->parent;
434 atomic_inc(&dev->refcount);
439 adf_fbdev_release2(struct adf_obj *obj, struct inode *inode, struct file *file)
441 struct adf_fbdev_device *dev =
442 (struct adf_fbdev_device *)obj->parent;
443 struct sync_fence *release_fence;
445 if (atomic_dec_return(&dev->refcount))
448 /* This special "null" flip works around a problem with ADF
449 * which leaves buffers pinned by the display engine even
450 * after all ADF clients have closed.
452 * The "null" flip is pipelined like any other. The user won't
453 * be able to unload this module until it has been posted.
455 release_fence = adf_device_post(&dev->base, NULL, 0, NULL, 0, NULL, 0);
456 if (IS_ERR_OR_NULL(release_fence)) {
457 pr_err("Failed to queue null flip command (err=%d).\n",
458 (int)PTR_ERR(release_fence));
462 sync_fence_put(release_fence);
465 static const struct adf_device_ops adf_fbdev_device_ops = {
466 .owner = THIS_MODULE,
468 .open = adf_fbdev_open2,
469 .release = adf_fbdev_release2,
470 .ioctl = adf_img_ioctl,
472 .validate = adf_fbdev_validate,
473 .post = adf_fbdev_post,
477 adf_fbdev_supports_event(struct adf_obj *obj, enum adf_event_type type)
480 case ADF_EVENT_VSYNC:
481 case ADF_EVENT_HOTPLUG:
489 adf_fbdev_set_event(struct adf_obj *obj, enum adf_event_type type,
493 case ADF_EVENT_VSYNC:
494 case ADF_EVENT_HOTPLUG:
501 static int adf_fbdev_blank2(struct adf_interface *intf, u8 state)
503 struct adf_fbdev_interface *interface =
504 (struct adf_fbdev_interface *)intf;
505 struct fb_info *fb_info = interface->fb_info;
507 if (!fb_info->fbops->fb_blank)
510 return fb_info->fbops->fb_blank(state, fb_info);
514 adf_fbdev_alloc_simple_buffer(struct adf_interface *intf, u16 w, u16 h,
515 u32 format, struct dma_buf **dma_buf,
516 u32 *offset, u32 *pitch)
518 struct adf_fbdev_interface *interface =
519 (struct adf_fbdev_interface *)intf;
520 struct fb_var_screeninfo *var = &interface->fb_info->var;
521 struct adf_fbdev_dmabuf *fbdev_dmabuf;
523 if (w != var->xres) {
524 pr_err("Simple alloc request w=%u does not match w=%u.\n",
529 if (h != var->yres) {
530 pr_err("Simple alloc request h=%u does not match h=%u.\n",
535 if (format != adf_fbdev_supported_format) {
536 pr_err("Simple alloc request f=0x%x does not match f=0x%x.\n",
537 format, adf_fbdev_supported_format);
541 fbdev_dmabuf = adf_fbdev_alloc_buffer(interface);
542 if (IS_ERR_OR_NULL(fbdev_dmabuf))
543 return PTR_ERR(fbdev_dmabuf);
545 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
547 DEFINE_DMA_BUF_EXPORT_INFO(export_info);
549 export_info.ops = &adf_fbdev_dma_buf_ops;
550 export_info.size = fbdev_dmabuf->length;
551 export_info.flags = O_RDWR;
552 export_info.priv = fbdev_dmabuf;
554 *dma_buf = dma_buf_export(&export_info);
556 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0))
557 *dma_buf = dma_buf_export(fbdev_dmabuf, &adf_fbdev_dma_buf_ops,
558 fbdev_dmabuf->length, O_RDWR, NULL);
560 *dma_buf = dma_buf_export(fbdev_dmabuf, &adf_fbdev_dma_buf_ops,
561 fbdev_dmabuf->length, O_RDWR);
563 if (IS_ERR(*dma_buf)) {
564 adf_fbdev_free_buffer(fbdev_dmabuf);
565 return PTR_ERR(*dma_buf);
568 *pitch = interface->fb_info->fix.line_length;
574 adf_fbdev_screen_size(struct adf_interface *intf, u16 *width_mm,
577 struct adf_fbdev_interface *interface =
578 (struct adf_fbdev_interface *)intf;
579 *width_mm = interface->width_mm;
580 *height_mm = interface->height_mm;
584 static int adf_fbdev_modeset(struct adf_interface *intf,
585 struct drm_mode_modeinfo *mode)
587 struct adf_fbdev_interface *interface =
588 (struct adf_fbdev_interface *)intf;
589 return mode == &interface->fb_mode ? 0 : -EINVAL;
592 static const struct adf_interface_ops adf_fbdev_interface_ops = {
594 .supports_event = adf_fbdev_supports_event,
595 .set_event = adf_fbdev_set_event,
597 .blank = adf_fbdev_blank2,
598 .alloc_simple_buffer = adf_fbdev_alloc_simple_buffer,
599 .screen_size = adf_fbdev_screen_size,
600 .modeset = adf_fbdev_modeset,
603 struct adf_overlay_engine_ops adf_fbdev_overlay_engine_ops = {
604 .supported_formats = &adf_fbdev_supported_format,
605 .n_supported_formats = 1,
/* If we can flip, we need to make sure we have the memory to do so.
 *
 * We'll assume that the fbdev device provides extra space in
 * yres_virtual for panning; xres_virtual is theoretically supported,
 * but it involves more work.
 *
 * If the fbdev device doesn't have yres_virtual > yres, we'll try
 * requesting it before bailing. Userspace applications commonly do
 * this with an FBIOPUT_VSCREENINFO ioctl().
 *
 * Another problem is with a limitation in PowerVR services -- it
 * needs framebuffers to be page aligned (this is a SW limitation,
 * the HW can support non-page-aligned buffers). So we have to
 * check that stride * height for a single buffer is page aligned.
 */
623 static bool adf_fbdev_flip_possible(struct fb_info *fb_info)
625 struct fb_var_screeninfo var = fb_info->var;
628 if (!fb_info->fix.xpanstep && !fb_info->fix.ypanstep &&
629 !fb_info->fix.ywrapstep) {
630 pr_err("The fbdev device detected does not support ypan/ywrap.\n");
634 if ((fb_info->fix.line_length * var.yres) % PAGE_SIZE != 0) {
635 pr_err("Line length (in bytes) x yres is not a multiple of page size.\n");
639 /* We might already have enough space */
640 if (var.yres * NUM_PREFERRED_BUFFERS <= var.yres_virtual)
643 pr_err("No buffer space for flipping; asking for more.\n");
645 var.activate = FB_ACTIVATE_NOW;
646 var.yres_virtual = var.yres * NUM_PREFERRED_BUFFERS;
648 err = fb_set_var(fb_info, &var);
650 pr_err("fb_set_var failed (err=%d).\n", err);
654 if (var.yres * NUM_PREFERRED_BUFFERS > var.yres_virtual) {
655 pr_err("Failed to obtain additional buffer space.\n");
659 /* Some fbdev drivers allow the yres_virtual modification through,
660 * but don't actually update the fix. We need the fix to be updated
661 * and more memory allocated, so we can actually take advantage of
662 * the increased yres_virtual.
664 if (fb_info->fix.smem_len < fb_info->fix.line_length *
666 pr_err("'fix' not re-allocated with sufficient buffer space.\n");
667 pr_err("Check NUM_PREFERRED_BUFFERS (%u) is as intended.\n",
668 NUM_PREFERRED_BUFFERS);
675 /* Could use devres here? */
677 struct adf_fbdev_device device;
678 struct adf_fbdev_interface interface;
679 struct adf_overlay_engine engine;
682 static int __init init_adf_fbdev(void)
684 struct drm_mode_modeinfo *mode = &dev_data.interface.fb_mode;
685 char format_str[ADF_FORMAT_STR_SIZE];
686 struct fb_info *fb_info;
689 fb_info = registered_fb[0];
691 pr_err("No Linux framebuffer (fbdev) device is registered!\n");
692 pr_err("Check you have a framebuffer driver compiled into your kernel\n");
693 pr_err("and that it is enabled on the cmdline.\n");
697 if (!lock_fb_info(fb_info))
702 /* Filter out broken FB devices */
703 if (!fb_info->fix.smem_len || !fb_info->fix.line_length) {
704 pr_err("The fbdev device detected had a zero smem_len or line_length,\n");
705 pr_err("which suggests it is a broken driver.\n");
709 if (fb_info->fix.type != FB_TYPE_PACKED_PIXELS ||
710 fb_info->fix.visual != FB_VISUAL_TRUECOLOR) {
711 pr_err("The fbdev device detected is not truecolor with packed pixels.\n");
715 if (fb_info->var.bits_per_pixel == 32) {
716 if (fb_info->var.red.length == 8 ||
717 fb_info->var.green.length == 8 ||
718 fb_info->var.blue.length == 8 ||
719 fb_info->var.red.offset == 16 ||
720 fb_info->var.green.offset == 8 ||
721 fb_info->var.blue.offset == 0) {
722 #if defined(ADF_FBDEV_FORCE_XRGB8888)
723 adf_fbdev_supported_format = DRM_FORMAT_BGRX8888;
725 adf_fbdev_supported_format = DRM_FORMAT_BGRA8888;
727 } else if (fb_info->var.red.length == 8 ||
728 fb_info->var.green.length == 8 ||
729 fb_info->var.blue.length == 8 ||
730 fb_info->var.red.offset == 0 ||
731 fb_info->var.green.offset == 8 ||
732 fb_info->var.blue.offset == 16) {
733 adf_fbdev_supported_format = DRM_FORMAT_RGBA8888;
735 pr_err("The fbdev device detected uses an unrecognized 32bit pixel format (%u/%u/%u, %u/%u/%u)\n",
736 fb_info->var.red.length,
737 fb_info->var.green.length,
738 fb_info->var.blue.length,
739 fb_info->var.red.offset,
740 fb_info->var.green.offset,
741 fb_info->var.blue.offset);
744 } else if (fb_info->var.bits_per_pixel == 16) {
745 if (fb_info->var.red.length != 5 ||
746 fb_info->var.green.length != 6 ||
747 fb_info->var.blue.length != 5 ||
748 fb_info->var.red.offset != 11 ||
749 fb_info->var.green.offset != 5 ||
750 fb_info->var.blue.offset != 0) {
751 pr_err("The fbdev device detected uses an unrecognized 16bit pixel format (%u/%u/%u, %u/%u/%u)\n",
752 fb_info->var.red.length,
753 fb_info->var.green.length,
754 fb_info->var.blue.length,
755 fb_info->var.red.offset,
756 fb_info->var.green.offset,
757 fb_info->var.blue.offset);
760 adf_fbdev_supported_format = DRM_FORMAT_BGR565;
762 pr_err("The fbdev device detected uses an unsupported bpp (%u).\n",
763 fb_info->var.bits_per_pixel);
767 #if defined(CONFIG_ARCH_MT8173)
768 /* Workaround for broken framebuffer driver. The wrong pixel format
769 * is reported to this module. It is always really RGBA8888.
771 adf_fbdev_supported_format = DRM_FORMAT_RGBA8888;
774 if (!try_module_get(fb_info->fbops->owner)) {
775 pr_err("try_module_get() failed");
779 if (fb_info->fbops->fb_open &&
780 fb_info->fbops->fb_open(fb_info, 0) != 0) {
781 pr_err("fb_open() failed");
785 if (!adf_fbdev_flip_possible(fb_info)) {
786 pr_err("Flipping must be supported for ADF. Aborting.\n");
790 err = adf_device_init(&dev_data.device.base, fb_info->dev,
791 &adf_fbdev_device_ops, "fbdev");
793 pr_err("adf_device_init failed (%d)", err);
797 dev_data.device.fb_info = fb_info;
799 err = adf_interface_init(&dev_data.interface.base,
800 &dev_data.device.base,
801 ADF_INTF_DVI, 0, ADF_INTF_FLAG_PRIMARY,
802 &adf_fbdev_interface_ops, "fbdev_interface");
804 pr_err("adf_interface_init failed (%d)", err);
805 goto err_device_destroy;
808 spin_lock_init(&dev_data.interface.alloc_lock);
809 dev_data.interface.fb_info = fb_info;
811 /* If the fbdev mode looks viable, try to inherit from it */
813 adf_modeinfo_from_fb_videomode(fb_info->mode, mode);
815 /* Framebuffer drivers aren't always very good at filling out their
816 * mode information, so fake up anything that's missing so we don't
817 * need to accommodate it in userspace.
821 mode->hdisplay = fb_info->var.xres;
823 mode->vdisplay = fb_info->var.yres;
825 mode->vrefresh = FALLBACK_REFRESH_RATE;
827 if (fb_info->var.width > 0 && fb_info->var.width < 1000) {
828 dev_data.interface.width_mm = fb_info->var.width;
830 dev_data.interface.width_mm = (fb_info->var.xres * 25400) /
831 (FALLBACK_DPI * 1000);
834 if (fb_info->var.height > 0 && fb_info->var.height < 1000) {
835 dev_data.interface.height_mm = fb_info->var.height;
837 dev_data.interface.height_mm = (fb_info->var.yres * 25400) /
838 (FALLBACK_DPI * 1000);
841 err = adf_hotplug_notify_connected(&dev_data.interface.base, mode, 1);
843 pr_err("adf_hotplug_notify_connected failed (%d)", err);
844 goto err_interface_destroy;
847 /* This doesn't really set the mode, it just updates current_mode */
848 err = adf_interface_set_mode(&dev_data.interface.base, mode);
850 pr_err("adf_interface_set_mode failed (%d)", err);
851 goto err_interface_destroy;
854 err = adf_overlay_engine_init(&dev_data.engine, &dev_data.device.base,
855 &adf_fbdev_overlay_engine_ops,
856 "fbdev_overlay_engine");
858 pr_err("adf_overlay_engine_init failed (%d)", err);
859 goto err_interface_destroy;
862 err = adf_attachment_allow(&dev_data.device.base,
864 &dev_data.interface.base);
867 pr_err("adf_attachment_allow failed (%d)", err);
868 goto err_overlay_engine_destroy;
871 adf_format_str(adf_fbdev_supported_format, format_str);
872 pr_info("Found usable fbdev device (%s):\n"
873 "range (physical) = 0x%lx-0x%lx\n"
874 "range (virtual) = %p-%p\n"
875 "size (bytes) = 0x%x\n"
876 "xres x yres = %ux%u\n"
877 "xres x yres (v) = %ux%u\n"
878 "physical (mm) = %ux%u\n"
879 "refresh (Hz) = %u\n"
880 "drm fourcc = %s (0x%x)\n",
882 fb_info->fix.smem_start,
883 fb_info->fix.smem_start + fb_info->fix.smem_len,
884 fb_info->screen_base,
885 fb_info->screen_base + fb_info->screen_size,
886 fb_info->fix.smem_len,
887 mode->hdisplay, mode->vdisplay,
888 fb_info->var.xres_virtual, fb_info->var.yres_virtual,
889 dev_data.interface.width_mm, dev_data.interface.height_mm,
891 format_str, adf_fbdev_supported_format);
895 unlock_fb_info(fb_info);
898 err_overlay_engine_destroy:
899 adf_overlay_engine_destroy(&dev_data.engine);
900 err_interface_destroy:
901 adf_interface_destroy(&dev_data.interface.base);
903 adf_device_destroy(&dev_data.device.base);
905 if (fb_info->fbops->fb_release)
906 fb_info->fbops->fb_release(fb_info, 0);
908 module_put(fb_info->fbops->owner);
912 static void __exit exit_adf_fbdev(void)
914 struct fb_info *fb_info = dev_data.device.fb_info;
916 if (!lock_fb_info(fb_info)) {
917 pr_err("Failed to lock fb_info.\n");
923 adf_overlay_engine_destroy(&dev_data.engine);
924 adf_interface_destroy(&dev_data.interface.base);
925 adf_device_destroy(&dev_data.device.base);
927 if (fb_info->fbops->fb_release)
928 fb_info->fbops->fb_release(fb_info, 0);
930 module_put(fb_info->fbops->owner);
933 unlock_fb_info(fb_info);
936 module_init(init_adf_fbdev);
937 module_exit(exit_adf_fbdev);