firefly-linux-kernel-4.4.55: drivers/staging/imgtec/adf_fbdev.c
/* -*- mode: c; indent-tabs-mode: t; c-basic-offset: 8; tab-width: 8 -*- */
/* vi: set ts=8 sw=8 sts=8: */
/*************************************************************************/ /*!
@File
@Codingstyle    LinuxKernel
@Copyright      Copyright (c) Imagination Technologies Ltd. All Rights Reserved
@License        Dual MIT/GPLv2

The contents of this file are subject to the MIT license as set out below.

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

Alternatively, the contents of this file may be used under the terms of
the GNU General Public License Version 2 ("GPL") in which case the provisions
of GPL are applicable instead of those above.

If you wish to allow use of your version of this file only under the terms of
GPL, and not to allow others to use your version of this file under the terms
of the MIT license, indicate your decision by deleting the provisions above
and replace them with the notice and other provisions required by GPL as set
out in the file called "GPL-COPYING" included in this distribution. If you do
not delete the provisions above, a recipient may use your version of this file
under the terms of either the MIT license or GPL.

This License is also included in this distribution in the file called
"MIT-COPYING".

EXCEPT AS OTHERWISE STATED IN A NEGOTIATED AGREEMENT: (A) THE SOFTWARE IS
PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING
BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
PURPOSE AND NONINFRINGEMENT; AND (B) IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/ /**************************************************************************/

#include <linux/version.h>
#include <linux/console.h>
#include <linux/dma-buf.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/fb.h>

#include <drm/drm_fourcc.h>

#include <video/adf.h>
#include <video/adf_fbdev.h>
#include <video/adf_client.h>

#include <adf/adf_ext.h>

/* for sync_fence_put */
#include PVR_ANDROID_SYNC_HEADER

#include "adf_common.h"

#ifndef CONFIG_FB
#error adf_fbdev needs Linux framebuffer support. Enable it in your kernel.
#endif

MODULE_AUTHOR("Imagination Technologies Ltd. <gpl-support@imgtec.com>");
MODULE_LICENSE("Dual MIT/GPL");

/* NOTE: This is just an example of how to use adf. You should NOT use this
 *       module in a production environment. It is meaningless to layer adf
 *       on top of fbdev, as adf is more flexible than fbdev and adf itself
 *       provides fbdev emulation. Do not use this implementation generally!
 */

#define DRVNAME "adf_fbdev"

#define FALLBACK_REFRESH_RATE   60
#define FALLBACK_DPI            160

#if defined(ADF_FBDEV_NUM_PREFERRED_BUFFERS)
#define NUM_PREFERRED_BUFFERS   ADF_FBDEV_NUM_PREFERRED_BUFFERS
#else
#define NUM_PREFERRED_BUFFERS   3
#endif

struct adf_fbdev_dmabuf {
        struct sg_table sg_table;
        size_t offset;
        size_t length;
        void *vaddr;

        /* Used for cleanup of dmabuf private data */
        spinlock_t *alloc_lock;
        u8 *alloc_mask;
        u8 id;
};

struct adf_fbdev_device {
        struct adf_device base;
        struct fb_info *fb_info;
        atomic_t refcount;
};

struct adf_fbdev_interface {
        struct adf_interface base;
        struct drm_mode_modeinfo fb_mode;
        u16 width_mm, height_mm;
        struct fb_info *fb_info;
        spinlock_t alloc_lock;
        u8 alloc_mask;
};

/* SIMPLE BUFFER MANAGER *****************************************************/

/* Handle alloc/free from the fbdev carveout (fix.smem_start -> fix.smem_len)
 * region. This simple allocator sets a bit in the alloc_mask when a buffer is
 * owned by dmabuf. When the dmabuf ->release() is called, the alloc_mask bit
 * is cleared and the adf_fbdev_dmabuf object is freed.
 *
 * Since dmabuf relies on sg_table/scatterlists, and hence struct page*, this
 * code may have problems if your framebuffer uses memory that is not in the
 * kernel's page tables.
 */

static struct adf_fbdev_dmabuf *
adf_fbdev_alloc_buffer(struct adf_fbdev_interface *interface)
{
        struct adf_fbdev_dmabuf *fbdev_dmabuf;
        struct scatterlist *sg;
        size_t unitary_size;
        struct page *page;
        u32 offset = 0;
        int i, err;
        u32 id;

        spin_lock(&interface->alloc_lock);

        for (id = 0; id < NUM_PREFERRED_BUFFERS; id++) {
                if (!(interface->alloc_mask & (1UL << id))) {
                        interface->alloc_mask |= (1UL << id);
                        break;
                }
        }

        spin_unlock(&interface->alloc_lock);

        if (id == NUM_PREFERRED_BUFFERS)
                return ERR_PTR(-ENOMEM);

        unitary_size = interface->fb_info->fix.line_length *
                       interface->fb_info->var.yres;

        /* PAGE_SIZE alignment has been checked already, do NOT allow it
         * through here. We are about to allocate an sg_list.
         */
        BUG_ON((unitary_size % PAGE_SIZE) != 0);

        fbdev_dmabuf = kmalloc(sizeof(*fbdev_dmabuf), GFP_KERNEL);
        if (!fbdev_dmabuf)
                return ERR_PTR(-ENOMEM);

        err = sg_alloc_table(&fbdev_dmabuf->sg_table, unitary_size / PAGE_SIZE,
                             GFP_KERNEL);
        if (err) {
                kfree(fbdev_dmabuf);
                return ERR_PTR(err);
        }

        /* Increment the reference count of this module as long as the
         * adf_fbdev_dmabuf object exists. This prevents this module from
         * being unloaded if the buffer is passed around by dmabuf.
         */
        if (!try_module_get(THIS_MODULE)) {
                pr_err("try_module_get(THIS_MODULE) failed");
                sg_free_table(&fbdev_dmabuf->sg_table);
                kfree(fbdev_dmabuf);
                return ERR_PTR(-EFAULT);
        }

        fbdev_dmabuf->offset = id * unitary_size;
        fbdev_dmabuf->length = unitary_size;
        fbdev_dmabuf->vaddr  = interface->fb_info->screen_base +
                               fbdev_dmabuf->offset;

        for_each_sg(fbdev_dmabuf->sg_table.sgl, sg,
                    fbdev_dmabuf->sg_table.nents, i) {
                page = vmalloc_to_page(fbdev_dmabuf->vaddr + offset);
                if (!page) {
                        pr_err("Failed to map fbdev vaddr to pages\n");
                        sg_free_table(&fbdev_dmabuf->sg_table);
                        kfree(fbdev_dmabuf);
                        module_put(THIS_MODULE);
                        return ERR_PTR(-EFAULT);
                }
                sg_set_page(sg, page, PAGE_SIZE, 0);
                offset += PAGE_SIZE;

                /* Shadow what ion is doing currently to ensure sg_dma_address()
                 * is valid. This is not strictly correct as the dma address
                 * should only be valid after mapping (ownership changed), and
                 * we haven't mapped the scatter list yet.
                 */
                sg_dma_address(sg) = sg_phys(sg);
        }

        fbdev_dmabuf->alloc_mask = &interface->alloc_mask;
        fbdev_dmabuf->alloc_lock = &interface->alloc_lock;
        fbdev_dmabuf->id         = id;

        return fbdev_dmabuf;
}

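/* Release a buffer previously handed out by adf_fbdev_alloc_buffer(): clear
 * its bit in the interface's alloc_mask, free the scatter-gather table and
 * drop the module reference taken at allocation time.
 */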
static void adf_fbdev_free_buffer(struct adf_fbdev_dmabuf *fbdev_dmabuf)
{
        unsigned long flags;

        spin_lock_irqsave(fbdev_dmabuf->alloc_lock, flags);
        (*fbdev_dmabuf->alloc_mask) &= ~(1UL << fbdev_dmabuf->id);
        spin_unlock_irqrestore(fbdev_dmabuf->alloc_lock, flags);

        sg_free_table(&fbdev_dmabuf->sg_table);
        kfree(fbdev_dmabuf);

        module_put(THIS_MODULE);
}

/* DMA BUF LAYER *************************************************************/

static struct sg_table *
adf_fbdev_d_map_dma_buf(struct dma_buf_attachment *attachment,
                        enum dma_data_direction direction)
{
        struct adf_fbdev_dmabuf *fbdev_dmabuf = attachment->dmabuf->priv;

        return &fbdev_dmabuf->sg_table;
}

static void adf_fbdev_d_unmap_dma_buf(struct dma_buf_attachment *attachment,
                                      struct sg_table *table,
                                      enum dma_data_direction direction)
{
        /* No-op */
}

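/* Map the buffer into a userspace VMA by walking the scatter-gather list and
 * remapping each page with remap_pfn_range(), honouring the page offset the
 * caller requested via vm_pgoff.
 */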
static int adf_fbdev_d_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
        struct adf_fbdev_dmabuf *fbdev_dmabuf = dmabuf->priv;
        unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
        unsigned long addr = vma->vm_start;
        unsigned long remainder, len;
        struct scatterlist *sg;
        struct page *page;
        u32 i;

        for_each_sg(fbdev_dmabuf->sg_table.sgl, sg,
                    fbdev_dmabuf->sg_table.nents, i) {
                page = sg_page(sg);
                if (!page) {
                        pr_err("Failed to retrieve pages\n");
                        return -EFAULT;
                }
                remainder = vma->vm_end - addr;
                len = sg_dma_len(sg);
                if (offset >= sg_dma_len(sg)) {
                        offset -= sg_dma_len(sg);
                        continue;
                } else if (offset) {
                        page += offset / PAGE_SIZE;
                        len = sg_dma_len(sg) - offset;
                        offset = 0;
                }
                len = min(len, remainder);
                remap_pfn_range(vma, addr, page_to_pfn(page), len,
                                vma->vm_page_prot);
                addr += len;
                if (addr >= vma->vm_end)
                        return 0;
        }

        return 0;
}

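/* Last dma_buf reference dropped: return the carveout slot to the simple
 * buffer manager.
 */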
static void adf_fbdev_d_release(struct dma_buf *dmabuf)
{
        adf_fbdev_free_buffer(dmabuf->priv);
}

#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)) && \
    !defined(CHROMIUMOS_WORKAROUNDS_KERNEL318)

static int
adf_fbdev_d_begin_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len,
                             enum dma_data_direction dir)
{
        struct adf_fbdev_dmabuf *fbdev_dmabuf = dmabuf->priv;

        if (start + len > fbdev_dmabuf->length)
                return -EINVAL;
        return 0;
}

static void adf_fbdev_d_end_cpu_access(struct dma_buf *dmabuf, size_t start,
                                       size_t len, enum dma_data_direction dir)
{
        /* Framebuffer memory is cache coherent. No-op. */
}

#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)) &&
          !defined(CHROMIUMOS_WORKAROUNDS_KERNEL318) */

static void *
adf_fbdev_d_kmap(struct dma_buf *dmabuf, unsigned long page_offset)
{
        struct adf_fbdev_dmabuf *fbdev_dmabuf = dmabuf->priv;
        void *vaddr;

        if (page_offset * PAGE_SIZE >= fbdev_dmabuf->length)
                return ERR_PTR(-EINVAL);
        vaddr = fbdev_dmabuf->vaddr + page_offset * PAGE_SIZE;
        return vaddr;
}

static void
adf_fbdev_d_kunmap(struct dma_buf *dmabuf, unsigned long page_offset,
                   void *ptr)
{
        /* No-op */
}

static void *adf_fbdev_d_vmap(struct dma_buf *dmabuf)
{
        struct adf_fbdev_dmabuf *fbdev_dmabuf = dmabuf->priv;

        return fbdev_dmabuf->vaddr;
}

static void adf_fbdev_d_vunmap(struct dma_buf *dmabuf, void *vaddr)
{
        /* No-op */
}

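/* The fbdev carveout is already mapped via screen_base, so one implementation
 * backs both the atomic and non-atomic kmap hooks, and the kunmap/vunmap
 * hooks have nothing to undo.
 */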
static const struct dma_buf_ops adf_fbdev_dma_buf_ops = {
        .map_dma_buf            = adf_fbdev_d_map_dma_buf,
        .unmap_dma_buf          = adf_fbdev_d_unmap_dma_buf,
        .mmap                   = adf_fbdev_d_mmap,
        .release                = adf_fbdev_d_release,
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)) && \
    !defined(CHROMIUMOS_WORKAROUNDS_KERNEL318)
        .begin_cpu_access       = adf_fbdev_d_begin_cpu_access,
        .end_cpu_access         = adf_fbdev_d_end_cpu_access,
#endif
        .kmap_atomic            = adf_fbdev_d_kmap,
        .kunmap_atomic          = adf_fbdev_d_kunmap,
        .kmap                   = adf_fbdev_d_kmap,
        .kunmap                 = adf_fbdev_d_kunmap,
        .vmap                   = adf_fbdev_d_vmap,
        .vunmap                 = adf_fbdev_d_vunmap,
};

/* ADF LAYER *****************************************************************/

static u32 adf_fbdev_supported_format;

static int adf_fbdev_validate(struct adf_device *dev, struct adf_post *cfg,
                              void **driver_state)
{
        int err = adf_img_validate_simple(dev, cfg, driver_state);

        if (cfg->n_bufs == 0 || err != 0)
                return err;

        /* Everything checked out in the generic validation, but we
         * additionally want to check that the dmabuf came from the
         * adf_fbdev module, which the generic code can't check.
         */
        if (cfg->bufs[0].dma_bufs[0]->ops != &adf_fbdev_dma_buf_ops)
                return -EINVAL;

        return 0;
}

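/* Present a validated configuration by panning the fbdev display to the
 * carveout slot that backs the posted buffer. fb_set_var() is only used when
 * a driver has shrunk yres_virtual behind our back.
 */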
static void adf_fbdev_post(struct adf_device *dev, struct adf_post *cfg,
                           void *driver_state)
{
        struct adf_fbdev_device *device = (struct adf_fbdev_device *)dev;
        struct fb_var_screeninfo new_var = device->fb_info->var;
        struct adf_fbdev_dmabuf *fbdev_dmabuf;
        struct adf_buffer *buffer;
        int err;

        /* "Null" flip handling */
        if (cfg->n_bufs == 0)
                return;

        if (!lock_fb_info(device->fb_info)) {
                pr_err("Failed to lock fb_info structure.\n");
                return;
        }

        console_lock();

        buffer = &cfg->bufs[0];
        fbdev_dmabuf = buffer->dma_bufs[0]->priv;
        new_var.yoffset = new_var.yres * fbdev_dmabuf->id;

        /* If we're supposed to be able to flip, but the yres_virtual has been
         * changed to an unsupported (smaller) value, we need to change it back
         * (this is a workaround for some Linux fbdev drivers that seem to lose
         * any modifications to yres_virtual after a blank.)
         */
        if (new_var.yres_virtual < new_var.yres * NUM_PREFERRED_BUFFERS) {
                new_var.activate = FB_ACTIVATE_NOW;
                new_var.yres_virtual = new_var.yres * NUM_PREFERRED_BUFFERS;

                err = fb_set_var(device->fb_info, &new_var);
                if (err)
                        pr_err("fb_set_var failed (err=%d)\n", err);
        } else {
                err = fb_pan_display(device->fb_info, &new_var);
                if (err)
                        pr_err("fb_pan_display failed (err=%d)\n", err);
        }

        console_unlock();

        unlock_fb_info(device->fb_info);
}

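/* Track the number of open ADF clients so that the final release can queue
 * the "null" flip that unpins the display engine's buffers.
 */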
static int
adf_fbdev_open2(struct adf_obj *obj, struct inode *inode, struct file *file)
{
        struct adf_fbdev_device *dev =
                (struct adf_fbdev_device *)obj->parent;
        atomic_inc(&dev->refcount);
        return 0;
}

static void
adf_fbdev_release2(struct adf_obj *obj, struct inode *inode, struct file *file)
{
        struct adf_fbdev_device *dev =
                (struct adf_fbdev_device *)obj->parent;
        struct sync_fence *release_fence;

        if (atomic_dec_return(&dev->refcount))
                return;

        /* This special "null" flip works around a problem with ADF
         * which leaves buffers pinned by the display engine even
         * after all ADF clients have closed.
         *
         * The "null" flip is pipelined like any other. The user won't
         * be able to unload this module until it has been posted.
         */
        release_fence = adf_device_post(&dev->base, NULL, 0, NULL, 0, NULL, 0);
        if (IS_ERR_OR_NULL(release_fence)) {
                pr_err("Failed to queue null flip command (err=%d).\n",
                       (int)PTR_ERR(release_fence));
                return;
        }

        sync_fence_put(release_fence);
}

static const struct adf_device_ops adf_fbdev_device_ops = {
        .owner                  = THIS_MODULE,
        .base = {
                .open           = adf_fbdev_open2,
                .release        = adf_fbdev_release2,
                .ioctl          = adf_img_ioctl,
        },
        .validate               = adf_fbdev_validate,
        .post                   = adf_fbdev_post,
};

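/* VSYNC and HOTPLUG events are advertised so generic ADF clients keep
 * working, but this example driver has no real event source behind them, so
 * enabling/disabling them is a no-op.
 */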
static bool
adf_fbdev_supports_event(struct adf_obj *obj, enum adf_event_type type)
{
        switch (type) {
        case ADF_EVENT_VSYNC:
        case ADF_EVENT_HOTPLUG:
                return true;
        default:
                return false;
        }
}

static void
adf_fbdev_set_event(struct adf_obj *obj, enum adf_event_type type,
                    bool enabled)
{
        switch (type) {
        case ADF_EVENT_VSYNC:
        case ADF_EVENT_HOTPLUG:
                break;
        default:
                BUG();
        }
}

static int adf_fbdev_blank2(struct adf_interface *intf, u8 state)
{
        struct adf_fbdev_interface *interface =
                (struct adf_fbdev_interface *)intf;
        struct fb_info *fb_info = interface->fb_info;

        if (!fb_info->fbops->fb_blank)
                return -EOPNOTSUPP;

        return fb_info->fbops->fb_blank(state, fb_info);
}

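/* Hand out one slot of the fbdev carveout as a dma_buf. The requested
 * geometry and format must exactly match the fbdev mode, and the export call
 * differs between kernel versions, hence the #if ladder below.
 */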
static int
adf_fbdev_alloc_simple_buffer(struct adf_interface *intf, u16 w, u16 h,
                              u32 format, struct dma_buf **dma_buf,
                              u32 *offset, u32 *pitch)
{
        struct adf_fbdev_interface *interface =
                (struct adf_fbdev_interface *)intf;
        struct fb_var_screeninfo *var = &interface->fb_info->var;
        struct adf_fbdev_dmabuf *fbdev_dmabuf;

        if (w != var->xres) {
                pr_err("Simple alloc request w=%u does not match w=%u.\n",
                       w, var->xres);
                return -EINVAL;
        }

        if (h != var->yres) {
                pr_err("Simple alloc request h=%u does not match h=%u.\n",
                       h, var->yres);
                return -EINVAL;
        }

        if (format != adf_fbdev_supported_format) {
                pr_err("Simple alloc request f=0x%x does not match f=0x%x.\n",
                       format, adf_fbdev_supported_format);
                return -EINVAL;
        }

        fbdev_dmabuf = adf_fbdev_alloc_buffer(interface);
        if (IS_ERR_OR_NULL(fbdev_dmabuf))
                return PTR_ERR(fbdev_dmabuf);

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0))
        {
                DEFINE_DMA_BUF_EXPORT_INFO(export_info);

                export_info.ops = &adf_fbdev_dma_buf_ops;
                export_info.size = fbdev_dmabuf->length;
                export_info.flags = O_RDWR;
                export_info.priv = fbdev_dmabuf;

                *dma_buf = dma_buf_export(&export_info);
        }
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0))
        *dma_buf = dma_buf_export(fbdev_dmabuf, &adf_fbdev_dma_buf_ops,
                                  fbdev_dmabuf->length, O_RDWR, NULL);
#else
        *dma_buf = dma_buf_export(fbdev_dmabuf, &adf_fbdev_dma_buf_ops,
                                  fbdev_dmabuf->length, O_RDWR);
#endif
        if (IS_ERR(*dma_buf)) {
                adf_fbdev_free_buffer(fbdev_dmabuf);
                return PTR_ERR(*dma_buf);
        }

        *pitch = interface->fb_info->fix.line_length;
        *offset = 0;
        return 0;
}

static int
adf_fbdev_screen_size(struct adf_interface *intf, u16 *width_mm,
                      u16 *height_mm)
{
        struct adf_fbdev_interface *interface =
                (struct adf_fbdev_interface *)intf;
        *width_mm  = interface->width_mm;
        *height_mm = interface->height_mm;
        return 0;
}

static int adf_fbdev_modeset(struct adf_interface *intf,
                             struct drm_mode_modeinfo *mode)
{
        struct adf_fbdev_interface *interface =
                (struct adf_fbdev_interface *)intf;
        return mode == &interface->fb_mode ? 0 : -EINVAL;
}

static const struct adf_interface_ops adf_fbdev_interface_ops = {
        .base = {
                .supports_event = adf_fbdev_supports_event,
                .set_event      = adf_fbdev_set_event,
        },
        .blank                  = adf_fbdev_blank2,
        .alloc_simple_buffer    = adf_fbdev_alloc_simple_buffer,
        .screen_size            = adf_fbdev_screen_size,
        .modeset                = adf_fbdev_modeset,
};

struct adf_overlay_engine_ops adf_fbdev_overlay_engine_ops = {
        .supported_formats      = &adf_fbdev_supported_format,
        .n_supported_formats    = 1,
};

/* If we can flip, we need to make sure we have the memory to do so.
 *
 * We'll assume that the fbdev device provides extra space in
 * yres_virtual for panning; xres_virtual is theoretically supported,
 * but it involves more work.
 *
 * If the fbdev device doesn't have yres_virtual > yres, we'll try
 * requesting it before bailing. Userspace applications commonly do
 * this with an FBIOPUT_VSCREENINFO ioctl().
 *
 * Another problem is with a limitation in PowerVR services -- it
 * needs framebuffers to be page aligned (this is a SW limitation,
 * the HW can support non-page-aligned buffers). So we have to
 * check that stride * height for a single buffer is page aligned.
 */
static bool adf_fbdev_flip_possible(struct fb_info *fb_info)
{
        struct fb_var_screeninfo var = fb_info->var;
        int err;

        if (!fb_info->fix.xpanstep && !fb_info->fix.ypanstep &&
            !fb_info->fix.ywrapstep) {
                pr_err("The fbdev device detected does not support ypan/ywrap.\n");
                return false;
        }

        if ((fb_info->fix.line_length * var.yres) % PAGE_SIZE != 0) {
                pr_err("Line length (in bytes) x yres is not a multiple of page size.\n");
                return false;
        }

        /* We might already have enough space */
        if (var.yres * NUM_PREFERRED_BUFFERS <= var.yres_virtual)
                return true;

        pr_err("No buffer space for flipping; asking for more.\n");

        var.activate = FB_ACTIVATE_NOW;
        var.yres_virtual = var.yres * NUM_PREFERRED_BUFFERS;

        err = fb_set_var(fb_info, &var);
        if (err) {
                pr_err("fb_set_var failed (err=%d).\n", err);
                return false;
        }

        if (var.yres * NUM_PREFERRED_BUFFERS > var.yres_virtual) {
                pr_err("Failed to obtain additional buffer space.\n");
                return false;
        }

        /* Some fbdev drivers allow the yres_virtual modification through,
         * but don't actually update the fix. We need the fix to be updated
         * and more memory allocated, so we can actually take advantage of
         * the increased yres_virtual.
         */
        if (fb_info->fix.smem_len < fb_info->fix.line_length *
                                    var.yres_virtual) {
                pr_err("'fix' not re-allocated with sufficient buffer space.\n");
                pr_err("Check NUM_PREFERRED_BUFFERS (%u) is as intended.\n",
                       NUM_PREFERRED_BUFFERS);
                return false;
        }

        return true;
}

/* Could use devres here? */
static struct {
        struct adf_fbdev_device         device;
        struct adf_fbdev_interface      interface;
        struct adf_overlay_engine       engine;
} dev_data;

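/* Module init: claim registered_fb[0], sanity-check its format and panning
 * capabilities, then register an ADF device, interface and overlay engine on
 * top of it and report the inherited mode.
 */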
static int __init init_adf_fbdev(void)
{
        struct drm_mode_modeinfo *mode = &dev_data.interface.fb_mode;
        char format_str[ADF_FORMAT_STR_SIZE];
        struct fb_info *fb_info;
        int err = -ENODEV;

        fb_info = registered_fb[0];
        if (!fb_info) {
                pr_err("No Linux framebuffer (fbdev) device is registered!\n");
                pr_err("Check you have a framebuffer driver compiled into your kernel\n");
                pr_err("and that it is enabled on the cmdline.\n");
                goto err_out;
        }

        if (!lock_fb_info(fb_info))
                goto err_out;

        console_lock();

        /* Filter out broken FB devices */
        if (!fb_info->fix.smem_len || !fb_info->fix.line_length) {
                pr_err("The fbdev device detected had a zero smem_len or line_length,\n");
                pr_err("which suggests it is a broken driver.\n");
                goto err_unlock;
        }

        if (fb_info->fix.type != FB_TYPE_PACKED_PIXELS ||
            fb_info->fix.visual != FB_VISUAL_TRUECOLOR) {
                pr_err("The fbdev device detected is not truecolor with packed pixels.\n");
                goto err_unlock;
        }

        if (fb_info->var.bits_per_pixel == 32) {
                if (fb_info->var.red.length   == 8  &&
                    fb_info->var.green.length == 8  &&
                    fb_info->var.blue.length  == 8  &&
                    fb_info->var.red.offset   == 16 &&
                    fb_info->var.green.offset == 8  &&
                    fb_info->var.blue.offset  == 0) {
#if defined(ADF_FBDEV_FORCE_XRGB8888)
                        adf_fbdev_supported_format = DRM_FORMAT_BGRX8888;
#else
                        adf_fbdev_supported_format = DRM_FORMAT_BGRA8888;
#endif
                } else if (fb_info->var.red.length   == 8  &&
                           fb_info->var.green.length == 8  &&
                           fb_info->var.blue.length  == 8  &&
                           fb_info->var.red.offset   == 0  &&
                           fb_info->var.green.offset == 8  &&
                           fb_info->var.blue.offset  == 16) {
                        adf_fbdev_supported_format = DRM_FORMAT_RGBA8888;
                } else {
                        pr_err("The fbdev device detected uses an unrecognized 32bit pixel format (%u/%u/%u, %u/%u/%u)\n",
                               fb_info->var.red.length,
                               fb_info->var.green.length,
                               fb_info->var.blue.length,
                               fb_info->var.red.offset,
                               fb_info->var.green.offset,
                               fb_info->var.blue.offset);
                        goto err_unlock;
                }
        } else if (fb_info->var.bits_per_pixel == 16) {
                if (fb_info->var.red.length   != 5  ||
                    fb_info->var.green.length != 6  ||
                    fb_info->var.blue.length  != 5  ||
                    fb_info->var.red.offset   != 11 ||
                    fb_info->var.green.offset != 5  ||
                    fb_info->var.blue.offset  != 0) {
                        pr_err("The fbdev device detected uses an unrecognized 16bit pixel format (%u/%u/%u, %u/%u/%u)\n",
                               fb_info->var.red.length,
                               fb_info->var.green.length,
                               fb_info->var.blue.length,
                               fb_info->var.red.offset,
                               fb_info->var.green.offset,
                               fb_info->var.blue.offset);
                        goto err_unlock;
                }
                adf_fbdev_supported_format = DRM_FORMAT_BGR565;
        } else {
                pr_err("The fbdev device detected uses an unsupported bpp (%u).\n",
                       fb_info->var.bits_per_pixel);
                goto err_unlock;
        }

#if defined(CONFIG_ARCH_MT8173)
        /* Workaround for broken framebuffer driver. The wrong pixel format
         * is reported to this module. It is always really RGBA8888.
         */
        adf_fbdev_supported_format = DRM_FORMAT_RGBA8888;
#endif

        if (!try_module_get(fb_info->fbops->owner)) {
                pr_err("try_module_get() failed");
                goto err_unlock;
        }

        if (fb_info->fbops->fb_open &&
            fb_info->fbops->fb_open(fb_info, 0) != 0) {
                pr_err("fb_open() failed");
                goto err_module_put;
        }

        if (!adf_fbdev_flip_possible(fb_info)) {
                pr_err("Flipping must be supported for ADF. Aborting.\n");
                goto err_fb_release;
        }

        err = adf_device_init(&dev_data.device.base, fb_info->dev,
                              &adf_fbdev_device_ops, "fbdev");
        if (err) {
                pr_err("adf_device_init failed (%d)", err);
                goto err_fb_release;
        }

        dev_data.device.fb_info = fb_info;

        err = adf_interface_init(&dev_data.interface.base,
                                 &dev_data.device.base,
                                 ADF_INTF_DVI, 0, ADF_INTF_FLAG_PRIMARY,
                                 &adf_fbdev_interface_ops, "fbdev_interface");
        if (err) {
                pr_err("adf_interface_init failed (%d)", err);
                goto err_device_destroy;
        }

        spin_lock_init(&dev_data.interface.alloc_lock);
        dev_data.interface.fb_info = fb_info;

        /* If the fbdev mode looks viable, try to inherit from it */
        if (fb_info->mode)
                adf_modeinfo_from_fb_videomode(fb_info->mode, mode);

        /* Framebuffer drivers aren't always very good at filling out their
         * mode information, so fake up anything that's missing so we don't
         * need to accommodate it in userspace.
         */

        if (!mode->hdisplay)
                mode->hdisplay = fb_info->var.xres;
        if (!mode->vdisplay)
                mode->vdisplay = fb_info->var.yres;
        if (!mode->vrefresh)
                mode->vrefresh = FALLBACK_REFRESH_RATE;

        if (fb_info->var.width > 0 && fb_info->var.width < 1000) {
                dev_data.interface.width_mm = fb_info->var.width;
        } else {
                dev_data.interface.width_mm = (fb_info->var.xres * 25400) /
                                              (FALLBACK_DPI * 1000);
        }

        if (fb_info->var.height > 0 && fb_info->var.height < 1000) {
                dev_data.interface.height_mm = fb_info->var.height;
        } else {
                dev_data.interface.height_mm = (fb_info->var.yres * 25400) /
                                               (FALLBACK_DPI * 1000);
        }

        err = adf_hotplug_notify_connected(&dev_data.interface.base, mode, 1);
        if (err) {
                pr_err("adf_hotplug_notify_connected failed (%d)", err);
                goto err_interface_destroy;
        }

        /* This doesn't really set the mode, it just updates current_mode */
        err = adf_interface_set_mode(&dev_data.interface.base, mode);
        if (err) {
                pr_err("adf_interface_set_mode failed (%d)", err);
                goto err_interface_destroy;
        }

        err = adf_overlay_engine_init(&dev_data.engine, &dev_data.device.base,
                                      &adf_fbdev_overlay_engine_ops,
                                      "fbdev_overlay_engine");
        if (err) {
                pr_err("adf_overlay_engine_init failed (%d)", err);
                goto err_interface_destroy;
        }

        err = adf_attachment_allow(&dev_data.device.base,
                                   &dev_data.engine,
                                   &dev_data.interface.base);

        if (err) {
                pr_err("adf_attachment_allow failed (%d)", err);
                goto err_overlay_engine_destroy;
        }

        adf_format_str(adf_fbdev_supported_format, format_str);
        pr_info("Found usable fbdev device (%s):\n"
                "range (physical) = 0x%lx-0x%lx\n"
                "range (virtual)  = %p-%p\n"
                "size (bytes)     = 0x%x\n"
                "xres x yres      = %ux%u\n"
                "xres x yres (v)  = %ux%u\n"
                "physical (mm)    = %ux%u\n"
                "refresh (Hz)     = %u\n"
                "drm fourcc       = %s (0x%x)\n",
                fb_info->fix.id,
                fb_info->fix.smem_start,
                fb_info->fix.smem_start + fb_info->fix.smem_len,
                fb_info->screen_base,
                fb_info->screen_base + fb_info->screen_size,
                fb_info->fix.smem_len,
                mode->hdisplay, mode->vdisplay,
                fb_info->var.xres_virtual, fb_info->var.yres_virtual,
                dev_data.interface.width_mm, dev_data.interface.height_mm,
                mode->vrefresh,
                format_str, adf_fbdev_supported_format);
        err = 0;
err_unlock:
        console_unlock();
        unlock_fb_info(fb_info);
err_out:
        return err;
err_overlay_engine_destroy:
        adf_overlay_engine_destroy(&dev_data.engine);
err_interface_destroy:
        adf_interface_destroy(&dev_data.interface.base);
err_device_destroy:
        adf_device_destroy(&dev_data.device.base);
err_fb_release:
        if (fb_info->fbops->fb_release)
                fb_info->fbops->fb_release(fb_info, 0);
err_module_put:
        module_put(fb_info->fbops->owner);
        goto err_unlock;
}

static void __exit exit_adf_fbdev(void)
{
        struct fb_info *fb_info = dev_data.device.fb_info;

        if (!lock_fb_info(fb_info)) {
                pr_err("Failed to lock fb_info.\n");
                return;
        }

        console_lock();

        adf_overlay_engine_destroy(&dev_data.engine);
        adf_interface_destroy(&dev_data.interface.base);
        adf_device_destroy(&dev_data.device.base);

        if (fb_info->fbops->fb_release)
                fb_info->fbops->fb_release(fb_info, 0);

        module_put(fb_info->fbops->owner);

        console_unlock();
        unlock_fb_info(fb_info);
}

module_init(init_adf_fbdev);
module_exit(exit_adf_fbdev);