[firefly-linux-kernel-4.4.55.git] drivers/gpu/drm/exynos/exynos_drm_gem.c
index b130e6d1a52922515cb6307fb64579a4050e4bd1..d48183e7e056d56a8932ae06baf1bc172ce02893 100644 (file)
@@ -95,15 +95,26 @@ static int exynos_drm_gem_map_buf(struct drm_gem_object *obj,
 {
        struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
        struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
+       struct scatterlist *sgl;
        unsigned long pfn;
+       int i;
 
-       if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
-               if (!buf->pages)
-                       return -EINTR;
+       if (!buf->sgt)
+               return -EINTR;
 
-               pfn = page_to_pfn(buf->pages[page_offset++]);
-       } else
-               pfn = (buf->dma_addr >> PAGE_SHIFT) + page_offset;
+       if (page_offset >= (buf->size >> PAGE_SHIFT)) {
+               DRM_ERROR("invalid page offset\n");
+               return -EINVAL;
+       }
+
+       sgl = buf->sgt->sgl;
+       for_each_sg(buf->sgt->sgl, sgl, buf->sgt->nents, i) {
+               if (page_offset < (sgl->length >> PAGE_SHIFT))
+                       break;
+               page_offset -= (sgl->length >> PAGE_SHIFT);
+       }
+
+       pfn = __phys_to_pfn(sg_phys(sgl)) + page_offset;
 
        return vm_insert_mixed(vma, f_vaddr, pfn);
 }
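
The sgl walk in exynos_drm_gem_map_buf() above is the usual way to turn a linear page offset into a pfn for a possibly non-contiguous buffer. As a minimal standalone sketch of the same technique (a hypothetical helper, not part of this patch):

	/* Resolve @page_offset within @sgt to a pfn; returns 0 if out of range. */
	static unsigned long sgt_offset_to_pfn(struct sg_table *sgt,
					       unsigned long page_offset)
	{
		struct scatterlist *sgl;
		int i;

		for_each_sg(sgt->sgl, sgl, sgt->nents, i) {
			unsigned long nr_pages = sgl->length >> PAGE_SHIFT;

			if (page_offset < nr_pages)
				return __phys_to_pfn(sg_phys(sgl)) + page_offset;
			page_offset -= nr_pages;
		}

		return 0;	/* offset is past the end of the table */
	}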
@@ -327,17 +338,53 @@ int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
                        &args->offset);
 }
 
+static struct drm_file *exynos_drm_find_drm_file(struct drm_device *drm_dev,
+                                                       struct file *filp)
+{
+       struct drm_file *file_priv;
+
+       mutex_lock(&drm_dev->struct_mutex);
+
+       /* find the current process's drm_file in the filelist. */
+       list_for_each_entry(file_priv, &drm_dev->filelist, lhead) {
+               if (file_priv->filp == filp) {
+                       mutex_unlock(&drm_dev->struct_mutex);
+                       return file_priv;
+               }
+       }
+
+       mutex_unlock(&drm_dev->struct_mutex);
+       WARN_ON(1);
+
+       return ERR_PTR(-EFAULT);
+}
+
 static int exynos_drm_gem_mmap_buffer(struct file *filp,
                                      struct vm_area_struct *vma)
 {
        struct drm_gem_object *obj = filp->private_data;
        struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
+       struct drm_device *drm_dev = obj->dev;
        struct exynos_drm_gem_buf *buffer;
+       struct drm_file *file_priv;
        unsigned long vm_size;
+       int ret;
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
        vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+       vma->vm_private_data = obj;
+       vma->vm_ops = drm_dev->driver->gem_vm_ops;
+
+       /* restore f_op to the driver's original fops. */
+       filp->f_op = fops_get(drm_dev->driver->fops);
+
+       file_priv = exynos_drm_find_drm_file(drm_dev, filp);
+       if (IS_ERR(file_priv))
+               return PTR_ERR(file_priv);
+
+       /* restore private_data to the drm_file. */
+       filp->private_data = file_priv;
 
        update_vm_cache_attr(exynos_gem_obj, vma);
 
@@ -353,9 +400,25 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
        if (vm_size > buffer->size)
                return -EINVAL;
 
-       return dma_mmap_attrs(obj->dev->dev, vma, buffer->kvaddr,
+       ret = dma_mmap_attrs(drm_dev->dev, vma, buffer->pages,
                                buffer->dma_addr, buffer->size,
                                &buffer->dma_attrs);
+       if (ret < 0) {
+               DRM_ERROR("failed to mmap.\n");
+               return ret;
+       }
+
+       /*
+        * Take a reference to this mapping of the object; the reference
+        * is dropped by the corresponding vm_close call.
+        */
+       drm_gem_object_reference(obj);
+
+       mutex_lock(&drm_dev->struct_mutex);
+       drm_vm_open_locked(drm_dev, vma);
+       mutex_unlock(&drm_dev->struct_mutex);
+
+       return 0;
 }
 
 static const struct file_operations exynos_drm_gem_fops = {
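
The drm_gem_object_reference() taken in exynos_drm_gem_mmap_buffer() above is balanced by the GEM vm_close handler once the mapping goes away. For context, drm_gem_vm_close() in kernels of this era looks roughly like the following (paraphrased from drm_gem.c):

	void drm_gem_vm_close(struct vm_area_struct *vma)
	{
		struct drm_gem_object *obj = vma->vm_private_data;
		struct drm_device *dev = obj->dev;

		mutex_lock(&dev->struct_mutex);
		drm_vm_close_locked(dev, vma);		/* undoes drm_vm_open_locked() */
		drm_gem_object_unreference(obj);	/* drops the mmap reference */
		mutex_unlock(&dev->struct_mutex);
	}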
@@ -382,16 +445,29 @@ int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
                return -EINVAL;
        }
 
-       obj->filp->f_op = &exynos_drm_gem_fops;
-       obj->filp->private_data = obj;
+       /*
+        * Set the specific mmapper's fops. It is used to call the specific
+        * mmapper temporarily and will be restored to dev->driver->fops by
+        * exynos_drm_gem_mmap_buffer.
+        */
+       file_priv->filp->f_op = &exynos_drm_gem_fops;
+
+       /*
+        * Set the gem object to private_data so that the specific mmapper
+        * can get it; private_data will be restored to the drm_file by
+        * exynos_drm_gem_mmap_buffer.
+        */
+       file_priv->filp->private_data = obj;
 
-       addr = vm_mmap(obj->filp, 0, args->size,
+       addr = vm_mmap(file_priv->filp, 0, args->size,
                        PROT_READ | PROT_WRITE, MAP_SHARED, 0);
 
        drm_gem_object_unreference_unlocked(obj);
 
-       if (IS_ERR((void *)addr))
+       if (IS_ERR((void *)addr)) {
+               file_priv->filp->private_data = file_priv;
                return PTR_ERR((void *)addr);
+       }
 
        args->mapped = addr;
 
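For illustration, a hypothetical userspace caller of this ioctl; the struct layout follows this era's uapi exynos_drm.h and should be treated as an assumption:

	#include <stdint.h>
	#include <xf86drm.h>
	#include "exynos_drm.h"	/* drm_exynos_gem_mmap, DRM_IOCTL_EXYNOS_GEM_MMAP */

	/* map a previously created GEM buffer into the calling process */
	static void *exynos_bo_map(int fd, uint32_t handle, uint64_t size)
	{
		struct drm_exynos_gem_mmap req = {
			.handle = handle,	/* field names as in this era's uapi */
			.size = size,
		};

		if (drmIoctl(fd, DRM_IOCTL_EXYNOS_GEM_MMAP, &req) < 0)
			return NULL;

		return (void *)(unsigned long)req.mapped;
	}
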
@@ -426,6 +502,129 @@ int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
        return 0;
 }
 
+struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma)
+{
+       struct vm_area_struct *vma_copy;
+
+       vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL);
+       if (!vma_copy)
+               return NULL;
+
+       if (vma->vm_ops && vma->vm_ops->open)
+               vma->vm_ops->open(vma);
+
+       if (vma->vm_file)
+               get_file(vma->vm_file);
+
+       memcpy(vma_copy, vma, sizeof(*vma));
+
+       vma_copy->vm_mm = NULL;
+       vma_copy->vm_next = NULL;
+       vma_copy->vm_prev = NULL;
+
+       return vma_copy;
+}
+
+void exynos_gem_put_vma(struct vm_area_struct *vma)
+{
+       if (!vma)
+               return;
+
+       if (vma->vm_ops && vma->vm_ops->close)
+               vma->vm_ops->close(vma);
+
+       if (vma->vm_file)
+               fput(vma->vm_file);
+
+       kfree(vma);
+}
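
exynos_gem_get_vma() and exynos_gem_put_vma() let the driver keep a detached copy of a userptr VMA alive beyond the ioctl that found it. A hypothetical usage pattern, with error handling trimmed:

	struct vm_area_struct *vma, *vma_copy;

	down_read(&current->mm->mmap_sem);

	vma = find_vma(current->mm, userptr);
	if (!vma)
		goto err_unlock;	/* nothing mapped at userptr */

	/* detached copy; pins vm_file and calls vm_ops->open */
	vma_copy = exynos_gem_get_vma(vma);

	up_read(&current->mm->mmap_sem);

	/* ... access the user pages ... */

	exynos_gem_put_vma(vma_copy);	/* releases the references, frees the copy */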
+
+int exynos_gem_get_pages_from_userptr(unsigned long start,
+                                               unsigned int npages,
+                                               struct page **pages,
+                                               struct vm_area_struct *vma)
+{
+       int get_npages;
+
+       /* the memory region was mmapped with VM_PFNMAP. */
+       if (vma_is_io(vma)) {
+               unsigned int i;
+
+               for (i = 0; i < npages; ++i, start += PAGE_SIZE) {
+                       unsigned long pfn;
+                       int ret = follow_pfn(vma, start, &pfn);
+                       if (ret)
+                               return ret;
+
+                       pages[i] = pfn_to_page(pfn);
+               }
+
+               if (i != npages) {
+                       DRM_ERROR("failed to get user_pages.\n");
+                       return -EINVAL;
+               }
+
+               return 0;
+       }
+
+       get_npages = get_user_pages(current, current->mm, start,
+                                       npages, 1, 1, pages, NULL);
+       get_npages = max(get_npages, 0);
+       if (get_npages != npages) {
+               DRM_ERROR("failed to get user_pages.\n");
+               while (get_npages)
+                       put_page(pages[--get_npages]);
+               return -EFAULT;
+       }
+
+       return 0;
+}
+
+void exynos_gem_put_pages_to_userptr(struct page **pages,
+                                       unsigned int npages,
+                                       struct vm_area_struct *vma)
+{
+       if (!vma_is_io(vma)) {
+               unsigned int i;
+
+               for (i = 0; i < npages; i++) {
+                       set_page_dirty_lock(pages[i]);
+
+                       /*
+                        * undo the reference we took when populating
+                        * the table.
+                        */
+                       put_page(pages[i]);
+               }
+       }
+}
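
A hypothetical caller of the two userptr helpers above, pinning npages of user memory starting at userptr (locking and most error paths elided):

	struct page **pages;
	int ret;

	pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	ret = exynos_gem_get_pages_from_userptr(userptr, npages, pages, vma);
	if (ret < 0) {
		kfree(pages);
		return ret;
	}

	/* ... build an sg_table from pages and hand it to the device ... */

	exynos_gem_put_pages_to_userptr(pages, npages, vma);
	kfree(pages);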
+
+int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
+                               struct sg_table *sgt,
+                               enum dma_data_direction dir)
+{
+       int nents;
+
+       mutex_lock(&drm_dev->struct_mutex);
+
+       nents = dma_map_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
+       if (!nents) {
+               DRM_ERROR("failed to map sgl with dma.\n");
+               mutex_unlock(&drm_dev->struct_mutex);
+               return nents;
+       }
+
+       mutex_unlock(&drm_dev->struct_mutex);
+       return 0;
+}
+
+void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
+                               struct sg_table *sgt,
+                               enum dma_data_direction dir)
+{
+       dma_unmap_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
+}
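
And a hypothetical DMA round trip using the two sg_table helpers, with the direction chosen for a device-read transfer:

	ret = exynos_gem_map_sgt_with_dma(drm_dev, sgt, DMA_TO_DEVICE);
	if (ret < 0)
		return ret;

	/* ... start the device operation and wait for it to finish ... */

	exynos_gem_unmap_sgt_from_dma(drm_dev, sgt, DMA_TO_DEVICE);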
+
 int exynos_drm_gem_init_object(struct drm_gem_object *obj)
 {
        DRM_DEBUG_KMS("%s\n", __FILE__);