/**
 * \file drm_vm.c
 * Memory mapping for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drmP.h>
#include <linux/export.h>
#include <linux/seq_file.h>
#if defined(__ia64__)
#include <linux/efi.h>
#include <linux/slab.h>
#endif
#include <asm/pgtable.h>
#include "drm_legacy.h"

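/** Per-vma bookkeeping entry kept on drm_device::vmalist. */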
struct drm_vma_entry {
	struct list_head head;
	struct vm_area_struct *vma;
	pid_t pid;
};

static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);

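/**
 * Compute the page protection for an I/O map.
 *
 * \param map DRM map whose type and flags select the caching mode.
 * \param vma virtual memory area being set up.
 * \return the vma's base protection with architecture-specific caching
 * bits applied (e.g. uncached for registers, write-combined where the
 * platform supports it).
 */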
static pgprot_t drm_io_prot(struct drm_local_map *map,
			    struct vm_area_struct *vma)
{
	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__i386__) || defined(__x86_64__)
	if (map->type == _DRM_REGISTERS && !(map->flags & _DRM_WRITE_COMBINING))
		tmp = pgprot_noncached(tmp);
	else
		tmp = pgprot_writecombine(tmp);
#elif defined(__powerpc__)
	pgprot_val(tmp) |= _PAGE_NO_CACHE;
	if (map->type == _DRM_REGISTERS)
		pgprot_val(tmp) |= _PAGE_GUARDED;
#elif defined(__ia64__)
	if (efi_range_is_wc(vma->vm_start, vma->vm_end -
				    vma->vm_start))
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#elif defined(__sparc__) || defined(__arm__) || defined(__mips__)
	tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}

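/**
 * Compute the page protection for a DMA map.
 *
 * \param map_type DRM map type (currently unused).
 * \param vma virtual memory area being set up.
 * \return the vma's base protection, made uncached on cache-incoherent
 * PowerPC.
 */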
static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
{
	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
	pgprot_val(tmp) |= _PAGE_NO_CACHE;
#endif
	return tmp;
}

/**
 * \c fault method for AGP virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault information.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Find the right map and if it's AGP memory find the real physical page to
 * map, get the page, increment the use count and return it.
 */
#if __OS_HAS_AGP
static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_local_map *map = NULL;
	struct drm_map_list *r_list;
	struct drm_hash_item *hash;

	/*
	 * Find the right map
	 */
	if (!dev->agp)
		goto vm_fault_error;

	if (!dev->agp->cant_use_aperture)
		goto vm_fault_error;

	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
		goto vm_fault_error;

	r_list = drm_hash_entry(hash, struct drm_map_list, hash);
	map = r_list->map;

	if (map && map->type == _DRM_AGP) {
		/*
		 * Using vm_pgoff as a selector forces us to use this unusual
		 * addressing scheme.
		 */
		resource_size_t offset = (unsigned long)vmf->virtual_address -
			vma->vm_start;
		resource_size_t baddr = map->offset + offset;
		struct drm_agp_mem *agpmem;
		struct page *page;

#ifdef __alpha__
		/*
		 * Adjust to a bus-relative address
		 */
		baddr -= dev->hose->mem_space->start;
#endif

		/*
		 * It's AGP memory - find the real physical page to map
		 */
		list_for_each_entry(agpmem, &dev->agp->memory, head) {
			if (agpmem->bound <= baddr &&
			    agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
				break;
		}

		if (&agpmem->head == &dev->agp->memory)
			goto vm_fault_error;

		/*
		 * Get the page, inc the use count, and return it
		 */
		offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
		page = agpmem->memory->pages[offset];
		get_page(page);
		vmf->page = page;

		DRM_DEBUG
		    ("baddr = 0x%llx page = 0x%p, offset = 0x%llx, count=%d\n",
		     (unsigned long long)baddr,
		     agpmem->memory->pages[offset],
		     (unsigned long long)offset,
		     page_count(page));
		return 0;
	}
vm_fault_error:
	return VM_FAULT_SIGBUS;	/* Disallow mremap */
}
#else				/* __OS_HAS_AGP */
static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}
#endif				/* __OS_HAS_AGP */

/**
 * \c fault method for shared virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault information.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Get the mapping, find the real physical page to map, get the page, and
 * return it.
 */
static int drm_do_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_local_map *map = vma->vm_private_data;
	unsigned long offset;
	unsigned long i;
	struct page *page;

	if (!map)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = (unsigned long)vmf->virtual_address - vma->vm_start;
	i = (unsigned long)map->handle + offset;
	page = vmalloc_to_page((void *)i);
	if (!page)
		return VM_FAULT_SIGBUS;
	get_page(page);
	vmf->page = page;

	DRM_DEBUG("shm_fault 0x%lx\n", offset);
	return 0;
}

/**
 * \c close method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Deletes map information if we are the last
 * person to close a mapping and it's not in the global maplist.
 */
static void drm_vm_shm_close(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_vma_entry *pt, *temp;
	struct drm_local_map *map;
	struct drm_map_list *r_list;
	int found_maps = 0;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);

	map = vma->vm_private_data;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
		if (pt->vma->vm_private_data == map)
			found_maps++;
		if (pt->vma == vma) {
			list_del(&pt->head);
			kfree(pt);
		}
	}

	/* We were the only map that was found */
	if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
		/* Check to see if we are in the maplist, if we are not, then
		 * we delete this mapping's information.
		 */
		found_maps = 0;
		list_for_each_entry(r_list, &dev->maplist, head) {
			if (r_list->map == map)
				found_maps++;
		}

		if (!found_maps) {
			drm_dma_handle_t dmah;

			switch (map->type) {
			case _DRM_REGISTERS:
			case _DRM_FRAME_BUFFER:
				arch_phys_wc_del(map->mtrr);
				iounmap(map->handle);
				break;
			case _DRM_SHM:
				vfree(map->handle);
				break;
			case _DRM_AGP:
			case _DRM_SCATTER_GATHER:
				break;
			case _DRM_CONSISTENT:
				dmah.vaddr = map->handle;
				dmah.busaddr = map->offset;
				dmah.size = map->size;
				__drm_legacy_pci_free(dev, &dmah);
				break;
			}
			kfree(map);
		}
	}
	mutex_unlock(&dev->struct_mutex);
}

/**
 * \c fault method for DMA virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault information.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
 */
static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_device_dma *dma = dev->dma;
	unsigned long offset;
	unsigned long page_nr;
	struct page *page;

	if (!dma)
		return VM_FAULT_SIGBUS;	/* Error */
	if (!dma->pagelist)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = (unsigned long)vmf->virtual_address - vma->vm_start;	/* vm_[pg]off[set] should be 0 */
	page_nr = offset >> PAGE_SHIFT;	/* page_nr could just be vmf->pgoff */
	page = virt_to_page((void *)dma->pagelist[page_nr]);

	get_page(page);
	vmf->page = page;

	DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
	return 0;
}

/**
 * \c fault method for scatter-gather virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault information.
 * \return zero on success, or VM_FAULT_SIGBUS on failure.
 *
 * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
 */
static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_local_map *map = vma->vm_private_data;
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_sg_mem *entry = dev->sg;
	unsigned long offset;
	unsigned long map_offset;
	unsigned long page_offset;
	struct page *page;

	if (!entry)
		return VM_FAULT_SIGBUS;	/* Error */
	if (!entry->pagelist)
		return VM_FAULT_SIGBUS;	/* Nothing allocated */

	offset = (unsigned long)vmf->virtual_address - vma->vm_start;
	map_offset = map->offset - (unsigned long)dev->sg->virtual;
	page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
	page = entry->pagelist[page_offset];
	get_page(page);
	vmf->page = page;

	return 0;
}

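/*
 * Thin wrappers that route the generic fault entry points in the
 * vm_operations_struct tables below to the type-specific handlers above.
 */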
static int drm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return drm_do_vm_fault(vma, vmf);
}

static int drm_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return drm_do_vm_shm_fault(vma, vmf);
}

static int drm_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return drm_do_vm_dma_fault(vma, vmf);
}

static int drm_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return drm_do_vm_sg_fault(vma, vmf);
}

/** AGP virtual memory operations */
static const struct vm_operations_struct drm_vm_ops = {
	.fault = drm_vm_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Shared virtual memory operations */
static const struct vm_operations_struct drm_vm_shm_ops = {
	.fault = drm_vm_shm_fault,
	.open = drm_vm_open,
	.close = drm_vm_shm_close,
};

/** DMA virtual memory operations */
static const struct vm_operations_struct drm_vm_dma_ops = {
	.fault = drm_vm_dma_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Scatter-gather virtual memory operations */
static const struct vm_operations_struct drm_vm_sg_ops = {
	.fault = drm_vm_sg_fault,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/**
 * \c open method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Create a new drm_vma_entry structure as the \p vma private data entry and
 * add it to drm_device::vmalist.
 */
void drm_vm_open_locked(struct drm_device *dev,
		struct vm_area_struct *vma)
{
	struct drm_vma_entry *vma_entry;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);

	vma_entry = kmalloc(sizeof(*vma_entry), GFP_KERNEL);
	if (vma_entry) {
		vma_entry->vma = vma;
		vma_entry->pid = current->pid;
		list_add(&vma_entry->head, &dev->vmalist);
	}
}
EXPORT_SYMBOL_GPL(drm_vm_open_locked);

static void drm_vm_open(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_open_locked(dev, vma);
	mutex_unlock(&dev->struct_mutex);
}

void drm_vm_close_locked(struct drm_device *dev,
		struct vm_area_struct *vma)
{
	struct drm_vma_entry *pt, *temp;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);

	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
		if (pt->vma == vma) {
			list_del(&pt->head);
			kfree(pt);
			break;
		}
	}
}

/**
 * \c close method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
 * free it.
 */
static void drm_vm_close(struct vm_area_struct *vma)
{
	struct drm_file *priv = vma->vm_file->private_data;
	struct drm_device *dev = priv->minor->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_close_locked(dev, vma);
	mutex_unlock(&dev->struct_mutex);
}

/**
 * mmap DMA memory.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * Sets the virtual memory area operations structure to drm_vm_dma_ops and
 * registers the mapping with drm_vm_open_locked().
 */
static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev;
	struct drm_device_dma *dma;
	unsigned long length = vma->vm_end - vma->vm_start;

	dev = priv->minor->dev;
	dma = dev->dma;
	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

	/* Length must match exact page count */
	if (!dma || (length >> PAGE_SHIFT) != dma->page_count)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN) &&
	    (dma->flags & _DRM_DMA_USE_PCI_RO)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly.  With more thought
		   we could move this up higher and use
		   `protection_map' instead.  */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	vma->vm_ops = &drm_vm_dma_ops;

	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

	drm_vm_open_locked(dev, vma);
	return 0;
}

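/*
 * Base offset that must be added to a map's physical offset before it is
 * handed to io_remap_pfn_range(); only Alpha needs a non-zero adjustment,
 * via the hose's dense memory base.
 */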
static resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
{
#ifdef __alpha__
	return dev->hose->dense_mem_base;
#else
	return 0;
#endif
}

/**
 * mmap DRM memory.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * If the virtual memory area has no offset associated with it then it's a DMA
 * area, so drm_mmap_dma() is called. Otherwise the map is looked up in
 * drm_device::maplist, the restricted flag is checked, the virtual memory
 * operations are set according to the mapping type, and the pages are
 * remapped. Finally the mapping is registered with drm_vm_open_locked().
 */
int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_local_map *map = NULL;
	resource_size_t offset = 0;
	struct drm_hash_item *hash;

	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

	if (!priv->authenticated)
		return -EACCES;

	/* We check for "dma". On Apple's UniNorth, it's valid to have
	 * the AGP mapped at physical address 0
	 * --BenH.
	 */
	if (!vma->vm_pgoff
#if __OS_HAS_AGP
	    && (!dev->agp
		|| dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
#endif
	    )
		return drm_mmap_dma(filp, vma);

	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
		DRM_ERROR("Could not find map\n");
		return -EINVAL;
	}

	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
	if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
		return -EPERM;

	/* Check for valid size. */
	if (map->size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly.  With more thought
		   we could move this up higher and use
		   `protection_map' instead.  */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	switch (map->type) {
#if !defined(__arm__)
	case _DRM_AGP:
		if (dev->agp && dev->agp->cant_use_aperture) {
			/*
			 * On some platforms the CPU cannot access the AGP
			 * aperture's bus addresses directly, so for memory
			 * of type _DRM_AGP we sort out the real physical
			 * pages and mappings in fault().
			 */
#if defined(__powerpc__)
			pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
#endif
			vma->vm_ops = &drm_vm_ops;
			break;
		}
		/* fall through to _DRM_FRAME_BUFFER... */
#endif
	case _DRM_FRAME_BUFFER:
	case _DRM_REGISTERS:
		offset = drm_core_get_reg_ofs(dev);
		vma->vm_page_prot = drm_io_prot(map, vma);
		if (io_remap_pfn_range(vma, vma->vm_start,
				       (map->offset + offset) >> PAGE_SHIFT,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot))
			return -EAGAIN;
		DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
			  " offset = 0x%llx\n",
			  map->type,
			  vma->vm_start, vma->vm_end, (unsigned long long)(map->offset + offset));

		vma->vm_ops = &drm_vm_ops;
		break;
	case _DRM_CONSISTENT:
		/* Consistent memory is really like shared memory. But
		 * it's allocated in a different way, so avoid fault.
		 * Set the protection before remapping so the pages are
		 * actually mapped with it. */
		vma->vm_page_prot = drm_dma_prot(map->type, vma);
		if (remap_pfn_range(vma, vma->vm_start,
		    page_to_pfn(virt_to_page(map->handle)),
		    vma->vm_end - vma->vm_start, vma->vm_page_prot))
			return -EAGAIN;
	/* fall through to _DRM_SHM */
	case _DRM_SHM:
		vma->vm_ops = &drm_vm_shm_ops;
		vma->vm_private_data = (void *)map;
		break;
	case _DRM_SCATTER_GATHER:
		vma->vm_ops = &drm_vm_sg_ops;
		vma->vm_private_data = (void *)map;
		vma->vm_page_prot = drm_dma_prot(map->type, vma);
		break;
	default:
		return -EINVAL;	/* This should never happen. */
	}
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

	drm_vm_open_locked(dev, vma);
	return 0;
}

int drm_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	int ret;

	if (drm_device_is_unplugged(dev))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);
	ret = drm_mmap_locked(filp, vma);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_mmap);

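/*
 * Illustrative sketch only (not part of this file): a legacy userspace
 * client reaches drm_mmap() by mmap()ing the DRM device fd at the map's
 * offset token (the vma->vm_pgoff key looked up above), roughly:
 *
 *	void *ptr = mmap(NULL, map_size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, drm_fd, map_offset);
 *
 * where map_size and map_offset come from the legacy map setup; libdrm
 * wraps this in drmMap().
 */
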
void drm_legacy_vma_flush(struct drm_device *dev)
{
	struct drm_vma_entry *vma, *vma_temp;

	/* Clear vma list (only needed for legacy drivers) */
	list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
		list_del(&vma->head);
		kfree(vma);
	}
}

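/**
 * Dump the drm_device::vmalist for debugging.
 *
 * \param m seq_file to print to.
 * \param data unused.
 * \return always zero.
 *
 * Prints the number of tracked mappings, then one line per vma with its
 * owner pid, address range, flags and (on i386) page protection bits.
 */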
int drm_vma_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct drm_vma_entry *pt;
	struct vm_area_struct *vma;
	unsigned long vma_count = 0;
#if defined(__i386__)
	unsigned int pgprot;
#endif

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(pt, &dev->vmalist, head)
		vma_count++;

	seq_printf(m, "vma use count: %lu, high_memory = %pK, 0x%pK\n",
		   vma_count, high_memory,
		   (void *)(unsigned long)virt_to_phys(high_memory));

	list_for_each_entry(pt, &dev->vmalist, head) {
		vma = pt->vma;
		if (!vma)
			continue;
		seq_printf(m,
			   "\n%5d 0x%pK-0x%pK %c%c%c%c%c%c 0x%08lx000",
			   pt->pid,
			   (void *)vma->vm_start, (void *)vma->vm_end,
			   vma->vm_flags & VM_READ ? 'r' : '-',
			   vma->vm_flags & VM_WRITE ? 'w' : '-',
			   vma->vm_flags & VM_EXEC ? 'x' : '-',
			   vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
			   vma->vm_flags & VM_LOCKED ? 'l' : '-',
			   vma->vm_flags & VM_IO ? 'i' : '-',
			   vma->vm_pgoff);

#if defined(__i386__)
		pgprot = pgprot_val(vma->vm_page_prot);
		seq_printf(m, " %c%c%c%c%c%c%c%c%c",
			   pgprot & _PAGE_PRESENT ? 'p' : '-',
			   pgprot & _PAGE_RW ? 'w' : 'r',
			   pgprot & _PAGE_USER ? 'u' : 's',
			   pgprot & _PAGE_PWT ? 't' : 'b',
			   pgprot & _PAGE_PCD ? 'u' : 'c',
			   pgprot & _PAGE_ACCESSED ? 'a' : '-',
			   pgprot & _PAGE_DIRTY ? 'd' : '-',
			   pgprot & _PAGE_PSE ? 'm' : 'k',
			   pgprot & _PAGE_GLOBAL ? 'g' : 'l');
#endif
		seq_printf(m, "\n");
	}
	mutex_unlock(&dev->struct_mutex);
	return 0;
}