/*
 * drivers/staging/omapdrm/omap_gem.c
 *
 * Copyright (C) 2011 Texas Instruments
 * Author: Rob Clark <rob.clark@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/spinlock.h>
#include <linux/shmem_fs.h>

#include "omap_drv.h"
#include "omap_dmm_tiler.h"
/* remove these once drm core helpers are merged */
struct page **_drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
void _drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
		bool dirty, bool accessed);
int _drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size);
/*
 * GEM buffer object implementation.
 */

#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)

/* note: we use upper 8 bits of flags for driver-internal flags: */
#define OMAP_BO_DMA		0x01000000	/* actually is physically contiguous */
#define OMAP_BO_EXT_SYNC	0x02000000	/* externally allocated sync object */
#define OMAP_BO_EXT_MEM		0x04000000	/* externally allocated memory */
struct omap_gem_object {
	struct drm_gem_object base;

	uint32_t flags;

	/** width/height for tiled formats (rounded up to slot boundaries) */
	uint16_t width, height;

	/** roll applied when mapping to DMM */
	uint32_t roll;

	/**
	 * If buffer is allocated physically contiguous, the OMAP_BO_DMA flag
	 * is set and the paddr is valid.  Also if the buffer is remapped in
	 * TILER and paddr_cnt > 0, then paddr is valid.  But if you are using
	 * the physical address and OMAP_BO_DMA is not set, then you should
	 * be going thru omap_gem_{get,put}_paddr() to ensure the mapping is
	 * not removed from under your feet.
	 *
	 * Note that OMAP_BO_SCANOUT is a hint from userspace that a DMA
	 * capable buffer is requested, but doesn't mean that it is.  Use the
	 * OMAP_BO_DMA flag to determine if the buffer has a DMA capable
	 * physical address.
	 */
	dma_addr_t paddr;

	/** # of users of paddr */
	uint32_t paddr_cnt;

	/**
	 * tiler block used when buffer is remapped in DMM/TILER.
	 */
	struct tiler_block *block;

	/**
	 * Array of backing pages, if allocated.  Note that pages are never
	 * allocated for buffers originally allocated from contiguous memory.
	 */
	struct page **pages;

	/**
	 * Virtual address, if mapped.
	 */
	void *vaddr;

	/**
	 * sync-object allocated on demand (if needed)
	 *
	 * Per-buffer sync-object for tracking pending and completed hw/dma
	 * read and write operations.  The layout in memory is dictated by
	 * the SGX firmware, which uses this information to stall the command
	 * stream if a surface is not ready yet.
	 *
	 * Note that when buffer is used by SGX, the sync-object needs to be
	 * allocated from a special heap of sync-objects.  This way many sync
	 * objects can be packed in a page, and not waste GPU virtual address
	 * space.  Because of this we have to have an omap_gem_set_sync_object()
	 * API to allow replacement of the syncobj after it has (potentially)
	 * already been allocated.  A bit ugly but I haven't thought of a
	 * better alternative.
	 */
	struct {
		uint32_t write_pending;
		uint32_t write_complete;
		uint32_t read_pending;
		uint32_t read_complete;
	} *sync;
};
/* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
 * not necessarily pinned in TILER all the time, and (b) when they are
 * they are not necessarily page aligned, we reserve one or more small
 * regions in each of the 2d containers to use as a user-GART where we
 * can create a second page-aligned mapping of parts of the buffer
 * being accessed from userspace.
 *
 * Note that we could optimize slightly when we know that multiple
 * tiler containers are backed by the same PAT.. but I'll leave that
 * for later..
 */
#define NUM_USERGART_ENTRIES 2
struct usergart_entry {
	struct tiler_block *block;	/* the reserved tiler block */
	dma_addr_t paddr;
	struct drm_gem_object *obj;	/* the current pinned obj */
	pgoff_t obj_pgoff;		/* page offset of obj currently
					 * mapped in
					 */
};
static struct {
	struct usergart_entry entry[NUM_USERGART_ENTRIES];
	int height;		/* height in rows */
	int height_shift;	/* ilog2(height in rows) */
	int slot_shift;		/* ilog2(width per slot) */
	int stride_pfn;		/* stride in pages */
	int last;		/* index of last used entry */
} *usergart;
static void evict_entry(struct drm_gem_object *obj,
		enum tiler_fmt fmt, struct usergart_entry *entry)
{
	if (obj->dev->dev_mapping) {
		size_t size = PAGE_SIZE * usergart[fmt].height;
		loff_t off = omap_gem_mmap_offset(obj) +
				(entry->obj_pgoff << PAGE_SHIFT);
		unmap_mapping_range(obj->dev->dev_mapping, off, size, 1);
	}

	entry->obj = NULL;
}
/* Evict a buffer from usergart, if it is mapped there */
static void evict(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	if (omap_obj->flags & OMAP_BO_TILED) {
		enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
		int i;

		for (i = 0; i < NUM_USERGART_ENTRIES; i++) {
			struct usergart_entry *entry = &usergart[fmt].entry[i];
			if (entry->obj == obj)
				evict_entry(obj, fmt, entry);
		}
	}
}
/* GEM objects can either be allocated from contiguous memory (in which
 * case obj->filp==NULL), or w/ shmem backing (obj->filp!=NULL).  But non
 * contiguous buffers can be remapped in TILER/DMM if they need to be
 * contiguous... but we don't keep buffers pinned there all the time, to
 * reduce pressure on TILER/DMM space, even when we know at allocation
 * time that the buffer will need to be scanned out.
 */
static inline bool is_shmem(struct drm_gem_object *obj)
{
	return obj->filp != NULL;
}
static int get_pages(struct drm_gem_object *obj, struct page ***pages);

static DEFINE_SPINLOCK(sync_lock);
/** ensure backing pages are allocated */
static int omap_gem_attach_pages(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct page **pages;

	WARN_ON(omap_obj->pages);

	/* TODO: __GFP_DMA32 .. but somehow GFP_HIGHMEM is coming from the
	 * mapping_gfp_mask(mapping) which conflicts w/ GFP_DMA32.. probably
	 * we actually want CMA memory for it all anyways..
	 */
	pages = _drm_gem_get_pages(obj, GFP_KERNEL);
	if (IS_ERR(pages)) {
		dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
		return PTR_ERR(pages);
	}

	omap_obj->pages = pages;

	return 0;
}
/** release backing pages */
static void omap_gem_detach_pages(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	_drm_gem_put_pages(obj, omap_obj->pages, true, false);
	omap_obj->pages = NULL;
}
/** get mmap offset */
uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj)
{
	if (!obj->map_list.map) {
		/* Make it mmapable */
		size_t size = omap_gem_mmap_size(obj);
		int ret = _drm_gem_create_mmap_offset_size(obj, size);

		if (ret) {
			dev_err(obj->dev->dev, "could not allocate mmap offset\n");
			return 0;
		}
	}

	return (uint64_t)obj->map_list.hash.key << PAGE_SHIFT;
}
size_t omap_gem_mmap_size(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	size_t size = obj->size;

	if (omap_obj->flags & OMAP_BO_TILED) {
		/* for tiled buffers, the virtual size has stride rounded up
		 * to 4kb.. (to hide the fact that row n+1 might start 16kb or
		 * 32kb later!).  But we don't back the entire buffer with
		 * pages, only the valid picture part.. so need to adjust for
		 * this in the size used to mmap and generate mmap offset
		 */
		size = tiler_vsize(gem2fmt(omap_obj->flags),
				omap_obj->width, omap_obj->height);
	}

	return size;
}
/* Normal handling for the case of faulting in non-tiled buffers */
static int fault_1d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	unsigned long pfn;
	pgoff_t pgoff;

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	if (omap_obj->pages) {
		pfn = page_to_pfn(omap_obj->pages[pgoff]);
	} else {
		BUG_ON(!(omap_obj->flags & OMAP_BO_DMA));
		pfn = (omap_obj->paddr >> PAGE_SHIFT) + pgoff;
	}

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	return vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
}
/* Special handling for the case of faulting in 2d tiled buffers */
static int fault_2d(struct drm_gem_object *obj,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct usergart_entry *entry;
	enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
	struct page *pages[64];  /* XXX is this too much to have on stack? */
	unsigned long pfn;
	pgoff_t pgoff, base_pgoff;
	void __user *vaddr;
	int i, ret, slots;

	/* TODO: this fxn might need a bit of tweaking to deal w/ tiled buffers
	 * that are wider than 4kb
	 */

	/* We don't use vmf->pgoff since that has the fake offset: */
	pgoff = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;

	/* actual address we start mapping at is rounded down to previous slot
	 * boundary in the y direction:
	 */
	base_pgoff = round_down(pgoff, usergart[fmt].height);
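
	/*
	 * Worked example with hypothetical numbers: if the slot height for
	 * this format is 64 rows and the fault hits pgoff 70, base_pgoff
	 * rounds down to 64, so we back up 6 pages and map the whole
	 * 64-page strip starting at row 64 through the reserved usergart
	 * slot.
	 */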
	vaddr = vmf->virtual_address - ((pgoff - base_pgoff) << PAGE_SHIFT);
	entry = &usergart[fmt].entry[usergart[fmt].last];

	slots = omap_obj->width >> usergart[fmt].slot_shift;

	/* evict previous buffer using this usergart entry, if any: */
	if (entry->obj)
		evict_entry(entry->obj, fmt, entry);

	entry->obj = obj;
	entry->obj_pgoff = base_pgoff;

	/* now convert base_pgoff to phys offset from virt offset:
	 */
	base_pgoff = (base_pgoff >> usergart[fmt].height_shift) * slots;

	/* map in pages.  Note the height of the slot is also equal to the
	 * number of pages that need to be mapped in to fill 4kb wide CPU page.
	 * If the height is 64, then 64 pages fill a 4kb wide by 64 row region.
	 * Beyond the valid pixel part of the buffer, we set pages[i] to NULL to
	 * get a dummy page mapped in.. if someone reads/writes it they will get
	 * random/undefined content, but at least it won't be corrupting
	 * whatever other random page used to be mapped in, or other undefined
	 * behavior.
	 */
	memcpy(pages, &omap_obj->pages[base_pgoff],
			sizeof(struct page *) * slots);
	memset(pages + slots, 0,
			sizeof(struct page *) * (usergart[fmt].height - slots));

	ret = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
	if (ret) {
		dev_err(obj->dev->dev, "failed to pin: %d\n", ret);
		return ret;
	}

	i = usergart[fmt].height;
	pfn = entry->paddr >> PAGE_SHIFT;

	VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
			pfn, pfn << PAGE_SHIFT);

	while (i--) {
		vm_insert_mixed(vma, (unsigned long)vaddr, pfn);
		pfn += usergart[fmt].stride_pfn;
		vaddr += PAGE_SIZE;
	}

	/* simple round-robin: */
	usergart[fmt].last = (usergart[fmt].last + 1) % NUM_USERGART_ENTRIES;

	return 0;
}
/**
 * omap_gem_fault - pagefault handler for GEM objects
 * @vma: the VMA of the GEM object
 * @vmf: fault detail
 *
 * Invoked when a fault occurs on an mmap of a GEM managed area.  GEM
 * does most of the work for us including the actual map/unmap calls
 * but we need to do the actual page work.
 *
 * The VMA was set up by GEM.  In doing so it also ensured that the
 * vma->vm_private_data points to the GEM object that is backing this
 * mapping.
 */
int omap_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	struct drm_device *dev = obj->dev;
	struct page **pages;
	int ret;

	/* Make sure we don't parallel update on a fault, nor move or remove
	 * something from beneath our feet
	 */
	mutex_lock(&dev->struct_mutex);

	/* if a shmem backed object, make sure we have pages attached now */
	ret = get_pages(obj, &pages);
	if (ret)
		goto fail;

	/* where should we do corresponding put_pages().. we are mapping
	 * the original page, rather than thru a GART, so we can't rely
	 * on eviction to trigger this.  But munmap() or all mappings should
	 * probably trigger put_pages()?
	 */

	if (omap_obj->flags & OMAP_BO_TILED)
		ret = fault_2d(obj, vma, vmf);
	else
		ret = fault_1d(obj, vma, vmf);

fail:
	mutex_unlock(&dev->struct_mutex);

	switch (ret) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}
/** We override mainly to fix up some of the vm mapping flags.. */
int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct omap_gem_object *omap_obj;
	int ret;

	ret = drm_gem_mmap(filp, vma);
	if (ret) {
		DBG("mmap failed: %d", ret);
		return ret;
	}

	/* after drm_gem_mmap(), it is safe to access the obj */
	omap_obj = to_omap_bo(vma->vm_private_data);

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	if (omap_obj->flags & OMAP_BO_WC) {
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	} else if (omap_obj->flags & OMAP_BO_UNCACHED) {
		vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
	} else {
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}

	return ret;
}
/**
 * omap_gem_dumb_create - create a dumb buffer
 * @file: our client file
 * @dev: our DRM device
 * @args: the requested arguments copied from userspace
 *
 * Allocate a buffer suitable for use for a frame buffer of the
 * form described by user space.  Give userspace a handle by which
 * it can access the buffer.
 */
int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
		struct drm_mode_create_dumb *args)
{
	union omap_gem_size gsize;

	/* in case someone tries to feed us a completely bogus stride: */
	args->pitch = align_pitch(args->pitch, args->width, args->bpp);
	args->size = PAGE_ALIGN(args->pitch * args->height);

	gsize = (union omap_gem_size){
		.bytes = args->size,
	};

	return omap_gem_new_handle(dev, file, gsize,
			OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
}
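
/*
 * Worked example (hypothetical numbers): for a 1280x720 dumb buffer at
 * bpp=32, the pitch works out to at least 1280 * 4 = 5120 bytes, giving
 * args->size = PAGE_ALIGN(5120 * 720) = 3686400 bytes, i.e. exactly 900
 * pages with 4kb pages.
 */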
/**
 * omap_gem_dumb_destroy - destroy a dumb buffer
 * @file: our client file
 * @dev: our DRM device
 * @handle: the object handle
 *
 * Destroy a handle that was created via omap_gem_dumb_create.
 */
int omap_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
		uint32_t handle)
{
	/* No special work needed, drop the reference and see what falls out */
	return drm_gem_handle_delete(file, handle);
}
/**
 * omap_gem_dumb_map - buffer mapping for dumb interface
 * @file: our drm client file
 * @dev: our DRM device
 * @handle: GEM handle to the object (from dumb_create)
 * @offset: returned mmap offset
 *
 * Do the necessary setup to allow the mapping of the frame buffer
 * into user memory.  We don't have to do much here at the moment.
 */
int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
		uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(dev, file, handle);
	if (obj == NULL) {
		ret = -ENOENT;
		goto fail;
	}

	*offset = omap_gem_mmap_offset(obj);

	drm_gem_object_unreference_unlocked(obj);

fail:
	return ret;
}
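
/*
 * Illustrative userspace flow (not part of this driver): the value
 * returned in 'offset' is a fake offset into the DRM mmap space, not a
 * physical offset.  A client would use it roughly like this, assuming
 * 'fd' is the opened DRM device, 'size' is the buffer size, and
 * 'handle' came from DRM_IOCTL_MODE_CREATE_DUMB:
 *
 *	struct drm_mode_map_dumb req = { .handle = handle };
 *	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &req);
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			fd, req.offset);
 */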
/* Set scrolling position.  This allows us to implement fast scrolling
 * for console.
 */
int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	uint32_t npages = obj->size >> PAGE_SHIFT;
	int ret = 0;

	if (roll > npages) {
		dev_err(obj->dev->dev, "invalid roll: %d\n", roll);
		return -EINVAL;
	}

	mutex_lock(&obj->dev->struct_mutex);

	omap_obj->roll = roll;

	/* if we aren't mapped yet, we don't need to do anything */
	if (omap_obj->block) {
		struct page **pages;
		ret = get_pages(obj, &pages);
		if (ret)
			goto fail;
		ret = tiler_pin(omap_obj->block, pages, npages, roll, true);
		if (ret)
			dev_err(obj->dev->dev, "could not repin: %d\n", ret);
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}
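
/*
 * Usage sketch (hypothetical caller): console scrolling can advance the
 * scanout position in whole pages without re-allocating or copying the
 * buffer, e.g. to jump the DMM mapping forward by 'new_scanout_page'
 * pages:
 *
 *	ret = omap_gem_roll(obj, new_scanout_page);
 */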
/* Get physical address for DMA.. if 'remap' is true, and the buffer is not
 * already contiguous, remap it to pin in physically contiguous memory.. (ie.
 * map in TILER)
 */
int omap_gem_get_paddr(struct drm_gem_object *obj,
		dma_addr_t *paddr, bool remap)
{
	struct omap_drm_private *priv = obj->dev->dev_private;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	mutex_lock(&obj->dev->struct_mutex);

	if (remap && is_shmem(obj) && priv->has_dmm) {
		if (omap_obj->paddr_cnt == 0) {
			struct page **pages;
			uint32_t npages = obj->size >> PAGE_SHIFT;
			enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
			struct tiler_block *block;

			BUG_ON(omap_obj->block);

			ret = get_pages(obj, &pages);
			if (ret)
				goto fail;

			if (omap_obj->flags & OMAP_BO_TILED) {
				block = tiler_reserve_2d(fmt,
						omap_obj->width,
						omap_obj->height, 0);
			} else {
				block = tiler_reserve_1d(obj->size);
			}

			if (IS_ERR(block)) {
				ret = PTR_ERR(block);
				dev_err(obj->dev->dev,
					"could not remap: %d (%d)\n", ret, fmt);
				goto fail;
			}

			/* TODO: enable async refill.. */
			ret = tiler_pin(block, pages, npages,
					omap_obj->roll, true);
			if (ret) {
				tiler_release(block);
				dev_err(obj->dev->dev,
						"could not pin: %d\n", ret);
				goto fail;
			}

			omap_obj->paddr = tiler_ssptr(block);
			omap_obj->block = block;

			DBG("got paddr: %08x", omap_obj->paddr);
		}

		omap_obj->paddr_cnt++;

		*paddr = omap_obj->paddr;
	} else if (omap_obj->flags & OMAP_BO_DMA) {
		*paddr = omap_obj->paddr;
	} else {
		ret = -EINVAL;
	}

fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}
/* Release physical address, when DMA is no longer being performed.. this
 * could potentially unpin and unmap buffers from TILER
 */
int omap_gem_put_paddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	mutex_lock(&obj->dev->struct_mutex);
	if (omap_obj->paddr_cnt > 0) {
		omap_obj->paddr_cnt--;
		if (omap_obj->paddr_cnt == 0) {
			ret = tiler_unpin(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not unpin pages: %d\n", ret);
				goto fail;
			}
			ret = tiler_release(omap_obj->block);
			if (ret) {
				dev_err(obj->dev->dev,
					"could not release unmap: %d\n", ret);
			}
			omap_obj->block = NULL;
		}
	}
fail:
	mutex_unlock(&obj->dev->struct_mutex);

	return ret;
}
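
/*
 * A minimal usage sketch (hypothetical caller, not part of the driver
 * proper): DMA users should bracket their access with get/put so the
 * TILER pin can't be torn down mid-transfer.  Passing remap=true asks
 * for the buffer to be pinned into TILER if it isn't already
 * contiguous.
 */
static int __maybe_unused example_dma_access(struct drm_gem_object *obj)
{
	dma_addr_t paddr;
	int ret;

	ret = omap_gem_get_paddr(obj, &paddr, true);
	if (ret)
		return ret;

	/* ... program the DMA engine with 'paddr' here ... */

	return omap_gem_put_paddr(obj);
}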
/* acquire pages when needed (for example, for DMA where a physically
 * contiguous buffer is not required)
 */
static int get_pages(struct drm_gem_object *obj, struct page ***pages)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	if (is_shmem(obj) && !omap_obj->pages) {
		ret = omap_gem_attach_pages(obj);
		if (ret) {
			dev_err(obj->dev->dev, "could not attach pages\n");
			return ret;
		}
	}

	/* TODO: even phys-contig.. we should have a list of pages? */
	*pages = omap_obj->pages;

	return 0;
}
int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages)
{
	int ret;
	mutex_lock(&obj->dev->struct_mutex);
	ret = get_pages(obj, pages);
	mutex_unlock(&obj->dev->struct_mutex);
	return ret;
}
/* release pages when DMA no longer being performed */
int omap_gem_put_pages(struct drm_gem_object *obj)
{
	/* do something here if we dynamically attach/detach pages.. at
	 * least they would no longer need to be pinned if everyone has
	 * released the pages..
	 */
	return 0;
}
/* Get kernel virtual address for CPU access.. this more or less only
 * exists for omap_fbdev.  This should be called with struct_mutex
 * held.
 */
void *omap_gem_vaddr(struct drm_gem_object *obj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
	if (!omap_obj->vaddr) {
		struct page **pages;
		int ret = get_pages(obj, &pages);
		if (ret)
			return ERR_PTR(ret);
		omap_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, pgprot_writecombine(PAGE_KERNEL));
	}
	return omap_obj->vaddr;
}
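
/*
 * Usage sketch (mirrors what omap_fbdev does): the caller must hold
 * struct_mutex across the call, and the returned mapping then stays
 * valid for the lifetime of the object:
 *
 *	mutex_lock(&dev->struct_mutex);
 *	vaddr = omap_gem_vaddr(obj);
 *	mutex_unlock(&dev->struct_mutex);
 */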
/* Buffer Synchronization:
 */

struct omap_gem_sync_waiter {
	struct list_head list;
	struct omap_gem_object *omap_obj;
	enum omap_gem_op op;
	uint32_t read_target, write_target;
	/* notify called w/ sync_lock held */
	void (*notify)(void *arg);
	void *arg;
};

/* list of omap_gem_sync_waiter.. the notify fxn gets called back when
 * the read and/or write target count is achieved which can call a user
 * callback (ex. to kick 3d and/or 2d), wakeup blocked task (prep for
 * cpu access), etc.
 */
static LIST_HEAD(waiters);
static inline bool is_waiting(struct omap_gem_sync_waiter *waiter)
{
	struct omap_gem_object *omap_obj = waiter->omap_obj;
	if ((waiter->op & OMAP_GEM_READ) &&
			(omap_obj->sync->read_complete < waiter->read_target))
		return true;
	if ((waiter->op & OMAP_GEM_WRITE) &&
			(omap_obj->sync->write_complete < waiter->write_target))
		return true;
	return false;
}
/* macro for sync debug.. */
#define SYNCDBG 0
#define SYNC(fmt, ...) do { if (SYNCDBG) \
		printk(KERN_ERR "%s:%d: "fmt"\n", \
				__func__, __LINE__, ##__VA_ARGS__); \
	} while (0)
static void sync_op_update(void)
{
	struct omap_gem_sync_waiter *waiter, *n;
	list_for_each_entry_safe(waiter, n, &waiters, list) {
		if (!is_waiting(waiter)) {
			list_del(&waiter->list);
			SYNC("notify: %p", waiter);
			waiter->notify(waiter->arg);
			kfree(waiter);
		}
	}
}
static inline int sync_op(struct drm_gem_object *obj,
		enum omap_gem_op op, bool start)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	spin_lock(&sync_lock);

	if (!omap_obj->sync) {
		omap_obj->sync = kzalloc(sizeof(*omap_obj->sync), GFP_ATOMIC);
		if (!omap_obj->sync) {
			ret = -ENOMEM;
			goto unlock;
		}
	}

	if (start) {
		if (op & OMAP_GEM_READ)
			omap_obj->sync->read_pending++;
		if (op & OMAP_GEM_WRITE)
			omap_obj->sync->write_pending++;
	} else {
		if (op & OMAP_GEM_READ)
			omap_obj->sync->read_complete++;
		if (op & OMAP_GEM_WRITE)
			omap_obj->sync->write_complete++;
		sync_op_update();
	}

unlock:
	spin_unlock(&sync_lock);

	return ret;
}
/* it is a bit lame to handle updates in this sort of polling way, but
 * in case of PVR, the GPU can directly update read/write complete
 * values, and not really tell us which ones it updated.. this also
 * means that sync_lock is not quite sufficient.  So we'll need to
 * do something a bit better when it comes time to add that support.
 */
void omap_gem_op_update(void)
{
	spin_lock(&sync_lock);
	sync_op_update();
	spin_unlock(&sync_lock);
}
/* mark the start of read and/or write operation */
int omap_gem_op_start(struct drm_gem_object *obj, enum omap_gem_op op)
{
	return sync_op(obj, op, true);
}

/* mark the end of read and/or write operation */
int omap_gem_op_finish(struct drm_gem_object *obj, enum omap_gem_op op)
{
	return sync_op(obj, op, false);
}
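
/*
 * Sketch of how the op_* API fits together (hypothetical CPU read path,
 * not part of the driver proper): declare the read as pending, wait for
 * any outstanding hw writes to complete, access the buffer, then mark
 * the read finished so waiters can be notified.
 */
static int __maybe_unused example_cpu_read(struct drm_gem_object *obj)
{
	int ret;

	ret = omap_gem_op_start(obj, OMAP_GEM_READ);
	if (ret)
		return ret;

	/* block until pending writes (e.g. from the GPU) have completed: */
	ret = omap_gem_op_sync(obj, OMAP_GEM_WRITE);

	/* ... read the buffer contents here if ret == 0 ... */

	omap_gem_op_finish(obj, OMAP_GEM_READ);

	return ret;
}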
static DECLARE_WAIT_QUEUE_HEAD(sync_event);

static void sync_notify(void *arg)
{
	struct task_struct **waiter_task = arg;
	/* a NULL'd waiter_task is how the waiter knows it is done: */
	*waiter_task = NULL;
	wake_up_all(&sync_event);
}
int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;
	if (omap_obj->sync) {
		struct task_struct *waiter_task = current;
		struct omap_gem_sync_waiter *waiter =
				kzalloc(sizeof(*waiter), GFP_KERNEL);

		if (!waiter)
			return -ENOMEM;

		waiter->omap_obj = omap_obj;
		waiter->op = op;
		waiter->read_target = omap_obj->sync->read_pending;
		waiter->write_target = omap_obj->sync->write_pending;
		waiter->notify = sync_notify;
		waiter->arg = &waiter_task;

		spin_lock(&sync_lock);
		if (is_waiting(waiter)) {
			SYNC("waited: %p", waiter);
			list_add_tail(&waiter->list, &waiters);
			spin_unlock(&sync_lock);
			ret = wait_event_interruptible(sync_event,
					(waiter_task == NULL));
			spin_lock(&sync_lock);
			if (waiter_task) {
				SYNC("interrupted: %p", waiter);
				/* we were interrupted */
				list_del(&waiter->list);
				waiter_task = NULL;
			} else {
				/* freed in sync_op_update() */
				waiter = NULL;
			}
		}
		spin_unlock(&sync_lock);

		if (waiter)
			kfree(waiter);
	}
	return ret;
}
/* call fxn(arg), either synchronously or asynchronously if the op
 * is currently blocked.. fxn() can be called from any context
 *
 * (TODO for now fxn is called back from whichever context calls
 * omap_gem_op_update().. but this could be better defined later
 * if needed)
 *
 * TODO more code in common w/ _sync()..
 */
int omap_gem_op_async(struct drm_gem_object *obj, enum omap_gem_op op,
		void (*fxn)(void *arg), void *arg)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	if (omap_obj->sync) {
		struct omap_gem_sync_waiter *waiter =
				kzalloc(sizeof(*waiter), GFP_ATOMIC);

		if (!waiter)
			return -ENOMEM;

		waiter->omap_obj = omap_obj;
		waiter->op = op;
		waiter->read_target = omap_obj->sync->read_pending;
		waiter->write_target = omap_obj->sync->write_pending;
		waiter->notify = fxn;
		waiter->arg = arg;

		spin_lock(&sync_lock);
		if (is_waiting(waiter)) {
			SYNC("waited: %p", waiter);
			list_add_tail(&waiter->list, &waiters);
			spin_unlock(&sync_lock);
			return 0;
		}

		spin_unlock(&sync_lock);

		/* not waiting on anything, don't leak the waiter: */
		kfree(waiter);
	}

	/* no waiting.. */
	fxn(arg);

	return 0;
}
/* special API so PVR can update the buffer to use a sync-object allocated
 * from its sync-obj heap.  Only used for a newly allocated (from PVR's
 * perspective) sync-object, so we overwrite the new syncobj w/ values
 * from the already allocated syncobj (if there is one)
 */
int omap_gem_set_sync_object(struct drm_gem_object *obj, void *syncobj)
{
	struct omap_gem_object *omap_obj = to_omap_bo(obj);
	int ret = 0;

	spin_lock(&sync_lock);

	if ((omap_obj->flags & OMAP_BO_EXT_SYNC) && !syncobj) {
		/* clearing a previously set syncobj */
		syncobj = kzalloc(sizeof(*omap_obj->sync), GFP_ATOMIC);
		if (!syncobj) {
			ret = -ENOMEM;
			goto unlock;
		}
		memcpy(syncobj, omap_obj->sync, sizeof(*omap_obj->sync));
		omap_obj->flags &= ~OMAP_BO_EXT_SYNC;
		omap_obj->sync = syncobj;
	} else if (syncobj && !(omap_obj->flags & OMAP_BO_EXT_SYNC)) {
		/* replacing an existing syncobj */
		if (omap_obj->sync) {
			memcpy(syncobj, omap_obj->sync, sizeof(*omap_obj->sync));
			kfree(omap_obj->sync);
		}
		omap_obj->flags |= OMAP_BO_EXT_SYNC;
		omap_obj->sync = syncobj;
	}

unlock:
	spin_unlock(&sync_lock);

	return ret;
}
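
/*
 * Sketch of the expected use from the PVR side (hypothetical): adopt a
 * sync-object allocated from PVR's sync-obj heap, then later pass NULL
 * (with OMAP_BO_EXT_SYNC still set) to revert the buffer to a privately
 * allocated syncobj:
 *
 *	omap_gem_set_sync_object(obj, pvr_syncobj);
 *	...
 *	omap_gem_set_sync_object(obj, NULL);
 */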
int omap_gem_init_object(struct drm_gem_object *obj)
{
	return -EINVAL;          /* unused */
}
/* don't call directly.. called from GEM core when it is time to actually
 * free the object..
 */
void omap_gem_free_object(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct omap_gem_object *omap_obj = to_omap_bo(obj);

	evict(obj);

	if (obj->map_list.map) {
		drm_gem_free_mmap_offset(obj);
	}

	/* don't free externally allocated backing memory */
	if (!(omap_obj->flags & OMAP_BO_EXT_MEM)) {
		if (omap_obj->pages) {
			omap_gem_detach_pages(obj);
		}
		if (!is_shmem(obj)) {
			dma_free_writecombine(dev->dev, obj->size,
					omap_obj->vaddr, omap_obj->paddr);
		} else if (omap_obj->vaddr) {
			vunmap(omap_obj->vaddr);
		}
	}

	/* don't free externally allocated syncobj */
	if (!(omap_obj->flags & OMAP_BO_EXT_SYNC)) {
		kfree(omap_obj->sync);
	}

	drm_gem_object_release(obj);

	kfree(obj);
}
/* convenience method to construct a GEM buffer object, and userspace handle */
int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		union omap_gem_size gsize, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	obj = omap_gem_new(dev, gsize, flags);
	if (!obj)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, obj, handle);
	if (ret) {
		drm_gem_object_release(obj);
		kfree(obj); /* TODO isn't there a dtor to call? just copying i915 */
		return ret;
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return 0;
}
/* GEM buffer object constructor */
struct drm_gem_object *omap_gem_new(struct drm_device *dev,
		union omap_gem_size gsize, uint32_t flags)
{
	struct omap_drm_private *priv = dev->dev_private;
	struct omap_gem_object *omap_obj;
	struct drm_gem_object *obj = NULL;
	size_t size;
	int ret;

	if (flags & OMAP_BO_TILED) {
		if (!usergart) {
			dev_err(dev->dev, "Tiled buffers require DMM\n");
			goto fail;
		}

		/* tiled buffers are always shmem paged backed.. when they are
		 * scanned out, they are remapped into DMM/TILER
		 */
		flags &= ~OMAP_BO_SCANOUT;

		/* currently don't allow cached buffers.. there is some caching
		 * stuff that needs to be handled better
		 */
		flags &= ~(OMAP_BO_CACHED|OMAP_BO_UNCACHED);
		flags |= OMAP_BO_WC;

		/* align dimensions to slot boundaries... */
		tiler_align(gem2fmt(flags),
				&gsize.tiled.width, &gsize.tiled.height);

		/* ...and calculate size based on aligned dimensions */
		size = tiler_size(gem2fmt(flags),
				gsize.tiled.width, gsize.tiled.height);
	} else {
		size = PAGE_ALIGN(gsize.bytes);
	}

	omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
	if (!omap_obj) {
		dev_err(dev->dev, "could not allocate GEM object\n");
		goto fail;
	}

	obj = &omap_obj->base;

	if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
		/* attempt to allocate contiguous memory if we don't
		 * have DMM for remapping discontiguous buffers
		 */
		omap_obj->vaddr = dma_alloc_writecombine(dev->dev, size,
				&omap_obj->paddr, GFP_KERNEL);
		if (omap_obj->vaddr) {
			flags |= OMAP_BO_DMA;
		}
	}

	omap_obj->flags = flags;

	if (flags & OMAP_BO_TILED) {
		omap_obj->width = gsize.tiled.width;
		omap_obj->height = gsize.tiled.height;
	}

	if (flags & (OMAP_BO_DMA|OMAP_BO_EXT_MEM)) {
		ret = drm_gem_private_object_init(dev, obj, size);
	} else {
		ret = drm_gem_object_init(dev, obj, size);
	}

	if (ret)
		goto fail;

	return obj;

fail:
	if (obj)
		omap_gem_free_object(obj);
	return NULL;
}
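
/*
 * Allocation sketch (hypothetical caller): a 2d tiled 16bpp buffer is
 * described via the union's 'tiled' view, a plain buffer via 'bytes'.
 * OMAP_BO_TILED_16 and OMAP_BO_WC come from the omap_drm uapi header.
 */
static struct drm_gem_object * __maybe_unused
example_alloc_tiled(struct drm_device *dev)
{
	union omap_gem_size gsize = {
		.tiled = { .width = 1280, .height = 720 },
	};

	return omap_gem_new(dev, gsize, OMAP_BO_TILED_16 | OMAP_BO_WC);
}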
/* init/cleanup.. if DMM is used, we need to set some stuff up.. */
void omap_gem_init(struct drm_device *dev)
{
	struct omap_drm_private *priv = dev->dev_private;
	const enum tiler_fmt fmts[] = {
			TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT
	};
	int i, j, ret;

	ret = omap_dmm_init(dev);
	if (ret) {
		/* DMM only supported on OMAP4 and later, so this isn't fatal */
		dev_warn(dev->dev, "omap_dmm_init failed, disabling DMM\n");
		return;
	}

	usergart = kzalloc(3 * sizeof(*usergart), GFP_KERNEL);
	if (!usergart) {
		dev_warn(dev->dev, "could not allocate usergart\n");
		return;
	}

	/* reserve 4k aligned/wide regions for userspace mappings: */
	for (i = 0; i < ARRAY_SIZE(fmts); i++) {
		uint16_t h = 1, w = PAGE_SIZE >> i;
		tiler_align(fmts[i], &w, &h);
		/* note: since each region is 1 4kb page wide, and minimum
		 * number of rows, the height ends up being the same as the
		 * # of pages in the region
		 */
		usergart[i].height = h;
		usergart[i].height_shift = ilog2(h);
		usergart[i].stride_pfn = tiler_stride(fmts[i]) >> PAGE_SHIFT;
		usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i);
		for (j = 0; j < NUM_USERGART_ENTRIES; j++) {
			struct usergart_entry *entry = &usergart[i].entry[j];
			struct tiler_block *block =
					tiler_reserve_2d(fmts[i], w, h,
							PAGE_SIZE);
			if (IS_ERR(block)) {
				dev_err(dev->dev,
						"reserve failed: %d, %d, %ld\n",
						i, j, PTR_ERR(block));
				return;
			}
			entry->paddr = tiler_ssptr(block);
			entry->block = block;

			DBG("%d:%d: %dx%d: paddr=%08x stride=%d", i, j, w, h,
					entry->paddr,
					usergart[i].stride_pfn << PAGE_SHIFT);
		}
	}

	priv->has_dmm = true;
}
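
/*
 * Worked example of the loop above (assuming 4kb pages): for i=0
 * (TILFMT_8BIT) we ask tiler_align() for a w=4096, h=1 region; if the
 * slot height for that format comes back as 64 rows, then height=64,
 * height_shift=6, and slot_shift = ilog2((4096 / 64) >> 0) = 6, i.e.
 * 64-byte-wide slots within the 4kb-wide strip.
 */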
void omap_gem_deinit(struct drm_device *dev)
{
	/* I believe we can rely on there being no more outstanding GEM
	 * objects which could depend on usergart/dmm at this point.
	 */
	kfree(usergart);
}