/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
void radeon_gem_object_free(struct drm_gem_object *gobj)
{
	struct radeon_bo *robj = gem_to_radeon_bo(gobj);

	if (robj) {
		if (robj->gem_base.import_attach)
			drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
		radeon_bo_unref(&robj);
	}
}
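/*
 * Creates a GEM object backed by a radeon_bo. The alignment is clamped to
 * at least page size and the allocation is rejected if it exceeds the
 * smaller of the visible VRAM and GTT sizes. If a VRAM-only allocation
 * fails, the domain is widened to VRAM|GTT and the allocation retried, so
 * callers may end up with a GTT-backed object even if they asked for VRAM.
 */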
int radeon_gem_object_create(struct radeon_device *rdev, int size,
			     int alignment, int initial_domain,
			     u32 flags, bool discardable, bool kernel,
			     struct drm_gem_object **obj)
{
	struct radeon_bo *robj;
	unsigned long max_size;
	int r;

	*obj = NULL;
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}

	/* the maximum bo size is the minimum of the visible VRAM size and the GTT size */
	max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size);
	if (size > max_size) {
		DRM_DEBUG("Allocation size %dMb bigger than %ldMb limit\n",
			  size >> 20, max_size >> 20);
		return -ENOMEM;
	}

retry:
	r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain,
			     flags, NULL, &robj);
	if (r) {
		if (r != -ERESTARTSYS) {
			if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
				initial_domain |= RADEON_GEM_DOMAIN_GTT;
				goto retry;
			}
			DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		}
		return r;
	}
	*obj = &robj->gem_base;
	robj->pid = task_pid_nr(current);

	mutex_lock(&rdev->gem.mutex);
	list_add_tail(&robj->list, &rdev->gem.objects);
	mutex_unlock(&rdev->gem.mutex);

	return 0;
}
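/*
 * Validates a buffer into the requested domain. Only the CPU domain does
 * real work today: it waits for the BO to go idle so the CPU can safely
 * access it. The write domain takes precedence over the read domains.
 */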
static int radeon_gem_set_domain(struct drm_gem_object *gobj,
				 uint32_t rdomain, uint32_t wdomain)
{
	struct radeon_bo *robj;
	uint32_t domain;
	int r;

	/* FIXME: reimplement */
	robj = gem_to_radeon_bo(gobj);
	/* work out where to validate the buffer to */
	domain = wdomain ? wdomain : rdomain;
	if (!domain) {
		printk(KERN_WARNING "Set domain without domain!\n");
		return 0;
	}
	if (domain == RADEON_GEM_DOMAIN_CPU) {
		/* asking for CPU access; wait for the object to go idle */
		r = radeon_bo_wait(robj, NULL, false);
		if (r) {
			printk(KERN_ERR "Failed to wait for object!\n");
			return r;
		}
	}
	return 0;
}
int radeon_gem_init(struct radeon_device *rdev)
{
	INIT_LIST_HEAD(&rdev->gem.objects);
	return 0;
}
void radeon_gem_fini(struct radeon_device *rdev)
{
	radeon_bo_force_delete(rdev);
}
/*
 * Called from drm_gem_handle_create(), which is used by both the GEM_CREATE
 * and the GEM_OPEN ioctls.
 */
int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	/* pre-Cayman chips have no per-process VM */
	if (rdev->family < CHIP_CAYMAN)
		return 0;

	r = radeon_bo_reserve(rbo, false);
	if (r)
		return r;

	bo_va = radeon_vm_bo_find(vm, rbo);
	if (!bo_va)
		bo_va = radeon_vm_bo_add(rdev, vm, rbo);
	else
		++bo_va->ref_count;
	radeon_bo_unreserve(rbo);
	return 0;
}
void radeon_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv)
{
	struct radeon_bo *rbo = gem_to_radeon_bo(obj);
	struct radeon_device *rdev = rbo->rdev;
	struct radeon_fpriv *fpriv = file_priv->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	struct radeon_bo_va *bo_va;
	int r;

	if (rdev->family < CHIP_CAYMAN)
		return;

	r = radeon_bo_reserve(rbo, true);
	if (r) {
		dev_err(rdev->dev, "leaking bo va because "
			"we failed to reserve the bo (%d)\n", r);
		return;
	}
	bo_va = radeon_vm_bo_find(vm, rbo);
	if (bo_va) {
		/* remove the mapping once the last reference is gone */
		if (--bo_va->ref_count == 0)
			radeon_vm_bo_rmv(rdev, bo_va);
	}
	radeon_bo_unreserve(rbo);
}
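/*
 * Common error filter for the ioctls below: a -EDEADLK from the wait paths
 * signals a GPU lockup, in which case the GPU is reset and -EAGAIN is
 * returned so userspace can retry the call.
 */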
static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}
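/*
 * Reports the memory sizes userspace can allocate from: the real VRAM size,
 * the CPU-visible VRAM minus the stolen VGA memory and the fbdev buffers,
 * and the GTT size minus the IB pool and the ring buffers.
 */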
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_info *args = data;
	struct ttm_mem_type_manager *man;
	unsigned i;

	man = &rdev->mman.bdev.man[TTM_PL_VRAM];

	args->vram_size = rdev->mc.real_vram_size;
	args->vram_visible = (u64)man->size << PAGE_SHIFT;
	if (rdev->stollen_vga_memory)
		args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
	args->vram_visible -= radeon_fbdev_total_size(rdev);
	args->gart_size = rdev->mc.gtt_size - 4096 - RADEON_IB_POOL_SIZE*64*1024;
	for (i = 0; i < RADEON_NUM_RINGS; ++i)
		args->gart_size -= rdev->ring[i].ring_size;
	return 0;
}
int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}
int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	/* TODO: implement */
	DRM_ERROR("unimplemented %s\n", __func__);
	return -ENOSYS;
}
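/*
 * GEM_CREATE ioctl: allocates a BO of the page-rounded size and wraps it in
 * a GEM handle. The reference obtained from the allocation is dropped once
 * the handle exists, so the handle is the only thing keeping the object
 * alive. The exclusive_lock read side keeps GPU resets out while we work.
 */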
int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_create *args = data;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	down_read(&rdev->exclusive_lock);
	/* create a gem object to contain this object in */
	args->size = roundup(args->size, PAGE_SIZE);
	r = radeon_gem_object_create(rdev, args->size, args->alignment,
				     args->initial_domain, args->flags,
				     false, false, &gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	r = drm_gem_handle_create(filp, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gem_handle_lockup(rdev, r);
		return r;
	}
	args->handle = handle;
	up_read(&rdev->exclusive_lock);
	return 0;
}
int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	/* transition the BO to a domain -
	 * just validate the BO into a certain domain */
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_set_domain *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	/* for now if someone requests domain CPU -
	 * just make sure the buffer is finished with */
	down_read(&rdev->exclusive_lock);

	/* just do a BO wait for now */
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		up_read(&rdev->exclusive_lock);
		return -ENOENT;
	}
	robj = gem_to_radeon_bo(gobj);

	r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

	drm_gem_object_unreference_unlocked(gobj);
	up_read(&rdev->exclusive_lock);
	r = radeon_gem_handle_lockup(robj->rdev, r);
	return r;
}
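/*
 * Translates a GEM handle into the fake mmap offset userspace passes to
 * mmap() on the DRM fd; used by both the dumb-buffer API and the GEM_MMAP
 * ioctl below.
 */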
int radeon_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p)
{
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;

	gobj = drm_gem_object_lookup(dev, filp, handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_radeon_bo(gobj);
	*offset_p = radeon_bo_mmap_offset(robj);
	drm_gem_object_unreference_unlocked(gobj);
	return 0;
}
int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct drm_radeon_gem_mmap *args = data;

	return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
}
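/*
 * GEM_BUSY ioctl: a non-blocking (no_wait) idle check that also reports the
 * domain the buffer currently lives in, derived from its TTM placement.
 */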
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_busy *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;
	uint32_t cur_placement = 0;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_wait(robj, &cur_placement, true);
	args->domain = radeon_mem_type_to_domain(cur_placement);
	drm_gem_object_unreference_unlocked(gobj);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}
int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_radeon_gem_wait_idle *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_wait(robj, NULL, false);
	/* callback to hw specific functions, if any */
	if (rdev->asic->ioctl_wait_idle)
		robj->rdev->asic->ioctl_wait_idle(rdev, robj);
	drm_gem_object_unreference_unlocked(gobj);
	r = radeon_gem_handle_lockup(rdev, r);
	return r;
}
int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_set_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r = 0;

	DRM_DEBUG("%d\n", args->handle);
	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}
int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct drm_radeon_gem_get_tiling *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *rbo;
	int r = 0;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (unlikely(r != 0))
		goto out;
	radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
	radeon_bo_unreserve(rbo);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}
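/*
 * GEM_VA ioctl: maps or unmaps a BO in the per-file virtual address space.
 * The request is rejected unless the VM manager is enabled (Cayman and
 * newer), the vm_id is 0, the offset lies outside the reserved area, and
 * the flags don't try to set the kernel-managed VALID/SYSTEM page bits.
 */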
int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_va *args = data;
	struct drm_gem_object *gobj;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fpriv *fpriv = filp->driver_priv;
	struct radeon_bo *rbo;
	struct radeon_bo_va *bo_va;
	u32 invalid_flags;
	int r = 0;

	if (!rdev->vm_manager.enabled) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOTTY;
	}

	/* We don't support vm_id yet, so to be sure we don't have broken
	 * userspace, reject anyone trying to use a non-0 value; moving
	 * forward we can then use those fields without breaking existing
	 * userspace.
	 */
	if (args->vm_id) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	if (args->offset < RADEON_VA_RESERVED_SIZE) {
		dev_err(&dev->pdev->dev,
			"offset 0x%lX is in reserved area 0x%X\n",
			(unsigned long)args->offset,
			RADEON_VA_RESERVED_SIZE);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	/* don't remove, we need to enforce userspace to set the snooped flag,
	 * otherwise we will end up with broken userspace and we won't be able
	 * to enable this feature without adding a new interface
	 */
	invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
	if ((args->flags & invalid_flags)) {
		dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
			args->flags, invalid_flags);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
	case RADEON_VA_UNMAP:
		break;
	default:
		dev_err(&dev->pdev->dev, "unsupported operation %d\n",
			args->operation);
		args->operation = RADEON_VA_RESULT_ERROR;
		return -EINVAL;
	}

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL) {
		args->operation = RADEON_VA_RESULT_ERROR;
		return -ENOENT;
	}
	rbo = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(rbo, false);
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
		drm_gem_object_unreference_unlocked(gobj);
		return r;
	}
	bo_va = radeon_vm_bo_find(&fpriv->vm, rbo);
	if (!bo_va) {
		args->operation = RADEON_VA_RESULT_ERROR;
		radeon_bo_unreserve(rbo);	/* don't leak the reservation */
		drm_gem_object_unreference_unlocked(gobj);
		return -ENOENT;
	}

	switch (args->operation) {
	case RADEON_VA_MAP:
		if (bo_va->soffset) {
			args->operation = RADEON_VA_RESULT_VA_EXIST;
			args->offset = bo_va->soffset;
			goto out;
		}
		r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
		break;
	case RADEON_VA_UNMAP:
		r = radeon_vm_bo_set_addr(rdev, bo_va, 0, 0);
		break;
	default:
		break;
	}
	args->operation = RADEON_VA_RESULT_OK;
	if (r) {
		args->operation = RADEON_VA_RESULT_ERROR;
	}
out:
	radeon_bo_unreserve(rbo);
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}
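/*
 * GEM_OP ioctl: small get/set accessors on a BO; currently only the initial
 * domain can be queried or overridden (masked to VRAM, GTT and CPU).
 */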
int radeon_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
{
	struct drm_radeon_gem_op *args = data;
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -ENOENT;
	robj = gem_to_radeon_bo(gobj);
	r = radeon_bo_reserve(robj, false);
	if (unlikely(r))
		goto out;

	switch (args->op) {
	case RADEON_GEM_OP_GET_INITIAL_DOMAIN:
		args->value = robj->initial_domain;
		break;
	case RADEON_GEM_OP_SET_INITIAL_DOMAIN:
		robj->initial_domain = args->value & (RADEON_GEM_DOMAIN_VRAM |
						      RADEON_GEM_DOMAIN_GTT |
						      RADEON_GEM_DOMAIN_CPU);
		break;
	default:
		r = -EINVAL;
	}

	radeon_bo_unreserve(robj);
out:
	drm_gem_object_unreference_unlocked(gobj);
	return r;
}
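/*
 * Dumb-buffer creation: the pitch is the hardware-aligned width times the
 * bytes per pixel ((bpp + 1) / 8), and the size is pitch times height,
 * rounded up to a whole number of pages. As a worked example with
 * hypothetical numbers (assuming the width already satisfies the hardware
 * alignment): 1024x768 at bpp=32 gives (32 + 1) / 8 = 4 bytes per pixel,
 * pitch = 1024 * 4 = 4096 bytes and size = 4096 * 768 = 3 MiB, which is
 * already page aligned.
 */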
int radeon_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct radeon_device *rdev = dev->dev_private;
	struct drm_gem_object *gobj;
	uint32_t handle;
	int r;

	args->pitch = radeon_align_pitch(rdev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
	args->size = args->pitch * args->height;
	args->size = ALIGN(args->size, PAGE_SIZE);

	r = radeon_gem_object_create(rdev, args->size, 0,
				     RADEON_GEM_DOMAIN_VRAM, 0,
				     false, ttm_bo_type_device,
				     &gobj);
	if (r)
		return -ENOMEM;

	r = drm_gem_handle_create(file_priv, gobj, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(gobj);
	if (r)
		return r;
	args->handle = handle;
	return 0;
}
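/*
 * Debugfs dump of every GEM object on the device, one line per BO, e.g.
 * (illustrative output only):
 *   bo[0x00000001]     4096kB        4MB VRAM pid     1234
 */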
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_gem_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_bo *rbo;
	unsigned i = 0;

	mutex_lock(&rdev->gem.mutex);
	list_for_each_entry(rbo, &rdev->gem.objects, list) {
		unsigned domain;
		const char *placement;

		domain = radeon_mem_type_to_domain(rbo->tbo.mem.mem_type);
		switch (domain) {
		case RADEON_GEM_DOMAIN_VRAM:
			placement = "VRAM";
			break;
		case RADEON_GEM_DOMAIN_GTT:
			placement = " GTT";
			break;
		case RADEON_GEM_DOMAIN_CPU:
		default:
			placement = " CPU";
			break;
		}
		seq_printf(m, "bo[0x%08x] %8ldkB %8ldMB %s pid %8ld\n",
			   i, radeon_bo_size(rbo) >> 10, radeon_bo_size(rbo) >> 20,
			   placement, (unsigned long)rbo->pid);
		i++;
	}
	mutex_unlock(&rdev->gem.mutex);
	return 0;
}

static struct drm_info_list radeon_debugfs_gem_list[] = {
	{"radeon_gem_info", &radeon_debugfs_gem_info, 0, NULL},
};
#endif

int radeon_gem_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_gem_list, 1);
#endif
	return 0;
}