/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/*
 * Authors: Dave Airlie <airlied@redhat.com>
 */
#include <drm/drmP.h>
#include <ttm/ttm_page_alloc.h>

#include "ast_drv.h"

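/* Upcast: recover the ast_private that embeds a given ttm_bo_device. */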
static inline struct ast_private *
ast_bdev(struct ttm_bo_device *bd)
{
	return container_of(bd, struct ast_private, ttm.bdev);
}

static int
ast_ttm_mem_global_init(struct drm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}

static void
ast_ttm_mem_global_release(struct drm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}

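/*
 * TTM keeps one global memory-accounting object and one global BO state
 * object shared by all drivers; drm_global_item_ref() initialises them
 * on first use and refcounts them afterwards.
 */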
static int ast_ttm_global_init(struct ast_private *ast)
{
	struct drm_global_reference *global_ref;
	int r;

	global_ref = &ast->ttm.mem_global_ref;
	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
	global_ref->size = sizeof(struct ttm_mem_global);
	global_ref->init = &ast_ttm_mem_global_init;
	global_ref->release = &ast_ttm_mem_global_release;
	r = drm_global_item_ref(global_ref);
	if (r != 0) {
		DRM_ERROR("Failed setting up TTM memory accounting "
			  "subsystem.\n");
		return r;
	}

	ast->ttm.bo_global_ref.mem_glob =
		ast->ttm.mem_global_ref.object;
	global_ref = &ast->ttm.bo_global_ref.ref;
	global_ref->global_type = DRM_GLOBAL_TTM_BO;
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_init;
	global_ref->release = &ttm_bo_global_release;
	r = drm_global_item_ref(global_ref);
	if (r != 0) {
		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
		drm_global_item_unref(&ast->ttm.mem_global_ref);
		return r;
	}
	return 0;
}

void
ast_ttm_global_release(struct ast_private *ast)
{
	if (ast->ttm.mem_global_ref.release == NULL)
		return;

	drm_global_item_unref(&ast->ttm.bo_global_ref.ref);
	drm_global_item_unref(&ast->ttm.mem_global_ref);
	ast->ttm.mem_global_ref.release = NULL;
}

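/* Final-release callback invoked by TTM when a BO's refcount drops to zero. */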
static void ast_bo_ttm_destroy(struct ttm_buffer_object *tbo)
{
	struct ast_bo *bo;

	bo = container_of(tbo, struct ast_bo, bo);

	drm_gem_object_release(&bo->gem);
	kfree(bo);
}

bool ast_ttm_bo_is_ast_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &ast_bo_ttm_destroy)
		return true;
	return false;
}

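/*
 * Describe each memory domain to TTM: system RAM is cacheable, while
 * VRAM is a fixed, mappable aperture preferring write-combining.
 */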
static int
ast_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
		     struct ttm_mem_type_manager *man)
{
	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		man->func = &ttm_bo_manager_func;
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
			TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

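/* When TTM must evict one of our BOs, send it to system memory. */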
static void
ast_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
	struct ast_bo *astbo = ast_bo(bo);

	if (!ast_ttm_bo_is_ast_bo(bo))
		return;

	ast_ttm_placement(astbo, TTM_PL_FLAG_SYSTEM);
	*pl = astbo->placement;
}

static int ast_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	return 0;
}

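/*
 * Set up CPU access to a memory region: system RAM needs nothing,
 * VRAM is an offset into PCI BAR 0.
 */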
static int ast_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
				  struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct ast_private *ast = ast_bdev(bdev);

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* system memory */
		return 0;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.base = pci_resource_start(ast->dev->pdev, 0);
		mem->bus.is_iomem = true;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void ast_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}

static int ast_bo_move(struct ttm_buffer_object *bo,
		       bool evict, bool interruptible,
		       bool no_wait_gpu,
		       struct ttm_mem_reg *new_mem)
{
	int r;

	r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
	return r;
}

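/*
 * There is no GPU page-table binding on this hardware, so the TTM
 * backend only has to free the ttm_tt on destroy.
 */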
static void ast_ttm_backend_destroy(struct ttm_tt *tt)
{
	ttm_tt_fini(tt);
	kfree(tt);
}

static struct ttm_backend_func ast_tt_backend_func = {
	.destroy = &ast_ttm_backend_destroy,
};

struct ttm_tt *ast_ttm_tt_create(struct ttm_bo_device *bdev,
				 unsigned long size, uint32_t page_flags,
				 struct page *dummy_read_page)
{
	struct ttm_tt *tt;

	tt = kzalloc(sizeof(struct ttm_tt), GFP_KERNEL);
	if (tt == NULL)
		return NULL;
	tt->func = &ast_tt_backend_func;
	if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page)) {
		kfree(tt);
		return NULL;
	}
	return tt;
}

static int ast_ttm_tt_populate(struct ttm_tt *ttm)
{
	return ttm_pool_populate(ttm);
}

static void ast_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	ttm_pool_unpopulate(ttm);
}

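/* Glue the callbacks above into the TTM driver interface. */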
struct ttm_bo_driver ast_bo_driver = {
	.ttm_tt_create = ast_ttm_tt_create,
	.ttm_tt_populate = ast_ttm_tt_populate,
	.ttm_tt_unpopulate = ast_ttm_tt_unpopulate,
	.init_mem_type = ast_bo_init_mem_type,
	.evict_flags = ast_bo_evict_flags,
	.move = ast_bo_move,
	.verify_access = ast_bo_verify_access,
	.io_mem_reserve = &ast_ttm_io_mem_reserve,
	.io_mem_free = &ast_ttm_io_mem_free,
};

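/*
 * Bring up the memory manager: grab the TTM globals, initialise the
 * BO device, size the VRAM pool from the detected video memory and
 * mark the framebuffer BAR write-combining via MTRR.
 */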
int ast_mm_init(struct ast_private *ast)
{
	int ret;
	struct drm_device *dev = ast->dev;
	struct ttm_bo_device *bdev = &ast->ttm.bdev;

	ret = ast_ttm_global_init(ast);
	if (ret)
		return ret;

	ret = ttm_bo_device_init(&ast->ttm.bdev,
				 ast->ttm.bo_global_ref.ref.object,
				 &ast_bo_driver, DRM_FILE_PAGE_OFFSET,
				 true);
	if (ret) {
		DRM_ERROR("Error initialising bo driver; %d\n", ret);
		return ret;
	}

	ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
			     ast->vram_size >> PAGE_SHIFT);
	if (ret) {
		DRM_ERROR("Failed ttm VRAM init: %d\n", ret);
		return ret;
	}

	ast->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 0),
				    pci_resource_len(dev->pdev, 0),
				    DRM_MTRR_WC);

	return 0;
}

void ast_mm_fini(struct ast_private *ast)
{
	struct drm_device *dev = ast->dev;

	ttm_bo_device_release(&ast->ttm.bdev);

	ast_ttm_global_release(ast);

	if (ast->fb_mtrr >= 0) {
		drm_mtrr_del(ast->fb_mtrr,
			     pci_resource_start(dev->pdev, 0),
			     pci_resource_len(dev->pdev, 0), DRM_MTRR_WC);
		ast->fb_mtrr = -1;
	}
}

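/*
 * Translate a domain mask into a TTM placement list. An empty mask
 * falls back to system memory.
 */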
void ast_ttm_placement(struct ast_bo *bo, int domain)
{
	u32 c = 0;

	bo->placement.fpfn = 0;
	bo->placement.lpfn = 0;
	bo->placement.placement = bo->placements;
	bo->placement.busy_placement = bo->placements;
	if (domain & TTM_PL_FLAG_VRAM)
		bo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
	if (domain & TTM_PL_FLAG_SYSTEM)
		bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
	if (!c)
		bo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
	bo->placement.num_placement = c;
	bo->placement.num_busy_placement = c;
}

int ast_bo_reserve(struct ast_bo *bo, bool no_wait)
{
	int ret;

	ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
	if (ret) {
		if (ret != -ERESTARTSYS && ret != -EBUSY)
			DRM_ERROR("reserve failed %p\n", bo);
		return ret;
	}
	return 0;
}

void ast_bo_unreserve(struct ast_bo *bo)
{
	ttm_bo_unreserve(&bo->bo);
}

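/* Allocate a GEM-backed TTM BO that can live in VRAM or system memory. */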
int ast_bo_create(struct drm_device *dev, int size, int align,
		  uint32_t flags, struct ast_bo **pastbo)
{
	struct ast_private *ast = dev->dev_private;
	struct ast_bo *astbo;
	size_t acc_size;
	int ret;

	astbo = kzalloc(sizeof(struct ast_bo), GFP_KERNEL);
	if (!astbo)
		return -ENOMEM;

	ret = drm_gem_object_init(dev, &astbo->gem, size);
	if (ret) {
		kfree(astbo);
		return ret;
	}

	astbo->gem.driver_private = NULL;
	astbo->bo.bdev = &ast->ttm.bdev;

	ast_ttm_placement(astbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);

	acc_size = ttm_bo_dma_acc_size(&ast->ttm.bdev, size,
				       sizeof(struct ast_bo));

	ret = ttm_bo_init(&ast->ttm.bdev, &astbo->bo, size,
			  ttm_bo_type_device, &astbo->placement,
			  align >> PAGE_SHIFT, false, NULL, acc_size,
			  NULL, ast_bo_ttm_destroy);
	if (ret)
		return ret;

	*pastbo = astbo;
	return 0;
}

static inline u64 ast_bo_gpu_offset(struct ast_bo *bo)
{
	return bo->bo.offset;
}

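/*
 * Pinning is reference counted: the first pin validates the BO into
 * the requested domain with TTM_PL_FLAG_NO_EVICT; later pins only
 * bump the count.
 */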
int ast_bo_pin(struct ast_bo *bo, u32 pl_flag, u64 *gpu_addr)
{
	int i, ret;

	if (bo->pin_count) {
		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = ast_bo_gpu_offset(bo);
		return 0;
	}

	ast_ttm_placement(bo, pl_flag);
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
	if (ret)
		return ret;

	bo->pin_count = 1;
	if (gpu_addr)
		*gpu_addr = ast_bo_gpu_offset(bo);
	return 0;
}

int ast_bo_unpin(struct ast_bo *bo)
{
	int i, ret;

	if (!bo->pin_count) {
		DRM_ERROR("unpin bad %p\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;

	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
	if (ret)
		return ret;

	return 0;
}

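/* Release the last pin and force the buffer out of VRAM into system RAM. */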
int ast_bo_push_sysram(struct ast_bo *bo)
{
	int i, ret;

	if (!bo->pin_count) {
		DRM_ERROR("unpin bad %p\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;

	if (bo->kmap.virtual)
		ttm_bo_kunmap(&bo->kmap);

	ast_ttm_placement(bo, TTM_PL_FLAG_SYSTEM);
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;

	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
	if (ret) {
		DRM_ERROR("pushing to system memory failed\n");
		return ret;
	}
	return 0;
}

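/*
 * mmap dispatch: offsets below DRM_FILE_PAGE_OFFSET are legacy DRM
 * maps; anything above belongs to a TTM buffer object.
 */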
int ast_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv;
	struct ast_private *ast;

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
		return drm_mmap(filp, vma);

	file_priv = filp->private_data;
	ast = file_priv->minor->dev->dev_private;
	return ttm_bo_mmap(filp, vma, &ast->ttm.bdev);
}