drm/ttm: expose CPU address of DMA-allocated pages
author     Alexandre Courbot <acourbot@nvidia.com>
           Mon, 4 Aug 2014 09:28:54 +0000 (18:28 +0900)
committer  Ben Skeggs <bskeggs@redhat.com>
           Sat, 9 Aug 2014 15:08:03 +0000 (01:08 +1000)
Pages allocated using the DMA API have a coherent memory mapping. Make
this mapping visible to drivers so they can decide to use it instead of
creating their own redundant one.

Signed-off-by: Alexandre Courbot <acourbot@nvidia.com>
Acked-by: David Airlie <airlied@linux.ie>
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
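
For illustration, a minimal driver-side sketch of what using the exposed
mapping instead of a redundant one can look like; the helper name is
hypothetical and not part of this patch:

/* Hypothetical driver-side helper, not part of this patch: once
 * ttm_dma_populate() has filled the arrays, the coherent kernel mapping
 * of page i can be read back directly, so a driver no longer needs to
 * create its own vmap()/kmap() of the same pages.
 */
static void *example_dma_tt_cpu_addr(struct ttm_dma_tt *ttm_dma,
                                     unsigned long i)
{
        /* Filled by the DMA page pool next to dma_address[i];
         * NULL when the page was not allocated from the coherent pool. */
        return ttm_dma->cpu_address[i];
}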
drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
drivers/gpu/drm/ttm/ttm_tt.c
include/drm/ttm/ttm_bo_driver.h

diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index ca65df144765e530571040ff246eda61df1326fc..c96db433f8af834398f6496da9498db98013a7c7 100644
@@ -848,6 +848,7 @@ static int ttm_dma_pool_get_pages(struct dma_pool *pool,
        if (count) {
                d_page = list_first_entry(&pool->free_list, struct dma_page, page_list);
                ttm->pages[index] = d_page->p;
+               ttm_dma->cpu_address[index] = d_page->vaddr;
                ttm_dma->dma_address[index] = d_page->dma;
                list_move_tail(&d_page->page_list, &ttm_dma->pages_list);
                r = 0;
@@ -979,6 +980,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
        INIT_LIST_HEAD(&ttm_dma->pages_list);
        for (i = 0; i < ttm->num_pages; i++) {
                ttm->pages[i] = NULL;
+               ttm_dma->cpu_address[i] = 0;
                ttm_dma->dma_address[i] = 0;
        }
 
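For context, a sketch of where d_page->vaddr comes from, assuming an
allocation path along the lines of the pool's own page allocation (names
prefixed example_ are hypothetical): the coherent allocation yields both
the kernel virtual address and the bus address that the hunks above copy
into cpu_address[] and dma_address[].

#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/slab.h>

struct example_dma_page {
        void *vaddr;            /* coherent CPU mapping of the page */
        dma_addr_t dma;         /* bus address handed to the device */
        struct page *p;         /* backing struct page */
};

static struct example_dma_page *example_dma_page_alloc(struct device *dev,
                                                       size_t size)
{
        struct example_dma_page *d_page;

        d_page = kzalloc(sizeof(*d_page), GFP_KERNEL);
        if (!d_page)
                return NULL;

        d_page->vaddr = dma_alloc_coherent(dev, size, &d_page->dma,
                                           GFP_KERNEL);
        if (!d_page->vaddr) {
                kfree(d_page);
                return NULL;
        }
        d_page->p = virt_to_page(d_page->vaddr);
        return d_page;
}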
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 75f319090043a69021467e0d8fc9840fa796067d..bf080abc86d12c9bdcb8947d1d1d637d0b2a4bb7 100644
@@ -55,9 +55,12 @@ static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
 
 static void ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
 {
-       ttm->ttm.pages = drm_calloc_large(ttm->ttm.num_pages, sizeof(void*));
-       ttm->dma_address = drm_calloc_large(ttm->ttm.num_pages,
-                                           sizeof(*ttm->dma_address));
+       ttm->ttm.pages = drm_calloc_large(ttm->ttm.num_pages,
+                                         sizeof(*ttm->ttm.pages) +
+                                         sizeof(*ttm->dma_address) +
+                                         sizeof(*ttm->cpu_address));
+       ttm->cpu_address = (void *) (ttm->ttm.pages + ttm->ttm.num_pages);
+       ttm->dma_address = (void *) (ttm->cpu_address + ttm->ttm.num_pages);
 }
 
 #ifdef CONFIG_X86
@@ -228,7 +231,7 @@ int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
 
        INIT_LIST_HEAD(&ttm_dma->pages_list);
        ttm_dma_tt_alloc_page_directory(ttm_dma);
-       if (!ttm->pages || !ttm_dma->dma_address) {
+       if (!ttm->pages) {
                ttm_tt_destroy(ttm);
                pr_err("Failed allocating page table\n");
                return -ENOMEM;
@@ -243,7 +246,7 @@ void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
 
        drm_free_large(ttm->pages);
        ttm->pages = NULL;
-       drm_free_large(ttm_dma->dma_address);
+       ttm_dma->cpu_address = NULL;
        ttm_dma->dma_address = NULL;
 }
 EXPORT_SYMBOL(ttm_dma_tt_fini);
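
The ttm_tt.c change above packs all three page-directory arrays into a
single zeroed allocation and carves cpu_address and dma_address out of
it, so a single NULL check on ttm->pages now covers all three arrays and
ttm_dma_tt_fini() frees only ttm->pages while merely clearing the other
two pointers. A standalone sketch of that layout, with kcalloc()/kfree()
standing in for drm_calloc_large()/drm_free_large() and hypothetical
example_ names:

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/types.h>

/* One zeroed allocation holds num_pages entries of each array, carved
 * out in the order pages | cpu_address | dma_address.
 */
struct example_page_dir {
        struct page **pages;
        void **cpu_address;
        dma_addr_t *dma_address;
};

static int example_page_dir_alloc(struct example_page_dir *d,
                                  unsigned long num_pages)
{
        d->pages = kcalloc(num_pages, sizeof(*d->pages) +
                                      sizeof(*d->cpu_address) +
                                      sizeof(*d->dma_address), GFP_KERNEL);
        if (!d->pages)
                return -ENOMEM;
        d->cpu_address = (void **)(d->pages + num_pages);
        d->dma_address = (dma_addr_t *)(d->cpu_address + num_pages);
        return 0;
}

static void example_page_dir_fini(struct example_page_dir *d)
{
        /* The three arrays share one allocation, so only pages is freed. */
        kfree(d->pages);
        d->pages = NULL;
        d->cpu_address = NULL;
        d->dma_address = NULL;
}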
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 202f0a7171e85a2c5ea93ef9c9e2768d561e8d6e..1d9f0f1ff52d2c01cd4b42cc4113a70a90ffa0a8 100644
@@ -133,6 +133,7 @@ struct ttm_tt {
  * struct ttm_dma_tt
  *
  * @ttm: Base ttm_tt struct.
+ * @cpu_address: The CPU address of the pages
  * @dma_address: The DMA (bus) addresses of the pages
  * @pages_list: used by some page allocation backend
  *
@@ -142,6 +143,7 @@ struct ttm_tt {
  */
 struct ttm_dma_tt {
        struct ttm_tt ttm;
+       void **cpu_address;
        dma_addr_t *dma_address;
        struct list_head pages_list;
 };