/*
 * Copyright 2011 (c) Oracle Corp.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Author: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 */

/*
 * A simple DMA pool loosely based on dmapool.c. It has certain advantages:
 *  - Pool collects recently freed pages for reuse (and hooks up to
 *    the shrinker).
 *  - Tracks currently in use pages.
 *  - Tracks whether the page is UC, WB or cached (and reverts to WB
 *    when the pages are freed).
 */
#if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU)
#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/seq_file.h> /* for seq_printf */
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/highmem.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/kthread.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>
#define NUM_PAGES_TO_ALLOC	(PAGE_SIZE/sizeof(struct page *))
#define SMALL_ALLOCATION	4
#define FREE_ALL_PAGES		(~0U)
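/*
 * For reference: with 4 KiB pages on a 64-bit build, NUM_PAGES_TO_ALLOC
 * works out to 4096 / 8 = 512, i.e. one page worth of 'struct page *'
 * pointers per refill/free batch.
 */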
/* times are in msecs */
#define IS_UNDEFINED	(0)
#define IS_WC		(1<<1)
#define IS_UC		(1<<2)
#define IS_CACHED	(1<<3)
#define IS_DMA32	(1<<4)
enum pool_type {
	POOL_IS_UNDEFINED,
	POOL_IS_WC = IS_WC,
	POOL_IS_UC = IS_UC,
	POOL_IS_CACHED = IS_CACHED,
	POOL_IS_WC_DMA32 = IS_WC | IS_DMA32,
	POOL_IS_UC_DMA32 = IS_UC | IS_DMA32,
	POOL_IS_CACHED_DMA32 = IS_CACHED | IS_DMA32,
};
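/*
 * A pool's type is thus a caching flag optionally OR-ed with IS_DMA32;
 * a write-combined pool restricted to the first 4GB, for instance, is
 * POOL_IS_WC_DMA32 (IS_WC | IS_DMA32).
 */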
/*
 * The pool structure. There are usually six pools:
 *  - generic (not restricted to DMA32):
 *      - write combined, uncached, cached.
 *  - dma32 (up to 2^32 - so up to 4GB):
 *      - write combined, uncached, cached.
 * for each 'struct device'. The 'cached' pool is for pages that are actively
 * used. The other ones can be shrunk by the shrinker API if necessary.
 * @pools: The 'struct device->dma_pools' link.
 * @type: Type of the pool.
 * @lock: Protects the inuse_list and free_list from concurrent access. Must be
 * used with the irqsave/irqrestore variants because the pool allocator may be
 * called from delayed work.
 * @inuse_list: Pool of pages that are in use. The order is very important and
 * it matches the order in which the TTM pages are put back.
 * @free_list: Pool of pages that are free to be used. No order requirements.
 * @dev: The device that is associated with these pools.
 * @size: Size used during DMA allocation.
 * @npages_free: Count of available pages for re-use.
 * @npages_in_use: Count of pages that are in use.
 * @nfrees: Stats when pool is shrinking.
 * @nrefills: Stats when the pool is grown.
 * @gfp_flags: Flags to pass for alloc_page.
 * @name: Name of the pool.
 * @dev_name: Name derived from dev - similar to how dev_info works.
 *   Used during shutdown as the dev_info during release is unavailable.
 */
struct dma_pool {
	struct list_head pools; /* The 'struct device->dma_pools' link */
	enum pool_type type;
	spinlock_t lock;
	struct list_head inuse_list;
	struct list_head free_list;
	struct device *dev;
	unsigned size;
	unsigned npages_free;
	unsigned npages_in_use;
	unsigned long nfrees; /* Stats when shrunk. */
	unsigned long nrefills; /* Stats when grown. */
	gfp_t gfp_flags;
	char name[13]; /* "cached dma32" */
	char dev_name[64]; /* Constructed from dev */
};
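/*
 * For example, a device that allocates DMA32-limited and unrestricted
 * memory in all three caching modes ends up with six pools on its
 * dma_pools list, named "wc", "uc", "cached", "wc dma32", "uc dma32" and
 * "cached dma32" (hence the 13-byte name buffer above).
 */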
/*
 * The accounting page keeping track of the allocated page along with
 * the DMA address.
 * @page_list: The link to the 'page_list' in 'struct dma_pool'.
 * @vaddr: The virtual address of the page.
 * @p: The 'struct page' backing the allocation.
 * @dma: The bus address of the page. If the page is not allocated
 *   via the DMA API, it will be -1.
 */
struct dma_page {
	struct list_head page_list;
	void *vaddr;
	struct page *p;
	dma_addr_t dma;
};
/*
 * Limits for the pool. They are handled without locks because the only place
 * where they may change is the sysfs store. They won't have immediate effect
 * anyway, so forcing serialization to access them is pointless.
 */
struct ttm_pool_opts {
	unsigned	alloc_size;
	unsigned	max_size;
	unsigned	small;
};
/*
 * Contains the list of all of the 'struct device' and their corresponding
 * DMA pools. Guarded by _manager->lock.
 * @pools: The link to 'struct ttm_pool_manager->pools'
 * @dev: The 'struct device' associated with the 'pool'
 * @pool: The 'struct dma_pool' associated with the 'dev'
 */
struct device_pools {
	struct list_head pools;
	struct device *dev;
	struct dma_pool *pool;
};
/*
 * struct ttm_pool_manager - Holds memory pools for fast allocation
 *
 * @lock: Lock used when adding/removing from pools
 * @pools: List of 'struct device' and 'struct dma_pool' tuples.
 * @options: Limits for the pool.
 * @npools: Total amount of pools in existence.
 * @shrinker: The structure used by register_shrinker() / unregister_shrinker().
 */
struct ttm_pool_manager {
	struct mutex		lock;
	struct list_head	pools;
	struct ttm_pool_opts	options;
	unsigned		npools;
	struct shrinker		mm_shrink;
	struct kobject		kobj;
};

static struct ttm_pool_manager *_manager;
static struct attribute ttm_page_pool_max = {
	.name = "pool_max_size",
	.mode = S_IRUGO | S_IWUSR
};

static struct attribute ttm_page_pool_small = {
	.name = "pool_small_allocation",
	.mode = S_IRUGO | S_IWUSR
};

static struct attribute ttm_page_pool_alloc_size = {
	.name = "pool_allocation_size",
	.mode = S_IRUGO | S_IWUSR
};

static struct attribute *ttm_pool_attrs[] = {
	&ttm_page_pool_max,
	&ttm_page_pool_small,
	&ttm_page_pool_alloc_size,
	NULL
};
static void ttm_pool_kobj_release(struct kobject *kobj)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	kfree(m);
}
static ssize_t ttm_pool_store(struct kobject *kobj, struct attribute *attr,
			      const char *buffer, size_t size)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	int chars;
	unsigned val;

	chars = sscanf(buffer, "%u", &val);
	if (chars == 0)
		return size;

	/* Convert kb to number of pages */
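	/* (e.g. with 4 KiB pages, PAGE_SIZE >> 10 == 4, so an input of
	 * "2048" kB becomes 512 pages.) */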
	val = val / (PAGE_SIZE >> 10);

	if (attr == &ttm_page_pool_max)
		m->options.max_size = val;
	else if (attr == &ttm_page_pool_small)
		m->options.small = val;
	else if (attr == &ttm_page_pool_alloc_size) {
		if (val > NUM_PAGES_TO_ALLOC*8) {
			pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
			return size;
		} else if (val > NUM_PAGES_TO_ALLOC) {
			pr_warn("Setting allocation size to larger than %lu is not recommended\n",
				NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
		}
		m->options.alloc_size = val;
	}

	return size;
}
static ssize_t ttm_pool_show(struct kobject *kobj, struct attribute *attr,
			     char *buffer)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	unsigned val = 0;

	if (attr == &ttm_page_pool_max)
		val = m->options.max_size;
	else if (attr == &ttm_page_pool_small)
		val = m->options.small;
	else if (attr == &ttm_page_pool_alloc_size)
		val = m->options.alloc_size;

	val = val * (PAGE_SIZE >> 10);

	return snprintf(buffer, PAGE_SIZE, "%u\n", val);
}
static const struct sysfs_ops ttm_pool_sysfs_ops = {
	.show = &ttm_pool_show,
	.store = &ttm_pool_store,
};

static struct kobj_type ttm_pool_kobj_type = {
	.release = &ttm_pool_kobj_release,
	.sysfs_ops = &ttm_pool_sysfs_ops,
	.default_attrs = ttm_pool_attrs,
};
#ifndef CONFIG_X86
static int set_pages_array_wb(struct page **pages, int addrinarray)
	for (i = 0; i < addrinarray; i++)
		unmap_page_from_agp(pages[i]);

static int set_pages_array_wc(struct page **pages, int addrinarray)
	for (i = 0; i < addrinarray; i++)
		map_page_into_agp(pages[i]);

static int set_pages_array_uc(struct page **pages, int addrinarray)
	for (i = 0; i < addrinarray; i++)
		map_page_into_agp(pages[i]);
#endif /* for !CONFIG_X86 */
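/*
 * On x86 the kernel's own set_pages_array_wb/wc/uc() helpers are used
 * instead; the AGP map/unmap calls above are only a best-effort fallback
 * for the other architectures.
 */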
static int ttm_set_pages_caching(struct dma_pool *pool,
				 struct page **pages, unsigned cpages)
{
	int r = 0;

	/* Set page caching */
	if (pool->type & IS_UC) {
		r = set_pages_array_uc(pages, cpages);
		if (r)
			pr_err("%s: Failed to set %d pages to uc!\n",
			       pool->dev_name, cpages);
	}
	if (pool->type & IS_WC) {
		r = set_pages_array_wc(pages, cpages);
		if (r)
			pr_err("%s: Failed to set %d pages to wc!\n",
			       pool->dev_name, cpages);
	}
	return r;
}
static void __ttm_dma_free_page(struct dma_pool *pool, struct dma_page *d_page)
	dma_addr_t dma = d_page->dma;

	dma_free_coherent(pool->dev, pool->size, d_page->vaddr, dma);

static struct dma_page *__ttm_dma_alloc_page(struct dma_pool *pool)
	struct dma_page *d_page;

	d_page = kmalloc(sizeof(struct dma_page), GFP_KERNEL);

	d_page->vaddr = dma_alloc_coherent(pool->dev, pool->size,
	d_page->p = virt_to_page(d_page->vaddr);
static enum pool_type ttm_to_type(int flags, enum ttm_caching_state cstate)
	enum pool_type type = IS_UNDEFINED;

	if (flags & TTM_PAGE_FLAG_DMA32)
	if (cstate == tt_cached)
	else if (cstate == tt_uncached)

static void ttm_pool_update_free_locked(struct dma_pool *pool,
					unsigned freed_pages)
	pool->npages_free -= freed_pages;
	pool->nfrees += freed_pages;
/* set memory back to wb and free the pages. */
static void ttm_dma_pages_put(struct dma_pool *pool, struct list_head *d_pages,
			      struct page *pages[], unsigned npages)
	struct dma_page *d_page, *tmp;

	/* Don't set WB on WB page pool. */
	if (npages && !(pool->type & IS_CACHED) &&
	    set_pages_array_wb(pages, npages))
		pr_err("%s: Failed to set %d pages to wb!\n",
		       pool->dev_name, npages);

	list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
		list_del(&d_page->page_list);
		__ttm_dma_free_page(pool, d_page);

static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
	/* Don't set WB on WB page pool. */
	if (!(pool->type & IS_CACHED) && set_pages_array_wb(&d_page->p, 1))
		pr_err("%s: Failed to set %d pages to wb!\n",

	list_del(&d_page->page_list);
	__ttm_dma_free_page(pool, d_page);
/*
 * Free pages from pool.
 *
 * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
 * number of pages in one go.
 *
 * @pool: to free the pages from
 * @nr_free: number of pages to free, or FREE_ALL_PAGES to free every page
 * in the pool.
 * @gfp: flags for the kmalloc of the temporary array used while freeing.
 */
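/*
 * For example, the shrinker below calls this with a bounded nr_free and
 * sc->gfp_mask, while ttm_dma_free_pool() passes FREE_ALL_PAGES and
 * GFP_KERNEL to drain a pool completely.
 */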
static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
	unsigned long irq_flags;
	struct dma_page *dma_p, *tmp;
	struct page **pages_to_free;
	struct list_head d_pages;
	unsigned freed_pages = 0,
		 npages_to_free = nr_free;

	if (NUM_PAGES_TO_ALLOC < nr_free)
		npages_to_free = NUM_PAGES_TO_ALLOC;

	pr_debug("%s: (%s:%d) Attempting to free %d (%d) pages\n",
		 pool->dev_name, pool->name, current->pid,
		 npages_to_free, nr_free);

	pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), gfp);

	if (!pages_to_free) {
		pr_err("%s: Failed to allocate memory for pool free operation\n",

	INIT_LIST_HEAD(&d_pages);

	spin_lock_irqsave(&pool->lock, irq_flags);

	/* We're picking the oldest ones off the list */
	list_for_each_entry_safe_reverse(dma_p, tmp, &pool->free_list,
		if (freed_pages >= npages_to_free)

		/* Move the dma_page from one list to another. */
		list_move(&dma_p->page_list, &d_pages);

		pages_to_free[freed_pages++] = dma_p->p;
		/* We can only remove NUM_PAGES_TO_ALLOC at a time. */
		if (freed_pages >= NUM_PAGES_TO_ALLOC) {

			ttm_pool_update_free_locked(pool, freed_pages);
			/* Because changing page caching is costly
			 * we unlock the pool to prevent stalling.
			 */
			spin_unlock_irqrestore(&pool->lock, irq_flags);

			ttm_dma_pages_put(pool, &d_pages, pages_to_free,

			INIT_LIST_HEAD(&d_pages);

			if (likely(nr_free != FREE_ALL_PAGES))
				nr_free -= freed_pages;

			if (NUM_PAGES_TO_ALLOC >= nr_free)
				npages_to_free = nr_free;
			else
				npages_to_free = NUM_PAGES_TO_ALLOC;

	/* free all so restart the processing */

	/* Not allowed to fall through or break because
	 * following context is inside spinlock while we are
	 * outside of it.
	 */

	/* remove range of pages from the pool */

	ttm_pool_update_free_locked(pool, freed_pages);
	nr_free -= freed_pages;

	spin_unlock_irqrestore(&pool->lock, irq_flags);

	ttm_dma_pages_put(pool, &d_pages, pages_to_free, freed_pages);

	kfree(pages_to_free);
static void ttm_dma_free_pool(struct device *dev, enum pool_type type)
	struct device_pools *p;
	struct dma_pool *pool;

	mutex_lock(&_manager->lock);
	list_for_each_entry_reverse(p, &_manager->pools, pools) {
		if (pool->type != type)

	list_for_each_entry_reverse(pool, &dev->dma_pools, pools) {
		if (pool->type != type)

		/* Takes a spinlock.. */
		ttm_dma_page_pool_free(pool, FREE_ALL_PAGES, GFP_KERNEL);
		WARN_ON(((pool->npages_in_use + pool->npages_free) != 0));
		/* This code path is called after _all_ references to the
		 * struct device have been dropped - so nobody should be
		 * touching it. In case somebody is trying to _add_ we are
		 * guarded by the mutex. */
		list_del(&pool->pools);
	mutex_unlock(&_manager->lock);
/*
 * When the 'struct device' is freed this destructor is run, although the
 * pool might already have been freed earlier.
 */
static void ttm_dma_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	if (pool)
		ttm_dma_free_pool(dev, pool->type);
}

static int ttm_dma_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}
static struct dma_pool *ttm_dma_pool_init(struct device *dev, gfp_t flags,
	char *n[] = {"wc", "uc", "cached", " dma32", "unknown",};
	enum pool_type t[] = {IS_WC, IS_UC, IS_CACHED, IS_DMA32, IS_UNDEFINED};
	struct device_pools *sec_pool = NULL;
	struct dma_pool *pool = NULL, **ptr;

	ptr = devres_alloc(ttm_dma_pool_release, sizeof(*ptr), GFP_KERNEL);

	pool = kmalloc_node(sizeof(struct dma_pool), GFP_KERNEL,
	sec_pool = kmalloc_node(sizeof(struct device_pools), GFP_KERNEL,

	INIT_LIST_HEAD(&sec_pool->pools);
	sec_pool->pool = pool;

	INIT_LIST_HEAD(&pool->free_list);
	INIT_LIST_HEAD(&pool->inuse_list);
	INIT_LIST_HEAD(&pool->pools);
	spin_lock_init(&pool->lock);
	pool->npages_free = pool->npages_in_use = 0;
	pool->gfp_flags = flags;
	pool->size = PAGE_SIZE;

	for (i = 0; i < 5; i++) {
		p += snprintf(p, sizeof(pool->name) - (p - pool->name),

	/* We copy the name for pr_ calls because when dma_pool_destroy is
	 * called the kobj->name has already been deallocated. */
	snprintf(pool->dev_name, sizeof(pool->dev_name), "%s %s",
		 dev_driver_string(dev), dev_name(dev));
	mutex_lock(&_manager->lock);
	/* You can get the dma_pool from either the global: */
	list_add(&sec_pool->pools, &_manager->pools);
	/* or from 'struct device': */
	list_add(&pool->pools, &dev->dma_pools);
	mutex_unlock(&_manager->lock);

	devres_add(dev, ptr);
static struct dma_pool *ttm_dma_find_pool(struct device *dev,
	struct dma_pool *pool, *tmp, *found = NULL;

	if (type == IS_UNDEFINED)

	/* NB: We iterate on the 'struct device' which has no spinlock, but
	 * it does have a kref which we have taken. The kref is taken during
	 * graphic driver loading - in the drm_pci_init it calls either
	 * pci_dev_get or pci_register_driver which both end up taking a kref
	 * on 'struct device'.
	 *
	 * On teardown, the graphic drivers end up quiescing the TTM (put_pages)
	 * and calling the devres destructor: ttm_dma_pool_release. The nice
	 * thing is that at that point in time there are no pages associated
	 * with the driver, so this function will not be called.
	 */
	list_for_each_entry_safe(pool, tmp, &dev->dma_pools, pools) {
		if (pool->type != type)
/*
 * Free the pages that failed to change their caching state. Pages whose
 * caching state was already changed are put back into the pool.
 */
static void ttm_dma_handle_caching_state_failure(struct dma_pool *pool,
						 struct list_head *d_pages,
						 struct page **failed_pages,
	struct dma_page *d_page, *tmp;

	/* Find the failed page. */
	list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {

		/* .. and then progress over the full list. */
		list_del(&d_page->page_list);
		__ttm_dma_free_page(pool, d_page);
/*
 * Allocate 'count' pages and add them to the 'd_pages' list; their caching
 * state is set to match the pool.
 * We return zero for success, and negative numbers as errors.
 */
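/*
 * With 4 KiB pages on a 64-bit build max_cpages is 512, so e.g. a request
 * for 1024 pages is pushed through ttm_set_pages_caching() in two batches.
 */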
static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool,
					struct list_head *d_pages,
	struct page **caching_array;
	struct dma_page *dma_p;
	unsigned max_cpages = min(count,
			(unsigned)(PAGE_SIZE/sizeof(struct page *)));

	/* allocate array for page caching change */
	caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);

	if (!caching_array) {
		pr_err("%s: Unable to allocate table for new pages\n",

	pr_debug("%s: (%s:%d) Getting %d pages\n",
		 pool->dev_name, pool->name, current->pid, count);

	for (i = 0, cpages = 0; i < count; ++i) {
		dma_p = __ttm_dma_alloc_page(pool);
			pr_err("%s: Unable to get page %u\n",

			/* store already allocated pages in the pool after
			 * setting the caching state */
			r = ttm_set_pages_caching(pool, caching_array,
				ttm_dma_handle_caching_state_failure(
					pool, d_pages, caching_array,

#ifdef CONFIG_HIGHMEM
		/* gfp flags of highmem page should never be dma32 so we
		 * should be fine in such a case
		 */

		caching_array[cpages++] = p;
		if (cpages == max_cpages) {
			/* Note: Cannot hold the spinlock */
			r = ttm_set_pages_caching(pool, caching_array,
				ttm_dma_handle_caching_state_failure(
					pool, d_pages, caching_array,

		list_add(&dma_p->page_list, d_pages);

	r = ttm_set_pages_caching(pool, caching_array, cpages);
		ttm_dma_handle_caching_state_failure(pool, d_pages,
						     caching_array, cpages);

	kfree(caching_array);
/*
 * @return count of pages still required to fulfill the request.
 */
static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool,
					 unsigned long *irq_flags)
	unsigned count = _manager->options.small;
	int r = pool->npages_free;

	if (count > pool->npages_free) {
		struct list_head d_pages;

		INIT_LIST_HEAD(&d_pages);

		spin_unlock_irqrestore(&pool->lock, *irq_flags);

		/* Returns how many more are necessary to fulfill the
		 * request. */
		r = ttm_dma_pool_alloc_new_pages(pool, &d_pages, count);

		spin_lock_irqsave(&pool->lock, *irq_flags);
			/* Add the fresh pages to the end.. */
			list_splice(&d_pages, &pool->free_list);
			pool->npages_free += count;

			struct dma_page *d_page;

			pr_err("%s: Failed to fill %s pool (r:%d)!\n",
			       pool->dev_name, pool->name, r);

			list_for_each_entry(d_page, &d_pages, page_list) {

			list_splice_tail(&d_pages, &pool->free_list);
			pool->npages_free += cpages;
/*
 * @return count of pages still required to fulfill the request.
 * The populate list is actually a stack (not that it matters, as TTM
 * allocates one page at a time).
 */
static int ttm_dma_pool_get_pages(struct dma_pool *pool,
				  struct ttm_dma_tt *ttm_dma,
	struct dma_page *d_page;
	struct ttm_tt *ttm = &ttm_dma->ttm;
	unsigned long irq_flags;
	int count, r = -ENOMEM;

	spin_lock_irqsave(&pool->lock, irq_flags);
	count = ttm_dma_page_pool_fill_locked(pool, &irq_flags);
		d_page = list_first_entry(&pool->free_list, struct dma_page, page_list);
		ttm->pages[index] = d_page->p;
		ttm_dma->cpu_address[index] = d_page->vaddr;
		ttm_dma->dma_address[index] = d_page->dma;
		list_move_tail(&d_page->page_list, &ttm_dma->pages_list);
		pool->npages_in_use += 1;
		pool->npages_free -= 1;
	spin_unlock_irqrestore(&pool->lock, irq_flags);
/*
 * On success the pages list will hold count number of correctly
 * cached pages. On failure it will hold the negative return value
 * (-ENOMEM, etc.).
 */
int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
	struct ttm_tt *ttm = &ttm_dma->ttm;
	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
	struct dma_pool *pool;

	if (ttm->state != tt_unpopulated)

	type = ttm_to_type(ttm->page_flags, ttm->caching_state);
	if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
		gfp_flags = GFP_USER | GFP_DMA32;
	else
		gfp_flags = GFP_HIGHUSER;
	if (ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
		gfp_flags |= __GFP_ZERO;

	pool = ttm_dma_find_pool(dev, type);
		pool = ttm_dma_pool_init(dev, gfp_flags, type);
		if (IS_ERR_OR_NULL(pool)) {

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	for (i = 0; i < ttm->num_pages; ++i) {
		ret = ttm_dma_pool_get_pages(pool, ttm_dma, i);
			ttm_dma_unpopulate(ttm_dma, dev);

		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
		if (unlikely(ret != 0)) {
			ttm_dma_unpopulate(ttm_dma, dev);

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0)) {
			ttm_dma_unpopulate(ttm_dma, dev);

	ttm->state = tt_unbound;

EXPORT_SYMBOL_GPL(ttm_dma_populate);
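/*
 * Rough usage sketch (hypothetical driver code, only for illustration):
 * a TTM backend would typically call the populate/unpopulate pair from
 * its ttm_tt hooks, e.g.:
 *
 *	static int foo_ttm_tt_populate(struct ttm_tt *ttm)
 *	{
 *		struct ttm_dma_tt *dma_tt =
 *			container_of(ttm, struct ttm_dma_tt, ttm);
 *		struct foo_device *fdev = foo_from_ttm(ttm);
 *
 *		return ttm_dma_populate(dma_tt, fdev->dev);
 *	}
 *
 *	static void foo_ttm_tt_unpopulate(struct ttm_tt *ttm)
 *	{
 *		struct ttm_dma_tt *dma_tt =
 *			container_of(ttm, struct ttm_dma_tt, ttm);
 *		struct foo_device *fdev = foo_from_ttm(ttm);
 *
 *		ttm_dma_unpopulate(dma_tt, fdev->dev);
 *	}
 *
 * where 'foo_device' and foo_from_ttm() stand in for the driver's own
 * device bookkeeping.
 */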
/* Put all pages in pages list to correct pool to wait for reuse */
void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
	struct ttm_tt *ttm = &ttm_dma->ttm;
	struct dma_pool *pool;
	struct dma_page *d_page, *next;
	bool is_cached = false;
	unsigned count = 0, i, npages = 0;
	unsigned long irq_flags;

	type = ttm_to_type(ttm->page_flags, ttm->caching_state);
	pool = ttm_dma_find_pool(dev, type);

	is_cached = (ttm_dma_find_pool(pool->dev,
		     ttm_to_type(ttm->page_flags, tt_cached)) == pool);

	/* make sure the pages array matches the list and count the pages */
	list_for_each_entry(d_page, &ttm_dma->pages_list, page_list) {
		ttm->pages[count] = d_page->p;

	spin_lock_irqsave(&pool->lock, irq_flags);
	pool->npages_in_use -= count;
		pool->nfrees += count;
		pool->npages_free += count;
		list_splice(&ttm_dma->pages_list, &pool->free_list);
		if (pool->npages_free > _manager->options.max_size) {
			npages = pool->npages_free - _manager->options.max_size;
			/* free at least NUM_PAGES_TO_ALLOC number of pages
			 * to reduce calls to set_memory_wb */
			if (npages < NUM_PAGES_TO_ALLOC)
				npages = NUM_PAGES_TO_ALLOC;
	spin_unlock_irqrestore(&pool->lock, irq_flags);

		list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list, page_list) {
			ttm_mem_global_free_page(ttm->glob->mem_glob,
			ttm_dma_page_put(pool, d_page);

		for (i = 0; i < count; i++) {
			ttm_mem_global_free_page(ttm->glob->mem_glob,

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	for (i = 0; i < ttm->num_pages; i++) {
		ttm->pages[i] = NULL;
		ttm_dma->cpu_address[i] = 0;
		ttm_dma->dma_address[i] = 0;

	/* shrink pool if necessary (only on !is_cached pools) */
	ttm_dma_page_pool_free(pool, npages, GFP_KERNEL);
	ttm->state = tt_unpopulated;

EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
/*
 * Callback for mm to request pool to reduce number of pages held.
 *
 * XXX: (dchinner) Deadlock warning!
 *
 * We need to pass sc->gfp_mask to ttm_dma_page_pool_free().
 *
 * I'm getting sadder as I hear more pathetic whimpers about needing per-pool
 * shrinkers.
 */
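/*
 * Passing sc->gfp_mask through matters because ttm_dma_page_pool_free()
 * kmallocs a temporary page array; doing that allocation with plain
 * GFP_KERNEL from inside reclaim could recurse back into the shrinker.
 */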
static unsigned long
ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
	static unsigned start_pool;
	unsigned pool_offset;
	unsigned shrink_pages = sc->nr_to_scan;
	struct device_pools *p;
	unsigned long freed = 0;

	if (list_empty(&_manager->pools))
	if (!mutex_trylock(&_manager->lock))
	if (!_manager->npools)
	pool_offset = ++start_pool % _manager->npools;
	list_for_each_entry(p, &_manager->pools, pools) {

		if (shrink_pages == 0)
		/* Do it in round-robin fashion. */
		if (++idx < pool_offset)
		nr_free = shrink_pages;
		shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free,
		freed += nr_free - shrink_pages;
		pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
			 p->pool->dev_name, p->pool->name, current->pid,
			 nr_free, shrink_pages);
	mutex_unlock(&_manager->lock);
static unsigned long
ttm_dma_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	struct device_pools *p;
	unsigned long count = 0;

	if (!mutex_trylock(&_manager->lock))
		return 0;
	list_for_each_entry(p, &_manager->pools, pools)
		count += p->pool->npages_free;
	mutex_unlock(&_manager->lock);
	return count;
}

static void ttm_dma_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{
	manager->mm_shrink.count_objects = ttm_dma_pool_shrink_count;
	manager->mm_shrink.scan_objects = ttm_dma_pool_shrink_scan;
	manager->mm_shrink.seeks = 1;
	register_shrinker(&manager->mm_shrink);
}

static void ttm_dma_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
{
	unregister_shrinker(&manager->mm_shrink);
}
int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
	pr_info("Initializing DMA pool allocator\n");

	_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);

	mutex_init(&_manager->lock);
	INIT_LIST_HEAD(&_manager->pools);

	_manager->options.max_size = max_pages;
	_manager->options.small = SMALL_ALLOCATION;
	_manager->options.alloc_size = NUM_PAGES_TO_ALLOC;

	/* This takes care of auto-freeing the _manager */
	ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
				   &glob->kobj, "dma_pool");
	if (unlikely(ret != 0)) {
		kobject_put(&_manager->kobj);

	ttm_dma_pool_mm_shrink_init(_manager);

void ttm_dma_page_alloc_fini(void)
	struct device_pools *p, *t;

	pr_info("Finalizing DMA pool allocator\n");
	ttm_dma_pool_mm_shrink_fini(_manager);

	list_for_each_entry_safe_reverse(p, t, &_manager->pools, pools) {
		dev_dbg(p->dev, "(%s:%d) Freeing.\n", p->pool->name,
		WARN_ON(devres_destroy(p->dev, ttm_dma_pool_release,
				       ttm_dma_pool_match, p->pool));
		ttm_dma_free_pool(p->dev, p->pool->type);

	kobject_put(&_manager->kobj);
int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
	struct device_pools *p;
	struct dma_pool *pool = NULL;
	char *h[] = {"pool", "refills", "pages freed", "inuse", "available",
		     "name", "virt", "busaddr"};

		seq_printf(m, "No pool allocator running.\n");

	seq_printf(m, "%13s %12s %13s %8s %8s %8s\n",
		   h[0], h[1], h[2], h[3], h[4], h[5]);
	mutex_lock(&_manager->lock);
	list_for_each_entry(p, &_manager->pools, pools) {
		struct device *dev = p->dev;

			seq_printf(m, "%13s %12ld %13ld %8d %8d %8s\n",
				   pool->name, pool->nrefills,
				   pool->nfrees, pool->npages_in_use,
	mutex_unlock(&_manager->lock);

EXPORT_SYMBOL_GPL(ttm_dma_page_alloc_debugfs);