/*
 * DMA Pool allocator
 *
 * Copyright 2001 David Brownell
 * Copyright 2007 Intel Corporation
 *   Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * This software may be redistributed and/or modified under the terms of
 * the GNU General Public License ("GPL") version 2 as published by the
 * Free Software Foundation.
 *
 * This allocator returns small blocks of a given size which are DMA-able by
 * the given device.  It uses the dma_alloc_coherent page allocator to get
 * new pages, then splits them up into blocks of the required size.
 * Many older drivers still have their own code to do this.
 *
 * The current design of this allocator is fairly simple.  The pool is
 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 * allocated pages.  Each page in the page_list is split into blocks of at
 * least 'size' bytes.  Free blocks are tracked in an unsorted singly-linked
 * list of free blocks within the page.  Used blocks aren't tracked, but we
 * keep a count of how many are currently allocated from each page.
 */
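
/*
 * A worked example of the free-list encoding (illustrative numbers only):
 * with size = 1024 and allocation = 4096, a freshly initialised page holds
 * four blocks at offsets 0, 1024, 2048 and 3072.  The first int of each
 * free block stores the offset of the next free block, so the initial
 * chain reads 0 -> 1024 -> 2048 -> 3072 -> 4096; an offset equal to
 * 'allocation' terminates the list.
 */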

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>

struct dma_pool {		/* the pool */
	struct list_head page_list;
	spinlock_t lock;
	size_t size;
	struct device *dev;
	size_t allocation;
	char name[32];
	wait_queue_head_t waitq;
	struct list_head pools;
};

struct dma_page {		/* cacheable header for 'allocation' bytes */
	struct list_head page_list;
	void *vaddr;
	dma_addr_t dma;
	unsigned int in_use;
	unsigned int offset;
};

#define	POOL_TIMEOUT_JIFFIES	((100 /* msec */ * HZ) / 1000)

static DEFINE_MUTEX(pools_lock);

static ssize_t
show_pools(struct device *dev, struct device_attribute *attr, char *buf)
{
	unsigned temp;
	unsigned size;
	char *next;
	struct dma_page *page;
	struct dma_pool *pool;

	next = buf;
	size = PAGE_SIZE;

	temp = scnprintf(next, size, "poolinfo - 0.1\n");
	size -= temp;
	next += temp;

	mutex_lock(&pools_lock);
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		unsigned pages = 0;
		unsigned blocks = 0;

		list_for_each_entry(page, &pool->page_list, page_list) {
			pages++;
			blocks += page->in_use;
		}

		/* per-pool info, no real statistics yet */
		temp = scnprintf(next, size, "%-16s %4u %4zu %4zu %2u\n",
				 pool->name, blocks,
				 pages * (pool->allocation / pool->size),
				 pool->size, pages);
		size -= temp;
		next += temp;
	}
	mutex_unlock(&pools_lock);

	return PAGE_SIZE - size;
}

static DEVICE_ATTR(pools, S_IRUGO, show_pools, NULL);
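
/*
 * Sample of the resulting sysfs output (the pool name and values are
 * hypothetical): a pool "buffer-2048" with one of its two blocks in use
 * on a single page would read:
 *
 *	poolinfo - 0.1
 *	buffer-2048         1    2 2048  1
 *
 * Columns: pool name, blocks in use, total blocks, block size, pages.
 */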

/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 * Context: !in_interrupt()
 *
 * Returns a dma allocation pool with the requested characteristics, or
 * null if one can't be created.  Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory.  Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.  The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If @allocation is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
				 size_t size, size_t align, size_t allocation)
{
	struct dma_pool *retval;

	if (align == 0)
		align = 1;
	else if (align & (align - 1))
		return NULL;

	if (size == 0)
		return NULL;
	else if (size < 4)
		size = 4;

	if ((size % align) != 0)
		size = ALIGN(size, align);

	if (allocation == 0) {
		if (PAGE_SIZE < size)
			allocation = size;
		else
			allocation = PAGE_SIZE;
		/* FIXME: round up for less fragmentation */
	} else if (allocation < size)
		return NULL;

	retval = kmalloc_node(sizeof(*retval), GFP_KERNEL, dev_to_node(dev));
	if (!retval)
		return NULL;

	strlcpy(retval->name, name, sizeof(retval->name));

	retval->dev = dev;

	INIT_LIST_HEAD(&retval->page_list);
	spin_lock_init(&retval->lock);
	retval->size = size;
	retval->allocation = allocation;
	init_waitqueue_head(&retval->waitq);

	if (dev) {
		int ret;

		mutex_lock(&pools_lock);
		if (list_empty(&dev->dma_pools))
			ret = device_create_file(dev, &dev_attr_pools);
		else
			ret = 0;
		/* note: not currently insisting "name" be unique */
		if (!ret)
			list_add(&retval->pools, &dev->dma_pools);
		else {
			kfree(retval);
			retval = NULL;
		}
		mutex_unlock(&pools_lock);
	} else
		INIT_LIST_HEAD(&retval->pools);

	return retval;
}
EXPORT_SYMBOL(dma_pool_create);
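
/*
 * Illustrative usage sketch (not built; the device, names and sizes are
 * hypothetical).  A driver typically creates one pool per fixed-size
 * structure it shares with the hardware; passing e.g. 4096 as @allocation
 * would additionally keep every block inside a 4K boundary:
 */
#if 0
static int example_init(struct device *dev)
{
	struct dma_pool *pool;
	dma_addr_t dma;
	void *buf;

	/* 64-byte blocks, 16-byte aligned, no boundary restriction */
	pool = dma_pool_create("example", dev, 64, 16, 0);
	if (!pool)
		return -ENOMEM;

	buf = dma_pool_alloc(pool, GFP_KERNEL, &dma);
	if (!buf) {
		dma_pool_destroy(pool);
		return -ENOMEM;
	}

	/* ... hand 'dma' to the device, access 'buf' from the CPU ... */

	dma_pool_free(pool, buf, dma);
	dma_pool_destroy(pool);
	return 0;
}
#endif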

static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
{
	unsigned int offset = 0;

	do {
		unsigned int next = offset + pool->size;
		/*
		 * A block ending exactly at the allocation boundary still
		 * fits, so only terminate the chain when the next block
		 * would actually overflow the allocation.  (Comparing with
		 * ">=" here would waste the last block of every page whose
		 * size is an exact multiple of the block size.)
		 */
		if (unlikely((next + pool->size) > pool->allocation))
			next = pool->allocation;
		*(int *)(page->vaddr + offset) = next;
		offset = next;
	} while (offset < pool->allocation);
}

static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
	struct dma_page *page;

	page = kmalloc(sizeof(*page), mem_flags);
	if (!page)
		return NULL;
	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
					 &page->dma, mem_flags);
	if (page->vaddr) {
#ifdef	CONFIG_DEBUG_SLAB
		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
		pool_initialise_page(pool, page);
		list_add(&page->page_list, &pool->page_list);
		page->in_use = 0;
		page->offset = 0;
	} else {
		kfree(page);
		page = NULL;
	}
	return page;
}

static inline int is_page_busy(struct dma_page *page)
{
	return page->in_use != 0;
}

static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
{
	dma_addr_t dma = page->dma;

#ifdef	CONFIG_DEBUG_SLAB
	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
	dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
	list_del(&page->page_list);
	kfree(page);
}

/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
	mutex_lock(&pools_lock);
	list_del(&pool->pools);
	if (pool->dev && list_empty(&pool->dev->dma_pools))
		device_remove_file(pool->dev, &dev_attr_pools);
	mutex_unlock(&pools_lock);

	while (!list_empty(&pool->page_list)) {
		struct dma_page *page;
		page = list_entry(pool->page_list.next,
				  struct dma_page, page_list);
		if (is_page_busy(page)) {
			if (pool->dev)
				dev_err(pool->dev,
					"dma_pool_destroy %s, %p busy\n",
					pool->name, page->vaddr);
			else
				printk(KERN_ERR
				       "dma_pool_destroy %s, %p busy\n",
				       pool->name, page->vaddr);
			/* leak the still-in-use consistent memory */
			list_del(&page->page_list);
			kfree(page);
		} else
			pool_free_page(pool, page);
	}

	kfree(pool);
}
EXPORT_SYMBOL(dma_pool_destroy);

/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * This returns the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, %NULL is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
		     dma_addr_t *handle)
{
	unsigned long flags;
	struct dma_page *page;
	size_t offset;
	void *retval;

	spin_lock_irqsave(&pool->lock, flags);
 restart:
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (page->offset < pool->allocation)
			goto ready;
	}
	page = pool_alloc_page(pool, GFP_ATOMIC);
	if (!page) {
		if (mem_flags & __GFP_WAIT) {
			DECLARE_WAITQUEUE(wait, current);

			__set_current_state(TASK_INTERRUPTIBLE);
			__add_wait_queue(&pool->waitq, &wait);
			spin_unlock_irqrestore(&pool->lock, flags);

			schedule_timeout(POOL_TIMEOUT_JIFFIES);

			spin_lock_irqsave(&pool->lock, flags);
			__remove_wait_queue(&pool->waitq, &wait);
			goto restart;
		}
		retval = NULL;
		goto done;
	}

 ready:
	page->in_use++;
	offset = page->offset;
	page->offset = *(int *)(page->vaddr + offset);
	retval = offset + page->vaddr;
	*handle = offset + page->dma;
#ifdef	CONFIG_DEBUG_SLAB
	memset(retval, POOL_POISON_ALLOCATED, pool->size);
#endif
 done:
	spin_unlock_irqrestore(&pool->lock, flags);
	return retval;
}
EXPORT_SYMBOL(dma_pool_alloc);
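
/*
 * Illustrative call from atomic context ('pool', 'desc' and 'dma' are
 * hypothetical names): without __GFP_WAIT in @mem_flags the function
 * never sleeps, so when the pool is exhausted and a new page can't be
 * allocated atomically it simply returns NULL:
 *
 *	desc = dma_pool_alloc(pool, GFP_ATOMIC, &dma);
 *	if (!desc)
 *		return -ENOMEM;
 */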

static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
	unsigned long flags;
	struct dma_page *page;

	spin_lock_irqsave(&pool->lock, flags);
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		if (dma < (page->dma + pool->allocation))
			goto done;
	}
	page = NULL;
 done:
	spin_unlock_irqrestore(&pool->lock, flags);
	return page;
}

/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_page *page;
	unsigned long flags;
	unsigned int offset;

	page = pool_find_page(pool, dma);
	if (!page) {
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p/%lx (bad dma)\n",
				pool->name, vaddr, (unsigned long)dma);
		else
			printk(KERN_ERR "dma_pool_free %s, %p/%lx (bad dma)\n",
			       pool->name, vaddr, (unsigned long)dma);
		return;
	}

	offset = vaddr - page->vaddr;
#ifdef	CONFIG_DEBUG_SLAB
	if ((dma - page->dma) != offset) {
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p (bad vaddr)/%Lx\n",
				pool->name, vaddr, (unsigned long long)dma);
		else
			printk(KERN_ERR
			       "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
			       pool->name, vaddr, (unsigned long long)dma);
		return;
	}
	{
		unsigned int chain = page->offset;
		while (chain < pool->allocation) {
			if (chain != offset) {
				chain = *(int *)(page->vaddr + chain);
				continue;
			}
			if (pool->dev)
				dev_err(pool->dev, "dma_pool_free %s, dma %Lx "
					"already free\n", pool->name,
					(unsigned long long)dma);
			else
				printk(KERN_ERR "dma_pool_free %s, dma %Lx "
				       "already free\n", pool->name,
				       (unsigned long long)dma);
			return;
		}
	}
	memset(vaddr, POOL_POISON_FREED, pool->size);
#endif

	spin_lock_irqsave(&pool->lock, flags);
	page->in_use--;
	*(int *)vaddr = page->offset;
	page->offset = offset;
	if (waitqueue_active(&pool->waitq))
		wake_up_locked(&pool->waitq);
	/*
	 * Resist a temptation to do
	 *    if (!is_page_busy(page)) pool_free_page(pool, page);
	 * Better have a few empty pages hang around.
	 */
	spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);

/*
 * Managed DMA pool
 */
static void dmam_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create().  A DMA pool created with this function is
 * automatically destroyed on driver detach.
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
				  size_t size, size_t align, size_t allocation)
{
	struct dma_pool **ptr, *pool;

	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
	if (pool)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return pool;
}
EXPORT_SYMBOL(dmam_pool_create);
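
/*
 * Illustrative managed usage (hypothetical driver): the pool is released
 * by devres when the driver detaches, so probe() needs no matching
 * destroy call on its error paths:
 */
#if 0
static int example_probe(struct device *dev)
{
	struct dma_pool *pool;

	pool = dmam_pool_create("example", dev, 64, 16, 0);
	if (!pool)
		return -ENOMEM;

	/* ... allocate and free blocks as usual; no cleanup needed ... */
	return 0;
}
#endif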

/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
	struct device *dev = pool->dev;

	dma_pool_destroy(pool);
	WARN_ON(devres_destroy(dev, dmam_pool_release, dmam_pool_match, pool));
}
EXPORT_SYMBOL(dmam_pool_destroy);