drm: mm: track free areas implicitly
author	Daniel Vetter <daniel.vetter@ffwll.ch>
Fri, 18 Feb 2011 16:59:12 +0000 (17:59 +0100)
committer	Dave Airlie <airlied@redhat.com>
Wed, 23 Feb 2011 00:32:34 +0000 (10:32 +1000)
The idea is to track free holes implicitly by marking the allocation
immediately preceding a hole.
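
For example, a node covering [0, 64) whose successor starts at 128
carries hole_follows == 1; the free range [64, 128) is derived on
demand from the two neighbouring nodes (sketch using the helpers this
patch adds):

        hole_start = node->start + node->size;              /*  64 */
        hole_end   = list_entry(node->node_list.next,
                                struct drm_mm_node,
                                node_list)->start;           /* 128 */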

To avoid an ugly corner case, add a dummy head_node to struct drm_mm
to track the hole that spans the complete allocation area when the
memory manager is empty.
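
For an empty manager only the dummy node exists; drm_mm_init (below)
sets it up so that the hole computed behind it covers exactly
[start, start + size):

        mm->head_node.start = start + size;
        mm->head_node.size = start - mm->head_node.start;   /* == -size */
        mm->head_node.hole_follows = 1;
        /* hole start = head_node.start + head_node.size == start;
         * hole end   = next node's start, which for an empty list is
         *              head_node itself, i.e. start + size. */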

To guarantee that there's always a preceding/following node (which
might be marked as hole_follows == 1), move the mm->node_list list_head
to the head_node.
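
Every real node thus has a valid predecessor via node_list.prev, so
freeing a block reduces to marking that predecessor (simplified from
drm_mm_put_block below):

        prev_node = list_entry(node->node_list.prev,
                               struct drm_mm_node, node_list);

        /* The freed range becomes (part of) the hole following
         * prev_node; no explicit free node is created. */
        if (!prev_node->hole_follows) {
                prev_node->hole_follows = 1;
                list_add(&prev_node->hole_stack, &mm->hole_stack);
        }
        list_del(&node->node_list);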

The main allocator and fair-lru scan code actually becomes simpler.
Only the debug code slightly suffers because free areas are no longer
explicit.

Also add drm_mm_for_each_node (which will be much more useful when
struct drm_mm_node is embeddable).
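
The debug/dump code walks the allocated nodes with it and derives each
hole on the fly, roughly:

        drm_mm_for_each_node(entry, mm) {
                total_used += entry->size;
                /* hole_follows tells us whether a free gap up to the
                 * next node has to be reported as well. */
                if (entry->hole_follows)
                        total_free += drm_mm_hole_node_end(entry) -
                                      drm_mm_hole_node_start(entry);
        }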

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Dave Airlie <airlied@redhat.com>
drivers/gpu/drm/drm_mm.c
include/drm/drm_mm.h

index c59515ba7e69c1b3abad019a6f0cb281deeab6a6..4fa33e1283afad5008a74021a29a0b896a7b939f 100644
@@ -64,8 +64,8 @@ static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
                else {
                        child =
                            list_entry(mm->unused_nodes.next,
-                                      struct drm_mm_node, free_stack);
-                       list_del(&child->free_stack);
+                                      struct drm_mm_node, node_list);
+                       list_del(&child->node_list);
                        --mm->num_unused;
                }
                spin_unlock(&mm->unused_lock);
@@ -94,126 +94,123 @@ int drm_mm_pre_get(struct drm_mm *mm)
                        return ret;
                }
                ++mm->num_unused;
-               list_add_tail(&node->free_stack, &mm->unused_nodes);
+               list_add_tail(&node->node_list, &mm->unused_nodes);
        }
        spin_unlock(&mm->unused_lock);
        return 0;
 }
 EXPORT_SYMBOL(drm_mm_pre_get);
 
-static int drm_mm_create_tail_node(struct drm_mm *mm,
-                                  unsigned long start,
-                                  unsigned long size, int atomic)
+static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
 {
-       struct drm_mm_node *child;
-
-       child = drm_mm_kmalloc(mm, atomic);
-       if (unlikely(child == NULL))
-               return -ENOMEM;
-
-       child->free = 1;
-       child->size = size;
-       child->start = start;
-       child->mm = mm;
-
-       list_add_tail(&child->node_list, &mm->node_list);
-       list_add_tail(&child->free_stack, &mm->free_stack);
-
-       return 0;
+       return hole_node->start + hole_node->size;
 }
 
-static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
-                                                unsigned long size,
-                                                int atomic)
+static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
 {
-       struct drm_mm_node *child;
-
-       child = drm_mm_kmalloc(parent->mm, atomic);
-       if (unlikely(child == NULL))
-               return NULL;
-
-       INIT_LIST_HEAD(&child->free_stack);
+       struct drm_mm_node *next_node =
+               list_entry(hole_node->node_list.next, struct drm_mm_node,
+                          node_list);
 
-       child->size = size;
-       child->start = parent->start;
-       child->mm = parent->mm;
-
-       list_add_tail(&child->node_list, &parent->node_list);
-       INIT_LIST_HEAD(&child->free_stack);
-
-       parent->size -= size;
-       parent->start += size;
-       return child;
+       return next_node->start;
 }
 
-
-struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
+struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
                                             unsigned long size,
                                             unsigned alignment,
                                             int atomic)
 {
 
-       struct drm_mm_node *align_splitoff = NULL;
-       unsigned tmp = 0;
+       struct drm_mm_node *node;
+       struct drm_mm *mm = hole_node->mm;
+       unsigned long tmp = 0, wasted = 0;
+       unsigned long hole_start = drm_mm_hole_node_start(hole_node);
+       unsigned long hole_end = drm_mm_hole_node_end(hole_node);
+
+       BUG_ON(!hole_node->hole_follows);
+
+       node = drm_mm_kmalloc(mm, atomic);
+       if (unlikely(node == NULL))
+               return NULL;
 
        if (alignment)
-               tmp = node->start % alignment;
+               tmp = hole_start % alignment;
 
-       if (tmp) {
-               align_splitoff =
-                   drm_mm_split_at_start(node, alignment - tmp, atomic);
-               if (unlikely(align_splitoff == NULL))
-                       return NULL;
-       }
+       if (!tmp) {
+               hole_node->hole_follows = 0;
+               list_del_init(&hole_node->hole_stack);
+       } else
+               wasted = alignment - tmp;
+
+       node->start = hole_start + wasted;
+       node->size = size;
+       node->mm = mm;
 
-       if (node->size == size) {
-               list_del_init(&node->free_stack);
-               node->free = 0;
+       INIT_LIST_HEAD(&node->hole_stack);
+       list_add(&node->node_list, &hole_node->node_list);
+
+       BUG_ON(node->start + node->size > hole_end);
+
+       if (node->start + node->size < hole_end) {
+               list_add(&node->hole_stack, &mm->hole_stack);
+               node->hole_follows = 1;
        } else {
-               node = drm_mm_split_at_start(node, size, atomic);
+               node->hole_follows = 0;
        }
 
-       if (align_splitoff)
-               drm_mm_put_block(align_splitoff);
-
        return node;
 }
 EXPORT_SYMBOL(drm_mm_get_block_generic);
 
-struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *node,
+struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
                                                unsigned long size,
                                                unsigned alignment,
                                                unsigned long start,
                                                unsigned long end,
                                                int atomic)
 {
-       struct drm_mm_node *align_splitoff = NULL;
-       unsigned tmp = 0;
-       unsigned wasted = 0;
+       struct drm_mm_node *node;
+       struct drm_mm *mm = hole_node->mm;
+       unsigned long tmp = 0, wasted = 0;
+       unsigned long hole_start = drm_mm_hole_node_start(hole_node);
+       unsigned long hole_end = drm_mm_hole_node_end(hole_node);
 
-       if (node->start < start)
-               wasted += start - node->start;
+       BUG_ON(!hole_node->hole_follows);
+
+       node = drm_mm_kmalloc(mm, atomic);
+       if (unlikely(node == NULL))
+               return NULL;
+
+       if (hole_start < start)
+               wasted += start - hole_start;
        if (alignment)
-               tmp = ((node->start + wasted) % alignment);
+               tmp = (hole_start + wasted) % alignment;
 
        if (tmp)
                wasted += alignment - tmp;
-       if (wasted) {
-               align_splitoff = drm_mm_split_at_start(node, wasted, atomic);
-               if (unlikely(align_splitoff == NULL))
-                       return NULL;
+
+       if (!wasted) {
+               hole_node->hole_follows = 0;
+               list_del_init(&hole_node->hole_stack);
        }
 
-       if (node->size == size) {
-               list_del_init(&node->free_stack);
-               node->free = 0;
+       node->start = hole_start + wasted;
+       node->size = size;
+       node->mm = mm;
+
+       INIT_LIST_HEAD(&node->hole_stack);
+       list_add(&node->node_list, &hole_node->node_list);
+
+       BUG_ON(node->start + node->size > hole_end);
+       BUG_ON(node->start + node->size > end);
+
+       if (node->start + node->size < hole_end) {
+               list_add(&node->hole_stack, &mm->hole_stack);
+               node->hole_follows = 1;
        } else {
-               node = drm_mm_split_at_start(node, size, atomic);
+               node->hole_follows = 0;
        }
 
-       if (align_splitoff)
-               drm_mm_put_block(align_splitoff);
-
        return node;
 }
 EXPORT_SYMBOL(drm_mm_get_block_range_generic);
@@ -223,66 +220,41 @@ EXPORT_SYMBOL(drm_mm_get_block_range_generic);
  * Otherwise add to the free stack.
  */
 
-void drm_mm_put_block(struct drm_mm_node *cur)
+void drm_mm_put_block(struct drm_mm_node *node)
 {
 
-       struct drm_mm *mm = cur->mm;
-       struct list_head *cur_head = &cur->node_list;
-       struct list_head *root_head = &mm->node_list;
-       struct drm_mm_node *prev_node = NULL;
-       struct drm_mm_node *next_node;
+       struct drm_mm *mm = node->mm;
+       struct drm_mm_node *prev_node;
 
-       int merged = 0;
+       BUG_ON(node->scanned_block || node->scanned_prev_free
+                                  || node->scanned_next_free);
 
-       BUG_ON(cur->scanned_block || cur->scanned_prev_free
-                                 || cur->scanned_next_free);
+       prev_node =
+           list_entry(node->node_list.prev, struct drm_mm_node, node_list);
 
-       if (cur_head->prev != root_head) {
-               prev_node =
-                   list_entry(cur_head->prev, struct drm_mm_node, node_list);
-               if (prev_node->free) {
-                       prev_node->size += cur->size;
-                       merged = 1;
-               }
-       }
-       if (cur_head->next != root_head) {
-               next_node =
-                   list_entry(cur_head->next, struct drm_mm_node, node_list);
-               if (next_node->free) {
-                       if (merged) {
-                               prev_node->size += next_node->size;
-                               list_del(&next_node->node_list);
-                               list_del(&next_node->free_stack);
-                               spin_lock(&mm->unused_lock);
-                               if (mm->num_unused < MM_UNUSED_TARGET) {
-                                       list_add(&next_node->free_stack,
-                                                &mm->unused_nodes);
-                                       ++mm->num_unused;
-                               } else
-                                       kfree(next_node);
-                               spin_unlock(&mm->unused_lock);
-                       } else {
-                               next_node->size += cur->size;
-                               next_node->start = cur->start;
-                               merged = 1;
-                       }
-               }
-       }
-       if (!merged) {
-               cur->free = 1;
-               list_add(&cur->free_stack, &mm->free_stack);
-       } else {
-               list_del(&cur->node_list);
-               spin_lock(&mm->unused_lock);
-               if (mm->num_unused < MM_UNUSED_TARGET) {
-                       list_add(&cur->free_stack, &mm->unused_nodes);
-                       ++mm->num_unused;
-               } else
-                       kfree(cur);
-               spin_unlock(&mm->unused_lock);
-       }
-}
+       if (node->hole_follows) {
+               BUG_ON(drm_mm_hole_node_start(node)
+                               == drm_mm_hole_node_end(node));
+               list_del(&node->hole_stack);
+       } else
+               BUG_ON(drm_mm_hole_node_start(node)
+                               != drm_mm_hole_node_end(node));
 
+       if (!prev_node->hole_follows) {
+               prev_node->hole_follows = 1;
+               list_add(&prev_node->hole_stack, &mm->hole_stack);
+       } else
+               list_move(&prev_node->hole_stack, &mm->hole_stack);
+
+       list_del(&node->node_list);
+       spin_lock(&mm->unused_lock);
+       if (mm->num_unused < MM_UNUSED_TARGET) {
+               list_add(&node->node_list, &mm->unused_nodes);
+               ++mm->num_unused;
+       } else
+               kfree(node);
+       spin_unlock(&mm->unused_lock);
+}
 EXPORT_SYMBOL(drm_mm_put_block);
 
 static int check_free_hole(unsigned long start, unsigned long end,
@@ -319,8 +291,10 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
        best = NULL;
        best_size = ~0UL;
 
-       list_for_each_entry(entry, &mm->free_stack, free_stack) {
-               if (!check_free_hole(entry->start, entry->start + entry->size,
+       list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
+               BUG_ON(!entry->hole_follows);
+               if (!check_free_hole(drm_mm_hole_node_start(entry),
+                                    drm_mm_hole_node_end(entry),
                                     size, alignment))
                        continue;
 
@@ -353,12 +327,13 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
        best = NULL;
        best_size = ~0UL;
 
-       list_for_each_entry(entry, &mm->free_stack, free_stack) {
-               unsigned long adj_start = entry->start < start ?
-                       start : entry->start;
-               unsigned long adj_end = entry->start + entry->size > end ?
-                       end : entry->start + entry->size;
+       list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
+               unsigned long adj_start = drm_mm_hole_node_start(entry) < start ?
+                       start : drm_mm_hole_node_start(entry);
+               unsigned long adj_end = drm_mm_hole_node_end(entry) > end ?
+                       end : drm_mm_hole_node_end(entry);
 
+               BUG_ON(!entry->hole_follows);
                if (!check_free_hole(adj_start, adj_end, size, alignment))
                        continue;
 
@@ -430,70 +405,40 @@ EXPORT_SYMBOL(drm_mm_init_scan_with_range);
 int drm_mm_scan_add_block(struct drm_mm_node *node)
 {
        struct drm_mm *mm = node->mm;
-       struct list_head *prev_free, *next_free;
-       struct drm_mm_node *prev_node, *next_node;
+       struct drm_mm_node *prev_node;
+       unsigned long hole_start, hole_end;
        unsigned long adj_start;
        unsigned long adj_end;
 
        mm->scanned_blocks++;
 
-       prev_free = next_free = NULL;
-
-       BUG_ON(node->free);
+       BUG_ON(node->scanned_block);
        node->scanned_block = 1;
-       node->free = 1;
-
-       if (node->node_list.prev != &mm->node_list) {
-               prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
-                                      node_list);
-
-               if (prev_node->free) {
-                       list_del(&prev_node->node_list);
-
-                       node->start = prev_node->start;
-                       node->size += prev_node->size;
-
-                       prev_node->scanned_prev_free = 1;
-
-                       prev_free = &prev_node->free_stack;
-               }
-       }
-
-       if (node->node_list.next != &mm->node_list) {
-               next_node = list_entry(node->node_list.next, struct drm_mm_node,
-                                      node_list);
-
-               if (next_node->free) {
-                       list_del(&next_node->node_list);
-
-                       node->size += next_node->size;
-
-                       next_node->scanned_next_free = 1;
 
-                       next_free = &next_node->free_stack;
-               }
-       }
+       prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
+                              node_list);
 
-       /* The free_stack list is not used for allocated objects, so these two
-        * pointers can be abused (as long as no allocations in this memory
-        * manager happens). */
-       node->free_stack.prev = prev_free;
-       node->free_stack.next = next_free;
+       node->scanned_preceeds_hole = prev_node->hole_follows;
+       prev_node->hole_follows = 1;
+       list_del(&node->node_list);
+       node->node_list.prev = &prev_node->node_list;
 
+       hole_start = drm_mm_hole_node_start(prev_node);
+       hole_end = drm_mm_hole_node_end(prev_node);
        if (mm->scan_check_range) {
-               adj_start = node->start < mm->scan_start ?
-                       mm->scan_start : node->start;
-               adj_end = node->start + node->size > mm->scan_end ?
-                       mm->scan_end : node->start + node->size;
+               adj_start = hole_start < mm->scan_start ?
+                       mm->scan_start : hole_start;
+               adj_end = hole_end > mm->scan_end ?
+                       mm->scan_end : hole_end;
        } else {
-               adj_start = node->start;
-               adj_end = node->start + node->size;
+               adj_start = hole_start;
+               adj_end = hole_end;
        }
 
        if (check_free_hole(adj_start , adj_end,
                            mm->scan_size, mm->scan_alignment)) {
-               mm->scan_hit_start = node->start;
-               mm->scan_hit_size = node->size;
+               mm->scan_hit_start = hole_start;
+               mm->scan_hit_size = hole_end;
 
                return 1;
        }
@@ -519,39 +464,19 @@ EXPORT_SYMBOL(drm_mm_scan_add_block);
 int drm_mm_scan_remove_block(struct drm_mm_node *node)
 {
        struct drm_mm *mm = node->mm;
-       struct drm_mm_node *prev_node, *next_node;
+       struct drm_mm_node *prev_node;
 
        mm->scanned_blocks--;
 
        BUG_ON(!node->scanned_block);
        node->scanned_block = 0;
-       node->free = 0;
-
-       prev_node = list_entry(node->free_stack.prev, struct drm_mm_node,
-                              free_stack);
-       next_node = list_entry(node->free_stack.next, struct drm_mm_node,
-                              free_stack);
-
-       if (prev_node) {
-               BUG_ON(!prev_node->scanned_prev_free);
-               prev_node->scanned_prev_free = 0;
-
-               list_add_tail(&prev_node->node_list, &node->node_list);
 
-               node->start = prev_node->start + prev_node->size;
-               node->size -= prev_node->size;
-       }
-
-       if (next_node) {
-               BUG_ON(!next_node->scanned_next_free);
-               next_node->scanned_next_free = 0;
-
-               list_add(&next_node->node_list, &node->node_list);
-
-               node->size -= next_node->size;
-       }
+       prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
+                              node_list);
 
-       INIT_LIST_HEAD(&node->free_stack);
+       prev_node->hole_follows = node->scanned_preceeds_hole;
+       INIT_LIST_HEAD(&node->node_list);
+       list_add(&node->node_list, &prev_node->node_list);
 
        /* Only need to check for containement because start&size for the
         * complete resulting free block (not just the desired part) is
@@ -568,7 +493,7 @@ EXPORT_SYMBOL(drm_mm_scan_remove_block);
 
 int drm_mm_clean(struct drm_mm * mm)
 {
-       struct list_head *head = &mm->node_list;
+       struct list_head *head = &mm->head_node.node_list;
 
        return (head->next->next == head);
 }
@@ -576,38 +501,40 @@ EXPORT_SYMBOL(drm_mm_clean);
 
 int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
 {
-       INIT_LIST_HEAD(&mm->node_list);
-       INIT_LIST_HEAD(&mm->free_stack);
+       INIT_LIST_HEAD(&mm->hole_stack);
        INIT_LIST_HEAD(&mm->unused_nodes);
        mm->num_unused = 0;
        mm->scanned_blocks = 0;
        spin_lock_init(&mm->unused_lock);
 
-       return drm_mm_create_tail_node(mm, start, size, 0);
+       /* Clever trick to avoid a special case in the free hole tracking. */
+       INIT_LIST_HEAD(&mm->head_node.node_list);
+       INIT_LIST_HEAD(&mm->head_node.hole_stack);
+       mm->head_node.hole_follows = 1;
+       mm->head_node.scanned_block = 0;
+       mm->head_node.scanned_prev_free = 0;
+       mm->head_node.scanned_next_free = 0;
+       mm->head_node.mm = mm;
+       mm->head_node.start = start + size;
+       mm->head_node.size = start - mm->head_node.start;
+       list_add_tail(&mm->head_node.hole_stack, &mm->hole_stack);
+
+       return 0;
 }
 EXPORT_SYMBOL(drm_mm_init);
 
 void drm_mm_takedown(struct drm_mm * mm)
 {
-       struct list_head *bnode = mm->free_stack.next;
-       struct drm_mm_node *entry;
-       struct drm_mm_node *next;
+       struct drm_mm_node *entry, *next;
 
-       entry = list_entry(bnode, struct drm_mm_node, free_stack);
-
-       if (entry->node_list.next != &mm->node_list ||
-           entry->free_stack.next != &mm->free_stack) {
+       if (!list_empty(&mm->head_node.node_list)) {
                DRM_ERROR("Memory manager not clean. Delaying takedown\n");
                return;
        }
 
-       list_del(&entry->free_stack);
-       list_del(&entry->node_list);
-       kfree(entry);
-
        spin_lock(&mm->unused_lock);
-       list_for_each_entry_safe(entry, next, &mm->unused_nodes, free_stack) {
-               list_del(&entry->free_stack);
+       list_for_each_entry_safe(entry, next, &mm->unused_nodes, node_list) {
+               list_del(&entry->node_list);
                kfree(entry);
                --mm->num_unused;
        }
@@ -620,19 +547,37 @@ EXPORT_SYMBOL(drm_mm_takedown);
 void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
 {
        struct drm_mm_node *entry;
-       int total_used = 0, total_free = 0, total = 0;
-
-       list_for_each_entry(entry, &mm->node_list, node_list) {
-               printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8ld: %s\n",
+       unsigned long total_used = 0, total_free = 0, total = 0;
+       unsigned long hole_start, hole_end, hole_size;
+
+       hole_start = drm_mm_hole_node_start(&mm->head_node);
+       hole_end = drm_mm_hole_node_end(&mm->head_node);
+       hole_size = hole_end - hole_start;
+       if (hole_size)
+               printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
+                       prefix, hole_start, hole_end,
+                       hole_size);
+       total_free += hole_size;
+
+       drm_mm_for_each_node(entry, mm) {
+               printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: used\n",
                        prefix, entry->start, entry->start + entry->size,
-                       entry->size, entry->free ? "free" : "used");
-               total += entry->size;
-               if (entry->free)
-                       total_free += entry->size;
-               else
-                       total_used += entry->size;
+                       entry->size);
+               total_used += entry->size;
+
+               if (entry->hole_follows) {
+                       hole_start = drm_mm_hole_node_start(entry);
+                       hole_end = drm_mm_hole_node_end(entry);
+                       hole_size = hole_end - hole_start;
+                       printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8lu: free\n",
+                               prefix, hole_start, hole_end,
+                               hole_size);
+                       total_free += hole_size;
+               }
        }
-       printk(KERN_DEBUG "%s total: %d, used %d free %d\n", prefix, total,
+       total = total_free + total_used;
+
+       printk(KERN_DEBUG "%s total: %lu, used %lu free %lu\n", prefix, total,
                total_used, total_free);
 }
 EXPORT_SYMBOL(drm_mm_debug_table);
@@ -641,17 +586,34 @@ EXPORT_SYMBOL(drm_mm_debug_table);
 int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
 {
        struct drm_mm_node *entry;
-       int total_used = 0, total_free = 0, total = 0;
-
-       list_for_each_entry(entry, &mm->node_list, node_list) {
-               seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: %s\n", entry->start, entry->start + entry->size, entry->size, entry->free ? "free" : "used");
-               total += entry->size;
-               if (entry->free)
-                       total_free += entry->size;
-               else
-                       total_used += entry->size;
+       unsigned long total_used = 0, total_free = 0, total = 0;
+       unsigned long hole_start, hole_end, hole_size;
+
+       hole_start = drm_mm_hole_node_start(&mm->head_node);
+       hole_end = drm_mm_hole_node_end(&mm->head_node);
+       hole_size = hole_end - hole_start;
+       if (hole_size)
+               seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
+                               hole_start, hole_end, hole_size);
+       total_free += hole_size;
+
+       drm_mm_for_each_node(entry, mm) {
+               seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n",
+                               entry->start, entry->start + entry->size,
+                               entry->size);
+               total_used += entry->size;
+               if (entry->hole_follows) {
+                       hole_start = drm_mm_hole_node_start(entry);
+                       hole_end = drm_mm_hole_node_end(entry);
+                       hole_size = hole_end - hole_start;
+                       seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
+                                       hole_start, hole_end, hole_size);
+                       total_free += hole_size;
+               }
        }
-       seq_printf(m, "total: %d, used %d free %d\n", total, total_used, total_free);
+       total = total_free + total_used;
+
+       seq_printf(m, "total: %lu, used %lu free %lu\n", total, total_used, total_free);
        return 0;
 }
 EXPORT_SYMBOL(drm_mm_dump_table);
index 0d791462f7b2234ddedb15957bc0673515e37cb4..34fa36f2de705a177ceab95f9b148a70fc9362b0 100644
 #endif
 
 struct drm_mm_node {
-       struct list_head free_stack;
        struct list_head node_list;
-       unsigned free : 1;
+       struct list_head hole_stack;
+       unsigned hole_follows : 1;
        unsigned scanned_block : 1;
        unsigned scanned_prev_free : 1;
        unsigned scanned_next_free : 1;
+       unsigned scanned_preceeds_hole : 1;
        unsigned long start;
        unsigned long size;
        struct drm_mm *mm;
 };
 
 struct drm_mm {
-       /* List of free memory blocks, most recently freed ordered. */
-       struct list_head free_stack;
-       /* List of all memory nodes, ordered according to the (increasing) start
-        * address of the memory node. */
-       struct list_head node_list;
+       /* List of all memory nodes that immediately precede a free hole. */
+       struct list_head hole_stack;
+       /* head_node.node_list is the list of all memory nodes, ordered
+        * according to the (increasing) start address of the memory node. */
+       struct drm_mm_node head_node;
        struct list_head unused_nodes;
        int num_unused;
        spinlock_t unused_lock;
@@ -74,9 +75,11 @@ struct drm_mm {
 
 static inline bool drm_mm_initialized(struct drm_mm *mm)
 {
-       return mm->free_stack.next;
+       return mm->hole_stack.next;
 }
-
+#define drm_mm_for_each_node(entry, mm) list_for_each_entry(entry, \
+                                               &(mm)->head_node.node_list, \
+                                               node_list)
 /*
  * Basic range manager support (drm_mm.c)
  */