bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
{
-	return ((buffer->flags & ION_FLAG_CACHED) &&
-		!(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC));
+	return (buffer->flags & ION_FLAG_CACHED) &&
+	       !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
}
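
For context: this predicate gates demand-faulting of user mappings, and it is true only for buffers allocated cached without the explicit-sync flag. A sketch of an allocation that would take this path, using the legacy ion_alloc() client API (the client pointer, size, and heap mask are illustrative placeholders, not from this patch):

	/*
	 * Sketch only: a cached allocation without
	 * ION_FLAG_CACHED_NEEDS_SYNC, i.e. one for which
	 * ion_buffer_fault_user_mappings() returns true.
	 * "client" and the heap mask are hypothetical placeholders.
	 */
	struct ion_handle *handle;

	handle = ion_alloc(client, SZ_1M, PAGE_SIZE,
			   ION_HEAP_SYSTEM_MASK, ION_FLAG_CACHED);
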
bool ion_buffer_cached(struct ion_buffer *buffer)
	buffer->size = len;
	table = heap->ops->map_dma(heap, buffer);
-	if (WARN_ONCE(table == NULL, "heap->ops->map_dma should return ERR_PTR on error"))
+	if (WARN_ONCE(table == NULL,
+		      "heap->ops->map_dma should return ERR_PTR on error"))
		table = ERR_PTR(-EINVAL);
	if (IS_ERR(table)) {
		heap->ops->free(buffer);
		return handle ? handle : ERR_PTR(-EINVAL);
	}
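
The WARN_ONCE wrapped above exists because heap ops are expected to report failure through ERR_PTR(), never NULL. A hypothetical heap op illustrating that convention (the function name and priv_virt usage are assumptions, not taken from this patch):

	/*
	 * Hypothetical map_dma implementation: on failure return
	 * ERR_PTR(-errno), never NULL, so callers can use IS_ERR()
	 * as above.
	 */
	static struct sg_table *example_map_dma(struct ion_heap *heap,
						struct ion_buffer *buffer)
	{
		struct sg_table *table = buffer->priv_virt;

		if (!table)
			return ERR_PTR(-ENOMEM);
		return table;
	}
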
-static bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle)
+static bool ion_handle_validate(struct ion_client *client,
+				struct ion_handle *handle)
{
	WARN_ON(!mutex_is_locked(&client->lock));
	return (idr_find(&client->idr, handle->id) == handle);
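
The WARN_ON spells out the locking contract: the caller must hold client->lock across the idr lookup and for as long as it relies on the result. A sketch of the expected caller pattern (not part of this patch):

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
		return -EINVAL;
	}
	/* ... use the handle while still holding client->lock ... */
	mutex_unlock(&client->lock);
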
	return buffer->vaddr;
}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
-	if (WARN_ONCE(vaddr == NULL, "heap->ops->map_kernel should return ERR_PTR on error"))
+	if (WARN_ONCE(vaddr == NULL,
+		      "heap->ops->map_kernel should return ERR_PTR on error"))
		return ERR_PTR(-EINVAL);
	if (IS_ERR(vaddr))
		return vaddr;
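
Same convention on the kernel-mapping side: map_kernel should also fail with ERR_PTR(). A hypothetical heap op showing the usual shape, converting a NULL from vmap() into an error pointer (the function name and buffer->pages field are assumptions about the heap):

	/*
	 * Hypothetical map_kernel implementation: translate vmap()
	 * failure (NULL) into the ERR_PTR style the caller above
	 * checks for.
	 */
	static void *example_map_kernel(struct ion_heap *heap,
					struct ion_buffer *buffer)
	{
		void *vaddr = vmap(buffer->pages,
				   PAGE_ALIGN(buffer->size) / PAGE_SIZE,
				   VM_MAP, PAGE_KERNEL);

		if (!vaddr)
			return ERR_PTR(-ENOMEM);
		return vaddr;
	}
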
#ifdef DEBUG_HEAP_SHRINKER
static int debug_shrink_set(void *data, u64 val)
{
-        struct ion_heap *heap = data;
-        struct shrink_control sc;
-        int objs;
+	struct ion_heap *heap = data;
+	struct shrink_control sc;
+	int objs;

-        sc.gfp_mask = -1;
-        sc.nr_to_scan = 0;
+	sc.gfp_mask = -1;
+	sc.nr_to_scan = 0;

-        if (!val)
-                return 0;
+	if (!val)
+		return 0;

-        objs = heap->shrinker.shrink(&heap->shrinker, &sc);
-        sc.nr_to_scan = objs;
+	objs = heap->shrinker.shrink(&heap->shrinker, &sc);
+	sc.nr_to_scan = objs;

-        heap->shrinker.shrink(&heap->shrinker, &sc);
-        return 0;
+	heap->shrinker.shrink(&heap->shrinker, &sc);
+	return 0;
}
static int debug_shrink_get(void *data, u64 *val)
{
-        struct ion_heap *heap = data;
-        struct shrink_control sc;
-        int objs;
+	struct ion_heap *heap = data;
+	struct shrink_control sc;
+	int objs;

-        sc.gfp_mask = -1;
-        sc.nr_to_scan = 0;
+	sc.gfp_mask = -1;
+	sc.nr_to_scan = 0;

-        objs = heap->shrinker.shrink(&heap->shrinker, &sc);
-        *val = objs;
-        return 0;
+	objs = heap->shrinker.shrink(&heap->shrinker, &sc);
+	*val = objs;
+	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
-                        debug_shrink_set, "%llu\n");
+			debug_shrink_set, "%llu\n");
#endif
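
DEFINE_SIMPLE_ATTRIBUTE only generates the file_operations; the knob still has to be registered when a heap is added. A minimal sketch of the usual wiring via debugfs_create_file() (the parent dentry and file name are assumptions about the surrounding driver):

	/*
	 * Sketch: expose the shrinker knob in debugfs. dev->debug_root
	 * and the use of heap->name as the file name are assumptions,
	 * not shown in this patch.
	 */
	debugfs_create_file(heap->name, 0644, dev->debug_root,
			    heap, &debug_shrink_fops);

Writing a nonzero value to the file triggers a shrink pass; reading it reports how many objects the heap's shrinker could free.
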
void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
		int npages_this_entry = PAGE_ALIGN(sg->length) / PAGE_SIZE;
		struct page *page = sg_page(sg);
		BUG_ON(i >= npages);
-		for (j = 0; j < npages_this_entry; j++) {
+		for (j = 0; j < npages_this_entry; j++)
			*(tmp++) = page++;
-		}
	}
	vaddr = vmap(pages, npages, VM_MAP, pgprot);
	vfree(pages);
	return ion_heap_sglist_zero(&sg, 1, pgprot);
}
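
For reference, the loop above is the core of the page-array + vmap() technique ion_heap_map_kernel() uses: flatten the buffer's sg_table into a struct page * array, map it contiguously, then discard the array. A condensed, self-contained sketch of the idea (error handling trimmed; the function name is illustrative):

	/*
	 * Condensed sketch of the technique above: build a flat page
	 * array from an sg_table, vmap() it, then free the array (it
	 * is only needed for the duration of the vmap() call).
	 */
	static void *sketch_map_sg(struct sg_table *table, size_t size)
	{
		int npages = PAGE_ALIGN(size) / PAGE_SIZE;
		struct page **pages = vmalloc(sizeof(*pages) * npages);
		struct page **tmp = pages;
		struct scatterlist *sg;
		void *vaddr;
		int i, j;

		for_each_sg(table->sgl, sg, table->nents, i) {
			struct page *page = sg_page(sg);
			int n = PAGE_ALIGN(sg->length) / PAGE_SIZE;

			for (j = 0; j < n; j++)
				*tmp++ = page++;
		}
		vaddr = vmap(pages, npages, VM_MAP, PAGE_KERNEL);
		vfree(pages);
		return vaddr;
	}
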
-void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer * buffer)
+void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer)
{
	rt_mutex_lock(&heap->lock);
	list_add(&buffer->list, &heap->free_list);