size_t size; /* padded (as-allocated) size */
size_t orig_size; /* original (as-requested) size */
struct nvmap_client *owner;
+ struct nvmap_device *dev; /* device this handle was created on */
union {
struct nvmap_pgalloc pgalloc;
struct nvmap_heap_block *carveout;
bool secure; /* zap IOVMM area on unpin */
bool heap_pgalloc; /* handle is page allocated (sysmem / iovmm) */
bool alloc; /* handle has memory allocated */
+ struct mutex lock; /* protects owner */
};
struct nvmap_share {
struct nvmap_carveout_node *node,
size_t len);
+struct nvmap_share *nvmap_get_share_from_dev(struct nvmap_device *dev);
+
struct nvmap_handle *nvmap_validate_get(struct nvmap_client *client,
unsigned long handle);
return client->dev->dev_user.this_device;
}
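+/* returns the device-wide IOVMM share, so teardown paths can reach it
+ * without going through a client */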
+struct nvmap_share *nvmap_get_share_from_dev(struct nvmap_device *dev)
+{
+ return &dev->iovmm_master;
+}
+
/* allocates a PTE for the caller's use; returns the PTE pointer or
* a negative errno. may be called from IRQs */
pte_t **nvmap_alloc_pte_irq(struct nvmap_device *dev, void **vaddr)
struct nvmap_carveout_node *node,
size_t len)
{
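+ /* a disowned handle has no client to charge against */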
+ if (!client)
+ return;
+
mutex_lock(&node->clients_mutex);
client->carveout_commit[node->index].commit -= len;
BUG_ON(client->carveout_commit[node->index].commit < 0);
smp_rmb();
pins = atomic_read(&ref->pin);
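+ /* disown the handle under its lock so the free path never sees a
+ * client that is about to go away */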
+ mutex_lock(&ref->handle->lock);
+ if (ref->handle->owner == client)
+ ref->handle->owner = NULL;
+ mutex_unlock(&ref->handle->lock);
+
while (pins--)
nvmap_unpin_handles(client, &ref->handle, 1);
void _nvmap_handle_free(struct nvmap_handle *h)
{
- struct nvmap_client *client = h->owner;
+ struct nvmap_device *dev = h->dev;
unsigned int i, nr_page;
- if (nvmap_handle_remove(client->dev, h) != 0)
+ if (nvmap_handle_remove(dev, h) != 0)
return;
if (!h->alloc)
goto out;
if (!h->heap_pgalloc) {
- nvmap_carveout_commit_subtract(client,
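+ /* h->owner can be cleared concurrently; hold the handle lock across
+ * the carveout accounting */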
+ mutex_lock(&h->lock);
+ nvmap_carveout_commit_subtract(h->owner,
nvmap_heap_to_arg(nvmap_block_to_heap(h->carveout)),
h->size);
+ mutex_unlock(&h->lock);
nvmap_heap_free(h->carveout);
goto out;
}
BUG_ON(h->size & ~PAGE_MASK);
BUG_ON(!h->pgalloc.pages);
- nvmap_mru_remove(client->share, h);
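+ /* the MRU list lives in the device's IOVMM share, not in the client */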
+ nvmap_mru_remove(nvmap_get_share_from_dev(dev), h);
+
if (h->pgalloc.area)
tegra_iovmm_free_vm(h->pgalloc.area);
out:
kfree(h);
- nvmap_client_put(client);
}
extern void __flush_dcache_page(struct address_space *, struct page *);
atomic_set(&h->ref, 1);
atomic_set(&h->pin, 0);
- h->owner = nvmap_client_get(client);
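+ /* the handle remembers its owner and device but no longer holds a
+ * reference to the client; owner is cleared when the client frees
+ * its handle reference */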
+ h->owner = client;
+ h->dev = client->dev;
BUG_ON(!h->owner);
h->size = h->orig_size = size;
h->flags = NVMAP_HANDLE_WRITE_COMBINE;
+ mutex_init(&h->lock);
nvmap_handle_add(client->dev, h);