void __iomem *mmio;
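+ /* protects ramin_heap and the global gpuobj_list */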
+ spinlock_t ramin_lock;
void __iomem *ramin;
u32 ramin_size;
u32 ramin_base;
bool ramin_available;
- spinlock_t ramin_lock;
+ struct drm_mm ramin_heap;
+ struct list_head gpuobj_list;
struct nouveau_bo *vga_ram;
struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR];
int vm_vram_pt_nr;
- struct drm_mm ramin_heap;
-
- struct list_head gpuobj_list;
-
struct nvbios vbios;
struct nv04_mode_state mode_reg;
kref_init(&gpuobj->refcount);
gpuobj->size = size;
+ spin_lock(&dev_priv->ramin_lock);
list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
+ spin_unlock(&dev_priv->ramin_lock);
if (chan) {
NV_DEBUG(dev, "channel heap\n");
}
/* try and get aperture space */
- ramin = drm_mm_search_free(&dev_priv->ramin_heap, size, align, 0);
- if (ramin)
- ramin = drm_mm_get_block(ramin, size, align);
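+ /*
+  * Preallocate a free node with drm_mm_pre_get() while sleeping is
+  * still allowed, then claim it under ramin_lock with the
+  * non-sleeping drm_mm_get_block_atomic(), retrying on failure.
+  */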
+ do {
+ if (drm_mm_pre_get(&dev_priv->ramin_heap)) {
+ nouveau_gpuobj_ref(NULL, &gpuobj);
+ return -ENOMEM;
+ }
+
+ spin_lock(&dev_priv->ramin_lock);
+ ramin = drm_mm_search_free(&dev_priv->ramin_heap, size,
+ align, 0);
+ if (ramin == NULL) {
+ spin_unlock(&dev_priv->ramin_lock);
+ nouveau_gpuobj_ref(NULL, &gpuobj);
+ return -ENOMEM;
+ }
+
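+ /* may return NULL if a racing allocation consumed the
+  * preallocated node; loop and preallocate again */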
+ ramin = drm_mm_get_block_atomic(ramin, size, align);
+ spin_unlock(&dev_priv->ramin_lock);
+ } while (ramin == NULL);
/* on nv50 it's ok to fail, we have a fallback path */
if (!ramin && dev_priv->card_type < NV_50) {
if (gpuobj->im_backing)
engine->instmem.clear(dev, gpuobj);
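+ /* release aperture space and unlink from gpuobj_list under the same lock */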
+ spin_lock(&dev_priv->ramin_lock);
if (gpuobj->im_pramin)
drm_mm_put_block(gpuobj->im_pramin);
-
list_del(&gpuobj->list);
+ spin_unlock(&dev_priv->ramin_lock);
kfree(gpuobj);
}
dev_priv->engine.instmem.flush(dev);
}
+ spin_lock(&dev_priv->ramin_lock);
list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
+ spin_unlock(&dev_priv->ramin_lock);
*pgpuobj = gpuobj;
return 0;
}
kref_init(&gpuobj->refcount);
gpuobj->cinst = 0x40;
+ spin_lock(&dev_priv->ramin_lock);
list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
+ spin_unlock(&dev_priv->ramin_lock);
*gpuobj_ret = gpuobj;
return 0;
}