 static void
 nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
-		       int *align, int *size, int *page_shift)
+		       int *align, int *size)
 {
 	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
 			}
 		}
 	} else {
-		if (likely(dev_priv->chan_vm)) {
-			if (!(flags & TTM_PL_FLAG_TT) && *size > 256 * 1024)
-				*page_shift = dev_priv->chan_vm->lpg_shift;
-			else
-				*page_shift = dev_priv->chan_vm->spg_shift;
-		} else {
-			*page_shift = 12;
-		}
-
-		*size = roundup(*size, (1 << *page_shift));
-		*align = max((1 << *page_shift), *align);
+		*size = roundup(*size, (1 << nvbo->page_shift));
+		*align = max((1 << nvbo->page_shift), *align);
 	}

 	*size = roundup(*size, PAGE_SIZE);
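
Note: nouveau_bo_fixup_align() now only rounds size/align to a page shift that
has already been chosen for the bo; the nvbo->page_shift member it reads is
presumably added to struct nouveau_bo in a part of the patch not shown in this
excerpt, roughly:

	struct nouveau_bo {
		/* ... existing members ... */
		u32 page_shift;	/* log2 of the bo's GPU page size: 12, or the VM's lpg_shift */
	};
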
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_bo *nvbo;
-	int ret = 0, page_shift = 0;
+	int ret;

 	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
 	if (!nvbo)
 	nvbo->tile_flags = tile_flags;
 	nvbo->bo.bdev = &dev_priv->ttm.bdev;

-	nouveau_bo_fixup_align(nvbo, flags, &align, &size, &page_shift);
+	nvbo->page_shift = 12;
+	if (dev_priv->bar1_vm) {
+		if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
+			nvbo->page_shift = dev_priv->bar1_vm->lpg_shift;
+	}
+
+	nouveau_bo_fixup_align(nvbo, flags, &align, &size);
 	align >>= PAGE_SHIFT;

 	if (dev_priv->chan_vm) {
-		ret = nouveau_vm_get(dev_priv->chan_vm, size, page_shift,
+		ret = nouveau_vm_get(dev_priv->chan_vm, size, nvbo->page_shift,
 				     NV_MEM_ACCESS_RW, &nvbo->vma);
 		if (ret) {
 			kfree(nvbo);
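
Worked example of the new allocation path (assuming an nv50-class VM where
lpg_shift is 16, i.e. 64 KiB large pages):

	/*
	 * A 300 KiB VRAM bo: not TTM_PL_FLAG_TT and size > 256 KiB, so
	 * page_shift = lpg_shift = 16.  nouveau_bo_fixup_align() then gives
	 *   size  = roundup(300 KiB, 64 KiB) = 320 KiB
	 *   align = max(64 KiB, align)
	 * and nouveau_vm_get() reserves channel VM address space with that
	 * page size.  A GART (TTM_PL_FLAG_TT) bo of any size keeps
	 * page_shift = 12 and is only rounded to 4 KiB.
	 */
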
 	int ret;

 	if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
-		size_nc = 1 << nvbo->vma.node->type;
+		size_nc = 1 << nvbo->page_shift;

 	ret = vram->get(dev, mem->num_pages << PAGE_SHIFT,
 			mem->page_alignment << PAGE_SHIFT, size_nc,
 		return (ret == -ENOSPC) ? 0 : ret;
 	}

-	node->page_shift = 12;
-	if (nvbo->vma.node)
-		node->page_shift = nvbo->vma.node->type;
+	node->page_shift = nvbo->page_shift;

 	mem->mm_node = node;
 	mem->start   = node->offset >> PAGE_SHIFT;
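
The VRAM manager hunks follow the same idea: the bo's page size now comes from
nvbo->page_shift instead of being recovered from nvbo->vma.node, which only
exists for bos that received a channel VM allocation.  A sketch of the assumed
semantics:

	/*
	 * size_nc = 1 << nvbo->page_shift;
	 *	minimum chunk size the VRAM allocator may split a
	 *	NOUVEAU_GEM_TILE_NONCONTIG bo into -- now one bo page.
	 *
	 * node->page_shift = nvbo->page_shift;
	 *	recorded on the VRAM node so that later mapping of the node
	 *	uses the same page size the bo was sized and aligned for.
	 */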