void (*agp_enable)(struct agp_bridge_data *, u32);
void (*cleanup)(void);
void (*tlb_flush)(struct agp_memory *);
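+ /* mask_memory now takes the struct page and derives the GART/bus address itself. */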
- unsigned long (*mask_memory)(struct agp_bridge_data *, unsigned long, int);
+ unsigned long (*mask_memory)(struct agp_bridge_data *, struct page *, int);
void (*cache_flush)(void);
int (*create_gatt_table)(struct agp_bridge_data *);
int (*free_gatt_table)(struct agp_bridge_data *);
int (*remove_memory)(struct agp_memory *, off_t, int);
struct agp_memory *(*alloc_by_type) (size_t, int);
void (*free_by_type)(struct agp_memory *);
- void *(*agp_alloc_page)(struct agp_bridge_data *);
+ struct page *(*agp_alloc_page)(struct agp_bridge_data *);
int (*agp_alloc_pages)(struct agp_bridge_data *, struct agp_memory *, size_t);
- void (*agp_destroy_page)(void *, int flags);
+ void (*agp_destroy_page)(struct page *, int flags);
void (*agp_destroy_pages)(struct agp_memory *);
int (*agp_type_to_mask_type) (struct agp_bridge_data *, int);
void (*chipset_flush)(struct agp_bridge_data *);
int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type);
struct agp_memory *agp_generic_alloc_by_type(size_t page_count, int type);
void agp_generic_free_by_type(struct agp_memory *curr);
-void *agp_generic_alloc_page(struct agp_bridge_data *bridge);
+struct page *agp_generic_alloc_page(struct agp_bridge_data *bridge);
int agp_generic_alloc_pages(struct agp_bridge_data *agp_bridge,
struct agp_memory *memory, size_t page_count);
-void agp_generic_destroy_page(void *addr, int flags);
+void agp_generic_destroy_page(struct page *page, int flags);
void agp_generic_destroy_pages(struct agp_memory *memory);
void agp_free_key(int key);
int agp_num_entries(void);
void global_cache_flush(void);
void get_agp_version(struct agp_bridge_data *bridge);
unsigned long agp_generic_mask_memory(struct agp_bridge_data *bridge,
- unsigned long addr, int type);
+ struct page *page, int type);
int agp_generic_type_to_mask_type(struct agp_bridge_data *bridge,
int type);
struct agp_bridge_data *agp_generic_find_bridge(struct pci_dev *pdev);
}
}
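+/* Allocate a page and point the ALi cache-flush hardware at its bus address. */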
-static void *m1541_alloc_page(struct agp_bridge_data *bridge)
+static struct page *m1541_alloc_page(struct agp_bridge_data *bridge)
{
- void *addr = agp_generic_alloc_page(agp_bridge);
+ struct page *page = agp_generic_alloc_page(agp_bridge);
u32 temp;
- if (!addr)
+ if (!page)
return NULL;
pci_read_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, &temp);
pci_write_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL,
(((temp & ALI_CACHE_FLUSH_ADDR_MASK) |
- virt_to_gart(addr)) | ALI_CACHE_FLUSH_EN ));
- return addr;
+ phys_to_gart(page_to_phys(page))) | ALI_CACHE_FLUSH_EN ));
+ return page;
}
-static void ali_destroy_page(void * addr, int flags)
+static void ali_destroy_page(struct page *page, int flags)
{
- if (addr) {
+ if (page) {
if (flags & AGP_PAGE_DESTROY_UNMAP) {
global_cache_flush(); /* is this really needed? --hch */
- agp_generic_destroy_page(addr, flags);
+ agp_generic_destroy_page(page, flags);
} else
- agp_generic_destroy_page(addr, flags);
+ agp_generic_destroy_page(page, flags);
}
}
-static void m1541_destroy_page(void * addr, int flags)
+static void m1541_destroy_page(struct page *page, int flags)
{
u32 temp;
- if (addr == NULL)
+ if (page == NULL)
return;
if (flags & AGP_PAGE_DESTROY_UNMAP) {
pci_read_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL, &temp);
pci_write_config_dword(agp_bridge->dev, ALI_CACHE_FLUSH_CTRL,
(((temp & ALI_CACHE_FLUSH_ADDR_MASK) |
- virt_to_gart(addr)) | ALI_CACHE_FLUSH_EN));
+ phys_to_gart(page_to_phys(page))) | ALI_CACHE_FLUSH_EN));
}
- agp_generic_destroy_page(addr, flags);
+ agp_generic_destroy_page(page, flags);
}
addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
cur_gatt = GET_GATT(addr);
writel(agp_generic_mask_memory(agp_bridge,
- mem->memory[i], mem->type), cur_gatt+GET_GATT_OFF(addr));
+ mem->pages[i], mem->type), cur_gatt+GET_GATT_OFF(addr));
readl(cur_gatt+GET_GATT_OFF(addr)); /* PCI Posting. */
}
amd_irongate_tlbflush(mem);
for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
tmp = agp_bridge->driver->mask_memory(agp_bridge,
- mem->memory[i], mask_type);
+ mem->pages[i], mask_type);
BUG_ON(tmp & 0xffffff0000000ffcULL);
pte = (tmp & 0x000000ff00000000ULL) >> 28;
for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
cur_gatt = GET_GATT(addr);
- writel(agp_bridge->driver->mask_memory(agp_bridge,
- mem->memory[i], mem->type), cur_gatt+GET_GATT_OFF(addr));
+ writel(agp_bridge->driver->mask_memory(agp_bridge,
+ mem->pages[i], mem->type),
+ cur_gatt+GET_GATT_OFF(addr));
readl(cur_gatt+GET_GATT_OFF(addr)); /* PCI Posting. */
}
agp_bridge->driver->tlb_flush(mem);
bridge->version = &agp_current_version;
if (bridge->driver->needs_scratch_page) {
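+ /* The scratch page backs unbound GATT entries; record both its bus address and its masked form. */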
- void *addr = bridge->driver->agp_alloc_page(bridge);
+ struct page *page = bridge->driver->agp_alloc_page(bridge);
- if (!addr) {
+ if (!page) {
dev_err(&bridge->dev->dev,
"can't get memory for scratch page\n");
return -ENOMEM;
}
- bridge->scratch_page_real = virt_to_gart(addr);
+ bridge->scratch_page_real = phys_to_gart(page_to_phys(page));
bridge->scratch_page =
- bridge->driver->mask_memory(bridge, bridge->scratch_page_real, 0);
+ bridge->driver->mask_memory(bridge, page, 0);
}
size_value = bridge->driver->fetch_size();
};
/* This function does the same thing as mask_memory() for this chipset... */
-static inline unsigned long efficeon_mask_memory(unsigned long addr)
+static inline unsigned long efficeon_mask_memory(struct page *page)
{
+ unsigned long addr = phys_to_gart(page_to_phys(page));
return addr | 0x00000001;
}
last_page = NULL;
for (i = 0; i < count; i++) {
int index = pg_start + i;
- unsigned long insert = efficeon_mask_memory(mem->memory[i]);
+ unsigned long insert = efficeon_mask_memory(mem->pages[i]);
page = (unsigned int *) efficeon_private.l1_table[index >> 10];
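+/* Small arrays come from kmalloc; larger ones (or kmalloc failure) fall back to vmalloc. */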
void agp_alloc_page_array(size_t size, struct agp_memory *mem)
{
- mem->memory = NULL;
+ mem->pages = NULL;
mem->vmalloc_flag = false;
if (size <= 2*PAGE_SIZE)
- mem->memory = kmalloc(size, GFP_KERNEL | __GFP_NORETRY);
- if (mem->memory == NULL) {
- mem->memory = vmalloc(size);
+ mem->pages = kmalloc(size, GFP_KERNEL | __GFP_NORETRY);
+ if (mem->pages == NULL) {
+ mem->pages = vmalloc(size);
mem->vmalloc_flag = true;
}
}
void agp_free_page_array(struct agp_memory *mem)
{
if (mem->vmalloc_flag) {
- vfree(mem->memory);
+ vfree(mem->pages);
} else {
- kfree(mem->memory);
+ kfree(mem->pages);
}
}
EXPORT_SYMBOL(agp_free_page_array);
agp_alloc_page_array(alloc_size, new);
- if (new->memory == NULL) {
+ if (new->pages == NULL) {
agp_free_key(new->key);
kfree(new);
return NULL;
agp_alloc_page_array(PAGE_SIZE * scratch_pages, new);
- if (new->memory == NULL) {
+ if (new->pages == NULL) {
agp_free_key(new->key);
kfree(new);
return NULL;
} else {
for (i = 0; i < curr->page_count; i++) {
- curr->memory[i] = (unsigned long)gart_to_virt(
- curr->memory[i]);
curr->bridge->driver->agp_destroy_page(
- (void *)curr->memory[i],
+ curr->pages[i],
AGP_PAGE_DESTROY_UNMAP);
}
for (i = 0; i < curr->page_count; i++) {
curr->bridge->driver->agp_destroy_page(
- (void *)curr->memory[i],
+ curr->pages[i],
AGP_PAGE_DESTROY_FREE);
}
}
}
for (i = 0; i < page_count; i++) {
- void *addr = bridge->driver->agp_alloc_page(bridge);
+ struct page *page = bridge->driver->agp_alloc_page(bridge);
- if (addr == NULL) {
+ if (page == NULL) {
agp_free_memory(new);
return NULL;
}
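+ /* Store the struct page itself; the bus address is computed at bind time by mask_memory(). */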
- new->memory[i] = virt_to_gart(addr);
+ new->pages[i] = page;
new->page_count++;
}
new->bridge = bridge;
}
for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
- writel(bridge->driver->mask_memory(bridge, mem->memory[i], mask_type),
+ writel(bridge->driver->mask_memory(bridge, mem->pages[i], mask_type),
bridge->gatt_table+j);
}
readl(bridge->gatt_table+j-1); /* PCI Posting. */
return NULL;
for (i = 0; i < page_count; i++)
- new->memory[i] = 0;
+ new->pages[i] = NULL;
new->page_count = 0;
new->type = type;
new->num_scratch_pages = pages;
get_page(page);
atomic_inc(&agp_bridge->current_memory_agp);
- /* set_memory_array_uc() needs virtual address */
- mem->memory[i] = (unsigned long)page_address(page);
+ mem->pages[i] = page;
mem->page_count++;
}
#ifdef CONFIG_X86
- set_memory_array_uc(mem->memory, num_pages);
+ set_pages_array_uc(mem->pages, num_pages);
#endif
ret = 0;
out:
- for (i = 0; i < mem->page_count; i++)
- mem->memory[i] = virt_to_gart((void *)mem->memory[i]);
return ret;
}
EXPORT_SYMBOL(agp_generic_alloc_pages);
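+/* Returns the page with an extra reference held; release it through agp_destroy_page(). */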
-void *agp_generic_alloc_page(struct agp_bridge_data *bridge)
+struct page *agp_generic_alloc_page(struct agp_bridge_data *bridge)
{
struct page * page;
get_page(page);
atomic_inc(&agp_bridge->current_memory_agp);
- return page_address(page);
+ return page;
}
EXPORT_SYMBOL(agp_generic_alloc_page);
void agp_generic_destroy_pages(struct agp_memory *mem)
{
int i;
- void *addr;
struct page *page;
if (!mem)
return;
- for (i = 0; i < mem->page_count; i++)
- mem->memory[i] = (unsigned long)gart_to_virt(mem->memory[i]);
-
#ifdef CONFIG_X86
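+ /* Restore write-back caching for the whole page array in a single pass. */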
- set_memory_array_wb(mem->memory, mem->page_count);
+ set_pages_array_wb(mem->pages, mem->page_count);
#endif
for (i = 0; i < mem->page_count; i++) {
- addr = (void *)mem->memory[i];
- page = virt_to_page(addr);
+ page = mem->pages[i];
#ifndef CONFIG_X86
unmap_page_from_agp(page);
#endif
-
put_page(page);
- free_page((unsigned long)addr);
+ __free_page(page);
atomic_dec(&agp_bridge->current_memory_agp);
- mem->memory[i] = 0;
+ mem->pages[i] = NULL;
}
}
EXPORT_SYMBOL(agp_generic_destroy_pages);
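+/* UNMAP and FREE may arrive as separate calls so callers can flush in between. */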
-void agp_generic_destroy_page(void *addr, int flags)
+void agp_generic_destroy_page(struct page *page, int flags)
{
- struct page *page;
-
- if (addr == NULL)
+ if (page == NULL)
return;
- page = virt_to_page(addr);
if (flags & AGP_PAGE_DESTROY_UNMAP)
unmap_page_from_agp(page);
if (flags & AGP_PAGE_DESTROY_FREE) {
put_page(page);
- free_page((unsigned long)addr);
+ __free_page(page);
atomic_dec(&agp_bridge->current_memory_agp);
}
}
EXPORT_SYMBOL(global_cache_flush);
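+/* Translate the page to a GART address and OR in the chipset's mask bits. */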
unsigned long agp_generic_mask_memory(struct agp_bridge_data *bridge,
- unsigned long addr, int type)
+ struct page *page, int type)
{
+ unsigned long addr = phys_to_gart(page_to_phys(page));
/* memory type is ignored in the generic routine */
if (bridge->driver->masks)
return addr | bridge->driver->masks[0].mask;
for (i = 0, j = io_pg_start; i < mem->page_count; i++) {
unsigned long paddr;
- paddr = mem->memory[i];
+ paddr = page_to_phys(mem->pages[i]);
for (k = 0;
k < hp->io_pages_per_kpage;
k++, j++, paddr += hp->io_page_size) {
- hp->gatt[j] =
- agp_bridge->driver->mask_memory(agp_bridge,
- paddr, type);
+ hp->gatt[j] = HP_ZX1_PDIR_VALID_BIT | paddr;
}
}
static unsigned long
hp_zx1_mask_memory (struct agp_bridge_data *bridge,
- unsigned long addr, int type)
+ struct page *page, int type)
{
+ unsigned long addr = phys_to_gart(page_to_phys(page));
return HP_ZX1_PDIR_VALID_BIT | addr;
}
*/
#define WR_FLUSH_GATT(index) RD_GATT(index)
+static unsigned long i460_mask_memory (struct agp_bridge_data *bridge,
+ unsigned long addr, int type);
+
static struct {
void *gatt; /* ioremap'd GATT area */
unsigned long *alloced_map; /* bitmap of kernel-pages in use */
int refcount; /* number of kernel pages using the large page */
u64 paddr; /* physical address of large page */
+ struct page *page; /* struct page backing this large page */
} *lp_desc;
} i460;
void *temp;
pr_debug("i460_insert_memory_small_io_page(mem=%p, pg_start=%ld, type=%d, paddr0=0x%lx)\n",
- mem, pg_start, type, mem->memory[0]);
+ mem, pg_start, type, page_to_phys(mem->pages[0]));
if (type >= AGP_USER_TYPES || mem->type >= AGP_USER_TYPES)
return -EINVAL;
io_page_size = 1UL << I460_IO_PAGE_SHIFT;
for (i = 0, j = io_pg_start; i < mem->page_count; i++) {
- paddr = mem->memory[i];
+ paddr = phys_to_gart(page_to_phys(mem->pages[i]));
for (k = 0; k < I460_IOPAGES_PER_KPAGE; k++, j++, paddr += io_page_size)
- WR_GATT(j, agp_bridge->driver->mask_memory(agp_bridge,
- paddr, mem->type));
+ WR_GATT(j, i460_mask_memory(agp_bridge, paddr, mem->type));
}
WR_FLUSH_GATT(j - 1);
return 0;
{
unsigned long order = I460_IO_PAGE_SHIFT - PAGE_SHIFT;
size_t map_size;
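+ /* Track the large page as a struct page so teardown can use __free_pages(). */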
- void *lpage;
- lpage = (void *) __get_free_pages(GFP_KERNEL, order);
- if (!lpage) {
+ lp->page = alloc_pages(GFP_KERNEL, order);
+ if (!lp->page) {
printk(KERN_ERR PFX "Couldn't alloc 4M GART page...\n");
return -ENOMEM;
}
map_size = ((I460_KPAGES_PER_IOPAGE + BITS_PER_LONG - 1) & -BITS_PER_LONG)/8;
lp->alloced_map = kzalloc(map_size, GFP_KERNEL);
if (!lp->alloced_map) {
- free_pages((unsigned long) lpage, order);
+ __free_pages(lp->page, order);
printk(KERN_ERR PFX "Out of memory, we're in trouble...\n");
return -ENOMEM;
}
- lp->paddr = virt_to_gart(lpage);
+ lp->paddr = phys_to_gart(page_to_phys(lp->page));
lp->refcount = 0;
atomic_add(I460_KPAGES_PER_IOPAGE, &agp_bridge->current_memory_agp);
return 0;
kfree(lp->alloced_map);
lp->alloced_map = NULL;
- free_pages((unsigned long) gart_to_virt(lp->paddr), I460_IO_PAGE_SHIFT - PAGE_SHIFT);
+ __free_pages(lp->page, I460_IO_PAGE_SHIFT - PAGE_SHIFT);
atomic_sub(I460_KPAGES_PER_IOPAGE, &agp_bridge->current_memory_agp);
}
if (i460_alloc_large_page(lp) < 0)
return -ENOMEM;
pg = lp - i460.lp_desc;
- WR_GATT(pg, agp_bridge->driver->mask_memory(agp_bridge,
- lp->paddr, 0));
+ WR_GATT(pg, i460_mask_memory(agp_bridge,
+ lp->paddr, 0));
WR_FLUSH_GATT(pg);
}
idx < ((lp == end) ? (end_offset + 1) : I460_KPAGES_PER_IOPAGE);
idx++, i++)
{
- mem->memory[i] = lp->paddr + idx*PAGE_SIZE;
+ mem->pages[i] = lp->page;
__set_bit(idx, lp->alloced_map);
++lp->refcount;
}
struct lp_desc *start, *end, *lp;
void *temp;
- temp = agp_bridge->driver->current_size;
+ temp = agp_bridge->current_size;
num_entries = A_SIZE_8(temp)->num_entries;
/* Figure out what pg_start means in terms of our large GART pages */
idx < ((lp == end) ? (end_offset + 1) : I460_KPAGES_PER_IOPAGE);
idx++, i++)
{
- mem->memory[i] = 0;
+ mem->pages[i] = NULL;
__clear_bit(idx, lp->alloced_map);
--lp->refcount;
}
* Let's just hope nobody counts on the allocated AGP memory being there before bind time
* (I don't think current drivers do)...
*/
-static void *i460_alloc_page (struct agp_bridge_data *bridge)
+static struct page *i460_alloc_page (struct agp_bridge_data *bridge)
{
- void *page;
+ struct page *page;
return page;
}
-static void i460_destroy_page (void *page, int flags)
+static void i460_destroy_page (struct page *page, int flags)
{
if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT) {
agp_generic_destroy_page(page, flags);
#endif /* I460_LARGE_IO_PAGES */
static unsigned long i460_mask_memory (struct agp_bridge_data *bridge,
- unsigned long addr, int type)
+ unsigned long addr, int type)
{
/* Make sure the returned address is a valid GATT entry */
return bridge->driver->masks[0].mask
| (((addr & ~((1 << I460_IO_PAGE_SHIFT) - 1)) & 0xfffff000) >> 12);
}
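+/* Adapter for the new mask_memory signature: resolve the page's address, then mask it. */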
+static unsigned long i460_page_mask_memory(struct agp_bridge_data *bridge,
+ struct page *page, int type)
+{
+ unsigned long addr = phys_to_gart(page_to_phys(page));
+ return i460_mask_memory(bridge, addr, type);
+}
+
const struct agp_bridge_driver intel_i460_driver = {
.owner = THIS_MODULE,
.aperture_sizes = i460_sizes,
.fetch_size = i460_fetch_size,
.cleanup = i460_cleanup,
.tlb_flush = i460_tlb_flush,
- .mask_memory = i460_mask_memory,
+ .mask_memory = i460_page_mask_memory,
.masks = i460_masks,
.agp_enable = agp_generic_enable,
.cache_flush = global_cache_flush,
}
/* Exists to support ARGB cursors */
-static void *i8xx_alloc_pages(void)
+static struct page *i8xx_alloc_pages(void)
{
struct page *page;
}
get_page(page);
atomic_inc(&agp_bridge->current_memory_agp);
- return page_address(page);
+ return page;
}
-static void i8xx_destroy_pages(void *addr)
+static void i8xx_destroy_pages(struct page *page)
{
- struct page *page;
-
- if (addr == NULL)
+ if (page == NULL)
return;
- page = virt_to_page(addr);
set_pages_wb(page, 4);
put_page(page);
__free_pages(page, 2);
global_cache_flush();
for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
writel(agp_bridge->driver->mask_memory(agp_bridge,
- mem->memory[i],
+ mem->pages[i],
mask_type),
intel_private.registers+I810_PTE_BASE+(j*4));
}
static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
{
struct agp_memory *new;
- void *addr;
+ struct page *page;
switch (pg_count) {
- case 1: addr = agp_bridge->driver->agp_alloc_page(agp_bridge);
+ case 1: page = agp_bridge->driver->agp_alloc_page(agp_bridge);
break;
case 4:
/* kludge to get 4 physical pages for ARGB cursor */
- addr = i8xx_alloc_pages();
+ page = i8xx_alloc_pages();
break;
default:
return NULL;
}
- if (addr == NULL)
+ if (page == NULL)
return NULL;
new = agp_create_memory(pg_count);
if (new == NULL)
return NULL;
- new->memory[0] = virt_to_gart(addr);
+ new->pages[0] = page;
if (pg_count == 4) {
/* kludge to get 4 physical pages for ARGB cursor */
- new->memory[1] = new->memory[0] + PAGE_SIZE;
- new->memory[2] = new->memory[1] + PAGE_SIZE;
- new->memory[3] = new->memory[2] + PAGE_SIZE;
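+ /* The order-2 allocation makes the following struct pages contiguous. */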
+ new->pages[1] = new->pages[0] + 1;
+ new->pages[2] = new->pages[1] + 1;
+ new->pages[3] = new->pages[2] + 1;
}
new->page_count = pg_count;
new->num_scratch_pages = pg_count;
new->type = AGP_PHYS_MEMORY;
- new->physical = new->memory[0];
+ new->physical = page_to_phys(new->pages[0]);
return new;
}
agp_free_key(curr->key);
if (curr->type == AGP_PHYS_MEMORY) {
if (curr->page_count == 4)
- i8xx_destroy_pages(gart_to_virt(curr->memory[0]));
+ i8xx_destroy_pages(curr->pages[0]);
else {
- void *va = gart_to_virt(curr->memory[0]);
-
- agp_bridge->driver->agp_destroy_page(va,
+ agp_bridge->driver->agp_destroy_page(curr->pages[0],
AGP_PAGE_DESTROY_UNMAP);
- agp_bridge->driver->agp_destroy_page(va,
+ agp_bridge->driver->agp_destroy_page(curr->pages[0],
AGP_PAGE_DESTROY_FREE);
}
agp_free_page_array(curr);
}
static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge,
- unsigned long addr, int type)
+ struct page *page, int type)
{
+ unsigned long addr = phys_to_gart(page_to_phys(page));
/* Type checking must be done elsewhere */
return addr | bridge->driver->masks[type].mask;
}
for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
writel(agp_bridge->driver->mask_memory(agp_bridge,
- mem->memory[i], mask_type),
+ mem->pages[i], mask_type),
intel_private.registers+I810_PTE_BASE+(j*4));
}
readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
writel(agp_bridge->driver->mask_memory(agp_bridge,
- mem->memory[i], mask_type), intel_private.gtt+j);
+ mem->pages[i], mask_type), intel_private.gtt+j);
}
readl(intel_private.gtt+j-1);
* this conditional.
*/
static unsigned long intel_i965_mask_memory(struct agp_bridge_data *bridge,
- unsigned long addr, int type)
+ struct page *page, int type)
{
+ unsigned long addr = phys_to_gart(page_to_phys(page));
/* Shift high bits down */
addr |= (addr >> 28) & 0xf0;
}
for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
writel(agp_bridge->driver->mask_memory(agp_bridge,
- mem->memory[i], mask_type),
+ mem->pages[i], mask_type),
agp_bridge->gatt_table+nvidia_private.pg_offset+j);
}
#define AGP8X_MODE_BIT 3
#define AGP8X_MODE (1 << AGP8X_MODE_BIT)
+static unsigned long
+parisc_agp_mask_memory(struct agp_bridge_data *bridge, unsigned long addr,
+ int type);
+
static struct _parisc_agp_info {
void __iomem *ioc_regs;
void __iomem *lba_regs;
for (i = 0, j = io_pg_start; i < mem->page_count; i++) {
unsigned long paddr;
- paddr = mem->memory[i];
+ paddr = page_to_phys(mem->pages[i]);
for (k = 0;
k < info->io_pages_per_kpage;
k++, j++, paddr += info->io_page_size) {
info->gatt[j] =
- agp_bridge->driver->mask_memory(agp_bridge,
+ parisc_agp_mask_memory(agp_bridge,
paddr, type);
}
}
}
static unsigned long
-parisc_agp_mask_memory(struct agp_bridge_data *bridge,
- unsigned long addr, int type)
+parisc_agp_mask_memory(struct agp_bridge_data *bridge, unsigned long addr,
+ int type)
+{
+ return SBA_PDIR_VALID_BIT | addr;
+}
+
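+/* Same masking, but taking the struct page as the mask_memory hook now expects. */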
+static unsigned long
+parisc_agp_page_mask_memory(struct agp_bridge_data *bridge, struct page *page,
+ int type)
{
+ unsigned long addr = phys_to_gart(page_to_phys(page));
return SBA_PDIR_VALID_BIT | addr;
}
{0, 0, 0},
};
-static void *sgi_tioca_alloc_page(struct agp_bridge_data *bridge)
+static struct page *sgi_tioca_alloc_page(struct agp_bridge_data *bridge)
{
struct page *page;
int nid;
get_page(page);
atomic_inc(&agp_bridge->current_memory_agp);
- return page_address(page);
+ return page;
}
/*
*/
static unsigned long
sgi_tioca_mask_memory(struct agp_bridge_data *bridge,
- unsigned long addr, int type)
+ struct page *page, int type)
{
+ unsigned long addr = phys_to_gart(page_to_phys(page));
return tioca_physpage_to_gart(addr);
}
for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
table[j] =
- bridge->driver->mask_memory(bridge, mem->memory[i],
+ bridge->driver->mask_memory(bridge, mem->pages[i],
mem->type);
}
for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
addr = (j * PAGE_SIZE) + agp_bridge->gart_bus_addr;
cur_gatt = SVRWRKS_GET_GATT(addr);
- writel(agp_bridge->driver->mask_memory(agp_bridge, mem->memory[i], mem->type), cur_gatt+GET_GATT_OFF(addr));
+ writel(agp_bridge->driver->mask_memory(agp_bridge, mem->pages[i], mem->type), cur_gatt+GET_GATT_OFF(addr));
}
serverworks_tlbflush(mem);
return 0;
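+ /* Flush the CPU dcache over each inserted page so the card sees current contents. */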
for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
agp_bridge->gatt_table[j] =
- cpu_to_le32((mem->memory[i] & 0xFFFFF000UL) | 0x1UL);
- flush_dcache_range((unsigned long)__va(mem->memory[i]),
- (unsigned long)__va(mem->memory[i])+0x1000);
+ cpu_to_le32((page_to_phys(mem->pages[i]) & 0xFFFFF000UL) | 0x1UL);
+ flush_dcache_range((unsigned long)__va(page_to_phys(mem->pages[i])),
+ (unsigned long)__va(page_to_phys(mem->pages[i]))+0x1000);
}
(void)in_le32((volatile u32*)&agp_bridge->gatt_table[pg_start]);
mb();
}
for (i = 0; i < mem->page_count; i++) {
- gp[i] = (mem->memory[i] >> PAGE_SHIFT) | 0x80000000UL;
- flush_dcache_range((unsigned long)__va(mem->memory[i]),
- (unsigned long)__va(mem->memory[i])+0x1000);
+ gp[i] = (page_to_phys(mem->pages[i]) >> PAGE_SHIFT) | 0x80000000UL;
+ flush_dcache_range((unsigned long)__va(page_to_phys(mem->pages[i])),
+ (unsigned long)__va(page_to_phys(mem->pages[i]))+0x1000);
}
mb();
flush_dcache_range((unsigned long)gp, (unsigned long) &gp[i]);
}
for (i = 0; i < num_pages; i++)
- mem->memory[i] = phys_to_gart(page_to_phys(pages[i]));
+ mem->pages[i] = pages[i];
mem->page_count = num_pages;
mem->is_flushed = true;
static void *agp_remap(unsigned long offset, unsigned long size,
struct drm_device * dev)
{
- unsigned long *phys_addr_map, i, num_pages =
+ unsigned long i, num_pages =
PAGE_ALIGN(size) / PAGE_SIZE;
struct drm_agp_mem *agpmem;
struct page **page_map;
+ struct page **phys_page_map;
void *addr;
size = PAGE_ALIGN(size);
if (!page_map)
return NULL;
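+ /* Index straight into the AGP memory's page array; no pfn round-trip required. */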
- phys_addr_map =
- agpmem->memory->memory + (offset - agpmem->bound) / PAGE_SIZE;
+ phys_page_map = (agpmem->memory->pages + (offset - agpmem->bound) / PAGE_SIZE);
for (i = 0; i < num_pages; ++i)
- page_map[i] = pfn_to_page(phys_addr_map[i] >> PAGE_SHIFT);
+ page_map[i] = phys_page_map[i];
addr = vmap(page_map, num_pages, VM_IOREMAP, PAGE_AGP);
vfree(page_map);
* Get the page, inc the use count, and return it
*/
offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
- page = virt_to_page(__va(agpmem->memory->memory[offset]));
+ page = agpmem->memory->pages[offset];
get_page(page);
vmf->page = page;
DRM_DEBUG
("baddr = 0x%llx page = 0x%p, offset = 0x%llx, count=%d\n",
(unsigned long long)baddr,
- __va(agpmem->memory->memory[offset]),
+ agpmem->memory->pages[offset],
(unsigned long long)offset,
page_count(page));
return 0;
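+ /* Unpopulated slots fall back to the shared dummy read page. */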
if (!page)
page = dummy_read_page;
- mem->memory[mem->page_count++] =
- phys_to_gart(page_to_phys(page));
+ mem->pages[mem->page_count++] = page;
}
agp_be->mem = mem;
return 0;
struct agp_memory *next;
struct agp_memory *prev;
struct agp_bridge_data *bridge;
- unsigned long *memory;
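+ /* One struct page pointer per page of AGP memory. */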
+ struct page **pages;
size_t page_count;
int key;
int num_scratch_pages;