drm/nouveau/fb: convert to new-style nvkm_subdev
drivers/gpu/drm/nouveau/nouveau_bo.c
/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *          Ben Skeggs   <darktama@iinet.net.au>
 *          Jeremy Kolb  <jkolb@brandeis.edu>
 */

#include <linux/dma-mapping.h>
#include <linux/swiotlb.h>

#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"

#include "nouveau_bo.h"
#include "nouveau_ttm.h"
#include "nouveau_gem.h"

/*
 * NV10-NV40 tiling helpers
 */

static void
nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
			   u32 addr, u32 size, u32 pitch, u32 flags)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	int i = reg - drm->tile.reg;
	struct nvkm_fb *fb = nvxx_fb(&drm->device);
	struct nvkm_fb_tile *tile = &fb->tile.region[i];

	nouveau_fence_unref(&reg->fence);

	if (tile->pitch)
		nvkm_fb_tile_fini(fb, i, tile);

	if (pitch)
		nvkm_fb_tile_init(fb, i, addr, size, pitch, flags, tile);

	nvkm_fb_tile_prog(fb, i, tile);
}

static struct nouveau_drm_tile *
nv10_bo_get_tile_region(struct drm_device *dev, int i)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_drm_tile *tile = &drm->tile.reg[i];

	spin_lock(&drm->tile.lock);

	if (!tile->used &&
	    (!tile->fence || nouveau_fence_done(tile->fence)))
		tile->used = true;
	else
		tile = NULL;

	spin_unlock(&drm->tile.lock);
	return tile;
}

static void
nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
			struct fence *fence)
{
	struct nouveau_drm *drm = nouveau_drm(dev);

	if (tile) {
		spin_lock(&drm->tile.lock);
		tile->fence = (struct nouveau_fence *)fence_get(fence);
		tile->used = false;
		spin_unlock(&drm->tile.lock);
	}
}

static struct nouveau_drm_tile *
nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
		   u32 size, u32 pitch, u32 flags)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nvkm_fb *fb = nvxx_fb(&drm->device);
	struct nouveau_drm_tile *tile, *found = NULL;
	int i;

	for (i = 0; i < fb->tile.regions; i++) {
		tile = nv10_bo_get_tile_region(dev, i);

		if (pitch && !found) {
			found = tile;
			continue;

		} else if (tile && fb->tile.region[i].pitch) {
			/* Kill an unused tile region. */
			nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0);
		}

		nv10_bo_put_tile_region(dev, tile, NULL);
	}

	if (found)
		nv10_bo_update_tile_region(dev, found, addr, size,
					   pitch, flags);
	return found;
}
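
/*
 * Illustrative note: nv10_bo_set_tiling() makes a single pass over all
 * hardware tile regions.  The first free region is claimed for the new
 * allocation, and any other free region that still has a stale pitch
 * programmed is cleared along the way.  A hypothetical caller asking for
 * a 4KiB-pitch tiled region might do, roughly:
 *
 *	struct nouveau_drm_tile *tile;
 *
 *	tile = nv10_bo_set_tiling(dev, offset, size, 4096, tile_flags);
 *	if (!tile)
 *		... no region free, fall back to a linear layout ...
 */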

static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	if (unlikely(nvbo->gem.filp))
		DRM_ERROR("bo %p still attached to GEM object\n", bo);
	WARN_ON(nvbo->pin_refcnt > 0);
	nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
	kfree(nvbo);
}

static void
nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
		       int *align, int *size)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct nvif_device *device = &drm->device;

	if (device->info.family < NV_DEVICE_INFO_V0_TESLA) {
		if (nvbo->tile_mode) {
			if (device->info.chipset >= 0x40) {
				*align = 65536;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (device->info.chipset >= 0x30) {
				*align = 32768;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (device->info.chipset >= 0x20) {
				*align = 16384;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (device->info.chipset >= 0x10) {
				*align = 16384;
				*size = roundup(*size, 32 * nvbo->tile_mode);
			}
		}
	} else {
		*size = roundup(*size, (1 << nvbo->page_shift));
		*align = max((1 << nvbo->page_shift), *align);
	}

	*size = roundup(*size, PAGE_SIZE);
}

int
nouveau_bo_new(struct drm_device *dev, int size, int align,
	       uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
	       struct sg_table *sg, struct reservation_object *robj,
	       struct nouveau_bo **pnvbo)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_bo *nvbo;
	size_t acc_size;
	int ret;
	int type = ttm_bo_type_device;
	int lpg_shift = 12;
	int max_size;

	if (drm->client.vm)
		lpg_shift = drm->client.vm->mmu->lpg_shift;
	max_size = INT_MAX & ~((1 << lpg_shift) - 1);

	if (size <= 0 || size > max_size) {
		NV_WARN(drm, "skipped size %x\n", (u32)size);
		return -EINVAL;
	}

	if (sg)
		type = ttm_bo_type_sg;

	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
	if (!nvbo)
		return -ENOMEM;
	INIT_LIST_HEAD(&nvbo->head);
	INIT_LIST_HEAD(&nvbo->entry);
	INIT_LIST_HEAD(&nvbo->vma_list);
	nvbo->tile_mode = tile_mode;
	nvbo->tile_flags = tile_flags;
	nvbo->bo.bdev = &drm->ttm.bdev;

	if (!nv_device_is_cpu_coherent(nvxx_device(&drm->device)))
		nvbo->force_coherent = flags & TTM_PL_FLAG_UNCACHED;

	nvbo->page_shift = 12;
	if (drm->client.vm) {
		if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
			nvbo->page_shift = drm->client.vm->mmu->lpg_shift;
	}

	nouveau_bo_fixup_align(nvbo, flags, &align, &size);
	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
	nouveau_bo_placement_set(nvbo, flags, 0);

	acc_size = ttm_bo_dma_acc_size(&drm->ttm.bdev, size,
				       sizeof(struct nouveau_bo));

	ret = ttm_bo_init(&drm->ttm.bdev, &nvbo->bo, size,
			  type, &nvbo->placement,
			  align >> PAGE_SHIFT, false, NULL, acc_size, sg,
			  robj, nouveau_bo_del_ttm);
	if (ret) {
		/* ttm will call nouveau_bo_del_ttm if it fails.. */
		return ret;
	}

	*pnvbo = nvbo;
	return 0;
}
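
/*
 * Usage sketch (illustrative; the size and flags below are assumptions,
 * not requirements): allocating a plain 64KiB buffer in the GART and
 * releasing it again.  nouveau_bo_ref(NULL, &nvbo) is the usual way to
 * drop the reference handed back here.
 *
 *	struct nouveau_bo *nvbo = NULL;
 *	int ret;
 *
 *	ret = nouveau_bo_new(dev, 64 * 1024, 0, TTM_PL_FLAG_TT,
 *			     0, 0, NULL, NULL, &nvbo);
 *	if (ret)
 *		return ret;
 *	... use nvbo ...
 *	nouveau_bo_ref(NULL, &nvbo);
 */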

static void
set_placement_list(struct ttm_place *pl, unsigned *n, uint32_t type, uint32_t flags)
{
	*n = 0;

	if (type & TTM_PL_FLAG_VRAM)
		pl[(*n)++].flags = TTM_PL_FLAG_VRAM | flags;
	if (type & TTM_PL_FLAG_TT)
		pl[(*n)++].flags = TTM_PL_FLAG_TT | flags;
	if (type & TTM_PL_FLAG_SYSTEM)
		pl[(*n)++].flags = TTM_PL_FLAG_SYSTEM | flags;
}

static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	u32 vram_pages = drm->device.info.ram_size >> PAGE_SHIFT;
	unsigned i, fpfn, lpfn;

	if (drm->device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
	    nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
	    nvbo->bo.mem.num_pages < vram_pages / 4) {
		/*
		 * Make sure that the color and depth buffers are handled
		 * by independent memory controller units. Up to a 9x
		 * speed up when alpha-blending and depth-test are enabled
		 * at the same time.
		 */
		if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
			fpfn = vram_pages / 2;
			lpfn = ~0;
		} else {
			fpfn = 0;
			lpfn = vram_pages / 2;
		}
		for (i = 0; i < nvbo->placement.num_placement; ++i) {
			nvbo->placements[i].fpfn = fpfn;
			nvbo->placements[i].lpfn = lpfn;
		}
		for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
			nvbo->busy_placements[i].fpfn = fpfn;
			nvbo->busy_placements[i].lpfn = lpfn;
		}
	}
}

void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
{
	struct ttm_placement *pl = &nvbo->placement;
	uint32_t flags = (nvbo->force_coherent ? TTM_PL_FLAG_UNCACHED :
						 TTM_PL_MASK_CACHING) |
			 (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);

	pl->placement = nvbo->placements;
	set_placement_list(nvbo->placements, &pl->num_placement,
			   type, flags);

	pl->busy_placement = nvbo->busy_placements;
	set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
			   type | busy, flags);

	set_placement_range(nvbo, type);
}

int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	bool force = false, evict = false;
	int ret;

	ret = ttm_bo_reserve(bo, false, false, false, NULL);
	if (ret)
		return ret;

	if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
	    memtype == TTM_PL_FLAG_VRAM && contig) {
		if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG) {
			if (bo->mem.mem_type == TTM_PL_VRAM) {
				struct nvkm_mem *mem = bo->mem.mm_node;
				if (!list_is_singular(&mem->regions))
					evict = true;
			}
			nvbo->tile_flags &= ~NOUVEAU_GEM_TILE_NONCONTIG;
			force = true;
		}
	}

	if (nvbo->pin_refcnt) {
		if (!(memtype & (1 << bo->mem.mem_type)) || evict) {
			NV_ERROR(drm, "bo %p pinned elsewhere: "
				      "0x%08x vs 0x%08x\n", bo,
				 1 << bo->mem.mem_type, memtype);
			ret = -EBUSY;
		}
		nvbo->pin_refcnt++;
		goto out;
	}

	if (evict) {
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT, 0);
		ret = nouveau_bo_validate(nvbo, false, false);
		if (ret)
			goto out;
	}

	nvbo->pin_refcnt++;
	nouveau_bo_placement_set(nvbo, memtype, 0);

	/* drop pin_refcnt temporarily, so we don't trip the assertion
	 * in nouveau_bo_move() that makes sure we're not trying to
	 * move a pinned buffer
	 */
	nvbo->pin_refcnt--;
	ret = nouveau_bo_validate(nvbo, false, false);
	if (ret)
		goto out;
	nvbo->pin_refcnt++;

	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		drm->gem.vram_available -= bo->mem.size;
		break;
	case TTM_PL_TT:
		drm->gem.gart_available -= bo->mem.size;
		break;
	default:
		break;
	}

out:
	if (force && ret)
		nvbo->tile_flags |= NOUVEAU_GEM_TILE_NONCONTIG;
	ttm_bo_unreserve(bo);
	return ret;
}

int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret, ref;

	ret = ttm_bo_reserve(bo, false, false, false, NULL);
	if (ret)
		return ret;

	ref = --nvbo->pin_refcnt;
	WARN_ON_ONCE(ref < 0);
	if (ref)
		goto out;

	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);

	ret = nouveau_bo_validate(nvbo, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			drm->gem.vram_available += bo->mem.size;
			break;
		case TTM_PL_TT:
			drm->gem.gart_available += bo->mem.size;
			break;
		default:
			break;
		}
	}

out:
	ttm_bo_unreserve(bo);
	return ret;
}
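
/*
 * Usage sketch (illustrative): pinning a buffer into contiguous VRAM,
 * e.g. for scanout.  Pins nest via pin_refcnt, so every successful
 * nouveau_bo_pin() must be balanced by a nouveau_bo_unpin():
 *
 *	ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, true);
 *	if (ret)
 *		return ret;
 *	... the buffer cannot be moved or evicted here ...
 *	nouveau_bo_unpin(nvbo);
 */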

int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
	int ret;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, NULL);
	if (ret)
		return ret;

	/*
	 * TTM buffers allocated using the DMA API already have a mapping, let's
	 * use it instead.
	 */
	if (!nvbo->force_coherent)
		ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
				  &nvbo->kmap);

	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
	if (!nvbo)
		return;

	/*
	 * TTM buffers allocated using the DMA API already had a coherent
	 * mapping which we used, no need to unmap.
	 */
	if (!nvbo->force_coherent)
		ttm_bo_kunmap(&nvbo->kmap);
}

void
nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct nvkm_device *device = nvxx_device(&drm->device);
	struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
	int i;

	if (!ttm_dma)
		return;

	/* Don't waste time looping if the object is coherent */
	if (nvbo->force_coherent)
		return;

	for (i = 0; i < ttm_dma->ttm.num_pages; i++)
		dma_sync_single_for_device(nv_device_base(device),
			ttm_dma->dma_address[i], PAGE_SIZE, DMA_TO_DEVICE);
}

void
nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct nvkm_device *device = nvxx_device(&drm->device);
	struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
	int i;

	if (!ttm_dma)
		return;

	/* Don't waste time looping if the object is coherent */
	if (nvbo->force_coherent)
		return;

	for (i = 0; i < ttm_dma->ttm.num_pages; i++)
		dma_sync_single_for_cpu(nv_device_base(device),
			ttm_dma->dma_address[i], PAGE_SIZE, DMA_FROM_DEVICE);
}
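
/*
 * Illustrative access pattern: on non-coherent platforms, CPU reads and
 * writes of a DMA-mapped buffer should be bracketed by the two helpers
 * above, mirroring the usual dma_sync_single_*() discipline:
 *
 *	nouveau_bo_sync_for_cpu(nvbo);
 *	... CPU reads/writes of the buffer contents ...
 *	nouveau_bo_sync_for_device(nvbo);
 *
 * Note that nouveau_bo_validate() below already calls
 * nouveau_bo_sync_for_device() itself, so a freshly validated buffer is
 * visible to the GPU without an extra sync.
 */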

int
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
		    bool no_wait_gpu)
{
	int ret;

	ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
			      interruptible, no_wait_gpu);
	if (ret)
		return ret;

	nouveau_bo_sync_for_device(nvbo);

	return 0;
}

static inline void *
_nouveau_bo_mem_index(struct nouveau_bo *nvbo, unsigned index, void *mem, u8 sz)
{
	struct ttm_dma_tt *dma_tt;
	u8 *m = mem;

	index *= sz;

	if (m) {
		/* kmap'd address, return the corresponding offset */
		m += index;
	} else {
		/* DMA-API mapping, lookup the right address */
		dma_tt = (struct ttm_dma_tt *)nvbo->bo.ttm;
		m = dma_tt->cpu_address[index / PAGE_SIZE];
		m += index % PAGE_SIZE;
	}

	return m;
}
#define nouveau_bo_mem_index(o, i, m) _nouveau_bo_mem_index(o, i, m, sizeof(*m))
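
/*
 * Example expansion (illustrative): for the u32 accessors below,
 * nouveau_bo_mem_index(nvbo, index, mem) becomes
 * _nouveau_bo_mem_index(nvbo, index, mem, 4), i.e. a byte offset of
 * index * sizeof(u32) is applied either to the kmap'd linear mapping or,
 * for force_coherent objects, to the per-page cpu_address[] table of the
 * DMA-API-allocated ttm_dma_tt.
 */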

void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

	mem = nouveau_bo_mem_index(nvbo, index, mem);

	if (is_iomem)
		iowrite16_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

	mem = nouveau_bo_mem_index(nvbo, index, mem);

	if (is_iomem)
		return ioread32_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

	mem = nouveau_bo_mem_index(nvbo, index, mem);

	if (is_iomem)
		iowrite32_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}
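
/*
 * Usage sketch (illustrative): the accessors above take a dword/word
 * index, not a byte offset.  Poking the first two dwords of a buffer:
 *
 *	ret = nouveau_bo_map(nvbo);
 *	if (ret)
 *		return ret;
 *	nouveau_bo_wr32(nvbo, 0, 0xdeadbeef);	(dword 0, byte offset 0)
 *	val = nouveau_bo_rd32(nvbo, 1);		(dword 1, byte offset 4)
 *	nouveau_bo_unmap(nvbo);
 */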

static struct ttm_tt *
nouveau_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
		      uint32_t page_flags, struct page *dummy_read)
{
#if __OS_HAS_AGP
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct drm_device *dev = drm->dev;

	if (drm->agp.stat == ENABLED) {
		return ttm_agp_tt_create(bdev, dev->agp->bridge, size,
					 page_flags, dummy_read);
	}
#endif

	return nouveau_sgdma_create_ttm(bdev, size, page_flags, dummy_read);
}

static int
nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	/* We'll do this from user space. */
	return 0;
}

static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			 struct ttm_mem_type_manager *man)
{
	struct nouveau_drm *drm = nouveau_bdev(bdev);

	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
					 TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;

		if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
			/* Some BARs do not support being ioremapped WC */
			if (nvxx_bar(&drm->device)->iomap_uncached) {
				man->available_caching = TTM_PL_FLAG_UNCACHED;
				man->default_caching = TTM_PL_FLAG_UNCACHED;
			}

			man->func = &nouveau_vram_manager;
			man->io_reserve_fastpath = false;
			man->use_io_reserve_lru = true;
		} else {
			man->func = &ttm_bo_manager_func;
		}
		break;
	case TTM_PL_TT:
		if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
			man->func = &nouveau_gart_manager;
		else
		if (drm->agp.stat != ENABLED)
			man->func = &nv04_gart_manager;
		else
			man->func = &ttm_bo_manager_func;

		if (drm->agp.stat == ENABLED) {
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_WC;
			man->default_caching = TTM_PL_FLAG_WC;
		} else {
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
				     TTM_MEMTYPE_FLAG_CMA;
			man->available_caching = TTM_PL_MASK_CACHING;
			man->default_caching = TTM_PL_FLAG_CACHED;
		}

		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
					 TTM_PL_FLAG_SYSTEM);
		break;
	default:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
		break;
	}

	*pl = nvbo->placement;
}

static int
nve0_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 2);
	if (ret == 0) {
		BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
		OUT_RING  (chan, handle & 0x0000ffff);
		FIRE_RING (chan);
	}
	return ret;
}

static int
nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nvkm_mem *node = old_mem->mm_node;
	int ret = RING_SPACE(chan, 10);
	if (ret == 0) {
		BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
		OUT_RING  (chan, upper_32_bits(node->vma[0].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[0].offset));
		OUT_RING  (chan, upper_32_bits(node->vma[1].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[1].offset));
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, new_mem->num_pages);
		BEGIN_IMC0(chan, NvSubCopy, 0x0300, 0x0386);
	}
	return ret;
}

static int
nvc0_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 2);
	if (ret == 0) {
		BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
		OUT_RING  (chan, handle);
	}
	return ret;
}

static int
nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nvkm_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;
	int ret;

	while (page_count) {
		int line_count = (page_count > 8191) ? 8191 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_NVC0(chan, NvSubCopy, 0x030c, 8);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, upper_32_bits(dst_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, line_count);
		BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
		OUT_RING  (chan, 0x00000110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nvkm_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;
	int ret;

	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 12);
		if (ret)
			return ret;

		BEGIN_NVC0(chan, NvSubCopy, 0x0238, 2);
		OUT_RING  (chan, upper_32_bits(dst_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		BEGIN_NVC0(chan, NvSubCopy, 0x030c, 6);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* line_length */
		OUT_RING  (chan, line_count);
		BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
		OUT_RING  (chan, 0x00100110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}
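
/*
 * Arithmetic note (illustrative): each M2MF submission above copies
 * line_count lines of PAGE_SIZE bytes, with line_count capped at 2047
 * per iteration here.  A 16MiB buffer with 4KiB pages is 4096 pages,
 * so it goes out as ceil(4096 / 2047) = 3 submissions.
 */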

static int
nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nvkm_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;
	int ret;

	while (page_count) {
		int line_count = (page_count > 8191) ? 8191 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, upper_32_bits(dst_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, line_count);
		BEGIN_NV04(chan, NvSubCopy, 0x0300, 1);
		OUT_RING  (chan, 0x00000110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nvkm_mem *node = old_mem->mm_node;
	int ret = RING_SPACE(chan, 7);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0320, 6);
		OUT_RING  (chan, upper_32_bits(node->vma[0].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[0].offset));
		OUT_RING  (chan, upper_32_bits(node->vma[1].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[1].offset));
		OUT_RING  (chan, 0x00000000 /* COPY */);
		OUT_RING  (chan, new_mem->num_pages << PAGE_SHIFT);
	}
	return ret;
}

static int
nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nvkm_mem *node = old_mem->mm_node;
	int ret = RING_SPACE(chan, 7);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0304, 6);
		OUT_RING  (chan, new_mem->num_pages << PAGE_SHIFT);
		OUT_RING  (chan, upper_32_bits(node->vma[0].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[0].offset));
		OUT_RING  (chan, upper_32_bits(node->vma[1].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[1].offset));
		OUT_RING  (chan, 0x00000000 /* MODE_COPY, QUERY_NONE */);
	}
	return ret;
}

static int
nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 6);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
		OUT_RING  (chan, handle);
		BEGIN_NV04(chan, NvSubCopy, 0x0180, 3);
		OUT_RING  (chan, chan->drm->ntfy.handle);
		OUT_RING  (chan, chan->vram.handle);
		OUT_RING  (chan, chan->vram.handle);
	}

	return ret;
}

static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nvkm_mem *node = old_mem->mm_node;
	u64 length = (new_mem->num_pages << PAGE_SHIFT);
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	int src_tiled = !!node->memtype;
	int dst_tiled = !!((struct nvkm_mem *)new_mem->mm_node)->memtype;
	int ret;

	while (length) {
		u32 amount, stride, height;

		ret = RING_SPACE(chan, 18 + 6 * (src_tiled + dst_tiled));
		if (ret)
			return ret;

		amount  = min(length, (u64)(4 * 1024 * 1024));
		stride  = 16 * 4;
		height  = amount / stride;

		if (src_tiled) {
			BEGIN_NV04(chan, NvSubCopy, 0x0200, 7);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, stride);
			OUT_RING  (chan, height);
			OUT_RING  (chan, 1);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
		} else {
			BEGIN_NV04(chan, NvSubCopy, 0x0200, 1);
			OUT_RING  (chan, 1);
		}
		if (dst_tiled) {
			BEGIN_NV04(chan, NvSubCopy, 0x021c, 7);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, stride);
			OUT_RING  (chan, height);
			OUT_RING  (chan, 1);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
		} else {
			BEGIN_NV04(chan, NvSubCopy, 0x021c, 1);
			OUT_RING  (chan, 1);
		}

		BEGIN_NV04(chan, NvSubCopy, 0x0238, 2);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, upper_32_bits(dst_offset));
		BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		OUT_RING  (chan, stride);
		OUT_RING  (chan, stride);
		OUT_RING  (chan, stride);
		OUT_RING  (chan, height);
		OUT_RING  (chan, 0x00000101);
		OUT_RING  (chan, 0x00000000);
		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING  (chan, 0);

		length -= amount;
		src_offset += amount;
		dst_offset += amount;
	}

	return 0;
}

static int
nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 4);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
		OUT_RING  (chan, handle);
		BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
		OUT_RING  (chan, chan->drm->ntfy.handle);
	}

	return ret;
}

static inline uint32_t
nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
		      struct nouveau_channel *chan, struct ttm_mem_reg *mem)
{
	if (mem->mem_type == TTM_PL_TT)
		return NvDmaTT;
	return chan->vram.handle;
}

static int
nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	u32 src_offset = old_mem->start << PAGE_SHIFT;
	u32 dst_offset = new_mem->start << PAGE_SHIFT;
	u32 page_count = new_mem->num_pages;
	int ret;

	ret = RING_SPACE(chan, 3);
	if (ret)
		return ret;

	BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));

	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_NV04(chan, NvSubCopy,
				 NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
		OUT_RING  (chan, src_offset);
		OUT_RING  (chan, dst_offset);
		OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* line_length */
		OUT_RING  (chan, line_count);
		OUT_RING  (chan, 0x00000101);
		OUT_RING  (chan, 0x00000000);
		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING  (chan, 0);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
		     struct ttm_mem_reg *mem)
{
	struct nvkm_mem *old_node = bo->mem.mm_node;
	struct nvkm_mem *new_node = mem->mm_node;
	u64 size = (u64)mem->num_pages << PAGE_SHIFT;
	int ret;

	/* both VMAs are attached to the old node so they're torn down
	 * together with it; see the comment in nouveau_bo_move_m2mf()
	 */
	ret = nvkm_vm_get(drm->client.vm, size, old_node->page_shift,
			  NV_MEM_ACCESS_RW, &old_node->vma[0]);
	if (ret)
		return ret;

	ret = nvkm_vm_get(drm->client.vm, size, new_node->page_shift,
			  NV_MEM_ACCESS_RW, &old_node->vma[1]);
	if (ret) {
		nvkm_vm_put(&old_node->vma[0]);
		return ret;
	}

	nvkm_vm_map(&old_node->vma[0], old_node);
	nvkm_vm_map(&old_node->vma[1], new_node);
	return 0;
}

static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
		     bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_channel *chan = drm->ttm.chan;
	struct nouveau_cli *cli = (void *)chan->user.client;
	struct nouveau_fence *fence;
	int ret;

	/* create temporary vmas for the transfer and attach them to the
	 * old nvkm_mem node, these will get cleaned up after ttm has
	 * destroyed the ttm_mem_reg
	 */
	if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
		ret = nouveau_bo_move_prep(drm, bo, new_mem);
		if (ret)
			return ret;
	}

	mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
	ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, intr);
	if (ret == 0) {
		ret = drm->ttm.move(chan, bo, &bo->mem, new_mem);
		if (ret == 0) {
			ret = nouveau_fence_new(chan, false, &fence);
			if (ret == 0) {
				ret = ttm_bo_move_accel_cleanup(bo,
								&fence->base,
								evict,
								no_wait_gpu,
								new_mem);
				nouveau_fence_unref(&fence);
			}
		}
	}
	mutex_unlock(&cli->mutex);
	return ret;
}

void
nouveau_bo_move_init(struct nouveau_drm *drm)
{
	static const struct {
		const char *name;
		int engine;
		s32 oclass;
		int (*exec)(struct nouveau_channel *,
			    struct ttm_buffer_object *,
			    struct ttm_mem_reg *, struct ttm_mem_reg *);
		int (*init)(struct nouveau_channel *, u32 handle);
	} _methods[] = {
		{  "COPY", 4, 0xb0b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xb0b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 4, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{ "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init },
		{ "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 0, 0x85b5, nva3_bo_move_copy, nv50_bo_move_init },
		{ "CRYPT", 0, 0x74c1, nv84_bo_move_exec, nv50_bo_move_init },
		{  "M2MF", 0, 0x9039, nvc0_bo_move_m2mf, nvc0_bo_move_init },
		{  "M2MF", 0, 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init },
		{  "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init },
		{},
		/* the empty entry above terminates probing; the 0x88b4
		 * entry below it is never reached
		 */
		{ "CRYPT", 0, 0x88b4, nv98_bo_move_exec, nv50_bo_move_init },
	}, *mthd = _methods;
	const char *name = "CPU";
	int ret;

	do {
		struct nouveau_channel *chan;

		if (mthd->engine)
			chan = drm->cechan;
		else
			chan = drm->channel;
		if (chan == NULL)
			continue;

		ret = nvif_object_init(&chan->user,
				       mthd->oclass | (mthd->engine << 16),
				       mthd->oclass, NULL, 0,
				       &drm->ttm.copy);
		if (ret == 0) {
			ret = mthd->init(chan, drm->ttm.copy.handle);
			if (ret) {
				nvif_object_fini(&drm->ttm.copy);
				continue;
			}

			drm->ttm.move = mthd->exec;
			drm->ttm.chan = chan;
			name = mthd->name;
			break;
		}
	} while ((++mthd)->exec);

	NV_INFO(drm, "MM: using %s for buffer copies\n", name);
}
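
/*
 * Note (illustrative): the _methods[] table is probed strictly in order,
 * so the newest copy-engine classes are preferred and M2MF is the last
 * resort before falling back to CPU copies.  A successful probe is
 * reported as, for example:
 *
 *	nouveau ...: MM: using COPY for buffer copies
 */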

static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct ttm_place placement_memtype = {
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING
	};
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}

static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct ttm_place placement_memtype = {
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING
	};
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, new_mem);

out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}

static void
nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nvkm_vma *vma;

	/* ttm can now (stupidly) pass the driver bos it didn't create... */
	if (bo->destroy != nouveau_bo_del_ttm)
		return;

	list_for_each_entry(vma, &nvbo->vma_list, head) {
		if (new_mem && new_mem->mem_type != TTM_PL_SYSTEM &&
			      (new_mem->mem_type == TTM_PL_VRAM ||
			       nvbo->page_shift != vma->vm->mmu->lpg_shift)) {
			nvkm_vm_map(vma, new_mem->mm_node);
		} else {
			nvkm_vm_unmap(vma);
		}
	}
}

static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
		   struct nouveau_drm_tile **new_tile)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 offset = new_mem->start << PAGE_SHIFT;

	*new_tile = NULL;
	if (new_mem->mem_type != TTM_PL_VRAM)
		return 0;

	if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
		*new_tile = nv10_bo_set_tiling(dev, offset, new_mem->size,
					       nvbo->tile_mode,
					       nvbo->tile_flags);
	}

	return 0;
}

static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
		      struct nouveau_drm_tile *new_tile,
		      struct nouveau_drm_tile **old_tile)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct fence *fence = reservation_object_get_excl(bo->resv);

	nv10_bo_put_tile_region(dev, *old_tile, fence);
	*old_tile = new_tile;
}

static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
		bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_drm_tile *new_tile = NULL;
	int ret = 0;

	if (nvbo->pin_refcnt)
		NV_WARN(drm, "Moving pinned object %p!\n", nvbo);

	if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
		ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
		if (ret)
			return ret;
	}

	/* Fake bo copy. */
	if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
		BUG_ON(bo->mem.mm_node != NULL);
		bo->mem = *new_mem;
		new_mem->mm_node = NULL;
		goto out;
	}

	/* Hardware assisted copy. */
	if (drm->ttm.move) {
		if (new_mem->mem_type == TTM_PL_SYSTEM)
			ret = nouveau_bo_move_flipd(bo, evict, intr,
						    no_wait_gpu, new_mem);
		else if (old_mem->mem_type == TTM_PL_SYSTEM)
			ret = nouveau_bo_move_flips(bo, evict, intr,
						    no_wait_gpu, new_mem);
		else
			ret = nouveau_bo_move_m2mf(bo, evict, intr,
						   no_wait_gpu, new_mem);
		if (!ret)
			goto out;
	}

	/* Fallback to software copy. */
	ret = ttm_bo_wait(bo, true, intr, no_wait_gpu);
	if (ret == 0)
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);

out:
	if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
		if (ret)
			nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
		else
			nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
	}

	return ret;
}

static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	return drm_vma_node_verify_access(&nvbo->gem.vma_node, filp);
}

static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct nvkm_mem *node = mem->mm_node;
	int ret;

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		return 0;
	case TTM_PL_TT:
#if __OS_HAS_AGP
		if (drm->agp.stat == ENABLED) {
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = drm->agp.base;
			mem->bus.is_iomem = !drm->dev->agp->cant_use_aperture;
		}
#endif
		if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA ||
		    !node->memtype)
			/* untiled */
			break;
		/* fallthrough, tiled memory */
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.base = nv_device_resource_start(nvxx_device(&drm->device), 1);
		mem->bus.is_iomem = true;
		if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
			struct nvkm_bar *bar = nvxx_bar(&drm->device);
			int page_shift = 12;
			if (drm->device.info.family >= NV_DEVICE_INFO_V0_FERMI)
				page_shift = node->page_shift;

			ret = nvkm_bar_umap(bar, node->size << 12, page_shift,
					    &node->bar_vma);
			if (ret)
				return ret;

			nvkm_vm_map(&node->bar_vma, node);
			mem->bus.offset = node->bar_vma.offset;
		}
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct nvkm_mem *node = mem->mm_node;

	if (!node->bar_vma.node)
		return;

	nvkm_vm_unmap(&node->bar_vma);
	nvkm_vm_put(&node->bar_vma);
}

static int
nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nvif_device *device = &drm->device;
	u32 mappable = nv_device_resource_len(nvxx_device(device), 1) >> PAGE_SHIFT;
	int i, ret;

	/* as long as the bo isn't in vram, and isn't tiled, we've got
	 * nothing to do here.
	 */
	if (bo->mem.mem_type != TTM_PL_VRAM) {
		if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA ||
		    !nouveau_bo_tile_layout(nvbo))
			return 0;

		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
			nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT, 0);

			ret = nouveau_bo_validate(nvbo, false, false);
			if (ret)
				return ret;
		}
		return 0;
	}

	/* make sure bo is in mappable vram */
	if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
	    bo->mem.start + bo->mem.num_pages < mappable)
		return 0;

	for (i = 0; i < nvbo->placement.num_placement; ++i) {
		nvbo->placements[i].fpfn = 0;
		nvbo->placements[i].lpfn = mappable;
	}

	for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
		nvbo->busy_placements[i].fpfn = 0;
		nvbo->busy_placements[i].lpfn = mappable;
	}

	nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
	return nouveau_bo_validate(nvbo, false, false);
}

static int
nouveau_ttm_tt_populate(struct ttm_tt *ttm)
{
	struct ttm_dma_tt *ttm_dma = (void *)ttm;
	struct nouveau_drm *drm;
	struct nvkm_device *device;
	struct drm_device *dev;
	struct device *pdev;
	unsigned i;
	int r;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (ttm->state != tt_unpopulated)
		return 0;

	if (slave && ttm->sg) {
		/* make userspace faulting work */
		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
						 ttm_dma->dma_address, ttm->num_pages);
		ttm->state = tt_unbound;
		return 0;
	}

	drm = nouveau_bdev(ttm->bdev);
	device = nvxx_device(&drm->device);
	dev = drm->dev;
	pdev = nv_device_base(device);

	/*
	 * Objects matching this condition have been marked as force_coherent,
	 * so use the DMA API for them.
	 */
	if (!nv_device_is_cpu_coherent(device) &&
	    ttm->caching_state == tt_uncached)
		return ttm_dma_populate(ttm_dma, dev->dev);

#if __OS_HAS_AGP
	if (drm->agp.stat == ENABLED) {
		return ttm_agp_tt_populate(ttm);
	}
#endif

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		return ttm_dma_populate((void *)ttm, dev->dev);
	}
#endif

	r = ttm_pool_populate(ttm);
	if (r) {
		return r;
	}

	for (i = 0; i < ttm->num_pages; i++) {
		dma_addr_t addr;

		addr = dma_map_page(pdev, ttm->pages[i], 0, PAGE_SIZE,
				    DMA_BIDIRECTIONAL);

		if (dma_mapping_error(pdev, addr)) {
			/* unwind every page mapped so far, including page 0 */
			while (i--) {
				dma_unmap_page(pdev, ttm_dma->dma_address[i],
					       PAGE_SIZE, DMA_BIDIRECTIONAL);
				ttm_dma->dma_address[i] = 0;
			}
			ttm_pool_unpopulate(ttm);
			return -EFAULT;
		}

		ttm_dma->dma_address[i] = addr;
	}
	return 0;
}

static void
nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	struct ttm_dma_tt *ttm_dma = (void *)ttm;
	struct nouveau_drm *drm;
	struct nvkm_device *device;
	struct drm_device *dev;
	struct device *pdev;
	unsigned i;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (slave)
		return;

	drm = nouveau_bdev(ttm->bdev);
	device = nvxx_device(&drm->device);
	dev = drm->dev;
	pdev = nv_device_base(device);

	/*
	 * Objects matching this condition have been marked as force_coherent,
	 * so use the DMA API for them.
	 */
	if (!nv_device_is_cpu_coherent(device) &&
	    ttm->caching_state == tt_uncached) {
		ttm_dma_unpopulate(ttm_dma, dev->dev);
		return;
	}

#if __OS_HAS_AGP
	if (drm->agp.stat == ENABLED) {
		ttm_agp_tt_unpopulate(ttm);
		return;
	}
#endif

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		ttm_dma_unpopulate((void *)ttm, dev->dev);
		return;
	}
#endif

	for (i = 0; i < ttm->num_pages; i++) {
		if (ttm_dma->dma_address[i]) {
			dma_unmap_page(pdev, ttm_dma->dma_address[i], PAGE_SIZE,
				       DMA_BIDIRECTIONAL);
		}
	}

	ttm_pool_unpopulate(ttm);
}

void
nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence, bool exclusive)
{
	struct reservation_object *resv = nvbo->bo.resv;

	if (exclusive)
		reservation_object_add_excl_fence(resv, &fence->base);
	else if (fence)
		reservation_object_add_shared_fence(resv, &fence->base);
}

struct ttm_bo_driver nouveau_bo_driver = {
	.ttm_tt_create = &nouveau_ttm_tt_create,
	.ttm_tt_populate = &nouveau_ttm_tt_populate,
	.ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
	.invalidate_caches = nouveau_bo_invalidate_caches,
	.init_mem_type = nouveau_bo_init_mem_type,
	.evict_flags = nouveau_bo_evict_flags,
	.move_notify = nouveau_bo_move_ntfy,
	.move = nouveau_bo_move,
	.verify_access = nouveau_bo_verify_access,
	.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
	.io_mem_free = &nouveau_ttm_io_mem_free,
};
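
/*
 * Note (illustrative): this table is what hooks the functions above into
 * TTM; it is passed to ttm_bo_device_init() when the bdev is set up (see
 * nouveau_ttm.c), after which TTM drives moves, faults and page
 * population through these callbacks.
 */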

struct nvkm_vma *
nouveau_bo_vma_find(struct nouveau_bo *nvbo, struct nvkm_vm *vm)
{
	struct nvkm_vma *vma;
	list_for_each_entry(vma, &nvbo->vma_list, head) {
		if (vma->vm == vm)
			return vma;
	}

	return NULL;
}

int
nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nvkm_vm *vm,
		   struct nvkm_vma *vma)
{
	const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
	int ret;

	ret = nvkm_vm_get(vm, size, nvbo->page_shift,
			  NV_MEM_ACCESS_RW, vma);
	if (ret)
		return ret;

	if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM &&
	    (nvbo->bo.mem.mem_type == TTM_PL_VRAM ||
	     nvbo->page_shift != vma->vm->mmu->lpg_shift))
		nvkm_vm_map(vma, nvbo->bo.mem.mm_node);

	list_add_tail(&vma->head, &nvbo->vma_list);
	vma->refcount = 1;
	return 0;
}
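
/*
 * Usage sketch (illustrative; cli->vm and the error handling here are
 * assumptions): per-client mapping of a bo, using the find/add pair
 * above so each VM gets at most one refcounted vma per bo:
 *
 *	vma = nouveau_bo_vma_find(nvbo, cli->vm);
 *	if (!vma) {
 *		vma = kzalloc(sizeof(*vma), GFP_KERNEL);
 *		if (!vma)
 *			return -ENOMEM;
 *		ret = nouveau_bo_vma_add(nvbo, cli->vm, vma);
 *		if (ret)
 *			kfree(vma);
 *	} else {
 *		vma->refcount++;
 *	}
 */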

void
nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nvkm_vma *vma)
{
	if (vma->node) {
		if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM)
			nvkm_vm_unmap(vma);
		nvkm_vm_put(vma);
		list_del(&vma->head);
	}
}
1658 }