/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <jang@linux.vnet.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/iommu-helper.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <asm/pci_dma.h>

static enum zpci_ioat_dtype zpci_ioat_dt = ZPCI_IOTA_RTTO;

static struct kmem_cache *dma_region_table_cache;
static struct kmem_cache *dma_page_table_cache;

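/*
 * The DMA translation table registered for a device (see
 * zpci_dma_init_device()) is a three-level structure: a region table whose
 * entries point to segment tables, whose entries in turn point to page
 * tables holding the page-frame addresses.  The helpers below allocate each
 * level from its kmem cache with all entries marked invalid and protected.
 */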
static unsigned long *dma_alloc_cpu_table(void)
{
	unsigned long *table, *entry;

	table = kmem_cache_alloc(dma_region_table_cache, GFP_ATOMIC);
	if (!table)
		return NULL;

	for (entry = table; entry < table + ZPCI_TABLE_ENTRIES; entry++)
		*entry = ZPCI_TABLE_INVALID | ZPCI_TABLE_PROTECTED;
	return table;
}

static void dma_free_cpu_table(void *table)
{
	kmem_cache_free(dma_region_table_cache, table);
}

static unsigned long *dma_alloc_page_table(void)
{
	unsigned long *table, *entry;

	table = kmem_cache_alloc(dma_page_table_cache, GFP_ATOMIC);
	if (!table)
		return NULL;

	for (entry = table; entry < table + ZPCI_PT_ENTRIES; entry++)
		*entry = ZPCI_PTE_INVALID | ZPCI_TABLE_PROTECTED;
	return table;
}

static void dma_free_page_table(void *table)
{
	kmem_cache_free(dma_page_table_cache, table);
}

static unsigned long *dma_get_seg_table_origin(unsigned long *entry)
{
	unsigned long *sto;

	if (reg_entry_isvalid(*entry))
		sto = get_rt_sto(*entry);
	else {
		sto = dma_alloc_cpu_table();
		if (!sto)
			return NULL;

		set_rt_sto(entry, sto);
		validate_rt_entry(entry);
		entry_clr_protected(entry);
	}
	return sto;
}

static unsigned long *dma_get_page_table_origin(unsigned long *entry)
{
	unsigned long *pto;

	if (reg_entry_isvalid(*entry))
		pto = get_st_pto(*entry);
	else {
		pto = dma_alloc_page_table();
		if (!pto)
			return NULL;
		set_st_pto(entry, pto);
		validate_st_entry(entry);
		entry_clr_protected(entry);
	}
	return pto;
}

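/*
 * Walk the translation table for a DMA address: the region, segment and
 * page table indexes are derived from the address (calc_rtx/calc_sx/calc_px)
 * and missing lower-level tables are allocated on demand.  Returns a pointer
 * to the page table entry, or NULL if a table allocation failed.
 */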
static unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr)
{
	unsigned long *sto, *pto;
	unsigned int rtx, sx, px;

	rtx = calc_rtx(dma_addr);
	sto = dma_get_seg_table_origin(&rto[rtx]);
	if (!sto)
		return NULL;

	sx = calc_sx(dma_addr);
	pto = dma_get_page_table_origin(&sto[sx]);
	if (!pto)
		return NULL;

	px = calc_px(dma_addr);
	return &pto[px];
}

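/*
 * Update a single page table entry: either invalidate it, or store the new
 * page-frame address, validate it and set or clear the protection bit
 * according to the requested flags.
 */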
static void dma_update_cpu_trans(struct zpci_dev *zdev, void *page_addr,
				 dma_addr_t dma_addr, int flags)
{
	unsigned long *entry;

	entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr);
	if (!entry) {
		WARN_ON_ONCE(1);
		return;
	}

	if (flags & ZPCI_PTE_INVALID) {
		invalidate_pt_entry(entry);
		return;
	} else {
		set_pt_pfaa(entry, page_addr);
		validate_pt_entry(entry);
	}

	if (flags & ZPCI_TABLE_PROTECTED)
		entry_set_protected(entry);
	else
		entry_clr_protected(entry);
}

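/*
 * Apply a mapping change to a whole range: the range is split into
 * PAGE_SIZE chunks and each page table entry is updated under
 * dma_table_lock.  Afterwards rpcit_instr() refreshes the device's
 * translations; the refresh is skipped when only previously invalid
 * entries were validated and the device does not require a TLB refresh
 * in that case (see the comment below).
 */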
static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
			    dma_addr_t dma_addr, size_t size, int flags)
{
	unsigned int nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	u8 *page_addr = (u8 *) (pa & PAGE_MASK);
	dma_addr_t start_dma_addr = dma_addr;
	unsigned long irq_flags;
	int i, rc = 0;

	if (!nr_pages)
		return -EINVAL;

	spin_lock_irqsave(&zdev->dma_table_lock, irq_flags);
	if (!zdev->dma_table) {
		dev_err(&zdev->pdev->dev, "Missing DMA table\n");
		goto no_refresh;
	}

	for (i = 0; i < nr_pages; i++) {
		dma_update_cpu_trans(zdev, page_addr, dma_addr, flags);
		page_addr += PAGE_SIZE;
		dma_addr += PAGE_SIZE;
	}

	/*
	 * rpcit is not required to establish new translations when previously
	 * invalid translation-table entries are validated, however it is
	 * required when altering previously valid entries.
	 */
	if (!zdev->tlb_refresh &&
	    ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID))
		/*
		 * TODO: also need to check that the old entry is indeed INVALID
		 * and not only for one page but for the whole range...
		 * -> now we WARN_ON in that case but with lazy unmap that
		 * needs to be redone!
		 */
		goto no_refresh;
	rc = rpcit_instr((u64) zdev->fh << 32, start_dma_addr,
			 nr_pages * PAGE_SIZE);

no_refresh:
	spin_unlock_irqrestore(&zdev->dma_table_lock, irq_flags);
	return rc;
}

static void dma_free_seg_table(unsigned long entry)
{
	unsigned long *sto = get_rt_sto(entry);
	int sx;

	for (sx = 0; sx < ZPCI_TABLE_ENTRIES; sx++)
		if (reg_entry_isvalid(sto[sx]))
			dma_free_page_table(get_st_pto(sto[sx]));

	dma_free_cpu_table(sto);
}

static void dma_cleanup_tables(struct zpci_dev *zdev)
{
	unsigned long *table;
	int rtx;

	if (!zdev || !zdev->dma_table)
		return;

	table = zdev->dma_table;
	for (rtx = 0; rtx < ZPCI_TABLE_ENTRIES; rtx++)
		if (reg_entry_isvalid(table[rtx]))
			dma_free_seg_table(table[rtx]);

	dma_free_cpu_table(table);
	zdev->dma_table = NULL;
}

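/*
 * IOMMU address space management: iommu_bitmap tracks the pages of the
 * device's DMA address range with one bit per page.  Allocation is a
 * next-fit search starting at next_bit, with a second search from the
 * start of the bitmap as fallback; both run under iommu_bitmap_lock.
 */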
static unsigned long __dma_alloc_iommu(struct zpci_dev *zdev, unsigned long start,
				       int size)
{
	unsigned long boundary_size = 0x1000000;

	return iommu_area_alloc(zdev->iommu_bitmap, zdev->iommu_pages,
				start, size, 0, boundary_size, 0);
}

static unsigned long dma_alloc_iommu(struct zpci_dev *zdev, int size)
{
	unsigned long offset, flags;

	spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
	offset = __dma_alloc_iommu(zdev, zdev->next_bit, size);
	if (offset == -1)
		offset = __dma_alloc_iommu(zdev, 0, size);

	if (offset != -1) {
		zdev->next_bit = offset + size;
		if (zdev->next_bit >= zdev->iommu_pages)
			zdev->next_bit = 0;
	}
	spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
	return offset;
}

static void dma_free_iommu(struct zpci_dev *zdev, unsigned long offset, int size)
{
	unsigned long flags;

	spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
	if (!zdev->iommu_bitmap)
		goto out;
	bitmap_clear(zdev->iommu_bitmap, offset, size);
	if (offset >= zdev->next_bit)
		zdev->next_bit = offset + size;
out:
	spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
}

int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL_GPL(dma_set_mask);

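/*
 * map_page implementation: reserve enough IOMMU pages for the buffer,
 * derive the DMA address from start_dma plus the allocated page index and
 * write the translation entries.  DMA_NONE and DMA_TO_DEVICE mappings are
 * created with the protection bit set, i.e. not writable by the device.
 */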
static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction direction,
				     struct dma_attrs *attrs)
{
	struct zpci_dev *zdev = get_zdev(container_of(dev, struct pci_dev, dev));
	unsigned long nr_pages, iommu_page_index;
	unsigned long pa = page_to_phys(page) + offset;
	int flags = ZPCI_PTE_VALID;
	dma_addr_t dma_addr;

	WARN_ON_ONCE(offset > PAGE_SIZE);

	/* This rounds up number of pages based on size and offset */
	nr_pages = iommu_num_pages(pa, size, PAGE_SIZE);
	iommu_page_index = dma_alloc_iommu(zdev, nr_pages);
	if (iommu_page_index == -1)
		goto out_err;

	/* Use rounded up size */
	size = nr_pages * PAGE_SIZE;

	dma_addr = zdev->start_dma + iommu_page_index * PAGE_SIZE;
	if (dma_addr + size > zdev->end_dma) {
		dev_err(dev, "(dma_addr: 0x%16.16LX + size: 0x%16.16lx) > end_dma: 0x%16.16Lx\n",
			dma_addr, size, zdev->end_dma);
		goto out_free;
	}

	if (direction == DMA_NONE || direction == DMA_TO_DEVICE)
		flags |= ZPCI_TABLE_PROTECTED;

	if (!dma_update_trans(zdev, pa, dma_addr, size, flags)) {
		atomic64_add(nr_pages, (atomic64_t *) &zdev->fmb->mapped_pages);
		return dma_addr + offset;
	}

out_free:
	dma_free_iommu(zdev, iommu_page_index, nr_pages);
out_err:
	dev_err(dev, "Failed to map addr: %lx\n", pa);
	return DMA_ERROR_CODE;
}

static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
				 size_t size, enum dma_data_direction direction,
				 struct dma_attrs *attrs)
{
	struct zpci_dev *zdev = get_zdev(container_of(dev, struct pci_dev, dev));
	unsigned long iommu_page_index;
	int npages;

	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
	dma_addr = dma_addr & PAGE_MASK;
	if (dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE,
			     ZPCI_TABLE_PROTECTED | ZPCI_PTE_INVALID))
		dev_err(dev, "Failed to unmap addr: %Lx\n", dma_addr);

	atomic64_add(npages, (atomic64_t *) &zdev->fmb->unmapped_pages);
	iommu_page_index = (dma_addr - zdev->start_dma) >> PAGE_SHIFT;
	dma_free_iommu(zdev, iommu_page_index, npages);
}

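/*
 * Coherent allocations are ordinary pages: allocate and zero them, then
 * run them through the normal map_pages path with DMA_BIDIRECTIONAL.
 */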
static void *s390_dma_alloc(struct device *dev, size_t size,
			    dma_addr_t *dma_handle, gfp_t flag,
			    struct dma_attrs *attrs)
{
	struct zpci_dev *zdev = get_zdev(container_of(dev, struct pci_dev, dev));
	struct page *page;
	unsigned long pa;
	dma_addr_t map;

	size = PAGE_ALIGN(size);
	page = alloc_pages(flag, get_order(size));
	if (!page)
		return NULL;

	atomic64_add(size / PAGE_SIZE, (atomic64_t *) &zdev->fmb->allocated_pages);
	pa = page_to_phys(page);
	memset((void *) pa, 0, size);

	map = s390_dma_map_pages(dev, page, pa % PAGE_SIZE,
				 size, DMA_BIDIRECTIONAL, NULL);
	if (dma_mapping_error(dev, map)) {
		free_pages(pa, get_order(size));
		return NULL;
	}

	if (dma_handle)
		*dma_handle = map;
	return (void *) pa;
}

static void s390_dma_free(struct device *dev, size_t size,
			  void *pa, dma_addr_t dma_handle,
			  struct dma_attrs *attrs)
{
	s390_dma_unmap_pages(dev, dma_handle, PAGE_ALIGN(size),
			     DMA_BIDIRECTIONAL, NULL);
	free_pages((unsigned long) pa, get_order(size));
}

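/*
 * Scatter-gather mapping maps each element individually; adjacent elements
 * are not merged.  If one element fails, everything mapped so far is
 * unmapped again and 0 is returned.
 */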
static int s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
			   int nr_elements, enum dma_data_direction dir,
			   struct dma_attrs *attrs)
{
	int mapped_elements = 0;
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nr_elements, i) {
		struct page *page = sg_page(s);
		s->dma_address = s390_dma_map_pages(dev, page, s->offset,
						    s->length, dir, NULL);
		if (!dma_mapping_error(dev, s->dma_address)) {
			s->dma_length = s->length;
			mapped_elements++;
		} else
			goto unmap;
	}
out:
	return mapped_elements;

unmap:
	for_each_sg(sg, s, mapped_elements, i) {
		if (s->dma_address)
			s390_dma_unmap_pages(dev, s->dma_address, s->dma_length,
					     dir, NULL);
		s->dma_address = 0;
		s->dma_length = 0;
	}
	mapped_elements = 0;
	goto out;
}

static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
			      int nr_elements, enum dma_data_direction dir,
			      struct dma_attrs *attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nr_elements, i) {
		s390_dma_unmap_pages(dev, s->dma_address, s->dma_length, dir, NULL);
		s->dma_address = 0;
		s->dma_length = 0;
	}
}

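/*
 * Per-device setup: allocate the root translation table, size the IOMMU
 * address space to cover all of memory (high_memory - PAGE_OFFSET),
 * allocate the allocation bitmap with one bit per IOMMU page and register
 * the translation table with the device via zpci_register_ioat().
 */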
int zpci_dma_init_device(struct zpci_dev *zdev)
{
	unsigned int bitmap_order;
	int rc;

	spin_lock_init(&zdev->iommu_bitmap_lock);
	spin_lock_init(&zdev->dma_table_lock);

	zdev->dma_table = dma_alloc_cpu_table();
	if (!zdev->dma_table) {
		rc = -ENOMEM;
		goto out_clean;
	}

	zdev->iommu_size = (unsigned long) high_memory - PAGE_OFFSET;
	zdev->iommu_pages = zdev->iommu_size >> PAGE_SHIFT;
	bitmap_order = get_order(zdev->iommu_pages / 8);
	pr_info("iommu_size: 0x%lx iommu_pages: 0x%lx bitmap_order: %i\n",
		zdev->iommu_size, zdev->iommu_pages, bitmap_order);

	zdev->iommu_bitmap = (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
						       bitmap_order);
	if (!zdev->iommu_bitmap) {
		rc = -ENOMEM;
		goto out_reg;
	}

	rc = zpci_register_ioat(zdev,
				0,
				zdev->start_dma + PAGE_OFFSET,
				zdev->start_dma + zdev->iommu_size - 1,
				(u64) zdev->dma_table);
	if (rc)
		goto out_reg;
	return 0;

out_reg:
	dma_free_cpu_table(zdev->dma_table);
out_clean:
	return rc;
}

void zpci_dma_exit_device(struct zpci_dev *zdev)
{
	zpci_unregister_ioat(zdev, 0);
	dma_cleanup_tables(zdev);
	free_pages((unsigned long) zdev->iommu_bitmap,
		   get_order(zdev->iommu_pages / 8));
	zdev->iommu_bitmap = NULL;
	zdev->next_bit = 0;
}

static int __init dma_alloc_cpu_table_caches(void)
{
	dma_region_table_cache = kmem_cache_create("PCI_DMA_region_tables",
					ZPCI_TABLE_SIZE, ZPCI_TABLE_ALIGN,
					0, NULL);
	if (!dma_region_table_cache)
		return -ENOMEM;

	dma_page_table_cache = kmem_cache_create("PCI_DMA_page_tables",
					ZPCI_PT_SIZE, ZPCI_PT_ALIGN,
					0, NULL);
	if (!dma_page_table_cache) {
		kmem_cache_destroy(dma_region_table_cache);
		return -ENOMEM;
	}
	return 0;
}

int __init zpci_dma_init(void)
{
	return dma_alloc_cpu_table_caches();
}

void zpci_dma_exit(void)
{
	kmem_cache_destroy(dma_page_table_cache);
	kmem_cache_destroy(dma_region_table_cache);
}

#define PREALLOC_DMA_DEBUG_ENTRIES	(1 << 16)

static int __init dma_debug_do_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(dma_debug_do_init);

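/* The dma_map_ops instance exported for use by s390 PCI devices. */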
struct dma_map_ops s390_dma_ops = {
	.alloc		= s390_dma_alloc,
	.free		= s390_dma_free,
	.map_sg		= s390_dma_map_sg,
	.unmap_sg	= s390_dma_unmap_sg,
	.map_page	= s390_dma_map_pages,
	.unmap_page	= s390_dma_unmap_pages,
	/* if we support direct DMA this must be conditional */
	.is_phys	= 0,
	/* dma_supported is unconditionally true without a callback */
};
EXPORT_SYMBOL_GPL(s390_dma_ops);