/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <jang@linux.vnet.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/iommu-helper.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <linux/pci.h>
#include <asm/pci_dma.h>

static struct kmem_cache *dma_region_table_cache;
static struct kmem_cache *dma_page_table_cache;
static int s390_iommu_strict;
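
/*
 * Refresh (RPCIT) the translations for the device's whole DMA aperture,
 * i.e. flush all of its cached DMA translations.
 */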
static int zpci_refresh_global(struct zpci_dev *zdev)
{
	return zpci_refresh_trans((u64) zdev->fh << 32, zdev->start_dma,
				  zdev->iommu_pages * PAGE_SIZE);
}

unsigned long *dma_alloc_cpu_table(void)
{
	unsigned long *table, *entry;

	table = kmem_cache_alloc(dma_region_table_cache, GFP_ATOMIC);
	if (!table)
		return NULL;

	for (entry = table; entry < table + ZPCI_TABLE_ENTRIES; entry++)
		*entry = ZPCI_TABLE_INVALID;
	return table;
}

static void dma_free_cpu_table(void *table)
{
	kmem_cache_free(dma_region_table_cache, table);
}

static unsigned long *dma_alloc_page_table(void)
{
	unsigned long *table, *entry;

	table = kmem_cache_alloc(dma_page_table_cache, GFP_ATOMIC);
	if (!table)
		return NULL;

	for (entry = table; entry < table + ZPCI_PT_ENTRIES; entry++)
		*entry = ZPCI_PTE_INVALID;
	return table;
}

static void dma_free_page_table(void *table)
{
	kmem_cache_free(dma_page_table_cache, table);
}

static unsigned long *dma_get_seg_table_origin(unsigned long *entry)
{
	unsigned long *sto;

	if (reg_entry_isvalid(*entry))
		sto = get_rt_sto(*entry);
	else {
		sto = dma_alloc_cpu_table();
		if (!sto)
			return NULL;
		set_rt_sto(entry, sto);
		validate_rt_entry(entry);
		entry_clr_protected(entry);
	}
	return sto;
}

static unsigned long *dma_get_page_table_origin(unsigned long *entry)
{
	unsigned long *pto;

	if (reg_entry_isvalid(*entry))
		pto = get_st_pto(*entry);
	else {
		pto = dma_alloc_page_table();
		if (!pto)
			return NULL;
		set_st_pto(entry, pto);
		validate_st_entry(entry);
		entry_clr_protected(entry);
	}
	return pto;
}
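
/*
 * Walk the three-level CPU translation table (region table -> segment
 * table -> page table) for dma_addr, allocating missing intermediate
 * tables on the way, and return a pointer to the page-table entry.
 */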
unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr)
{
	unsigned long *sto, *pto;
	unsigned int rtx, sx, px;

	rtx = calc_rtx(dma_addr);
	sto = dma_get_seg_table_origin(&rto[rtx]);
	if (!sto)
		return NULL;

	sx = calc_sx(dma_addr);
	pto = dma_get_page_table_origin(&sto[sx]);
	if (!pto)
		return NULL;

	px = calc_px(dma_addr);
	return &pto[px];
}

void dma_update_cpu_trans(unsigned long *entry, void *page_addr, int flags)
{
	if (flags & ZPCI_PTE_INVALID) {
		invalidate_pt_entry(entry);
	} else {
		set_pt_pfaa(entry, page_addr);
		validate_pt_entry(entry);
	}

	if (flags & ZPCI_TABLE_PROTECTED)
		entry_set_protected(entry);
	else
		entry_clr_protected(entry);
}
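
/*
 * Map or invalidate the translation-table entries covering [dma_addr,
 * dma_addr + size) and refresh the device's cached translations (RPCIT)
 * when required.
 */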
static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
			    dma_addr_t dma_addr, size_t size, int flags)
{
	unsigned int nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	u8 *page_addr = (u8 *) (pa & PAGE_MASK);
	dma_addr_t start_dma_addr = dma_addr;
	unsigned long irq_flags;
	unsigned long *entry;
	int i, rc = 0;

	if (!nr_pages)
		return -EINVAL;

	spin_lock_irqsave(&zdev->dma_table_lock, irq_flags);
	if (!zdev->dma_table) {
		rc = -EINVAL;
		goto no_refresh;
	}

	for (i = 0; i < nr_pages; i++) {
		entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr);
		if (!entry) {
			rc = -ENOMEM;
			goto undo_cpu_trans;
		}
		dma_update_cpu_trans(entry, page_addr, flags);
		page_addr += PAGE_SIZE;
		dma_addr += PAGE_SIZE;
	}

	/*
	 * With zdev->tlb_refresh == 0, rpcit is not required to establish new
	 * translations when previously invalid translation-table entries are
	 * validated. With lazy unmap, it also is skipped for previously valid
	 * entries, but a global rpcit is then required before any address can
	 * be re-used, i.e. after each iommu bitmap wrap-around.
	 */
	if (!zdev->tlb_refresh &&
	    (!s390_iommu_strict ||
	     ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)))
		goto no_refresh;

	rc = zpci_refresh_trans((u64) zdev->fh << 32, start_dma_addr,
				nr_pages * PAGE_SIZE);
undo_cpu_trans:
	if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)) {
		flags = ZPCI_PTE_INVALID;
		while (i-- > 0) {
			page_addr -= PAGE_SIZE;
			dma_addr -= PAGE_SIZE;
			entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr);
			if (!entry)
				break;
			dma_update_cpu_trans(entry, page_addr, flags);
		}
	}

no_refresh:
	spin_unlock_irqrestore(&zdev->dma_table_lock, irq_flags);
	return rc;
}

void dma_free_seg_table(unsigned long entry)
{
	unsigned long *sto = get_rt_sto(entry);
	int sx;

	for (sx = 0; sx < ZPCI_TABLE_ENTRIES; sx++)
		if (reg_entry_isvalid(sto[sx]))
			dma_free_page_table(get_st_pto(sto[sx]));

	dma_free_cpu_table(sto);
}

void dma_cleanup_tables(unsigned long *table)
{
	int rtx;

	if (!table)
		return;

	for (rtx = 0; rtx < ZPCI_TABLE_ENTRIES; rtx++)
		if (reg_entry_isvalid(table[rtx]))
			dma_free_seg_table(table[rtx]);

	dma_free_cpu_table(table);
}
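
/*
 * Bitmap-based DMA address allocation: find a free range of "size" iommu
 * pages, starting the search at "start" and honouring the device's DMA
 * segment boundary.
 */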
static unsigned long __dma_alloc_iommu(struct zpci_dev *zdev,
				       unsigned long start, int size)
{
	unsigned long boundary_size;

	boundary_size = ALIGN(dma_get_seg_boundary(&zdev->pdev->dev) + 1,
			      PAGE_SIZE) >> PAGE_SHIFT;
	return iommu_area_alloc(zdev->iommu_bitmap, zdev->iommu_pages,
				start, size, 0, boundary_size, 0);
}

static unsigned long dma_alloc_iommu(struct zpci_dev *zdev, int size)
{
	unsigned long offset, flags;
	int wrap = 0;

	spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
	offset = __dma_alloc_iommu(zdev, zdev->next_bit, size);
	if (offset == -1) {
		/* wrap-around */
		offset = __dma_alloc_iommu(zdev, 0, size);
		wrap = 1;
	}

	if (offset != -1) {
		zdev->next_bit = offset + size;
		if (!zdev->tlb_refresh && !s390_iommu_strict && wrap)
			/* global flush after wrap-around with lazy unmap */
			zpci_refresh_global(zdev);
	}
	spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
	return offset;
}

static void dma_free_iommu(struct zpci_dev *zdev, unsigned long offset, int size)
{
	unsigned long flags;

	spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
	if (!zdev->iommu_bitmap)
		goto out;
	bitmap_clear(zdev->iommu_bitmap, offset, size);
	/*
	 * Lazy flush for unmap: need to move next_bit to avoid address re-use
	 * until wrap-around.
	 */
	if (!s390_iommu_strict && offset >= zdev->next_bit)
		zdev->next_bit = offset + size;
out:
	spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
}

static inline void zpci_err_dma(unsigned long rc, unsigned long addr)
{
	struct {
		unsigned long rc;
		unsigned long addr;
	} __packed data = {rc, addr};

	zpci_err_hex(&data, sizeof(data));
}
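
/*
 * map_page callback of s390_dma_ops: allocate a DMA address range from the
 * iommu bitmap, point the covering translation-table entries at the page,
 * and return the resulting bus address for the requested offset.
 */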
static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction direction,
				     struct dma_attrs *attrs)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	unsigned long nr_pages, iommu_page_index;
	unsigned long pa = page_to_phys(page) + offset;
	int flags = ZPCI_PTE_VALID;
	dma_addr_t dma_addr;
	int ret;

	/* This rounds up number of pages based on size and offset */
	nr_pages = iommu_num_pages(pa, size, PAGE_SIZE);
	iommu_page_index = dma_alloc_iommu(zdev, nr_pages);
	if (iommu_page_index == -1) {
		ret = -ENOSPC;
		goto out_err;
	}

	/* Use rounded up size */
	size = nr_pages * PAGE_SIZE;

	dma_addr = zdev->start_dma + iommu_page_index * PAGE_SIZE;
	if (dma_addr + size > zdev->end_dma) {
		ret = -ERANGE;
		goto out_free;
	}

	if (direction == DMA_NONE || direction == DMA_TO_DEVICE)
		flags |= ZPCI_TABLE_PROTECTED;

	ret = dma_update_trans(zdev, pa, dma_addr, size, flags);
	if (ret)
		goto out_free;

	atomic64_add(nr_pages, &zdev->mapped_pages);
	return dma_addr + (offset & ~PAGE_MASK);

out_free:
	dma_free_iommu(zdev, iommu_page_index, nr_pages);
out_err:
	zpci_err("map error:\n");
	zpci_err_dma(ret, pa);
	return DMA_ERROR_CODE;
}

static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
				 size_t size, enum dma_data_direction direction,
				 struct dma_attrs *attrs)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	unsigned long iommu_page_index;
	int npages, ret;

	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
	dma_addr = dma_addr & PAGE_MASK;
	ret = dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE,
			       ZPCI_PTE_INVALID);
	if (ret) {
		zpci_err("unmap error:\n");
		zpci_err_dma(ret, dma_addr);
		return;
	}

	atomic64_add(npages, &zdev->unmapped_pages);
	iommu_page_index = (dma_addr - zdev->start_dma) >> PAGE_SHIFT;
	dma_free_iommu(zdev, iommu_page_index, npages);
}
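
/*
 * alloc callback of s390_dma_ops: coherent memory is ordinary zeroed pages
 * mapped through the same translation-table path as streaming DMA.
 */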
static void *s390_dma_alloc(struct device *dev, size_t size,
			    dma_addr_t *dma_handle, gfp_t flag,
			    struct dma_attrs *attrs)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	struct page *page;
	unsigned long pa;
	dma_addr_t map;

	size = PAGE_ALIGN(size);
	page = alloc_pages(flag, get_order(size));
	if (!page)
		return NULL;

	pa = page_to_phys(page);
	memset((void *) pa, 0, size);

	map = s390_dma_map_pages(dev, page, pa % PAGE_SIZE,
				 size, DMA_BIDIRECTIONAL, NULL);
	if (dma_mapping_error(dev, map)) {
		free_pages(pa, get_order(size));
		return NULL;
	}

	atomic64_add(size / PAGE_SIZE, &zdev->allocated_pages);
	if (dma_handle)
		*dma_handle = map;
	return (void *) pa;
}

static void s390_dma_free(struct device *dev, size_t size,
			  void *pa, dma_addr_t dma_handle,
			  struct dma_attrs *attrs)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));

	size = PAGE_ALIGN(size);
	atomic64_sub(size / PAGE_SIZE, &zdev->allocated_pages);
	s390_dma_unmap_pages(dev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
	free_pages((unsigned long) pa, get_order(size));
}
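
/*
 * map_sg callback of s390_dma_ops: each scatterlist element is mapped
 * individually; on failure everything mapped so far is unwound again.
 */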
static int s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
			   int nr_elements, enum dma_data_direction dir,
			   struct dma_attrs *attrs)
{
	int mapped_elements = 0;
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nr_elements, i) {
		struct page *page = sg_page(s);
		s->dma_address = s390_dma_map_pages(dev, page, s->offset,
						    s->length, dir, NULL);
		if (!dma_mapping_error(dev, s->dma_address)) {
			s->dma_length = s->length;
			mapped_elements++;
		} else
			goto unmap;
	}
out:
	return mapped_elements;

unmap:
	for_each_sg(sg, s, mapped_elements, i) {
		if (s->dma_address)
			s390_dma_unmap_pages(dev, s->dma_address, s->dma_length,
					     dir, NULL);
		s->dma_address = 0;
		s->dma_length = 0;
	}
	mapped_elements = 0;
	goto out;
}

static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
			      int nr_elements, enum dma_data_direction dir,
			      struct dma_attrs *attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nr_elements, i) {
		s390_dma_unmap_pages(dev, s->dma_address, s->dma_length, dir, NULL);
		s->dma_address = 0;
		s->dma_length = 0;
	}
}
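
/*
 * Set up DMA translation for a zPCI device: allocate the root translation
 * table and the iommu allocation bitmap, then register the table with the
 * hardware (ioat).
 */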
int zpci_dma_init_device(struct zpci_dev *zdev)
{
	int rc;

	/*
	 * At this point, if the device is part of an IOMMU domain, this would
	 * be a strong hint towards a bug in the IOMMU API (common) code and/or
	 * simultaneous access via IOMMU and DMA API. So let's issue a warning.
	 */
	WARN_ON(zdev->s390_domain);

	spin_lock_init(&zdev->iommu_bitmap_lock);
	spin_lock_init(&zdev->dma_table_lock);

	zdev->dma_table = dma_alloc_cpu_table();
	if (!zdev->dma_table) {
		rc = -ENOMEM;
		goto out;
	}

	zdev->iommu_size = (unsigned long) high_memory - PAGE_OFFSET;
	zdev->iommu_pages = zdev->iommu_size >> PAGE_SHIFT;
	zdev->iommu_bitmap = vzalloc(zdev->iommu_pages / 8);
	if (!zdev->iommu_bitmap) {
		rc = -ENOMEM;
		goto free_dma_table;
	}

	rc = zpci_register_ioat(zdev, 0,
				zdev->start_dma + PAGE_OFFSET,
				zdev->start_dma + zdev->iommu_size - 1,
				(u64) zdev->dma_table);
	if (rc)
		goto free_bitmap;

	return 0;
free_bitmap:
	vfree(zdev->iommu_bitmap);
	zdev->iommu_bitmap = NULL;
free_dma_table:
	dma_free_cpu_table(zdev->dma_table);
	zdev->dma_table = NULL;
out:
	return rc;
}

void zpci_dma_exit_device(struct zpci_dev *zdev)
{
	/*
	 * At this point, if the device is part of an IOMMU domain, this would
	 * be a strong hint towards a bug in the IOMMU API (common) code and/or
	 * simultaneous access via IOMMU and DMA API. So let's issue a warning.
	 */
	WARN_ON(zdev->s390_domain);

	zpci_unregister_ioat(zdev, 0);
	dma_cleanup_tables(zdev->dma_table);
	zdev->dma_table = NULL;
	vfree(zdev->iommu_bitmap);
	zdev->iommu_bitmap = NULL;
	zdev->next_bit = 0;
}
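
/*
 * Region/segment tables and page tables come from dedicated slab caches so
 * that every table has the required ZPCI_TABLE_SIZE/ZPCI_TABLE_ALIGN resp.
 * ZPCI_PT_SIZE/ZPCI_PT_ALIGN size and alignment.
 */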
static int __init dma_alloc_cpu_table_caches(void)
{
	dma_region_table_cache = kmem_cache_create("PCI_DMA_region_tables",
					ZPCI_TABLE_SIZE, ZPCI_TABLE_ALIGN,
					0, NULL);
	if (!dma_region_table_cache)
		return -ENOMEM;

	dma_page_table_cache = kmem_cache_create("PCI_DMA_page_tables",
					ZPCI_PT_SIZE, ZPCI_PT_ALIGN,
					0, NULL);
	if (!dma_page_table_cache) {
		kmem_cache_destroy(dma_region_table_cache);
		return -ENOMEM;
	}
	return 0;
}

int __init zpci_dma_init(void)
{
	return dma_alloc_cpu_table_caches();
}

void zpci_dma_exit(void)
{
	kmem_cache_destroy(dma_page_table_cache);
	kmem_cache_destroy(dma_region_table_cache);
}

#define PREALLOC_DMA_DEBUG_ENTRIES	(1 << 16)

static int __init dma_debug_do_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(dma_debug_do_init);

struct dma_map_ops s390_dma_ops = {
	.alloc		= s390_dma_alloc,
	.free		= s390_dma_free,
	.map_sg		= s390_dma_map_sg,
	.unmap_sg	= s390_dma_unmap_sg,
	.map_page	= s390_dma_map_pages,
	.unmap_page	= s390_dma_unmap_pages,
	/* if we support direct DMA this must be conditional */
	.is_phys	= 0,
	/* dma_supported is unconditionally true without a callback */
};
EXPORT_SYMBOL_GPL(s390_dma_ops);
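
/*
 * "s390_iommu=strict" on the kernel command line disables lazy unmapping:
 * translation entries are then flushed on every unmap instead of only at
 * iommu bitmap wrap-around.
 */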
static int __init s390_iommu_setup(char *str)
{
	if (!strncmp(str, "strict", 6))
		s390_iommu_strict = 1;
	return 0;
}

__setup("s390_iommu=", s390_iommu_setup);