/*
 * by Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 *
 * This code provides an IOMMU for Xen PV guests with PCI passthrough.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License v2.0 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * PV guests under Xen run in a non-contiguous memory architecture.
 *
 * When PCI pass-through is utilized, this necessitates an IOMMU for
 * translating bus (DMA) addresses to virtual addresses and vice versa, and
 * also a mechanism to provide contiguous pages for device driver operations
 * (say DMA operations).
 *
 * Specifically, under Xen the Linux idea of pages is an illusion.  It
 * assumes that pages start at zero and go up to the available memory.  To
 * help with that, the Linux Xen MMU provides a lookup mechanism to
 * translate the page frame numbers (PFN) to machine frame numbers (MFN)
 * and vice versa.  The MFNs are the "real" frame numbers.  Furthermore,
 * memory is not contiguous: the Xen hypervisor stitches memory for guests
 * from different pools, which means there is no guarantee that PFN==MFN
 * and PFN+1==MFN+1.  Lastly, with Xen 4.0, pages (in debug mode) are
 * allocated in descending order (high to low), meaning the guest might
 * never get any MFNs under the 4GB mark.  (A small helper sketching this
 * PFN/MFN lookup follows the #include lines below.)
 */
#include <linux/bootmem.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <xen/swiotlb-xen.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/hvc-console.h>
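
/*
 * Illustrative sketch only: a hypothetical helper, not used by the rest of
 * this file, showing the PFN/MFN lookup discussed in the header comment
 * above.  It assumes the usual pfn_to_mfn() from <xen/page.h>.  Two
 * adjacent pseudo-physical frames are only safe to hand to a device as one
 * DMA region if their machine frames are adjacent as well.
 */
static inline bool xen_example_pfns_machine_contiguous(unsigned long pfn)
{
	/* PFN==MFN and PFN+1==MFN+1 cannot be assumed under Xen. */
	return pfn_to_mfn(pfn) + 1 == pfn_to_mfn(pfn + 1);
}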
/*
 * Used to do a quick range check in swiotlb_tbl_unmap_single and
 * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by
 * this API.
 */
static char *xen_io_tlb_start, *xen_io_tlb_end;
static unsigned long xen_io_tlb_nslabs;

/*
 * Quick lookup value of the bus address of the IOTLB.
 */
static u64 start_dma_addr;
static dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
{
	return phys_to_machine(XPADDR(paddr)).maddr;
}

static phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
{
	return machine_to_phys(XMADDR(baddr)).paddr;
}

static dma_addr_t xen_virt_to_bus(void *address)
{
	return xen_phys_to_bus(virt_to_phys(address));
}
static int check_pages_physically_contiguous(unsigned long pfn,
					     unsigned int offset,
					     size_t length)
{
	unsigned long next_mfn;
	int i, nr_pages;

	next_mfn = pfn_to_mfn(pfn);
	nr_pages = (offset + length + PAGE_SIZE - 1) >> PAGE_SHIFT;
	for (i = 1; i < nr_pages; i++) {
		if (pfn_to_mfn(++pfn) != ++next_mfn)
			return 0;
	}
	return 1;
}

static int range_straddles_page_boundary(phys_addr_t p, size_t size)
{
	unsigned long pfn = PFN_DOWN(p);
	unsigned int offset = p & ~PAGE_MASK;

	if (offset + size <= PAGE_SIZE)
		return 0;
	if (check_pages_physically_contiguous(pfn, offset, size))
		return 0;
	return 1;
}
static int is_xen_swiotlb_buffer(dma_addr_t dma_addr)
{
	unsigned long mfn = PFN_DOWN(dma_addr);
	unsigned long pfn = mfn_to_local_pfn(mfn);
	phys_addr_t paddr;

	/* If the address is outside our domain, it CAN
	 * have the same virtual address as another address
	 * in our domain.  Therefore _only_ check addresses within our domain.
	 */
	if (pfn_valid(pfn)) {
		paddr = PFN_PHYS(pfn);
		return paddr >= virt_to_phys(xen_io_tlb_start) &&
		       paddr < virt_to_phys(xen_io_tlb_end);
	}
	return 0;
}
static int max_dma_bits = 32;

static int
xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
{
	int i = 0, rc;
	int dma_bits;

	dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;
	do {
		int slabs = min(nslabs - i, (unsigned long)IO_TLB_SEGSIZE);

		do {
			rc = xen_create_contiguous_region(
				(unsigned long)buf + (i << IO_TLB_SHIFT),
				get_order(slabs << IO_TLB_SHIFT),
				dma_bits);
		} while (rc && dma_bits++ < max_dma_bits);
		if (rc)
			return rc;
		i += slabs;
	} while (i < nslabs);
	return 0;
}
static unsigned long xen_set_nslabs(unsigned long nr_tbl)
{
	if (!nr_tbl) {
		xen_io_tlb_nslabs = (64 * 1024 * 1024 >> IO_TLB_SHIFT);
		xen_io_tlb_nslabs = ALIGN(xen_io_tlb_nslabs, IO_TLB_SEGSIZE);
	} else
		xen_io_tlb_nslabs = nr_tbl;

	return xen_io_tlb_nslabs << IO_TLB_SHIFT;
}
enum xen_swiotlb_err {
	XEN_SWIOTLB_UNKNOWN = 0,
	XEN_SWIOTLB_ENOMEM,
	XEN_SWIOTLB_EFIXUP
};

static const char *xen_swiotlb_error(enum xen_swiotlb_err err)
{
	switch (err) {
	case XEN_SWIOTLB_ENOMEM:
		return "Cannot allocate Xen-SWIOTLB buffer\n";
	case XEN_SWIOTLB_EFIXUP:
		return "Failed to get contiguous memory for DMA from Xen!\n"
		       "You either: don't have the permissions, do not have"
		       " enough free memory under 4GB, or the hypervisor memory"
		       " is too fragmented!";
	default:
		break;
	}
	return "";
}
void __init xen_swiotlb_init(int verbose)
{
	unsigned long bytes;
	int rc = -ENOMEM;
	enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN;
	unsigned int repeat = 3;

	xen_io_tlb_nslabs = swiotlb_nr_tbl();
retry:
	bytes = xen_set_nslabs(xen_io_tlb_nslabs);
	/*
	 * Get IO TLB memory from any location.
	 */
	xen_io_tlb_start = alloc_bootmem_pages(PAGE_ALIGN(bytes));
	if (!xen_io_tlb_start) {
		m_ret = XEN_SWIOTLB_ENOMEM;
		goto error;
	}
	xen_io_tlb_end = xen_io_tlb_start + bytes;
	/*
	 * And replace that memory with pages under 4GB.
	 */
	rc = xen_swiotlb_fixup(xen_io_tlb_start,
			       bytes,
			       xen_io_tlb_nslabs);
	if (rc) {
		free_bootmem(__pa(xen_io_tlb_start), PAGE_ALIGN(bytes));
		m_ret = XEN_SWIOTLB_EFIXUP;
		goto error;
	}
	start_dma_addr = xen_virt_to_bus(xen_io_tlb_start);
	swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs, verbose);

	return;
error:
	if (repeat--) {
		/* Retry with a smaller buffer. */
		xen_io_tlb_nslabs = max(1024UL, /* Min is 2MB */
					(xen_io_tlb_nslabs >> 1));
		printk(KERN_INFO "Xen-SWIOTLB: Lowering to %luMB\n",
		       (xen_io_tlb_nslabs << IO_TLB_SHIFT) >> 20);
		goto retry;
	}
	xen_raw_printk("%s (rc:%d)", xen_swiotlb_error(m_ret), rc);
	panic("%s (rc:%d)", xen_swiotlb_error(m_ret), rc);
}
void *
xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
			   dma_addr_t *dma_handle, gfp_t flags,
			   struct dma_attrs *attrs)
{
	void *ret;
	int order = get_order(size);
	u64 dma_mask = DMA_BIT_MASK(32);
	unsigned long vstart;
	phys_addr_t phys;
	dma_addr_t dev_addr;

	/*
	 * Ignore region specifiers - the kernel's idea of
	 * pseudo-phys memory layout has nothing to do with the
	 * machine physical layout.  We can't allocate highmem
	 * because we can't return a pointer to it.
	 */
	flags &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (dma_alloc_from_coherent(hwdev, size, dma_handle, &ret))
		return ret;

	vstart = __get_free_pages(flags, order);
	ret = (void *)vstart;
	if (!ret)
		return ret;

	if (hwdev && hwdev->coherent_dma_mask)
		dma_mask = hwdev->coherent_dma_mask;

	phys = virt_to_phys(ret);
	dev_addr = xen_phys_to_bus(phys);
	if ((dev_addr + size - 1 <= dma_mask) &&
	    !range_straddles_page_boundary(phys, size))
		*dma_handle = dev_addr;
	else {
		if (xen_create_contiguous_region(vstart, order,
						 fls64(dma_mask)) != 0) {
			free_pages(vstart, order);
			return NULL;
		}
		*dma_handle = virt_to_machine(ret).maddr;
	}
	memset(ret, 0, size);
	return ret;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_alloc_coherent);
void
xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
			  dma_addr_t dev_addr, struct dma_attrs *attrs)
{
	int order = get_order(size);
	phys_addr_t phys;
	u64 dma_mask = DMA_BIT_MASK(32);

	if (dma_release_from_coherent(hwdev, order, vaddr))
		return;

	if (hwdev && hwdev->coherent_dma_mask)
		dma_mask = hwdev->coherent_dma_mask;

	phys = virt_to_phys(vaddr);

	if ((dev_addr + size - 1 > dma_mask) ||
	    range_straddles_page_boundary(phys, size))
		xen_destroy_contiguous_region((unsigned long)vaddr, order);

	free_pages((unsigned long)vaddr, order);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_free_coherent);
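
/*
 * Hypothetical driver-side sketch for the coherent API above; the device,
 * length and "ring" buffer are placeholders, not part of this file.
 * dma_alloc_coherent() returns both a kernel virtual address and a bus
 * address that remain consistent for the lifetime of the buffer, which on
 * Xen is backed by the machine-contiguous allocation done above.
 */
static void *__maybe_unused xen_example_alloc_ring(struct device *dev,
						   size_t len,
						   dma_addr_t *bus_addr)
{
	void *ring = dma_alloc_coherent(dev, len, bus_addr, GFP_KERNEL);

	if (!ring)
		return NULL;
	/* ... set up descriptors; release later with
	 * dma_free_coherent(dev, len, ring, *bus_addr) ... */
	return ring;
}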
/*
 * Map a single buffer of the indicated size for DMA in streaming mode.  The
 * dma address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either xen_swiotlb_unmap_page or xen_swiotlb_dma_sync_single is performed.
 * (A hypothetical driver-side usage sketch follows this function.)
 */
dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
				unsigned long offset, size_t size,
				enum dma_data_direction dir,
				struct dma_attrs *attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	dma_addr_t dev_addr = xen_phys_to_bus(phys);
	void *map;

	BUG_ON(dir == DMA_NONE);
	/*
	 * If the address happens to be in the device's DMA window,
	 * we can safely return the device addr and not worry about bounce
	 * buffering it.
	 */
	if (dma_capable(dev, dev_addr, size) &&
	    !range_straddles_page_boundary(phys, size) && !swiotlb_force)
		return dev_addr;

	/*
	 * Oh well, have to allocate and map a bounce buffer.
	 */
	map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir);
	if (!map)
		return DMA_ERROR_CODE;

	dev_addr = xen_virt_to_bus(map);

	/*
	 * Ensure that the address returned is DMA'ble.
	 */
	if (!dma_capable(dev, dev_addr, size)) {
		swiotlb_tbl_unmap_single(dev, map, size, dir);
		dev_addr = 0;
	}
	return dev_addr;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_map_page);
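
/*
 * Hypothetical driver-side usage sketch for the streaming API above; the
 * device and page are placeholders, not part of this file.  With the Xen
 * SWIOTLB DMA ops installed, the generic dma_map_page()/dma_unmap_page()
 * calls below are routed to xen_swiotlb_map_page()/xen_swiotlb_unmap_page().
 */
static int __maybe_unused xen_example_map_one_page(struct device *dev,
						   struct page *page)
{
	dma_addr_t handle;

	handle = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;
	/* ... program the device with "handle"; the device owns the buffer
	 * until it is unmapped or synced back to the CPU ... */
	dma_unmap_page(dev, handle, PAGE_SIZE, DMA_TO_DEVICE);
	return 0;
}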
/*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size must
 * match what was provided for in a previous xen_swiotlb_map_page call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
			     size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = xen_bus_to_phys(dev_addr);

	BUG_ON(dir == DMA_NONE);

	/* NOTE: We use dev_addr here, not paddr! */
	if (is_xen_swiotlb_buffer(dev_addr)) {
		swiotlb_tbl_unmap_single(hwdev, phys_to_virt(paddr), size, dir);
		return;
	}

	if (dir != DMA_FROM_DEVICE)
		return;

	/*
	 * phys_to_virt doesn't work with highmem pages, but we could
	 * call dma_mark_clean() with a highmem page here.  However, we
	 * are fine since dma_mark_clean() is a no-op on POWERPC.  We can
	 * make dma_mark_clean() take a physical address if necessary.
	 */
	dma_mark_clean(phys_to_virt(paddr), size);
}
void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
			    size_t size, enum dma_data_direction dir,
			    struct dma_attrs *attrs)
{
	xen_unmap_single(hwdev, dev_addr, size, dir);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_page);
/*
 * Make physical memory consistent for a single streaming mode DMA translation
 * after a transfer.
 *
 * If you perform a xen_swiotlb_map_page() but wish to interrogate the buffer
 * using the cpu, yet do not wish to teardown the dma mapping, you must
 * call this function before doing so.  At the next point you give the dma
 * address back to the card, you must first perform a
 * xen_swiotlb_dma_sync_for_device, and then the device again owns the buffer.
 * (A hypothetical driver-side sketch of this sequence follows the two
 * wrappers below.)
 */
static void
xen_swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
			size_t size, enum dma_data_direction dir,
			enum dma_sync_target target)
{
	phys_addr_t paddr = xen_bus_to_phys(dev_addr);

	BUG_ON(dir == DMA_NONE);

	/* NOTE: We use dev_addr here, not paddr! */
	if (is_xen_swiotlb_buffer(dev_addr)) {
		swiotlb_tbl_sync_single(hwdev, phys_to_virt(paddr), size, dir,
					target);
		return;
	}

	if (dir != DMA_FROM_DEVICE)
		return;

	dma_mark_clean(phys_to_virt(paddr), size);
}

void
xen_swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
				size_t size, enum dma_data_direction dir)
{
	xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_single_for_cpu);

void
xen_swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
				   size_t size, enum dma_data_direction dir)
{
	xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_single_for_device);
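
/*
 * Hypothetical driver-side sketch of the ownership hand-over described in
 * the comment above; the device, handle and length are placeholders.  The
 * buffer is synced to the CPU before it is inspected and handed back to the
 * device afterwards.
 */
static void __maybe_unused xen_example_peek_at_buffer(struct device *dev,
						      dma_addr_t handle,
						      size_t len)
{
	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
	/* ... the CPU may now safely read the data the device wrote ... */
	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
	/* ... the device owns the buffer again ... */
}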
/*
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the above xen_swiotlb_map_page
 * interface.  Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for xen_swiotlb_map_page are the
 * same here.  (A hypothetical driver-side sketch follows
 * xen_swiotlb_unmap_sg below.)
 */
int
xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
			 int nelems, enum dma_data_direction dir,
			 struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i) {
		phys_addr_t paddr = sg_phys(sg);
		dma_addr_t dev_addr = xen_phys_to_bus(paddr);

		if (swiotlb_force ||
		    !dma_capable(hwdev, dev_addr, sg->length) ||
		    range_straddles_page_boundary(paddr, sg->length)) {
			void *map = swiotlb_tbl_map_single(hwdev,
							   start_dma_addr,
							   paddr,
							   sg->length,
							   dir);
			if (!map) {
				/* Don't panic here, we expect map_sg users
				   to do proper error handling. */
				xen_swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
							   attrs);
				sgl[0].dma_length = 0;
				return DMA_ERROR_CODE;
			}
			sg->dma_address = xen_virt_to_bus(map);
		} else
			sg->dma_address = dev_addr;
		sg->dma_length = sg->length;
	}
	return nelems;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_map_sg_attrs);
int
xen_swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
		   enum dma_data_direction dir)
{
	return xen_swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_map_sg);
/*
 * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
 * concerning calls here are the same as for swiotlb_unmap_page() above.
 */
void
xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
			   int nelems, enum dma_data_direction dir,
			   struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i)
		xen_unmap_single(hwdev, sg->dma_address, sg->dma_length, dir);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_sg_attrs);
void
xen_swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
		     enum dma_data_direction dir)
{
	xen_swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_sg);
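
/*
 * Hypothetical driver-side sketch for the scatter-gather calls above; the
 * device and scatterlist are placeholders.  dma_map_sg() may return fewer
 * mapped entries than it was given, so only the returned count is walked,
 * while dma_unmap_sg() must be passed the original nents.
 */
static int __maybe_unused xen_example_map_sg(struct device *dev,
					     struct scatterlist *sgl,
					     int nents)
{
	struct scatterlist *sg;
	int i, mapped;

	mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
	if (!mapped)
		return -ENOMEM;

	for_each_sg(sgl, sg, mapped, i) {
		/* ... hand sg_dma_address(sg) and sg_dma_length(sg) to the
		 * device ... */
	}

	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
	return 0;
}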
/*
 * Make physical memory consistent for a set of streaming mode DMA translations
 * after a transfer.
 *
 * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
 * and usage.
 */
static void
xen_swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
		    int nelems, enum dma_data_direction dir,
		    enum dma_sync_target target)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i)
		xen_swiotlb_sync_single(hwdev, sg->dma_address,
					sg->dma_length, dir, target);
}

void
xen_swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
			    int nelems, enum dma_data_direction dir)
{
	xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_sg_for_cpu);

void
xen_swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
			       int nelems, enum dma_data_direction dir)
{
	xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_sg_for_device);
int
xen_swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
	return !dma_addr;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_dma_mapping_error);
/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
int
xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
	return xen_virt_to_bus(xen_io_tlb_end - 1) <= mask;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_dma_supported);
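
/*
 * Hypothetical driver-side sketch for the mask check above; the device is a
 * placeholder.  A device that can only drive the low 24 bits asks for that
 * mask first (dma_set_mask() typically consults the DMA ops' dma_supported
 * hook, i.e. xen_swiotlb_dma_supported() here) and otherwise falls back to
 * ordinary 32-bit addressing.
 */
static int __maybe_unused xen_example_set_mask(struct device *dev)
{
	if (dma_set_mask(dev, DMA_BIT_MASK(24)) == 0)
		return 0;
	/* 24-bit DMA is not possible; try 32-bit addressing instead. */
	return dma_set_mask(dev, DMA_BIT_MASK(32));
}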