Dom0 is not actually capable of issuing outer_inv_range or outer_clean_range calls.

Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Acked-by: Ian Campbell <ian.campbell@citrix.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>

#include <linux/cpu.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/highmem.h>

#include <xen/features.h>
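
/*
 * Cache maintenance helpers for the 32-bit ARM Xen guest DMA path.
 * These operate on the inner (CPU) caches only: the native ARM code
 * pairs dmac_{map,unmap}_area with outer_clean_range()/outer_inv_range(),
 * but dom0 has no access to the outer cache controller, so no outer
 * maintenance is attempted here.
 */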
/* functions called by SWIOTLB */

static void dma_cache_maint(dma_addr_t handle, unsigned long offset,
	size_t size, enum dma_data_direction dir,
	void (*op)(const void *, size_t, int))
{
	unsigned long pfn;
	size_t left = size;
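
	/*
	 * Split the bus address into a starting pfn plus the offset into
	 * that first page; the loop below then walks one pfn at a time.
	 */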
	pfn = (handle >> PAGE_SHIFT) + offset / PAGE_SIZE;
	offset %= PAGE_SIZE;
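
	/*
	 * Highmem pages have no permanent kernel mapping, so each such
	 * page must be mapped before the cache op can be applied to it.
	 */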
	do {
		size_t len = left;
		void *vaddr;

		/* No struct page for this pfn: it is likely a foreign
		 * (e.g. grant-mapped) page, which is not handled yet. */
		if (!pfn_valid(pfn)) {
			/* TODO: cache flush */
		} else {
			struct page *page = pfn_to_page(pfn);

			if (PageHighMem(page)) {
				/* Clamp to the end of this page; the
				 * rest is done on later iterations. */
				if (len + offset > PAGE_SIZE)
					len = PAGE_SIZE - offset;

				if (cache_is_vipt_nonaliasing()) {
					/* Non-aliasing cache: any
					 * temporary mapping will do. */
					vaddr = kmap_atomic(page);
					op(vaddr + offset, len, dir);
					kunmap_atomic(vaddr);
				} else {
					/* Aliasing cache: only flush
					 * through an existing kernel
					 * mapping, if there is one. */
					vaddr = kmap_high_get(page);
					if (vaddr) {
						op(vaddr + offset, len, dir);
						kunmap_high(page);
					}
				}
			} else {
				/* Lowmem: the page is always mapped,
				 * so use its linear address directly. */
				vaddr = page_address(page) + offset;
				op(vaddr, len, dir);
			}
		}

		/* Pages after the first start at offset 0. */
		offset = 0;
		pfn++;
		left -= len;
	} while (left);
}
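
/*
 * These mirror the native __dma_page_{cpu_to_dev,dev_to_cpu} pair:
 * dmac_map_area readies the buffer before device access and
 * dmac_unmap_area makes device writes visible to the CPU afterwards.
 */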
static void __xen_dma_page_dev_to_cpu(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	/* Cannot use __dma_page_dev_to_cpu because we don't have a
	 * struct page for handle */

	dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir,
			dmac_unmap_area);
}

static void __xen_dma_page_cpu_to_dev(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir,
			dmac_map_area);
}
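
/*
 * Entry points called by swiotlb-xen.  Each one first checks the
 * generic dma_ops: if the native ops do not implement the hook (as
 * with arm_coherent_dma_ops), no cache maintenance is needed here
 * either.
 */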
void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	if (!__generic_dma_ops(hwdev)->unmap_page)
		return;
	if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		return;

	__xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
}

void xen_dma_sync_single_for_cpu(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	if (!__generic_dma_ops(hwdev)->sync_single_for_cpu)
		return;
	__xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
}

void xen_dma_sync_single_for_device(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	if (!__generic_dma_ops(hwdev)->sync_single_for_device)
		return;
	__xen_dma_page_cpu_to_dev(hwdev, handle, size, dir);
}

int __init xen_mm32_init(void)
{
	/* Nothing left to set up for dom0: the outer cache cannot be
	 * driven from a guest, so the outer_*_range hooks are gone. */
	if (!xen_initial_domain())
		return 0;

	return 0;
}
arch_initcall(xen_mm32_init);