int iommu_map(struct iommu_domain *domain, unsigned long iova,
	      phys_addr_t paddr, size_t size, int prot)
{
	unsigned long orig_iova = iova;
	unsigned int min_pagesz;
	size_t orig_size = size;
	phys_addr_t orig_paddr = paddr;
	int ret = 0;

	if (unlikely(domain->ops->map == NULL ||
		     domain->ops->pgsize_bitmap == 0UL))
		return -ENODEV;

	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
		return -EINVAL;
	/* find out the minimum page size supported */
	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
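	/*
	 * For example, with a typical pgsize_bitmap of SZ_4K | SZ_2M | SZ_1G,
	 * __ffs() returns 12 and min_pagesz is 0x1000 (4 KiB).
	 */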
	/*
	 * both the virtual address and the physical one, as well as
	 * the size of the mapping, must be aligned (at least) to the
	 * size of the smallest page supported by the hardware
	 */
	if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
		pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
		       iova, &paddr, size, min_pagesz);
		return -EINVAL;
	}
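	/*
	 * OR-ing the three values lets a single IS_ALIGNED() test cover all
	 * of them: a stray low bit in any operand survives into the OR. For
	 * instance, iova = 0x10000, paddr = 0x20000, size = 0x1800 is
	 * rejected with a 4 KiB min_pagesz because size is not page-aligned.
	 */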
pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);
	while (size) {
		size_t pgsize = iommu_pgsize(domain, iova | paddr, size);

		pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
			 iova, &paddr, pgsize);
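		/*
		 * iommu_pgsize() (a static helper earlier in this file)
		 * returns the largest page size from pgsize_bitmap that
		 * fits in 'size' and matches the alignment of iova | paddr,
		 * so large regions consume as few page-table entries as
		 * possible.
		 */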
		ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
		if (ret)
			break;
		iova += pgsize;
		paddr += pgsize;
		size -= pgsize;
	}

	/* unroll mapping in case something went wrong */
	if (ret)
		iommu_unmap(domain, orig_iova, orig_size - size);
	else
		/* paddr has been advanced past the region, trace the original */
		trace_map(orig_iova, orig_paddr, orig_size);
	return ret;
}
EXPORT_SYMBOL_GPL(iommu_map);
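/*
 * A minimal caller-side sketch, not part of the original file: a driver
 * allocates a domain, attaches its device, and maps one physically
 * contiguous buffer for DMA. The function name example_map_buffer, the
 * device pointer 'dev', the physical address 'pa', and the device address
 * 0x100000 are all placeholders chosen for illustration.
 */
static int example_map_buffer(struct device *dev, phys_addr_t pa)
{
	struct iommu_domain *domain;
	int ret;

	domain = iommu_domain_alloc(dev->bus);
	if (!domain)
		return -ENOMEM;

	ret = iommu_attach_device(domain, dev);
	if (ret)
		goto out_free;

	/* map 2 MiB of the buffer at device address 0x100000, read/write */
	ret = iommu_map(domain, 0x100000, pa, SZ_2M,
			IOMMU_READ | IOMMU_WRITE);
	if (ret)
		goto out_detach;

	return 0;

out_detach:
	iommu_detach_device(domain, dev);
out_free:
	iommu_domain_free(domain);
	return ret;
}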
/* Stub used when the IOMMU API (CONFIG_IOMMU_API) is not enabled. */
static inline int iommu_map(struct iommu_domain *domain, unsigned long iova,
			    phys_addr_t paddr, size_t size, int prot)
{
	return -ENODEV;
}