From: Jianxiong Gao <jxgao@google.com>
commit: c32a77fd18780a5192dfb6eec69f239faebf28fd
Factor out a helper to find the number of slots for a given size.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Jianxiong Gao <jxgao@google.com>
Tested-by: Jianxiong Gao <jxgao@google.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Jianxiong Gao <jxgao@google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 kernel/dma/swiotlb.c | 13 +++++++++----
 1 file changed, 9 insertions(+), 4 deletions(-)
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -194,6 +194,11 @@ static inline unsigned long io_tlb_offse
 	return val & (IO_TLB_SEGSIZE - 1);
 }
 
+static inline unsigned long nr_slots(u64 val)
+{
+	return DIV_ROUND_UP(val, IO_TLB_SIZE);
+}
+
 /*
  * Early SWIOTLB allocation may be too early to allow an architecture to
  * perform the desired operations. This function allows the architecture to
@@ -493,20 +498,20 @@ phys_addr_t swiotlb_tbl_map_single(struc
 	tbl_dma_addr &= mask;
 
-	offset_slots = ALIGN(tbl_dma_addr, IO_TLB_SIZE) >> IO_TLB_SHIFT;
+	offset_slots = nr_slots(tbl_dma_addr);
 
 	/*
 	 * Carefully handle integer overflow which can occur when mask == ~0UL.
 	 */
 	max_slots = mask + 1
-		    ? ALIGN(mask + 1, IO_TLB_SIZE) >> IO_TLB_SHIFT
+		    ? nr_slots(mask + 1)
 		    : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);
 
 	/*
 	 * For mappings greater than or equal to a page, we limit the stride
 	 * (and hence alignment) to a page size.
 	 */
-	nslots = ALIGN(alloc_size, IO_TLB_SIZE) >> IO_TLB_SHIFT;
+	nslots = nr_slots(alloc_size);
 	if (alloc_size >= PAGE_SIZE)
 		stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
 	else
@@ -602,7 +607,7 @@ void swiotlb_tbl_unmap_single(struct dev
 			      enum dma_data_direction dir, unsigned long attrs)
 {
 	unsigned long flags;
-	int i, count, nslots = ALIGN(alloc_size, IO_TLB_SIZE) >> IO_TLB_SHIFT;
+	int i, count, nslots = nr_slots(alloc_size);
 	int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
 	phys_addr_t orig_addr = io_tlb_orig_addr[index];
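
Not part of the patch: a minimal standalone userspace sketch showing why nr_slots() is a drop-in replacement for the open-coded ALIGN(val, IO_TLB_SIZE) >> IO_TLB_SHIFT expressions above. It assumes a power-of-two slot size of 1 << IO_TLB_SHIFT and hard-codes IO_TLB_SHIFT as 11 (2 KiB slots) purely for the demonstration; ALIGN() and DIV_ROUND_UP() are re-implemented locally as stand-ins for the kernel macros.

/*
 * Hypothetical userspace check: for a power-of-two IO_TLB_SIZE,
 * DIV_ROUND_UP(val, IO_TLB_SIZE) == ALIGN(val, IO_TLB_SIZE) >> IO_TLB_SHIFT,
 * so the helper computes the same slot count as the code it replaces.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define IO_TLB_SHIFT	11			/* assumed 2 KiB slots for this sketch */
#define IO_TLB_SIZE	(1UL << IO_TLB_SHIFT)

/* Local stand-ins for the kernel macros used by the patch. */
#define ALIGN(x, a)		(((x) + (a) - 1) & ~((uint64_t)(a) - 1))
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static inline unsigned long nr_slots(uint64_t val)
{
	return DIV_ROUND_UP(val, IO_TLB_SIZE);
}

int main(void)
{
	uint64_t sizes[] = { 0, 1, IO_TLB_SIZE - 1, IO_TLB_SIZE,
			     IO_TLB_SIZE + 1, 3 * IO_TLB_SIZE, 65536 };
	size_t i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		/* Old open-coded form vs. the factored-out helper. */
		uint64_t old = ALIGN(sizes[i], IO_TLB_SIZE) >> IO_TLB_SHIFT;
		uint64_t new = nr_slots(sizes[i]);

		assert(old == new);
		printf("size %llu -> %llu slots\n",
		       (unsigned long long)sizes[i], (unsigned long long)new);
	}
	return 0;
}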