参照元

説明

引数

返り値

参考

実装

/*
 * swiotlb_tbl_map_single - allocate a bounce buffer from the software
 * IO TLB pool and remember the original address for later syncing.
 *
 * @hwdev:        device the mapping is for; supplies the DMA segment
 *                boundary mask and is used for rate-limited warnings
 * @tbl_dma_addr: DMA (bus) address of the start of the IO TLB table
 * @orig_addr:    physical address of the buffer being bounced
 * @size:         requested mapping size in bytes
 * @dir:          DMA transfer direction
 *
 * Returns the physical address of the first allocated IO TLB slot, or
 * SWIOTLB_MAP_ERROR when no run of free slots large enough is found.
 * For DMA_TO_DEVICE / DMA_BIDIRECTIONAL the caller's data is copied
 * into the bounce buffer before returning.
 */
phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
                                   dma_addr_t tbl_dma_addr,
                                   phys_addr_t orig_addr, size_t size,
                                   enum dma_data_direction dir)
{
        unsigned long flags;
        phys_addr_t tlb_addr;
        unsigned int nslots, stride, index, wrap;
        int i;
        unsigned long mask;
        unsigned long offset_slots;
        unsigned long max_slots;
        /* Without an IO TLB there is no way to provide a bounce buffer. */
        if (no_iotlb_memory)
                panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");
        /*
         * Reduce the table address modulo the device's DMA segment
         * boundary; offset_slots/max_slots below are expressed in IO TLB
         * slots relative to that boundary window.
         */
        mask = dma_get_seg_boundary(hwdev);
        tbl_dma_addr &= mask;

        offset_slots = ALIGN(tbl_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
        /*
         * Carefully handle integer overflow which can occur when mask == ~0UL.
         */
        max_slots = mask + 1
                    ? ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT
                    : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);

        /*
         * For mappings greater than a page, we limit the stride (and
         * hence alignment) to a page size.
         */
        nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
        if (size > PAGE_SIZE)
                stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
        else
                stride = 1;

        /* size == 0 would make the search below meaningless. */
        BUG_ON(!nslots);
        /*
         * Find suitable number of IO TLB entries size that will fit this
         * request and allocate a buffer from that IO TLB pool.
         */
        spin_lock_irqsave(&io_tlb_lock, flags);
        /*
         * Resume scanning just past the previous allocation (rounded up
         * to the stride), wrapping to slot 0 at the end of the table.
         */
        index = ALIGN(io_tlb_index, stride);
        if (index >= io_tlb_nslabs)
                index = 0;
        wrap = index;
        do {
                /*
                 * Skip candidate positions where a run of nslots slots
                 * would cross the device's segment boundary; give up if
                 * the scan comes all the way back around to where it
                 * started.
                 */
                while (iommu_is_span_boundary(index, nslots, offset_slots,
                                              max_slots)) {
                        index += stride;
                        if (index >= io_tlb_nslabs)
                                index = 0;
                        if (index == wrap)
                                goto not_found;
                }
                /*
                 * If we find a slot that indicates we have 'nslots' number of
                 * contiguous buffers, we allocate the buffers from that slot
                 * and mark the entries as '0' indicating unavailable.
                 */
                if (io_tlb_list[index] >= nslots) {
                        int count = 0;

                        for (i = index; i < (int) (index + nslots); i++)
                                io_tlb_list[i] = 0;
                        /*
                         * Walk backwards over the preceding free slots of
                         * this IO_TLB_SEGSIZE segment and refresh their
                         * free-run counters so each still records the
                         * distance to the next allocated slot.
                         */
                        for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
                                io_tlb_list[i] = ++count;
                        tlb_addr = io_tlb_start + (index << IO_TLB_SHIFT);
                        /*
                         * Update the indices to avoid searching in the next
                         * round.
                         */
                        io_tlb_index = ((index + nslots) < io_tlb_nslabs
                                        ? (index + nslots) : 0);

                        goto found;
                }
                index += stride;
                if (index >= io_tlb_nslabs)
                        index = 0;
        } while (index != wrap);

not_found:
        spin_unlock_irqrestore(&io_tlb_lock, flags);
        /* Rate-limited: this can fire often under bounce-buffer pressure. */
        if (printk_ratelimit())
                dev_warn(hwdev, "swiotlb buffer is full (sz: %zd bytes)\n", size);
        return SWIOTLB_MAP_ERROR;
found:
        spin_unlock_irqrestore(&io_tlb_lock, flags);

        /*
         * Save away the mapping from the original address to the DMA address.
         * This is needed when we sync the memory.  Then we sync the buffer if
         * needed.
         */
        for (i = 0; i < nslots; i++)
                io_tlb_orig_addr[index+i] = orig_addr + (i << IO_TLB_SHIFT);
        if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
                swiotlb_bounce(orig_addr, tlb_addr, size, DMA_TO_DEVICE);
        return tlb_addr;
}
EXPORT_SYMBOL_GPL(swiotlb_tbl_map_single);

コメント


トップ   編集 凍結 差分 履歴 添付 複製 名前変更 リロード   新規 一覧 検索 最終更新   ヘルプ   最終更新のRSS
Last-modified: 2016-08-03 (水) 18:54:47