*Referenced from [#qcdc1c67]
#backlinks

*Description [#gd685f2a]
-Path: [[linux-4.4.1/lib/swiotlb.c]]

-Allocates a bounce buffer (a contiguous run of slots) from the software IO TLB (SWIOTLB) pool for a DMA mapping and returns its physical address.
--Used when a device cannot reach the original buffer by DMA: data is staged through the IO TLB instead. Each slot is (1 << IO_TLB_SHIFT) bytes.
--Records the original address per slot for later sync/unmap, and for DMA_TO_DEVICE / DMA_BIDIRECTIONAL copies the caller's data into the bounce buffer up front.
--Returns SWIOTLB_MAP_ERROR when no suitable run of free slots exists.
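
The gist, as a minimal userspace sketch (the pool, the fixed slot choice, and all names here are made up for illustration; only the copy-on-map behaviour mirrors the real function):

 /* Toy model of bounce buffering: a small staging area stands in for
  * the SWIOTLB pool; mapping a buffer for DMA_TO_DEVICE stages (copies)
  * the data into it so a "device" limited to the pool could read it. */
 #include <stdio.h>
 #include <string.h>
 
 enum dma_data_direction { DMA_BIDIRECTIONAL, DMA_TO_DEVICE, DMA_FROM_DEVICE };
 
 static char io_tlb[4096];                  /* stand-in for the IO TLB   */
 
 static char *map_single(const char *orig, size_t size,
                         enum dma_data_direction dir)
 {
         char *tlb = io_tlb;                /* real code searches slots  */
         if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
                 memcpy(tlb, orig, size);   /* stage data for the device */
         return tlb;
 }
 
 int main(void)
 {
         char buf[] = "payload";
         char *bounce = map_single(buf, sizeof(buf), DMA_TO_DEVICE);
         printf("bounced: %s\n", bounce);   /* device would DMA from here */
         return 0;
 }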


**Arguments [#nfebf719]
-struct device *hwdev
--The device the mapping is being set up for; supplies the DMA segment boundary mask and is the target of the "buffer is full" warning.
--[[linux-4.4.1/device]]
-dma_addr_t tbl_dma_addr
--Bus (DMA) address of the start of the IO TLB pool as seen from hwdev.
--[[linux-4.4.1/dma_addr_t]]
-phys_addr_t orig_addr
--Physical address of the original buffer to be bounced.
--[[linux-4.4.1/phys_addr_t]]
-size_t size
--Size of the mapping in bytes.
-enum dma_data_direction dir
--DMA transfer direction: DMA_TO_DEVICE, DMA_FROM_DEVICE, or DMA_BIDIRECTIONAL.
--[[linux-4.4.1/dma_data_direction]]


**Return value [#fb9cc93e]
-phys_addr_t
--Physical address of the allocated bounce buffer inside the IO TLB pool, or SWIOTLB_MAP_ERROR if no suitable run of free slots was found.
--[[linux-4.4.1/phys_addr_t]]


**See also [#z2b56b9b]


*Implementation [#j3779f06]
 phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
                                    dma_addr_t tbl_dma_addr,
                                    phys_addr_t orig_addr, size_t size,
                                    enum dma_data_direction dir)
 {
         unsigned long flags;
         phys_addr_t tlb_addr;
         unsigned int nslots, stride, index, wrap;
         int i;
         unsigned long mask;
         unsigned long offset_slots;
         unsigned long max_slots;
 
-Local variables: tlb_addr will receive the physical address of the bounce buffer allocated from the IO TLB.
--[[linux-4.4.1/phys_addr_t]]

         if (no_iotlb_memory)
                 panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");
 
-If the IO TLB could not be allocated at boot (no_iotlb_memory is set), there is no way to provide a bounce buffer now either, so panic.
--[[linux-4.4.1/no_iotlb_memory(global)]]
--[[linux-4.4.1/panic()]]

         mask = dma_get_seg_boundary(hwdev);
 
-Get the device's DMA segment boundary mask: no single mapping may cross a (mask + 1)-byte boundary.
--[[linux-4.4.1/dma_get_seg_boundary()]]

         tbl_dma_addr &= mask;
 
         offset_slots = ALIGN(tbl_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
 
-Mask the table's bus address down to its offset within a boundary segment, then round it up to a slot index. offset_slots feeds iommu_is_span_boundary() in the search loop; a worked example follows.
--[[linux-4.4.1/ALIGN()]]
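
Checking the arithmetic standalone (IO_TLB_SHIFT is 11, i.e. 2 KiB slots, per include/linux/swiotlb.h; the mask and address below are made-up example values):

 /* Userspace reproduction of the offset_slots computation: ALIGN() is
  * the usual power-of-two round-up, and the result is the slot index
  * of the table start within its (mask + 1)-byte boundary segment. */
 #include <stdio.h>
 
 #define ALIGN(x, a)   (((x) + (a) - 1) & ~((unsigned long)(a) - 1))
 #define IO_TLB_SHIFT  11
 
 int main(void)
 {
         unsigned long mask = 0xffffffUL;   /* 16 MiB segment boundary   */
         unsigned long tbl_dma_addr = 0x12345000UL & mask;   /* 0x345000 */
 
         unsigned long offset_slots =
                 ALIGN(tbl_dma_addr, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
 
         printf("offset_slots = %lu\n", offset_slots);       /* 1674     */
         return 0;
 }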

         /*
          * Carefully handle integer overflow which can occur when mask == ~0UL.
          */
         max_slots = mask + 1
                     ? ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT
                     : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);
 
         /*
          * For mappings greater than a page, we limit the stride (and
          * hence alignment) to a page size.
          */
         nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
         if (size > PAGE_SIZE)
                 stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
         else
                 stride = 1;
 
         BUG_ON(!nslots);
 
-nslots is the number of (1 << IO_TLB_SHIFT)-byte slots the request needs; a zero-sized request would be a caller bug. For multi-page requests the search stride (and hence alignment) is one page. A standalone check of this arithmetic follows.
--[[linux-4.4.1/BUG_ON()]]
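
The same style of check for nslots, stride and max_slots (assuming 4 KiB pages and a 64-bit long; the 6000-byte size is an arbitrary example). Note how mask == ~0UL makes mask + 1 wrap to 0, which selects the overflow-safe branch:

 /* nslots: 2 KiB slots needed for the request; stride: search step,
  * page-sized for multi-page requests; max_slots: slots per boundary
  * segment, with the mask == ~0UL overflow handled explicitly. */
 #include <stdio.h>
 
 #define ALIGN(x, a)    (((x) + (a) - 1) & ~((unsigned long)(a) - 1))
 #define IO_TLB_SHIFT   11
 #define PAGE_SHIFT     12
 #define PAGE_SIZE      (1UL << PAGE_SHIFT)
 #define BITS_PER_LONG  64
 
 int main(void)
 {
         unsigned long size = 6000;         /* bytes to map              */
         unsigned long mask = ~0UL;         /* "no boundary" device      */
 
         unsigned int nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
         unsigned int stride = size > PAGE_SIZE
                 ? 1 << (PAGE_SHIFT - IO_TLB_SHIFT) : 1;
         unsigned long max_slots = mask + 1
                 ? ALIGN(mask + 1, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT
                 : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);
 
         /* prints: nslots=3 stride=2 max_slots=0x20000000000000 (2^53)  */
         printf("nslots=%u stride=%u max_slots=%#lx\n",
                nslots, stride, max_slots);
         return 0;
 }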

         /*
          * Find suitable number of IO TLB entries size that will fit this
          * request and allocate a buffer from that IO TLB pool.
          */
         spin_lock_irqsave(&io_tlb_lock, flags);
         index = ALIGN(io_tlb_index, stride);
         if (index >= io_tlb_nslabs)
                 index = 0;
         wrap = index;
 
-Take the pool lock and start a circular scan at the next stride-aligned index after the previous allocation point (io_tlb_index); wrap remembers where the scan started.
--[[linux-4.4.1/spin_lock_irqsave()]]
--[[linux-4.4.1/io_tlb_lock(global)]]
--[[linux-4.4.1/io_tlb_index(global)]]
--[[linux-4.4.1/io_tlb_nslabs(global)]]

         do {
                 while (iommu_is_span_boundary(index, nslots, offset_slots,
                                               max_slots)) {
                         index += stride;
                         if (index >= io_tlb_nslabs)
                                 index = 0;
                         if (index == wrap)
                                 goto not_found;
                 }
 
-Skip start positions from which a run of nslots slots would cross a segment boundary; if the scan comes back around to wrap, the pool is full. (Replica of the check below.)
--[[linux-4.4.1/iommu_is_span_boundary()]]
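
As I read [[linux-4.4.1/iommu_is_span_boundary()]] in lib/iommu-helper.c, the check reduces to: the run [index, index + nr) must not cross a boundary_size-slot boundary, where shift (offset_slots here) accounts for the table's own offset inside its segment. A userspace replica:

 /* Replica of the span-boundary check (cf. lib/iommu-helper.c):
  * true if nr slots starting at index would cross a boundary_size-slot
  * boundary; shift is the table's slot offset within its segment. */
 #include <stdio.h>
 #include <stdbool.h>
 
 static bool is_span_boundary(unsigned int index, unsigned int nr,
                              unsigned long shift, unsigned long boundary_size)
 {
         shift = (shift + index) & (boundary_size - 1);  /* pos in segment */
         return shift + nr > boundary_size;
 }
 
 int main(void)
 {
         /* toy 4-slot segments: a 3-slot run at 2 crosses, at 4 it fits */
         printf("%d\n", is_span_boundary(2, 3, 0, 4));   /* 1 */
         printf("%d\n", is_span_boundary(4, 3, 0, 4));   /* 0 */
         return 0;
 }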

                 /*
                  * If we find a slot that indicates we have 'nslots' number of
                  * contiguous buffers, we allocate the buffers from that slot
                  * and mark the entries as '0' indicating unavailable.
                  */
                 if (io_tlb_list[index] >= nslots) {
                         int count = 0;
 
                         for (i = index; i < (int) (index + nslots); i++)
                                 io_tlb_list[i] = 0;
                         for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
                                 io_tlb_list[i] = ++count;
                         tlb_addr = io_tlb_start + (index << IO_TLB_SHIFT);
 
-The allocated run is zeroed in io_tlb_list and the free-run counts of the slots just before it are rebuilt; OFFSET(i, IO_TLB_SEGSIZE) stops the fix-up at the segment boundary. (Modelled below.)
--[[linux-4.4.1/OFFSET()]]
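
io_tlb_list[i] holds the number of contiguous free slots starting at slot i, capped per IO_TLB_SEGSIZE segment. A userspace model of the mark-and-fix-up above (a SEGSIZE of 8 is a toy value; the kernel's IO_TLB_SEGSIZE is 128):

 /* Free-run bookkeeping: zero the allocated run, then rebuild the
  * counts of the free slots just before it, since they can now only
  * run up to the allocation. OFFSET() stops at the segment boundary. */
 #include <stdio.h>
 
 #define SEGSIZE  8                         /* toy IO_TLB_SEGSIZE        */
 #define OFFSET(val, align)  ((val) & ((align) - 1))
 
 int main(void)
 {
         int list[SEGSIZE], i, count = 0;
         int index = 5, nslots = 2;         /* allocate slots 5 and 6    */
 
         for (i = 0; i < SEGSIZE; i++)      /* all free: 8 7 6 5 4 3 2 1 */
                 list[i] = SEGSIZE - i;
 
         for (i = index; i < index + nslots; i++)
                 list[i] = 0;               /* mark the run as used      */
         for (i = index - 1; OFFSET(i, SEGSIZE) != SEGSIZE - 1 && list[i]; i--)
                 list[i] = ++count;         /* free runs now end at 5    */
 
         for (i = 0; i < SEGSIZE; i++)
                 printf("%d ", list[i]);    /* 5 4 3 2 1 0 0 1           */
         printf("\n");
         return 0;
 }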

                         /*
                          * Update the indices to avoid searching in the next
                          * round.
                          */
                         io_tlb_index = ((index + nslots) < io_tlb_nslabs
                                         ? (index + nslots) : 0);
 
                         goto found;
                 }
                 index += stride;
                 if (index >= io_tlb_nslabs)
                         index = 0;
         } while (index != wrap);
 
 not_found:
         spin_unlock_irqrestore(&io_tlb_lock, flags);
         if (printk_ratelimit())
                 dev_warn(hwdev, "swiotlb buffer is full (sz: %zd bytes)\n", size);
-No suitable run of free slots was found: drop the lock, warn (rate-limited), and fail the mapping with SWIOTLB_MAP_ERROR.
--[[linux-4.4.1/spin_unlock_irqrestore()]]
--[[linux-4.4.1/printk_ratelimit()]]
--[[linux-4.4.1/dev_warn()]]

         return SWIOTLB_MAP_ERROR;
 found:
         spin_unlock_irqrestore(&io_tlb_lock, flags);
 
         /*
          * Save away the mapping from the original address to the DMA address.
          * This is needed when we sync the memory.  Then we sync the buffer if
          * needed.
          */
         for (i = 0; i < nslots; i++)
                 io_tlb_orig_addr[index+i] = orig_addr + (i << IO_TLB_SHIFT);
         if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
                 swiotlb_bounce(orig_addr, tlb_addr, size, DMA_TO_DEVICE);
 
-Record, per slot, which piece of the original buffer it shadows (needed by the sync/unmap paths), then copy the caller's data into the bounce buffer for DMA_TO_DEVICE and DMA_BIDIRECTIONAL mappings. (Sketch below.)
--[[linux-4.4.1/swiotlb_bounce()]]
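
A toy model of the io_tlb_orig_addr bookkeeping (the addresses are made up): each slot records which 2 KiB piece of the original buffer it shadows, which is what the later sync/unmap paths use to copy data back for DMA_FROM_DEVICE:

 /* Per-slot shadow bookkeeping: slot index+i maps the i-th 2 KiB piece
  * of the original buffer, so partial syncs can find their source. */
 #include <stdio.h>
 
 #define IO_TLB_SHIFT 11
 
 int main(void)
 {
         unsigned long io_tlb_orig_addr[8] = { 0 };
         unsigned long orig_addr = 0x100000;    /* made-up buffer address */
         int index = 3, nslots = 2, i;
 
         for (i = 0; i < nslots; i++)           /* same loop as above     */
                 io_tlb_orig_addr[index + i] = orig_addr + (i << IO_TLB_SHIFT);
 
         /* prints: slot 4 -> 0x100800 (orig_addr + 2 KiB)                */
         printf("slot %d -> %#lx\n", index + 1, io_tlb_orig_addr[index + 1]);
         return 0;
 }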

         return tlb_addr;
 }
 EXPORT_SYMBOL_GPL(swiotlb_tbl_map_single);
-Exported for use by GPL-compatible modules.
--[[linux-4.4.1/EXPORT_SYMBOL_GPL()]]


*Comments [#ua881b49]

