linux-4.4.1/swiotlb_tbl_map_single()
をテンプレートにして作成
[
トップ
] [
新規
|
一覧
|
検索
|
最終更新
|
ヘルプ
|
ログイン
]
開始行:
*参照元 [#qcdc1c67]
#backlinks
*説明 [#gd685f2a]
-パス: [[linux-4.4.1/lib/swiotlb.c]]
-FIXME: これは何?
--説明
**引数 [#nfebf719]
-struct device *hwdev
--
--[[linux-4.4.1/device]]
-dma_addr_t tbl_dma_addr
--
--[[linux-4.4.1/dma_addr_t]]
-phys_addr_t orig_addr
--
--[[linux-4.4.1/phys_addr_t]]
-size_t size
--
-enum dma_data_direction dir
--
--[[linux-4.4.1/dma_data_direction]]
**返り値 [#fb9cc93e]
-phys_addr_t
--
--[[linux-4.4.1/phys_addr_t]]
**参考 [#z2b56b9b]
*実装 [#j3779f06]
phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
dma_addr_t tbl_dma_ad...
phys_addr_t orig_addr...
enum dma_data_directi...
{
unsigned long flags;
phys_addr_t tlb_addr;
unsigned int nslots, stride, index, wrap;
int i;
unsigned long mask;
unsigned long offset_slots;
unsigned long max_slots;
-
--[[linux-4.4.1/phys_addr_t]]
if (no_iotlb_memory)
panic("Can not allocate SWIOTLB buffer e...
-
--[[linux-4.4.1/no_iotlb_memory(global)]]
--[[linux-4.4.1/panic()]]
mask = dma_get_seg_boundary(hwdev);
-
--[[linux-4.4.1/dma_get_seg_boundary()]]
tbl_dma_addr &= mask;
offset_slots = ALIGN(tbl_dma_addr, 1 << IO_TLB_S...
-
--[[linux-4.4.1/ALIGN()]]
/*
* Carefully handle integer overflow which can o...
*/
max_slots = mask + 1
? ALIGN(mask + 1, 1 << IO_TLB_SHIFT)...
: 1UL << (BITS_PER_LONG - IO_TLB_SHI...
/*
* For mappings greater than a page, we limit th...
* hence alignment) to a page size.
*/
nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TL...
if (size > PAGE_SIZE)
stride = (1 << (PAGE_SHIFT - IO_TLB_SHIF...
else
stride = 1;
BUG_ON(!nslots);
-
--[[linux-4.4.1/BUG_ON()]]
/*
* Find suitable number of IO TLB entries size t...
* request and allocate a buffer from that IO TL...
*/
spin_lock_irqsave(&io_tlb_lock, flags);
index = ALIGN(io_tlb_index, stride);
if (index >= io_tlb_nslabs)
index = 0;
wrap = index;
-
--[[linux-4.4.1/spin_lock_irqsave()]]
--[[linux-4.4.1/io_tlb_lock(global)]]
--[[linux-4.4.1/io_tlb_index(global)]]
--[[linux-4.4.1/io_tlb_nslabs(global)]]
do {
while (iommu_is_span_boundary(index, nsl...
max_slots)...
index += stride;
if (index >= io_tlb_nslabs)
index = 0;
if (index == wrap)
goto not_found;
}
-
--[[linux-4.4.1/iommu_is_span_boundary()]]
/*
* If we find a slot that indicates we h...
* contiguous buffers, we allocate the b...
* and mark the entries as '0' indicatin...
*/
if (io_tlb_list[index] >= nslots) {
int count = 0;
for (i = index; i < (int) (index...
io_tlb_list[i] = 0;
for (i = index - 1; (OFFSET(i, I...
io_tlb_list[i] = ++count;
tlb_addr = io_tlb_start + (index...
-
--[[linux-4.4.1/OFFSET()]]
/*
* Update the indices to avoid s...
* round.
*/
io_tlb_index = ((index + nslots)...
? (index + nslot...
goto found;
}
index += stride;
if (index >= io_tlb_nslabs)
index = 0;
} while (index != wrap);
not_found:
spin_unlock_irqrestore(&io_tlb_lock, flags);
if (printk_ratelimit())
dev_warn(hwdev, "swiotlb buffer is full ...
-
--[[linux-4.4.1/spin_unlock_irqrestore()]]
--[[linux-4.4.1/printk_ratelimit()]]
--[[linux-4.4.1/dev_warn()]]
return SWIOTLB_MAP_ERROR;
found:
spin_unlock_irqrestore(&io_tlb_lock, flags);
/*
* Save away the mapping from the original addre...
* This is needed when we sync the memory. Then...
* needed.
*/
for (i = 0; i < nslots; i++)
io_tlb_orig_addr[index+i] = orig_addr + ...
if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTI...
swiotlb_bounce(orig_addr, tlb_addr, size...
-
--[[linux-4.4.1/swiotlb_bounce()]]
return tlb_addr;
}
EXPORT_SYMBOL_GPL(swiotlb_tbl_map_single);
-
--[[linux-4.4.1/EXPORT_SYMBOL_GPL()]]
*コメント [#ua881b49]
終了行:
*参照元 [#qcdc1c67]
#backlinks
*説明 [#gd685f2a]
-パス: [[linux-4.4.1/lib/swiotlb.c]]
-FIXME: これは何?
--説明
**引数 [#nfebf719]
-struct device *hwdev
--
--[[linux-4.4.1/device]]
-dma_addr_t tbl_dma_addr
--
--[[linux-4.4.1/dma_addr_t]]
-phys_addr_t orig_addr
--
--[[linux-4.4.1/phys_addr_t]]
-size_t size
--
-enum dma_data_direction dir
--
--[[linux-4.4.1/dma_data_direction]]
**返り値 [#fb9cc93e]
-phys_addr_t
--
--[[linux-4.4.1/phys_addr_t]]
**参考 [#z2b56b9b]
*実装 [#j3779f06]
phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
dma_addr_t tbl_dma_ad...
phys_addr_t orig_addr...
enum dma_data_directi...
{
unsigned long flags;
phys_addr_t tlb_addr;
unsigned int nslots, stride, index, wrap;
int i;
unsigned long mask;
unsigned long offset_slots;
unsigned long max_slots;
-
--[[linux-4.4.1/phys_addr_t]]
if (no_iotlb_memory)
panic("Can not allocate SWIOTLB buffer e...
-
--[[linux-4.4.1/no_iotlb_memory(global)]]
--[[linux-4.4.1/panic()]]
mask = dma_get_seg_boundary(hwdev);
-
--[[linux-4.4.1/dma_get_seg_boundary()]]
tbl_dma_addr &= mask;
offset_slots = ALIGN(tbl_dma_addr, 1 << IO_TLB_S...
-
--[[linux-4.4.1/ALIGN()]]
/*
* Carefully handle integer overflow which can o...
*/
max_slots = mask + 1
? ALIGN(mask + 1, 1 << IO_TLB_SHIFT)...
: 1UL << (BITS_PER_LONG - IO_TLB_SHI...
/*
* For mappings greater than a page, we limit th...
* hence alignment) to a page size.
*/
nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TL...
if (size > PAGE_SIZE)
stride = (1 << (PAGE_SHIFT - IO_TLB_SHIF...
else
stride = 1;
BUG_ON(!nslots);
-
--[[linux-4.4.1/BUG_ON()]]
/*
* Find suitable number of IO TLB entries size t...
* request and allocate a buffer from that IO TL...
*/
spin_lock_irqsave(&io_tlb_lock, flags);
index = ALIGN(io_tlb_index, stride);
if (index >= io_tlb_nslabs)
index = 0;
wrap = index;
-
--[[linux-4.4.1/spin_lock_irqsave()]]
--[[linux-4.4.1/io_tlb_lock(global)]]
--[[linux-4.4.1/io_tlb_index(global)]]
--[[linux-4.4.1/io_tlb_nslabs(global)]]
do {
while (iommu_is_span_boundary(index, nsl...
max_slots)...
index += stride;
if (index >= io_tlb_nslabs)
index = 0;
if (index == wrap)
goto not_found;
}
-
--[[linux-4.4.1/iommu_is_span_boundary()]]
/*
* If we find a slot that indicates we h...
* contiguous buffers, we allocate the b...
* and mark the entries as '0' indicatin...
*/
if (io_tlb_list[index] >= nslots) {
int count = 0;
for (i = index; i < (int) (index...
io_tlb_list[i] = 0;
for (i = index - 1; (OFFSET(i, I...
io_tlb_list[i] = ++count;
tlb_addr = io_tlb_start + (index...
-
--[[linux-4.4.1/OFFSET()]]
/*
* Update the indices to avoid s...
* round.
*/
io_tlb_index = ((index + nslots)...
? (index + nslot...
goto found;
}
index += stride;
if (index >= io_tlb_nslabs)
index = 0;
} while (index != wrap);
not_found:
spin_unlock_irqrestore(&io_tlb_lock, flags);
if (printk_ratelimit())
dev_warn(hwdev, "swiotlb buffer is full ...
-
--[[linux-4.4.1/spin_unlock_irqrestore()]]
--[[linux-4.4.1/printk_ratelimit()]]
--[[linux-4.4.1/dev_warn()]]
return SWIOTLB_MAP_ERROR;
found:
spin_unlock_irqrestore(&io_tlb_lock, flags);
/*
* Save away the mapping from the original addre...
* This is needed when we sync the memory. Then...
* needed.
*/
for (i = 0; i < nslots; i++)
io_tlb_orig_addr[index+i] = orig_addr + ...
if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTI...
swiotlb_bounce(orig_addr, tlb_addr, size...
-
--[[linux-4.4.1/swiotlb_bounce()]]
return tlb_addr;
}
EXPORT_SYMBOL_GPL(swiotlb_tbl_map_single);
-
--[[linux-4.4.1/EXPORT_SYMBOL_GPL()]]
*コメント [#ua881b49]
ページ名: