*Referenced from [#w9505546]
#backlinks
*Description [#k5f4c15e]
-Path: [[linux-4.4.1/drivers/staging/android/ion/ion.c]]
-ion_buffer_create(): creates a new ion_buffer backed by memory from the given heap.
--Allocates a struct ion_buffer, obtains backing memory through the heap's allocate() callback (retrying once after draining the heap's freelist for deferred-free heaps), maps the memory to an sg_table via map_dma(), fills in the DMA addresses, and registers the buffer with the ion_device. Must only be called while dev->lock is held. A sketch of the calling pattern is shown below.
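-A minimal sketch of the calling pattern, modeled loosely on ion_alloc() in the same file (simplified, not verbatim kernel code; it assumes the 4.4 layout where dev->lock is an rw_semaphore and dev->heaps is a plist, and it omits the heap-id mask check):
 static struct ion_buffer *alloc_from_any_heap(struct ion_device *dev,
                                               unsigned long len,
                                               unsigned long align,
                                               unsigned long flags)
 {
         struct ion_buffer *buffer = ERR_PTR(-ENODEV);
         struct ion_heap *heap;
 
         down_read(&dev->lock);  /* ion_buffer_create() needs dev->lock held */
         plist_for_each_entry(heap, &dev->heaps, node) {
                 buffer = ion_buffer_create(heap, dev, len, align, flags);
                 if (!IS_ERR(buffer))
                         break;  /* first heap that succeeds wins */
         }
         up_read(&dev->lock);
 
         return buffer;
 }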
**Arguments [#ecbf3c1a]
-struct ion_heap *heap
--Heap to allocate the buffer from.
--[[linux-4.4.1/ion_heap]]
-struct ion_device *dev
--ion device that the buffer will belong to.
--[[linux-4.4.1/ion_device]]
-unsigned long len
--Requested buffer size in bytes (stored in buffer->size).
-unsigned long align
--Requested alignment; passed through to heap->ops->allocate().
-unsigned long flags
--Allocation flags; stored in buffer->flags and passed to heap->ops->allocate().
**Return value [#x32d0f70]
-struct ion_buffer *
--Pointer to the newly created ion_buffer on success, or an ERR_PTR()-encoded error (-ENOMEM, -EINVAL) on failure; callers must check it with IS_ERR().
--[[linux-4.4.1/ion_buffer]]
**References [#za01f3b9]
*Implementation [#ha64a526]
 /* this function should only be called while dev->lock is held */
 static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
                                             struct ion_device *dev,
                                             unsigned long len,
                                             unsigned long align,
                                             unsigned long flags)
 {
         struct ion_buffer *buffer;
         struct sg_table *table;
         struct scatterlist *sg;
         int i, ret;
-
--[[linux-4.4.1/ion_buffer]]
--[[linux-4.4.1/sg_table]]
--[[linux-4.4.1/scatterlist]]
         buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
         if (!buffer)
                 return ERR_PTR(-ENOMEM);
-
--[[linux-4.4.1/kzalloc()]]
--[[linux-4.4.1/GFP_KERNEL]]
--[[linux-4.4.1/ERR_PTR()]]
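--Note: kzalloc() returns zeroed memory, so every field of the new ion_buffer starts out as 0/NULL before the explicit assignments below, and GFP_KERNEL means the allocation may sleep.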
         buffer->heap = heap;
         buffer->flags = flags;
         kref_init(&buffer->ref);
-
--[[linux-4.4.1/kref_init()]]
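-buffer->ref is the buffer's reference count and follows the usual kref pattern: initialized to 1 here, taken and dropped later (in ion.c the final put happens via ion_buffer_put(), whose release callback destroys the buffer). A generic illustration of the pattern with a hypothetical struct foo, not ion code:
 #include <linux/kernel.h>
 #include <linux/kref.h>
 #include <linux/slab.h>
 
 struct foo {
         struct kref ref;
 };
 
 static void foo_release(struct kref *kref)
 {
         struct foo *f = container_of(kref, struct foo, ref);
 
         kfree(f);
 }
 
 static void foo_example(void)
 {
         struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);
 
         if (!f)
                 return;
         kref_init(&f->ref);              /* refcount = 1 */
         kref_get(&f->ref);               /* refcount = 2 */
         kref_put(&f->ref, foo_release);  /* refcount = 1 */
         kref_put(&f->ref, foo_release);  /* drops to 0, foo_release() runs */
 }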
         ret = heap->ops->allocate(heap, buffer, len, align, flags);
-
-heap->ops is of type struct ion_heap_ops * (the heap's table of callbacks, defined in ion_priv.h); an abridged sketch follows below.
--[[linux-4.4.1/]]
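-An abridged sketch of struct ion_heap_ops, with the signatures inferred from how the callbacks are invoked in this function; the full 4.4 definition (which also has phys, map_kernel, unmap_kernel, map_user, shrink) lives in ion_priv.h:
 struct ion_heap_ops {
         int (*allocate)(struct ion_heap *heap, struct ion_buffer *buffer,
                         unsigned long len, unsigned long align,
                         unsigned long flags);
         void (*free)(struct ion_buffer *buffer);
         struct sg_table * (*map_dma)(struct ion_heap *heap,
                                      struct ion_buffer *buffer);
         void (*unmap_dma)(struct ion_heap *heap, struct ion_buffer *buffer);
         /* ... other callbacks omitted ... */
 };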
         if (ret) {
                 if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
                         goto err2;
                 ion_heap_freelist_drain(heap, 0);
                 ret = heap->ops->allocate(heap, buffer, len, align,
                                           flags);
                 if (ret)
                         goto err2;
         }
-
--[[linux-4.4.1/ion_heap_freelist_drain()]]
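--If the heap frees memory lazily (ION_HEAP_FLAG_DEFER_FREE), buffers released by clients may still be sitting on the heap's freelist, so a failed allocation is retried once after ion_heap_freelist_drain(heap, 0) has drained the freelist (a size of 0 drains everything) and returned that memory to the heap. Heaps without deferred freeing give up immediately (goto err2).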
         buffer->dev = dev;
         buffer->size = len;
         table = heap->ops->map_dma(heap, buffer);
         if (WARN_ONCE(table == NULL,
                       "heap->ops->map_dma should return ERR_PTR on error"))
                 table = ERR_PTR(-EINVAL);
         if (IS_ERR(table)) {
                 ret = -EINVAL;
                 goto err1;
         }
-
--[[linux-4.4.1/WARN_ONCE()]]
--[[linux-4.4.1/IS_ERR()]]
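-heap->ops->map_dma() is expected to report failure with the ERR_PTR() convention; WARN_ONCE() flags heaps that return NULL instead, and the NULL is converted to ERR_PTR(-EINVAL) so the single IS_ERR() check covers both cases. The idiom, shown with a hypothetical helper (not part of ion.c):
 #include <linux/err.h>
 #include <linux/printk.h>
 #include <linux/scatterlist.h>
 #include <linux/slab.h>
 
 /* Encode the error code in the returned pointer instead of returning NULL. */
 static struct sg_table *example_map_dma(void)
 {
         struct sg_table *table = kzalloc(sizeof(*table), GFP_KERNEL);
 
         if (!table)
                 return ERR_PTR(-ENOMEM);
         if (sg_alloc_table(table, 1, GFP_KERNEL)) {
                 kfree(table);
                 return ERR_PTR(-ENOMEM);
         }
         return table;
 }
 
 static void example_caller(void)
 {
         struct sg_table *table = example_map_dma();
 
         if (IS_ERR(table))
                 pr_err("map_dma failed: %ld\n", PTR_ERR(table));
 }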
         buffer->sg_table = table;
         if (ion_buffer_fault_user_mappings(buffer)) {
-
--[[linux-4.4.1/ion_buffer_fault_user_mappings()]]
                 int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
                 struct scatterlist *sg;
                 int i, j, k = 0;
-
--[[linux-4.4.1/PAGE_ALIGN()]]
--[[linux-4.4.1/PAGE_SIZE]]
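--Example (assuming PAGE_SIZE = 4096): for buffer->size = 10000, PAGE_ALIGN(10000) = 12288, so num_pages = 12288 / 4096 = 3.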
                 buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
                 if (!buffer->pages) {
                         ret = -ENOMEM;
                         goto err;
                 }
-
--[[linux-4.4.1/page]]
--[[linux-4.4.1/vmalloc()]]
                 for_each_sg(table->sgl, sg, table->nents, i) {
                         struct page *page = sg_page(sg);
                         for (j = 0; j < sg->length / PAGE_SIZE; j++)
                                 buffer->pages[k++] = page++;
                 }
-
--[[linux-4.4.1/for_each_sg()]]
--[[linux-4.4.1/sg_page()]]
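--The loop flattens the scatterlist into the array buffer->pages[], one struct page * per PAGE_SIZE page, so that later code (e.g. the CPU page-fault handler used for faulted user mappings) can look up a page by index instead of walking the scatterlist.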
         }
         buffer->dev = dev;
         buffer->size = len;
         INIT_LIST_HEAD(&buffer->vmas);
         mutex_init(&buffer->lock);
-
--[[linux-4.4.1/INIT_LIST_HEAD()]]
--[[linux-4.4.1/mutex_init()]]
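--buffer->dev and buffer->size were already set above, so these two assignments are redundant (but harmless). buffer->vmas will hold the list of user VMAs that map this buffer, and buffer->lock protects the buffer's mutable state after creation.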
         /*
          * this will set up dma addresses for the sglist -- it is not
          * technically correct as per the dma api -- a specific
          * device isn't really taking ownership here. However, in practice on
          * our systems the only dma_address space is physical addresses.
          * Additionally, we can't afford the overhead of invalidating every
          * allocation via dma_map_sg. The implicit contract here is that
          * memory coming from the heaps is ready for dma, ie if it has a
          * cached mapping that mapping has been invalidated
          */
         for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
                 sg_dma_address(sg) = sg_phys(sg);
-
--[[linux-4.4.1/sg_dma_address()]]
--[[linux-4.4.1/sg_phys()]]
         mutex_lock(&dev->buffer_lock);
         ion_buffer_add(dev, buffer);
         mutex_unlock(&dev->buffer_lock);
         return buffer;
-
--[[linux-4.4.1/mutex_lock()]]
--[[linux-4.4.1/ion_buffer_add()]]
--[[linux-4.4.1/mutex_unlock()]]
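--ion_buffer_add() inserts the new buffer into the device-wide rbtree of buffers (dev->buffers); dev->buffer_lock serializes that insertion against concurrent buffer creation and destruction.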
 err:
         heap->ops->unmap_dma(heap, buffer);
 err1:
         heap->ops->free(buffer);
 err2:
         kfree(buffer);
         return ERR_PTR(ret);
-
--[[linux-4.4.1/kfree()]]
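--The error labels unwind in reverse order of construction: err (reached when vmalloc() of buffer->pages fails, i.e. after map_dma() succeeded) first calls unmap_dma(), then falls through to err1, which releases the heap allocation with heap->ops->free(), and finally err2, which frees the struct ion_buffer itself and returns the error wrapped in ERR_PTR().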
 }
*Comments [#nc2855c6]