Referenced from

ion_alloc() (ion_buffer_create() is static, so this is its only caller within the driver)

Description

Creates and initializes a struct ion_buffer backed by memory from the given heap, as part of the Android ION memory allocator. The steps are:

- kzalloc() the struct ion_buffer and take the initial kref on it.
- Allocate the backing memory via heap->ops->allocate(). If that fails and the heap uses deferred freeing (ION_HEAP_FLAG_DEFER_FREE), drain the heap's free list with ion_heap_freelist_drain() and retry once.
- Obtain the buffer's sg_table via heap->ops->map_dma().
- If the buffer faults its user mappings in page by page (ion_buffer_fault_user_mappings()), build a flat array of struct page pointers from the scatterlist for the fault handler's use.
- Write each scatterlist entry's physical address into sg_dma_address() (see the long comment in the code for why dma_map_sg() is deliberately bypassed).
- Register the buffer with the device under dev->buffer_lock.

The caller must hold dev->lock, as noted in the comment above the function.

Arguments

- heap: the ion_heap to allocate the backing memory from
- dev: the ion_device the new buffer belongs to
- len: requested buffer size in bytes
- align: required alignment of the allocation, in bytes
- flags: allocation flags stored in buffer->flags and passed through to the heap (cache behavior etc.)

Return value

A pointer to the newly created ion_buffer on success. On failure, an ERR_PTR()-encoded negative errno: -ENOMEM if the struct or its page array cannot be allocated, -EINVAL if heap->ops->map_dma() fails, or whatever error heap->ops->allocate() returned.
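On failure the function returns an ERR_PTR()-encoded pointer, never NULL, so the result must be tested with IS_ERR(). A minimal sketch of the calling pattern, loosely modeled on ion_alloc() in this driver (heap selection and handle creation are omitted; heap, dev, len, align and flags are assumed to be in scope):

        struct ion_buffer *buffer;

        down_read(&dev->lock);          /* ion_buffer_create() expects dev->lock held */
        buffer = ion_buffer_create(heap, dev, len, align, flags);
        up_read(&dev->lock);

        if (IS_ERR(buffer))
                return ERR_CAST(buffer);        /* propagate the errno to our caller */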

See also

Implementation

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
                                     struct ion_device *dev,
                                     unsigned long len,
                                     unsigned long align,
                                     unsigned long flags)
{
        struct ion_buffer *buffer;
        struct sg_table *table;
        struct scatterlist *sg;
        int i, ret;

        buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
        if (!buffer)
                return ERR_PTR(-ENOMEM);
        buffer->heap = heap;
        buffer->flags = flags;
        kref_init(&buffer->ref);
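
        /*
         * First allocation attempt.  If it fails and the heap frees
         * buffers lazily (ION_HEAP_FLAG_DEFER_FREE), memory may still be
         * parked on the heap's free list, so drain it and retry once.
         */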
        ret = heap->ops->allocate(heap, buffer, len, align, flags);
        if (ret) {
                if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
                        goto err2;

                ion_heap_freelist_drain(heap, 0);
                ret = heap->ops->allocate(heap, buffer, len, align,
                                          flags);
                if (ret)
                        goto err2;
        }
        buffer->dev = dev;
        buffer->size = len;

        table = heap->ops->map_dma(heap, buffer);
        if (WARN_ONCE(table == NULL,
                        "heap->ops->map_dma should return ERR_PTR on error"))
                table = ERR_PTR(-EINVAL);
        if (IS_ERR(table)) {
                ret = -EINVAL;
                goto err1;
        }
        buffer->sg_table = table;
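
        /*
         * Buffers whose user mappings are faulted in one page at a time
         * need a flat page array so the fault handler can find the page
         * for a given offset without walking the scatterlist each time.
         */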
        if (ion_buffer_fault_user_mappings(buffer)) {
                int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
                struct scatterlist *sg;
                int i, j, k = 0;
                buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
                if (!buffer->pages) {
                        ret = -ENOMEM;
                        goto err;
                }
                for_each_sg(table->sgl, sg, table->nents, i) {
                        struct page *page = sg_page(sg);

                        for (j = 0; j < sg->length / PAGE_SIZE; j++)
                                buffer->pages[k++] = page++;
                }
        }

        /* note: dev and size were already set before the map_dma call above */
        buffer->dev = dev;
        buffer->size = len;
        INIT_LIST_HEAD(&buffer->vmas);
        mutex_init(&buffer->lock);
        /*
         * this will set up dma addresses for the sglist -- it is not
         * technically correct as per the dma api -- a specific
         * device isn't really taking ownership here.  However, in practice on
         * our systems the only dma_address space is physical addresses.
         * Additionally, we can't afford the overhead of invalidating every
         * allocation via dma_map_sg. The implicit contract here is that
         * memory coming from the heaps is ready for dma, ie if it has a
         * cached mapping that mapping has been invalidated
         */
        for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
                sg_dma_address(sg) = sg_phys(sg);
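
        /* publish the buffer in the device's buffer tree */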
        mutex_lock(&dev->buffer_lock);
        ion_buffer_add(dev, buffer);
        mutex_unlock(&dev->buffer_lock);
        return buffer;
        /* the error labels fall through: each entry point also runs the ones below it */
err:
        heap->ops->unmap_dma(heap, buffer);     /* undo map_dma */
err1:
        heap->ops->free(buffer);                /* give the backing memory back to the heap */
err2:
        kfree(buffer);                          /* free the struct ion_buffer itself */
        return ERR_PTR(ret);
}
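
ion_buffer_add(), called at the end under dev->buffer_lock, is not shown on this page. It inserts the buffer into dev->buffers, a red-black tree keyed by the buffer's kernel address. A minimal sketch of that kind of insert, assuming (as in this driver version) that dev->buffers is a struct rb_root and struct ion_buffer carries a struct rb_node node member:

static void ion_buffer_add(struct ion_device *dev,
                           struct ion_buffer *buffer)
{
        struct rb_node **p = &dev->buffers.rb_node;
        struct rb_node *parent = NULL;
        struct ion_buffer *entry;

        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct ion_buffer, node);

                /* order nodes by the address of the ion_buffer itself */
                if (buffer < entry)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        rb_link_node(&buffer->node, parent, p);
        rb_insert_color(&buffer->node, &dev->buffers);
}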

Comments

