*Referenced from [#h34a4b8c]
#backlinks

*Description [#w9629841]
-Path: [[linux-4.4.1/mm/cma.c]]

-What it does
--Registers an already-reserved, physically contiguous memory region (e.g. reserved through memblock) as a CMA (Contiguous Memory Allocator) area. The area itself is only recorded here and is activated later, once more kernel subsystems are available; see the usage sketch below.

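A minimal usage sketch (a hypothetical early-boot caller, not code from the kernel tree): the region must already appear in memblock.reserved before cma_init_reserved_mem() is called, which is what the real callers cma_declare_contiguous() and rmem_cma_setup() in mm/cma.c arrange.

 /* Hypothetical sketch: reserve a physically contiguous region with
  * memblock, then register it as a CMA area. Error handling is minimal;
  * my_cma_setup() and my_cma are illustrative names.
  */
 static struct cma *my_cma;
 
 static int __init my_cma_setup(phys_addr_t base, phys_addr_t size)
 {
         /* the range must be reserved before registration */
         if (memblock_reserve(base, size))
                 return -ENOMEM;
 
         /* one bitmap bit covers 2^0 = 1 page */
         return cma_init_reserved_mem(base, size, 0, &my_cma);
 }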

**Arguments [#j3df9d51]
-phys_addr_t base
--Base address of the reserved area.
--[[linux-4.4.1/phys_addr_t]]
-phys_addr_t size
--Size of the reserved area, in bytes.
-unsigned int order_per_bit
--Order of pages represented by one bit of the CMA bitmap.
-struct cma **res_cma
--Pointer through which the created cma region is returned.
--[[linux-4.4.1/cma]]


**Return value [#abd139f2]
-int
--0 on success; a negative error code on failure (-ENOSPC if no cma_areas slot is free, -EINVAL if the region is not reserved or not suitably aligned).


**References [#qedf71a6]


*Implementation [#p23835ba]
 /**
  * cma_init_reserved_mem() - create custom contiguous area from reserved memory
  * @base: Base address of the reserved area
  * @size: Size of the reserved area (in bytes),
  * @order_per_bit: Order of pages represented by one bit on bitmap.
  * @res_cma: Pointer to store the created cma region.
  *
  * This function creates custom contiguous area from already reserved memory.
  */
 int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
                                  unsigned int order_per_bit,
                                  struct cma **res_cma)
 {
         struct cma *cma;
         phys_addr_t alignment;
 
         /* Sanity checks */
         if (cma_area_count == ARRAY_SIZE(cma_areas)) {
                 pr_err("Not enough slots for CMA reserved regions!\n");
                 return -ENOSPC;
         }
 
-
--[[linux-4.4.1/cma_area_count(global)]]
--[[linux-4.4.1/cma_areas(global)]]
--[[linux-4.4.1/ARRAY_SIZE()]]
--[[linux-4.4.1/pr_err()]]
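This rejects the registration once every slot of the fixed-size cma_areas array is in use. A sketch of the relevant globals as they are defined in mm/cma.c (MAX_CMA_AREAS is derived from CONFIG_CMA_AREAS):

 struct cma cma_areas[MAX_CMA_AREAS];   /* fixed-size table of CMA areas  */
 unsigned cma_area_count;               /* number of slots used so far    */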

         if (!size || !memblock_is_region_reserved(base, size))
                 return -EINVAL;
 
-
--[[linux-4.4.1/memblock_is_region_reserved()]]

         /* ensure minimal alignment required by mm core */
         alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
 
-
--[[linux-4.4.1/PAGE_SIZE]]
--[[linux-4.4.1/MAX_ORDER]]
--[[linux-4.4.1/max()]]
--[[linux-4.4.1/pageblock_order(global)]]
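The mm core requires a CMA area to be aligned to the larger of the maximum buddy-allocator block (order MAX_ORDER - 1) and a pageblock. A standalone worked example, assuming typical x86-64 values (PAGE_SIZE = 4 KiB, MAX_ORDER = 11, pageblock_order <= 10); the constants below are illustrative, not the kernel macros themselves:

 #include <stdio.h>
 
 int main(void)
 {
         unsigned long page_size = 4096;   /* PAGE_SIZE                    */
         int max_order = 11;               /* MAX_ORDER                    */
         int pageblock_order = 9;          /* 2 MiB huge page / 4 KiB page */
         int order = (max_order - 1 > pageblock_order) ? max_order - 1
                                                       : pageblock_order;
 
         /* alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order) */
         unsigned long alignment = page_size << order;
 
         printf("alignment = %lu bytes (%lu MiB)\n",
                alignment, alignment >> 20);   /* 4194304 bytes = 4 MiB */
         return 0;
 }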

         /* alignment should be aligned with order_per_bit */
         if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
                 return -EINVAL;
 
-
--[[linux-4.4.1/IS_ALIGNED()]]
--[[linux-4.4.1/PAGE_SHIFT]]
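alignment >> PAGE_SHIFT is the minimal alignment expressed in pages, and 1 << order_per_bit is the number of pages covered by one bitmap bit; the check guarantees that an area boundary can never split a bitmap bit. A sketch with illustrative numbers (IS_ALIGNED is shown in a simplified form; the kernel macro also casts through the type of x):

 #define IS_ALIGNED(x, a)  (((x) & ((a) - 1)) == 0)   /* a is a power of two */
 
 /* alignment = 4 MiB with 4 KiB pages -> alignment >> PAGE_SHIFT = 1024 pages  */
 /* order_per_bit = 0  -> 1 << 0  = 1    page  per bit -> aligned, OK           */
 /* order_per_bit = 10 -> 1 << 10 = 1024 pages per bit -> aligned, OK           */
 /* order_per_bit = 11 -> 1 << 11 = 2048 pages per bit -> not aligned, -EINVAL  */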

         if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
                 return -EINVAL;
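Note: ALIGN(x, a) rounds x up to the next multiple of a (a power of two), so this test rejects a base address or size that is not already a multiple of the alignment computed above. A simplified sketch of the macro (the kernel version also handles the type of x):

 #define ALIGN(x, a)  (((x) + ((a) - 1)) & ~((a) - 1))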
 
         /*
          * Each reserved area must be initialised later, when more kernel
          * subsystems (like slab allocator) are available.
          */
         cma = &cma_areas[cma_area_count];
         cma->base_pfn = PFN_DOWN(base);
         cma->count = size >> PAGE_SHIFT;
         cma->order_per_bit = order_per_bit;
         *res_cma = cma;
         cma_area_count++;
         totalcma_pages += (size / PAGE_SIZE);
 
-
--[[linux-4.4.1/cma_areas(global)]]
--[[linux-4.4.1/PFN_DOWN()]]
--[[linux-4.4.1/totalcma_pages(global)]]
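cma_init_reserved_mem() only records the region here; the bitmap is allocated and the pages are handed back to the allocator later, by cma_init_reserved_areas()/cma_activate_area(), once the slab allocator is available (as the comment above notes). A worked numeric sketch of the bookkeeping, with illustrative values and 4 KiB pages (PAGE_SHIFT = 12):

 #include <stdio.h>
 
 int main(void)
 {
         unsigned long long base = 0x20000000ULL;      /* 512 MiB, already aligned */
         unsigned long long size = 64ULL << 20;        /* 64 MiB reserved area     */
         int page_shift = 12;                          /* PAGE_SHIFT               */
 
         unsigned long base_pfn = base >> page_shift;  /* PFN_DOWN(base)           */
         unsigned long count    = size >> page_shift;  /* size in pages            */
 
         printf("base_pfn = %lu, count = %lu\n", base_pfn, count);
         /* -> base_pfn = 131072, count = 16384 */
         return 0;
 }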

         return 0;
 }


*Comments [#ef0af007]
