*Referenced from [#fa1a18a0]
#backlinks

*Description [#ef6a6611]
-Path: [[linux-4.4.1/mm/cma.c]]

-cma_declare_contiguous() reserves a custom contiguous memory area for the CMA (Contiguous Memory Allocator) from the early allocator.
--It should be called from arch-specific code once the early allocator (memblock or bootmem) has been activated and all other subsystems have allocated/reserved their memory.
--If fixed is true, the area is reserved at exactly base; otherwise it is reserved somewhere in the range base to limit.
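
A minimal, hypothetical sketch of a call from arch setup code follows; the names my_cma and my_board_reserve_cma are illustrative and do not appear in the kernel tree.

 /* Hypothetical sketch: reserve a 16 MiB CMA area anywhere in RAM from
  * arch setup code. base = 0 and limit = 0 mean "any address", and
  * alignment = 0 selects the default (see the implementation below). */
 #include <linux/cma.h>
 #include <linux/init.h>
 #include <linux/printk.h>
 #include <linux/sizes.h>
 
 static struct cma *my_cma;
 
 static void __init my_board_reserve_cma(void)
 {
 	int ret;
 
 	ret = cma_declare_contiguous(0, SZ_16M, 0, 0, 0, false, &my_cma);
 	if (ret)
 		pr_warn("CMA reservation failed: %d\n", ret);
 }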


**Arguments [#ifd6864d]
-phys_addr_t base
--Base address of the reserved area; optional, pass 0 for any address.
--[[linux-4.4.1/phys_addr_t]]
-phys_addr_t size
--Size of the reserved area, in bytes.
-phys_addr_t limit
--End address of the reserved memory; optional, pass 0 for any.
-phys_addr_t alignment
--Alignment of the CMA area; must be a power of 2, or 0 for the default.
-unsigned int order_per_bit
--Order of pages represented by one bit of the CMA bitmap.
-bool fixed
--If true, reserve the area at exactly base; if false, reserve anywhere in the range base to limit.
-struct cma **res_cma
--Pointer through which the created cma region is returned.
--[[linux-4.4.1/cma]]


**Return value [#m5152279]
-int
--0 on success; a negative errno on failure: -ENOSPC (no free slot left in cma_areas), -EINVAL (invalid size or alignment, or a fixed region crossing the low/high memory boundary), -EBUSY (the fixed region is already reserved), or -ENOMEM (allocation failed).


**References [#q668b5e2]


*Implementation [#ka25b461]
 /**
  * cma_declare_contiguous() - reserve custom contiguous area
  * @base: Base address of the reserved area optional, use 0 for any
  * @size: Size of the reserved area (in bytes),
  * @limit: End address of the reserved memory (optional, 0 for any).
  * @alignment: Alignment for the CMA area, should be power of 2 or zero
  * @order_per_bit: Order of pages represented by one bit on bitmap.
  * @fixed: hint about where to place the reserved area
  * @res_cma: Pointer to store the created cma region.
  *
  * This function reserves memory from early allocator. It should be
  * called by arch specific code once the early allocator (memblock or bootmem)
  * has been activated and all other subsystems have already allocated/reserved
  * memory. This function allows to create custom reserved areas.
  *
  * If @fixed is true, reserve contiguous area at exactly @base.  If false,
  * reserve in range from @base to @limit.
  */
 int __init cma_declare_contiguous(phys_addr_t base,
 			phys_addr_t size, phys_addr_t limit,
 			phys_addr_t alignment, unsigned int order_per_bit,
 			bool fixed, struct cma **res_cma)
 {
 	phys_addr_t memblock_end = memblock_end_of_DRAM();
 	phys_addr_t highmem_start;
 	int ret = 0;
 
-
--[[linux-4.4.1/__init]]
--[[linux-4.4.1/memblock_end_of_DRAM()]]

 #ifdef CONFIG_X86
 	/*
 	 * high_memory isn't direct mapped memory so retrieving its physical
 	 * address isn't appropriate.  But it would be useful to check the
 	 * physical address of the highmem boundary so it's justifiable to get
 	 * the physical address from it.  On x86 there is a validation check for
 	 * this case, so the following workaround is needed to avoid it.
 	 */
 	highmem_start = __pa_nodebug(high_memory);
 #else
 	highmem_start = __pa(high_memory);
 #endif
-
--[[linux-4.4.1/CONFIG_X86]]
--[[linux-4.4.1/__pa_nodebug()]]
--[[linux-4.4.1/high_memory(global)]]
--[[linux-4.4.1/__pa()]]

 	pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
 		__func__, &size, &base, &limit, &alignment);
 
-
--[[linux-4.4.1/pr_debug()]]

 	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
 		pr_err("Not enough slots for CMA reserved regions!\n");
 		return -ENOSPC;
 	}
 
-
--[[linux-4.4.1/ARRAY_SIZE()]]
--[[linux-4.4.1/cma_areas(global)]]
--[[linux-4.4.1/pr_err()]]

 	if (!size)
 		return -EINVAL;
 
 	if (alignment && !is_power_of_2(alignment))
 		return -EINVAL;
 
-
--[[linux-4.4.1/is_power_of_2()]]

 	/*
 	 * Sanitise input arguments.
 	 * Pages both ends in CMA area could be merged into adjacent unmovable
 	 * migratetype page by page allocator's buddy algorithm. In the case,
 	 * you couldn't get a contiguous memory, which is not what we want.
 	 */
 	alignment = max(alignment,
 		(phys_addr_t)PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order));
 	base = ALIGN(base, alignment);
 	size = ALIGN(size, alignment);
 	limit &= ~(alignment - 1);
 
-
--[[linux-4.4.1/ALIGN()]]
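
As a stand-alone illustration of the rounding above (a userspace sketch, not kernel code; it assumes PAGE_SIZE = 4 KiB and MAX_ORDER = 11, so the minimum alignment comes to 4 MiB, and it ignores the pageblock_order term):

 /* Userspace sketch of the sanitisation arithmetic: base and size are
  * rounded up to the alignment, limit is rounded down. */
 #include <stdio.h>
 #include <stdint.h>
 
 #define PAGE_SIZE 4096ULL
 #define MAX_ORDER 11
 #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))	/* round up */
 
 int main(void)
 {
 	uint64_t alignment = PAGE_SIZE << (MAX_ORDER - 1);	/* 0x400000 = 4 MiB */
 	uint64_t base  = ALIGN(0x01234000ULL, alignment);	/* -> 0x01400000 (up) */
 	uint64_t size  = ALIGN(0x00500000ULL, alignment);	/* -> 0x00800000 (up) */
 	uint64_t limit = 0x3fffffffULL & ~(alignment - 1);	/* -> 0x3fc00000 (down) */
 
 	printf("alignment=%#llx base=%#llx size=%#llx limit=%#llx\n",
 	       (unsigned long long)alignment, (unsigned long long)base,
 	       (unsigned long long)size, (unsigned long long)limit);
 	return 0;
 }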

 	if (!base)
 		fixed = false;
 
 	/* size should be aligned with order_per_bit */
 	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
 		return -EINVAL;
 
-
--[[linux-4.4.1/IS_ALIGNED()]]
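
For example, with order_per_bit = 2 one bit of the CMA bitmap covers 2^2 = 4 pages, so the size in pages must be a multiple of 4. A stand-alone sketch of the check (assuming PAGE_SHIFT = 12, i.e. 4 KiB pages):

 /* Userspace sketch of the order_per_bit granularity check. */
 #include <stdio.h>
 #include <stdint.h>
 
 #define PAGE_SHIFT 12
 #define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)
 
 int main(void)
 {
 	unsigned int order_per_bit = 2;		/* one bit = 4 pages */
 	uint64_t ok  = 16ULL << PAGE_SHIFT;	/* 16 pages: accepted */
 	uint64_t bad = 18ULL << PAGE_SHIFT;	/* 18 pages: rejected */
 
 	printf("ok=%d bad=%d\n",
 	       (int)IS_ALIGNED(ok  >> PAGE_SHIFT, 1ULL << order_per_bit),
 	       (int)IS_ALIGNED(bad >> PAGE_SHIFT, 1ULL << order_per_bit));
 	return 0;
 }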

 	/*
 	 * If allocating at a fixed base the request region must not cross the
 	 * low/high memory boundary.
 	 */
 	if (fixed && base < highmem_start && base + size > highmem_start) {
 		ret = -EINVAL;
 		pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
 			&base, &highmem_start);
 		goto err;
 	}
 
-
--[[linux-4.4.1/highmem_start(global)]]

 	/*
 	 * If the limit is unspecified or above the memblock end, its effective
 	 * value will be the memblock end. Set it explicitly to simplify further
 	 * checks.
 	 */
 	if (limit == 0 || limit > memblock_end)
 		limit = memblock_end;
 
 	/* Reserve memory */
 	if (fixed) {
 		if (memblock_is_region_reserved(base, size) ||
 		    memblock_reserve(base, size) < 0) {
 			ret = -EBUSY;
 			goto err;
 		}
-
--[[linux-4.4.1/memblock_is_region_reserved()]]
--[[linux-4.4.1/memblock_reserve()]]

 	} else {
 		phys_addr_t addr = 0;
 
 		/*
 		 * All pages in the reserved area must come from the same zone.
 		 * If the requested region crosses the low/high memory boundary,
 		 * try allocating from high memory first and fall back to low
 		 * memory in case of failure.
 		 */
 		if (base < highmem_start && limit > highmem_start) {
 			addr = memblock_alloc_range(size, alignment,
 						    highmem_start, limit,
 						    MEMBLOCK_NONE);
 			limit = highmem_start;
 		}
 
-
--[[linux-4.4.1/memblock_alloc_range()]]

 		if (!addr) {
 			addr = memblock_alloc_range(size, alignment, base,
 						    limit,
 						    MEMBLOCK_NONE);
 			if (!addr) {
 				ret = -ENOMEM;
 				goto err;
 			}
 		}
 
 		/*
 		 * kmemleak scans/reads tracked objects for pointers to other
 		 * objects but this address isn't mapped and accessible
 		 */
 		kmemleak_ignore(phys_to_virt(addr));
 		base = addr;
-
--[[linux-4.4.1/kmemleak_ignore()]]
--[[linux-4.4.1/phys_to_virt()]]

 	}
 
 	ret = cma_init_reserved_mem(base, size, order_per_bit, res_cma);
 	if (ret)
 		goto err;
 
-
--[[linux-4.4.1/cma_init_reserved_mem()]]

 	pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
 		&base);
 	return 0;
 
-
--[[linux-4.4.1/pr_info()]]

 err:
 	pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
 	return ret;
-
--[[linux-4.4.1/pr_err()]]

 }


*Comments [#r8e8cade]

