*Referrers [#w78d0e0f]
#backlinks
*Description [#r31fd3b3]
-Path: [[linux-4.4.1/mm/page_alloc.c]]
-__alloc_pages_nodemask() is the "heart" of the zoned buddy allocator: the common entry point that every page allocation ultimately goes through.
--It tries to allocate 2^order physically contiguous pages from the zones in zonelist, subject to gfp_mask and, optionally, nodemask. The fast path (get_page_from_freelist()) is tried first; if it fails, the function falls back to __alloc_pages_slowpath(), and the whole allocation is retried if a concurrent cpuset update raced with it.
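Callers normally do not invoke this function directly; they reach it through the allocator wrappers in include/linux/gfp.h. Below is a minimal sketch of one such wrapper, assuming the linux-4.4.1 non-NUMA path (simplified, not the verbatim source):
 /* Sketch (assumption): how wrappers reach __alloc_pages_nodemask().
  * Modeled on include/linux/gfp.h in linux-4.4.1; details simplified. */
 static inline struct page *
 __alloc_pages(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist)
 {
         /* No explicit nodemask: NULL means "use the cpuset's mems_allowed". */
         return __alloc_pages_nodemask(gfp_mask, order, zonelist, NULL);
 }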
**Arguments [#ube119f6]
-gfp_mask
--GFP flags describing how the allocation may behave (whether it may sleep, perform I/O, touch the filesystem, etc.) and which zones it may use.
-order
--Size of the request as a power of two: 2^order contiguous pages.
-zonelist
--Ordered list of candidate zones to allocate from.
-nodemask
--Set of NUMA nodes the allocation may use; if NULL, the current task's cpuset_current_mems_allowed is used instead.
**Return value [#ea0d2cb7]
-Pointer to the struct page of the first page in the allocated 2^order block, or NULL if the allocation failed.
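A caller-side example of this contract, using the standard alloc_pages()/__free_pages() helpers (the helpers are real kernel API; the surrounding code is illustrative):
 /* Example: request 2^2 = 4 contiguous pages; alloc_pages() eventually
  * lands in __alloc_pages_nodemask(). */
 struct page *page = alloc_pages(GFP_KERNEL, 2);
 if (!page)
         return -ENOMEM;         /* NULL return means the allocation failed */
 /* ... use the pages ... */
 __free_pages(page, 2);          /* free with the same order */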
**References [#oc405a9c]
*Implementation [#jc9b8e85]
 /*
  * This is the 'heart' of the zoned buddy allocator.
  */
 struct page *
 __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
                        struct zonelist *zonelist, nodemask_t *nodemask)
 {
         struct zoneref *preferred_zoneref;
         struct page *page = NULL;
         unsigned int cpuset_mems_cookie;
         int alloc_flags = ALLOC_WMARK_LOW|ALLOC_CPUSET|ALLOC_FAIR;
         gfp_t alloc_mask; /* The gfp_t that was actually used for allocation */
         struct alloc_context ac = {
                 .high_zoneidx = gfp_zone(gfp_mask),
                 .nodemask = nodemask,
                 .migratetype = gfpflags_to_migratetype(gfp_mask),
         };
-
--[[linux-4.4.1/zoneref]]
--[[linux-4.4.1/page]]
--[[linux-4.4.1/gfp_t]]
--[[linux-4.4.1/alloc_context]]
--[[linux-4.4.1/gfp_zone()]]
--[[linux-4.4.1/gfpflags_to_migratetype()]]
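struct alloc_context bundles the parameters that the fast and slow paths pass around. The sketch below reconstructs its fields from the members this function actually touches; the authoritative definition lives in mm/internal.h and may order or comment things differently:
 /* Sketch (assumption): struct alloc_context as used in this function. */
 struct alloc_context {
         struct zonelist *zonelist;      /* zones to try, in order */
         nodemask_t *nodemask;           /* allowed NUMA nodes (may be NULL) */
         struct zone *preferred_zone;    /* first candidate zone, for stats */
         int classzone_idx;              /* zone index of preferred_zone */
         int migratetype;                /* derived from the GFP flags */
         enum zone_type high_zoneidx;    /* highest zone gfp_mask allows */
         bool spread_dirty_pages;        /* dirty balancing, fast path only */
 };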
         gfp_mask &= gfp_allowed_mask;
-
--[[linux-4.4.1/gfp_allowed_mask(global)]]
         lockdep_trace_alloc(gfp_mask);
         might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);
-
--[[linux-4.4.1/lockdep_trace_alloc()]]
--[[linux-4.4.1/might_sleep_if()]]
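might_sleep_if() arms the "might sleep" debug check only when __GFP_DIRECT_RECLAIM is set, i.e. when the caller is allowed to block. As an illustration (assuming the standard linux-4.4.1 GFP definitions, where GFP_KERNEL includes __GFP_DIRECT_RECLAIM and GFP_ATOMIC does not):
 /* Illustration (assumption): which common masks allow sleeping. */
 struct page *p1 = alloc_pages(GFP_KERNEL, 0); /* may sleep: check armed */
 struct page *p2 = alloc_pages(GFP_ATOMIC, 0); /* must not sleep: check skipped */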
         if (should_fail_alloc_page(gfp_mask, order))
                 return NULL;
-
--[[linux-4.4.1/should_fail_alloc_page()]]
         /*
          * Check the zones suitable for the gfp_mask contain at least one
          * valid zone. It's possible to have an empty zonelist as a result
          * of __GFP_THISNODE and a memoryless node
          */
         if (unlikely(!zonelist->_zonerefs->zone))
                 return NULL;
         if (IS_ENABLED(CONFIG_CMA) && ac.migratetype == MIGRATE_MOVABLE)
                 alloc_flags |= ALLOC_CMA;
-
--[[linux-4.4.1/unlikely()]]
--[[linux-4.4.1/IS_ENABLED()]]
--[[linux-4.4.1/CONFIG_CMA]]
 retry_cpuset:
         cpuset_mems_cookie = read_mems_allowed_begin();
-
--[[linux-4.4.1/read_mems_allowed_begin()]]
         /* We set it here, as __alloc_pages_slowpath might have changed it */
         ac.zonelist = zonelist;
         /* Dirty zone balancing only done in the fast path */
         ac.spread_dirty_pages = (gfp_mask & __GFP_WRITE);
         /* The preferred zone is used for statistics later */
         preferred_zoneref = first_zones_zonelist(ac.zonelist, ac.high_zoneidx,
                                 ac.nodemask ? : &cpuset_current_mems_allowed,
                                 &ac.preferred_zone);
         if (!ac.preferred_zone)
                 goto out;
         ac.classzone_idx = zonelist_zone_idx(preferred_zoneref);
-
--[[linux-4.4.1/first_zones_zonelist()]]
--[[linux-4.4.1/cpuset_current_mems_allowed(global)]]
--[[linux-4.4.1/zonelist_zone_idx()]]
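Note the GNU C "elvis" extension in the first_zones_zonelist() call above: a ? : b yields a when a is non-NULL and b otherwise, without evaluating a twice. The nodemask argument is therefore equivalent to:
 /* Equivalent spelling of "ac.nodemask ? : &cpuset_current_mems_allowed" */
 nodemask_t *nm = ac.nodemask ? ac.nodemask : &cpuset_current_mems_allowed;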
         /* First allocation attempt */
         alloc_mask = gfp_mask|__GFP_HARDWALL;
         page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac);
-
--[[linux-4.4.1/get_page_from_freelist()]]
         if (unlikely(!page)) {
                 /*
                  * Runtime PM, block IO and its error handling path
                  * can deadlock because I/O on the device might not
                  * complete.
                  */
                 alloc_mask = memalloc_noio_flags(gfp_mask);
                 ac.spread_dirty_pages = false;
                 page = __alloc_pages_slowpath(alloc_mask, order, &ac);
         }
-
--[[linux-4.4.1/memalloc_noio_flags()]]
--[[linux-4.4.1/__alloc_pages_slowpath()]]
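memalloc_noio_flags() strips the flags that could start block I/O or filesystem writeback when the current task is inside a noio region (e.g. runtime PM). A sketch of its behavior, assuming the linux-4.4.1 definition in include/linux/sched.h:
 /* Sketch (assumption): if the task set PF_MEMALLOC_NOIO, drop __GFP_IO
  * and __GFP_FS so the slow path cannot deadlock on in-flight I/O. */
 static inline gfp_t memalloc_noio_flags(gfp_t flags)
 {
         if (unlikely(current->flags & PF_MEMALLOC_NOIO))
                 flags &= ~(__GFP_IO | __GFP_FS);
         return flags;
 }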
         if (kmemcheck_enabled && page)
                 kmemcheck_pagealloc_alloc(page, order, gfp_mask);
-
--[[linux-4.4.1/kmemcheck_pagealloc_alloc()]]
         trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype);
-
--[[linux-4.4.1/trace_mm_page_alloc()]]
 out:
         /*
          * When updating a task's mems_allowed, it is possible to race with
          * parallel threads in such a way that an allocation can fail while
          * the mask is being updated. If a page allocation is about to fail,
          * check if the cpuset changed during allocation and if so, retry.
          */
         if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
                 goto retry_cpuset;
-
--[[linux-4.4.1/read_mems_allowed_retry()]]
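read_mems_allowed_begin()/read_mems_allowed_retry() form a seqcount read section over the task's cpuset mems_allowed: the cookie taken at retry_cpuset is rechecked here, and a racing cpuset update sends a failed allocation back for one more try. The underlying pattern, as a hedged sketch (assuming the usual seqcount reader API over current->mems_allowed_seq):
 /* Sketch (assumption): the generic seqcount reader pattern behind
  * read_mems_allowed_begin()/read_mems_allowed_retry(). */
 unsigned int seq;
 do {
         seq = read_seqcount_begin(&current->mems_allowed_seq);
         /* ... read mems_allowed and attempt the allocation ... */
 } while (read_seqcount_retry(&current->mems_allowed_seq, seq));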
         return page;
 }
 EXPORT_SYMBOL(__alloc_pages_nodemask);
-
--[[linux-4.4.1/__alloc_pages_nodemask()]]
*Comments [#r527e668]