*Referrers [#l1e9098f]
#backlinks

*Description [#r84b798a]
-Path: [[linux-4.4.1/mm/page_alloc.c]]

-__alloc_pages_slowpath() is the slow path of the buddy page allocator.
--It is entered from __alloc_pages_nodemask() only after the fast path (a single get_page_from_freelist() call against the low watermark) has failed. It wakes kswapd for background reclaim, then escalates through direct compaction, direct reclaim and finally the OOM killer, retrying as long as progress is being made and the gfp flags allow it.
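
-The sketch below (abridged, not verbatim) shows how the slowpath is reached from __alloc_pages_nodemask() in the same file: the fast path is tried exactly once, and only its failure leads here.

         /* Call site in __alloc_pages_nodemask(), mm/page_alloc.c
          * (linux-4.4.1); abridged sketch, not verbatim. */
         page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac);
         if (unlikely(!page)) {
                 /* Fast path failed: clear __GFP_IO/__GFP_FS for
                  * memalloc_noio tasks, then take the slow path. */
                 alloc_mask = memalloc_noio_flags(gfp_mask);
                 page = __alloc_pages_slowpath(alloc_mask, order, &ac);
         }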


**Arguments [#v4249ee9]
-gfp_t gfp_mask
--GFP flags describing how the allocation may behave: whether it may sleep, perform I/O or filesystem activity, use atomic reserves, and so on.
--[[linux-4.4.1/gfp_t]]
-unsigned int order
--Order of the request: 2^order physically contiguous pages are to be allocated.
-struct alloc_context *ac
--Allocation context prepared by the caller: the zonelist, nodemask, preferred zone, class zone index and migrate type used throughout the allocation.
--[[linux-4.4.1/alloc_context]]


**Return value [#scb12eb4]
-struct page *
--Pointer to the struct page of the first page of the allocated 2^order block, or NULL on failure.
--[[linux-4.4.1/page]]
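
-A minimal usage sketch (hypothetical caller). By the time alloc_pages() returns NULL, the slowpath below has already tried compaction, reclaim and possibly the OOM killer, so NULL is a genuine failure that must be handled.

         /* Hypothetical example: allocate 2^2 = 4 contiguous pages. */
         struct page *page = alloc_pages(GFP_KERNEL, 2);
 
         if (!page)
                 return -ENOMEM;         /* the slowpath gave up */
         memset(page_address(page), 0, 4 * PAGE_SIZE);
         __free_pages(page, 2);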


**References [#p6f52271]


*Implementation [#rbda4e19]
 static inline struct page *
 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
                                                 struct alloc_context *ac)
 {
         bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
         struct page *page = NULL;
         int alloc_flags;
         unsigned long pages_reclaimed = 0;
         unsigned long did_some_progress;
         enum migrate_mode migration_mode = MIGRATE_ASYNC;
         bool deferred_compaction = false;
         int contended_compaction = COMPACT_CONTENDED_NONE;
 
-
--[[linux-4.4.1/__GFP_DIRECT_RECLAIM]]
--[[linux-4.4.1/migrate_mode]]
--[[linux-4.4.1/COMPACT_CONTENDED_NONE]]

         /*
          * In the slowpath, we sanity check order to avoid ever trying to
          * reclaim >= MAX_ORDER areas which will never succeed. Callers may
          * be using allocators in order of preference for an area that is
          * too large.
          */
         if (order >= MAX_ORDER) {
                 WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
                 return NULL;
         }
 
-
--[[linux-4.4.1/MAX_ORDER]]
--[[linux-4.4.1/WARN_ON_ONCE()]]
--[[linux-4.4.1/__GFP_NOWARN]]
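
-Concretely: with the common default MAX_ORDER = 11 and 4 KiB pages, orders 0 through 10 (up to 4 MiB of contiguous memory) are serviceable, and anything larger fails here at once, warning unless the caller passed __GFP_NOWARN.

         /* Illustrative only (assumes MAX_ORDER == 11, PAGE_SIZE == 4096):
          * the largest serviceable request is order 10, i.e.
          * (1UL << 10) * 4096 == 4 MiB.  This order-11 request hits the
          * check above and returns NULL, silently thanks to __GFP_NOWARN. */
         struct page *page = alloc_pages(GFP_KERNEL | __GFP_NOWARN, MAX_ORDER);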

         /*
          * We also sanity check to catch abuse of atomic reserves being used by
          * callers that are not in atomic context.
          */
         if (WARN_ON_ONCE((gfp_mask & (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)) ==
                                 (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)))
                 gfp_mask &= ~__GFP_ATOMIC;
 
-
--[[linux-4.4.1/__GFP_ATOMIC]]
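
-A hypothetical caller tripping this check: GFP_ATOMIC contains __GFP_ATOMIC and GFP_KERNEL contains __GFP_DIRECT_RECLAIM, so combining them is contradictory ("must not sleep" plus "may sleep to reclaim"). The slowpath warns once and keeps the blockable behaviour by clearing __GFP_ATOMIC.

         /* Hypothetical contradictory request: warns once via the
          * WARN_ON_ONCE() above, then proceeds as a normal blockable
          * allocation without access to the atomic reserves. */
         struct page *page = alloc_pages(GFP_ATOMIC | GFP_KERNEL, 0);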

         /*
          * If this allocation cannot block and it is for a specific node, then
          * fail early.  There's no need to wakeup kswapd or retry for a
          * speculative node-specific allocation.
          */
         if (IS_ENABLED(CONFIG_NUMA) && (gfp_mask & __GFP_THISNODE) && !can_direct_reclaim)
                 goto nopage;
 
-
--[[linux-4.4.1/IS_ENABLED()]]
--[[linux-4.4.1/CONFIG_NUMA]]

 retry:
         if (gfp_mask & __GFP_KSWAPD_RECLAIM)
                 wake_all_kswapds(order, ac);
 
-
--[[linux-4.4.1/__GFP_KSWAPD_RECLAIM]]
--[[linux-4.4.1/wake_all_kswapds()]]
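
-Most common masks include __GFP_KSWAPD_RECLAIM (in 4.4.1, GFP_KERNEL, GFP_ATOMIC and GFP_NOWAIT all do), so background reclaim is normally kicked for every zone in the zonelist before any direct work is attempted. Hypothetical examples:

         /* Both wake kswapd via the check above: */
         struct page *a = alloc_pages(GFP_KERNEL, 0);    /* may also sleep */
         struct page *b = alloc_pages(GFP_ATOMIC, 0);    /* never sleeps */
 
         /* This one does not: __GFP_NOWARN alone carries no reclaim bits,
          * so the slowpath neither wakes kswapd nor reclaims directly. */
         struct page *c = alloc_pages(__GFP_NOWARN, 0);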

         /*
          * OK, we're below the kswapd watermark and have kicked background
          * reclaim. Now things get more complex, so set up alloc_flags according
          * to how we want to proceed.
          */
         alloc_flags = gfp_to_alloc_flags(gfp_mask);
 
-
--[[linux-4.4.1/gfp_to_alloc_flags()]]
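
-The effect, roughly: the slowpath drops from the low watermark used by the fast path to the min watermark, and grants extra rights to high-priority callers. An abridged sketch (not verbatim) of gfp_to_alloc_flags(), under a hypothetical _sketch name:

         static int gfp_to_alloc_flags_sketch(gfp_t gfp_mask)
         {
                 /* base: check the min watermark, obey cpuset limits */
                 int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
 
                 if (gfp_mask & __GFP_HIGH)
                         alloc_flags |= ALLOC_HIGH;      /* dig deeper below the mark */
                 if ((gfp_mask & __GFP_ATOMIC) && !(gfp_mask & __GFP_NOMEMALLOC))
                         alloc_flags |= ALLOC_HARDER;    /* atomic: try harder */
                 if (gfp_mask & __GFP_MEMALLOC)
                         alloc_flags |= ALLOC_NO_WATERMARKS; /* ignore watermarks */
                 return alloc_flags;
         }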

         /*
          * Find the true preferred zone if the allocation is unconstrained by
          * cpusets.
          */
         if (!(alloc_flags & ALLOC_CPUSET) && !ac->nodemask) {
                 struct zoneref *preferred_zoneref;
                 preferred_zoneref = first_zones_zonelist(ac->zonelist,
                                 ac->high_zoneidx, NULL, &ac->preferred_zone);
                 ac->classzone_idx = zonelist_zone_idx(preferred_zoneref);
         }
 
-
--[[linux-4.4.1/ALLOC_CPUSET]]
--[[linux-4.4.1/zoneref]]
--[[linux-4.4.1/first_zones_zonelist()]]
--[[linux-4.4.1/zonelist_zone_idx()]]

         /* This is the last chance, in general, before the goto nopage. */
         page = get_page_from_freelist(gfp_mask, order,
                                 alloc_flags & ~ALLOC_NO_WATERMARKS, ac);
         if (page)
                 goto got_pg;
 
-
--[[linux-4.4.1/get_page_from_freelist()]]

         /* Allocate without watermarks if the context allows */
         if (alloc_flags & ALLOC_NO_WATERMARKS) {
                 /*
                  * Ignore mempolicies if ALLOC_NO_WATERMARKS on the grounds
                  * the allocation is high priority and these type of
                  * allocations are system rather than user orientated
                  */
                 ac->zonelist = node_zonelist(numa_node_id(), gfp_mask);
 
                 page = __alloc_pages_high_priority(gfp_mask, order, ac);
 
                 if (page) {
                         goto got_pg;
                 }
-
--[[linux-4.4.1/node_zonelist()]]
--[[linux-4.4.1/numa_node_id()]]
--[[linux-4.4.1/__alloc_pages_high_priority()]]

         }
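
-ALLOC_NO_WATERMARKS is granted by gfp_to_alloc_flags() to callers that are themselves part of freeing memory: __GFP_MEMALLOC allocations, PF_MEMALLOC tasks and OOM-killed (TIF_MEMDIE) tasks. An abridged sketch (not verbatim) of __alloc_pages_high_priority(): retry the freelist with watermarks ignored, and for __GFP_NOFAIL callers keep looping, throttled on congestion, until a page appears.

         do {
                 page = get_page_from_freelist(gfp_mask, order,
                                                 ALLOC_NO_WATERMARKS, ac);
                 if (!page && (gfp_mask & __GFP_NOFAIL))
                         wait_iff_congested(ac->preferred_zone, BLK_RW_ASYNC,
                                                 HZ/50);
         } while (!page && (gfp_mask & __GFP_NOFAIL));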
 
         /* Caller is not willing to reclaim, we can't balance anything */
         if (!can_direct_reclaim) {
                 /*
                  * All existing users of the deprecated __GFP_NOFAIL are
                  * blockable, so warn of any new users that actually allow this
                  * type of allocation to fail.
                  */
                 WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL);
                 goto nopage;
         }
 
         /* Avoid recursion of direct reclaim */
         if (current->flags & PF_MEMALLOC)
                 goto nopage;
 
-
--[[linux-4.4.1/PF_MEMALLOC]]
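
-PF_MEMALLOC marks a task that is already reclaiming, so an allocation made from inside reclaim (for example by a filesystem during writeback) bails out here rather than recursing into reclaim again. Abridged sketch (not verbatim) of __perform_reclaim() in the same file, which sets the flag around the actual reclaim work:

         current->flags |= PF_MEMALLOC;          /* we are the reclaimer */
         progress = try_to_free_pages(ac->zonelist, order,
                                         gfp_mask, ac->nodemask);
         current->flags &= ~PF_MEMALLOC;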

         /* Avoid allocations with no watermarks from looping endlessly */
         if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
                 goto nopage;
 
-
--[[linux-4.4.1/TIF_MEMDIE]]

         /*
          * Try direct compaction. The first pass is asynchronous. Subsequent
          * attempts after direct reclaim are synchronous
          */
         page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
                                         migration_mode,
                                         &contended_compaction,
                                         &deferred_compaction);
         if (page)
                 goto got_pg;
 
-
--[[linux-4.4.1/__alloc_pages_direct_compact()]]

         /* Checks for THP-specific high-order allocations */
         if (is_thp_gfp_mask(gfp_mask)) {
                 /*
                  * If compaction is deferred for high-order allocations, it is
                  * because sync compaction recently failed. If this is the case
                  * and the caller requested a THP allocation, we do not want
                  * to heavily disrupt the system, so we fail the allocation
                  * instead of entering direct reclaim.
                  */
                 if (deferred_compaction)
                         goto nopage;
 
-
--[[linux-4.4.1/is_thp_gfp_mask()]]

                 /*
                  * In all zones where compaction was attempted (and not
                  * deferred or skipped), lock contention has been detected.
                  * For THP allocation we do not want to disrupt the others
                  * so we fallback to base pages instead.
                  */
                 if (contended_compaction == COMPACT_CONTENDED_LOCK)
                         goto nopage;
 
-
--[[linux-4.4.1/COMPACT_CONTENDED_LOCK]]

                 /*
                  * If compaction was aborted due to need_resched(), we do not
                  * want to further increase allocation latency, unless it is
                  * khugepaged trying to collapse.
                  */
                 if (contended_compaction == COMPACT_CONTENDED_SCHED
                         && !(current->flags & PF_KTHREAD))
                         goto nopage;
-
--[[linux-4.4.1/COMPACT_CONTENDED_SCHED]]
--[[linux-4.4.1/PF_KTHREAD]]

         }
 
         /*
          * It can become very expensive to allocate transparent hugepages at
          * fault, so use asynchronous memory compaction for THP unless it is
          * khugepaged trying to collapse.
          */
         if (!is_thp_gfp_mask(gfp_mask) || (current->flags & PF_KTHREAD))
                 migration_mode = MIGRATE_SYNC_LIGHT;
 
-
--[[linux-4.4.1/MIGRATE_SYNC_LIGHT]]
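
-enum migrate_mode sets how aggressively compaction may migrate pages. The slowpath starts with MIGRATE_ASYNC and, except for THP faults, escalates to MIGRATE_SYNC_LIGHT for the second and later compaction attempts. For reference (include/linux/migrate_mode.h in 4.4.1; comments are paraphrased):

         enum migrate_mode {
                 MIGRATE_ASYNC,          /* never blocks */
                 MIGRATE_SYNC_LIGHT,     /* may block, but not on writeback */
                 MIGRATE_SYNC,           /* may block on anything */
         };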

         /* Try direct reclaim and then allocating */
         page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
                                                         &did_some_progress);
         if (page)
                 goto got_pg;
 
-
--[[linux-4.4.1/__alloc_pages_direct_reclaim()]]

         /* Do not loop if specifically requested */
         if (gfp_mask & __GFP_NORETRY)
                 goto noretry;
 
-
--[[linux-4.4.1/__GFP_NORETRY]]
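
-__GFP_NORETRY callers accept failure after a single reclaim pass: the slowpath jumps to the noretry label below, makes one last compaction attempt, and never invokes the OOM killer. A hypothetical opportunistic caller:

         /* Try for an order-4 block cheaply; fall back to single pages. */
         struct page *page = alloc_pages(GFP_KERNEL | __GFP_NORETRY, 4);
         if (!page)
                 page = alloc_pages(GFP_KERNEL, 0);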

         /* Keep reclaiming pages as long as there is reasonable progress */
         pages_reclaimed += did_some_progress;
         if ((did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER) ||
             ((gfp_mask & __GFP_REPEAT) && pages_reclaimed < (1 << order))) {
                 /* Wait for some write requests to complete then retry */
                 wait_iff_congested(ac->preferred_zone, BLK_RW_ASYNC, HZ/50);
                 goto retry;
         }
 
-
--[[linux-4.4.1/PAGE_ALLOC_COSTLY_ORDER]]
--[[linux-4.4.1/__GFP_REPEAT]]
--[[linux-4.4.1/wait_iff_congested()]]
--[[linux-4.4.1/BLK_RW_ASYNC]]
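
-The retry heuristic in plain terms: for cheap orders (order <= PAGE_ALLOC_COSTLY_ORDER, i.e. 3), keep retrying for as long as reclaim makes any progress at all; for costlier orders, retry only if __GFP_REPEAT is set, and give up once 2^order pages in total have been reclaimed without satisfying the request. A compilable stand-alone model of the test (illustrative only):

         #include <stdbool.h>
 
         #define PAGE_ALLOC_COSTLY_ORDER 3       /* as in 4.4.1 */
 
         static bool should_retry(unsigned long did_some_progress,
                                  unsigned int order, bool gfp_repeat,
                                  unsigned long pages_reclaimed)
         {
                 /* cheap orders: retry while reclaim is progressing */
                 if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER)
                         return true;
                 /* costly orders: only with __GFP_REPEAT, bounded by 2^order */
                 if (gfp_repeat && pages_reclaimed < (1UL << order))
                         return true;
                 return false;
         }

-For example, an order-4 request with __GFP_REPEAT stops retrying once a total of 16 pages has been reclaimed without the allocation succeeding.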

         /* Reclaim has failed us, start killing things */
         page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
         if (page)
                 goto got_pg;
 
-
--[[linux-4.4.1/__alloc_pages_may_oom()]]

         /* Retry as long as the OOM killer is making progress */
         if (did_some_progress)
                 goto retry;
 
 noretry:
         /*
          * High-order allocations do not necessarily loop after
          * direct reclaim and reclaim/compaction depends on compaction
          * being called after reclaim so call directly if necessary
          */
         page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags,
                                             ac, migration_mode,
                                             &contended_compaction,
                                             &deferred_compaction);
         if (page)
                 goto got_pg;
-
--[[linux-4.4.1/__alloc_pages_direct_compact()]]

 nopage:
         warn_alloc_failed(gfp_mask, order, NULL);
-
--[[linux-4.4.1/warn_alloc_failed()]]

 got_pg:
         return page;
 }


*Comments [#acb650a4]

