*Referenced by [#y828066b]
#backlinks

*Description [#if59a339]
-パス: [[linux-4.4.1/mm/page_alloc.c]]

-Walks the zonelist held in the alloc_context and tries to allocate 2^order contiguous pages from the first zone that passes the cpuset, fairness, dirty-limit and watermark checks.
--Returns the allocated pages on success; if no zone qualifies it returns NULL and the caller falls back to the slow path.


**Arguments [#e4dc89bd]
-gfp_t gfp_mask
--GFP flags for this allocation (GFP_KERNEL, GFP_ATOMIC and friends).
--[[linux-4.4.1/gfp_t]]
-unsigned int order
--Allocation order: the request is for 2^order contiguous pages.
-int alloc_flags
--ALLOC_* flags chosen by the caller (watermark selection, ALLOC_CPUSET, ALLOC_FAIR, ...).
-const struct alloc_context *ac
--Allocation context: zonelist, nodemask, preferred zone, class zone index, migrate type and so on (see the call sketch after this list).
--[[linux-4.4.1/alloc_context]]
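
For orientation, here is a simplified sketch of how the fast-path caller, __alloc_pages_nodemask() in the same file, assembles these arguments. Cpuset retry, __GFP_WRITE handling, OOM paths and several fields are omitted, so treat it as an outline rather than the exact source.

 /* Simplified sketch of the caller, __alloc_pages_nodemask() (same file).
  * Cpuset retry logic, __GFP_WRITE handling and error paths are omitted. */
 struct page *
 __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
 			struct zonelist *zonelist, nodemask_t *nodemask)
 {
 	int alloc_flags = ALLOC_WMARK_LOW|ALLOC_CPUSET|ALLOC_FAIR;
 	struct alloc_context ac = {
 		.high_zoneidx = gfp_zone(gfp_mask),
 		.zonelist = zonelist,
 		.nodemask = nodemask,
 		.migratetype = gfpflags_to_migratetype(gfp_mask),
 	};
 	struct page *page;
 
 	/* ... preferred_zone / classzone_idx lookup omitted ... */
 
 	/* Fast path: try the freelists before waking kswapd or reclaiming */
 	page = get_page_from_freelist(gfp_mask | __GFP_HARDWALL, order,
 				      alloc_flags, &ac);
 	if (unlikely(!page))
 		page = __alloc_pages_slowpath(gfp_mask, order, &ac);
 
 	return page;
 }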


**Return value [#za072cae]
-struct page *
--Pointer to the first struct page of the allocated block, or NULL if no zone in the zonelist could satisfy the request.
--[[linux-4.4.1/page]]


**References [#db247b38]


*Implementation [#ld7c75b4]
 /*
  * get_page_from_freelist goes through the zonelist trying to allocate
  * a page.
  */
 static struct page *
 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
                                                 const struct alloc_context *ac)
 {
         struct zonelist *zonelist = ac->zonelist;
         struct zoneref *z;
         struct page *page = NULL;
         struct zone *zone;
         int nr_fair_skipped = 0;
         bool zonelist_rescan;
 
-
--[[linux-4.4.1/zonelist]]
--[[linux-4.4.1/zoneref]]
--[[linux-4.4.1/zone]]

 zonelist_scan:
         zonelist_rescan = false;
 
         /*
          * Scan zonelist, looking for a zone with enough free.
          * See also __cpuset_node_allowed() comment in kernel/cpuset.c.
          */
         for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->high_zoneidx,
                                                                 ac->nodemask) {
                 unsigned long mark;
 
-
--[[linux-4.4.1/for_each_zone_zonelist_nodemask()]]

                 if (cpusets_enabled() &&
                         (alloc_flags & ALLOC_CPUSET) &&
                         !cpuset_zone_allowed(zone, gfp_mask))
                                 continue;
-
--[[linux-4.4.1/cpusets_enabled()]]
--[[linux-4.4.1/ALLOC_CPUSET]]
--[[linux-4.4.1/cpuset_zone_allowed()]]

                 /*
                  * Distribute pages in proportion to the individual
                  * zone size to ensure fair page aging.  The zone a
                  * page was allocated in should have no effect on the
                  * time the page has in memory before being reclaimed.
                  */
                 if (alloc_flags & ALLOC_FAIR) {
                         if (!zone_local(ac->preferred_zone, zone))
                                 break;
                         if (test_bit(ZONE_FAIR_DEPLETED, &zone->flags)) {
                                 nr_fair_skipped++;
                                 continue;
                         }
                 }
-
--[[linux-4.4.1/ALLOC_FAIR]]
--[[linux-4.4.1/zone_local()]]
--[[linux-4.4.1/test_bit()]]
--[[linux-4.4.1/ZONE_FAIR_DEPLETED]]
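
The fairness pass only spreads allocations across zones of the local node: zone_local() is essentially a node-ID comparison, and ZONE_FAIR_DEPLETED marks a zone whose NR_ALLOC_BATCH budget has been used up (the flag is set from buffered_rmqueue() and cleared again by reset_alloc_batches()). A rough sketch of zone_local() for CONFIG_NUMA builds (the !NUMA stub simply returns true):

 /* Rough sketch of zone_local() (CONFIG_NUMA build): "local" means the zone
  * lives on the same node as the preferred zone. */
 static bool zone_local(struct zone *local_zone, struct zone *zone)
 {
 	return local_zone->node == zone->node;
 }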

                 /*
                  * When allocating a page cache page for writing, we
                  * want to get it from a zone that is within its dirty
                  * limit, such that no single zone holds more than its
                  * proportional share of globally allowed dirty pages.
                  * The dirty limits take into account the zone's
                  * lowmem reserves and high watermark so that kswapd
                  * should be able to balance it without having to
                  * write pages from its LRU list.
                  *
                  * This may look like it could increase pressure on
                  * lower zones by failing allocations in higher zones
                  * before they are full.  But the pages that do spill
                  * over are limited as the lower zones are protected
                  * by this very same mechanism.  It should not become
                  * a practical burden to them.
                  *
                  * XXX: For now, allow allocations to potentially
                  * exceed the per-zone dirty limit in the slowpath
                  * (spread_dirty_pages unset) before going into reclaim,
                  * which is important when on a NUMA setup the allowed
                  * zones are together not big enough to reach the
                  * global limit.  The proper fix for these situations
                  * will require awareness of zones in the
                  * dirty-throttling and the flusher threads.
                  */
                 if (ac->spread_dirty_pages && !zone_dirty_ok(zone))
                         continue;
 
-
--[[linux-4.4.1/zone_dirty_ok()]]
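
zone_dirty_ok() lives in mm/page-writeback.c and compares the zone's dirty, unstable-NFS and writeback page counts against the per-zone dirty limit. Roughly (sketch, details abbreviated):

 /* Rough sketch of zone_dirty_ok() (mm/page-writeback.c): a zone may take more
  * dirtyable page cache only while it is below its share of the dirty limit. */
 bool zone_dirty_ok(struct zone *zone)
 {
 	unsigned long limit = zone_dirty_limit(zone);
 
 	return zone_page_state(zone, NR_FILE_DIRTY) +
 	       zone_page_state(zone, NR_UNSTABLE_NFS) +
 	       zone_page_state(zone, NR_WRITEBACK) <= limit;
 }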

                 mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
                 if (!zone_watermark_ok(zone, order, mark,
                                        ac->classzone_idx, alloc_flags)) {
                         int ret;
 
-
--[[linux-4.4.1/ALLOC_WMARK_MASK]]
--[[linux-4.4.1/zone_watermark_ok()]]
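
The low bits of alloc_flags (ALLOC_WMARK_MASK) select which of the zone's min/low/high watermarks to test. The core of the check in __zone_watermark_ok() boils down to the comparison below; the real code additionally applies ALLOC_HIGH/ALLOC_HARDER discounts, subtracts the high-atomic reserve and walks the per-order free lists, so the helper name and body here are only an illustrative simplification.

 /* Illustrative simplification of __zone_watermark_ok() (hypothetical helper,
  * not in the kernel): the zone passes only if its free pages stay above the
  * selected watermark plus the lowmem reserve of the requesting class zone. */
 static bool watermark_ok_sketch(struct zone *z, unsigned int order,
 				unsigned long mark, int classzone_idx)
 {
 	long free_pages = zone_page_state(z, NR_FREE_PAGES);
 
 	free_pages -= (1 << order) - 1;	/* be pessimistic about high orders */
 	return free_pages > (long)mark + z->lowmem_reserve[classzone_idx];
 }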

                         /* Checked here to keep the fast path fast */
                         BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
                         if (alloc_flags & ALLOC_NO_WATERMARKS)
                                 goto try_this_zone;
 
-
--[[linux-4.4.1/BUILD_BUG_ON()]]
--[[linux-4.4.1/ALLOC_NO_WATERMARKS]]
--[[linux-4.4.1/NR_WMARK]]

                         if (zone_reclaim_mode == 0 ||
                             !zone_allows_reclaim(ac->preferred_zone, zone))
                                 continue;
 
-
--[[linux-4.4.1/zone_allows_reclaim()]]
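
zone_reclaim_mode is the vm.zone_reclaim_mode sysctl. When it is enabled, zone_allows_reclaim() restricts zone reclaim to zones that are NUMA-close to the preferred zone; a rough sketch of the CONFIG_NUMA version (the !NUMA stub returns true):

 /* Rough sketch of zone_allows_reclaim() (CONFIG_NUMA): only reclaim from
  * zones whose node is closer than RECLAIM_DISTANCE. */
 static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
 {
 	return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <
 				RECLAIM_DISTANCE;
 }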

                         ret = zone_reclaim(zone, gfp_mask, order);
                         switch (ret) {
                         case ZONE_RECLAIM_NOSCAN:
                                 /* did not scan */
                                 continue;
                         case ZONE_RECLAIM_FULL:
                                 /* scanned but unreclaimable */
                                 continue;
                         default:
                                 /* did we reclaim enough */
                                 if (zone_watermark_ok(zone, order, mark,
                                                 ac->classzone_idx, alloc_flags))
                                         goto try_this_zone;
 
                                 continue;
                         }
-
--[[linux-4.4.1/zone_reclaim()]]
--[[linux-4.4.1/ZONE_RECLAIM_NOSCAN]]
--[[linux-4.4.1/ZONE_RECLAIM_FULL]]
--[[linux-4.4.1/zone_watermark_ok()]]
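
The ZONE_RECLAIM_* codes come from mm/internal.h. NOSCAN and FULL skip the zone outright; for any other result the watermark is re-checked before the zone is used. For reference, the definitions look roughly like this (the trailing comments are paraphrased here, not quoted):

 /* Return codes of zone_reclaim() (mm/internal.h); comments are paraphrased */
 #define ZONE_RECLAIM_NOSCAN	-2	/* did not scan at all */
 #define ZONE_RECLAIM_FULL	-1	/* scanned, but nothing reclaimable */
 #define ZONE_RECLAIM_SOME	0	/* reclaimed some pages */
 #define ZONE_RECLAIM_SUCCESS	1	/* reclaimed enough pages */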

                 }
 
 try_this_zone:
                 page = buffered_rmqueue(ac->preferred_zone, zone, order,
                                 gfp_mask, alloc_flags, ac->migratetype);
                 if (page) {
                         if (prep_new_page(page, order, gfp_mask, alloc_flags))
                                 goto try_this_zone;
 
                         /*
                          * If this is a high-order atomic allocation then check
                          * if the pageblock should be reserved for the future
                          */
                         if (unlikely(order && (alloc_flags & ALLOC_HARDER)))
                                 reserve_highatomic_pageblock(page, zone, order);
 
                         return page;
                 }
-
--[[linux-4.4.1/buffered_rmqueue()]]
--[[linux-4.4.1/prep_new_page()]]
--[[linux-4.4.1/unlikely()]]
--[[linux-4.4.1/ALLOC_HARDER]]
--[[linux-4.4.1/reserve_highatomic_pageblock()]]

         }
 
         /*
          * The first pass makes sure allocations are spread fairly within the
          * local node.  However, the local node might have free pages left
          * after the fairness batches are exhausted, and remote zones haven't
          * even been considered yet.  Try once more without fairness, and
          * include remote zones now, before entering the slowpath and waking
          * kswapd: prefer spilling to a remote zone over swapping locally.
          */
         if (alloc_flags & ALLOC_FAIR) {
                 alloc_flags &= ~ALLOC_FAIR;
                 if (nr_fair_skipped) {
                         zonelist_rescan = true;
                         reset_alloc_batches(ac->preferred_zone);
                 }
                 if (nr_online_nodes > 1)
                         zonelist_rescan = true;
         }
 
-
--[[linux-4.4.1/reset_alloc_batches()]]
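
When the fair pass skipped locally depleted zones, the batches are refilled and the whole zonelist is rescanned without ALLOC_FAIR. A rough sketch of reset_alloc_batches() (same file): it walks the preferred node's zones up to and including the preferred zone, resets NR_ALLOC_BATCH to (high watermark - low watermark) and clears ZONE_FAIR_DEPLETED.

 /* Rough sketch of reset_alloc_batches() (same file): refill NR_ALLOC_BATCH
  * for the preferred node's zones and clear the depleted flag. */
 static void reset_alloc_batches(struct zone *preferred_zone)
 {
 	struct zone *zone = preferred_zone->zone_pgdat->node_zones;
 
 	do {
 		mod_zone_page_state(zone, NR_ALLOC_BATCH,
 			high_wmark_pages(zone) - low_wmark_pages(zone) -
 			atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
 		clear_bit(ZONE_FAIR_DEPLETED, &zone->flags);
 	} while (zone++ != preferred_zone);
 }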

         if (zonelist_rescan)
                 goto zonelist_scan;
 
         return NULL;
 }


*Comments [#b01ed5dd]
