*Referenced from [#v3a8a1fb]
#backlinks

*Description [#a7fbdb14]
-Path: [[linux-4.4.1/mm/compaction.c]]

-isolate_migratepages_block() isolates all migratable pages within a single pageblock.
--Scans the PFN range [low_pfn, end_pfn), which must lie within one pageblock, and moves every page that can be migrated (LRU pages and movable balloon pages) onto the cc->migratepages list; cc->nr_migratepages is updated accordingly, while cc->migrate_pfn is neither read nor updated.
--Used by memory compaction to collect the source pages for migration.
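
-The following is a simplified sketch (reconstructed for illustration, not verbatim 4.4.1 source) of the usual calling pattern: a caller such as [[linux-4.4.1/isolate_migratepages_range()]] walks the requested range one pageblock at a time, hands each block to isolate_migratepages_block(), treats a return value of 0 as an abort, and stops once COMPACT_CLUSTER_MAX pages have been collected. The helper name below is hypothetical.

 /*
  * Illustrative sketch of a pageblock-by-pageblock caller (hypothetical
  * helper name; checks such as pageblock_pfn_to_page() are omitted).
  */
 static unsigned long
 scan_range_for_migratepages(struct compact_control *cc,
 			    unsigned long start_pfn, unsigned long end_pfn)
 {
 	unsigned long pfn = start_pfn;
 	unsigned long block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
 
 	for (; pfn < end_pfn; pfn = block_end_pfn,
 				block_end_pfn += pageblock_nr_pages) {
 		block_end_pfn = min(block_end_pfn, end_pfn);
 
 		/* Isolate what we can from this pageblock. */
 		pfn = isolate_migratepages_block(cc, pfn, block_end_pfn,
 						 ISOLATE_UNEVICTABLE);
 
 		/* 0 means a fatal signal or contention: give up. */
 		if (!pfn)
 			break;
 
 		/* Enough pages collected for one migration batch. */
 		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
 			break;
 	}
 
 	return pfn;
 }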


**Arguments [#x9175fa9]
-struct compact_control *cc
--Compaction control structure.
--[[linux-4.4.1/compact_control]]
-unsigned long low_pfn
--The first PFN to isolate.
-unsigned long end_pfn
--The one-past-the-last PFN to isolate; must be within the same pageblock as low_pfn.
-isolate_mode_t isolate_mode
--Isolation mode to be used.
--[[linux-4.4.1/isolate_mode_t]]


**Return value [#wddf765a]
-unsigned long
--0 if a fatal signal is pending (or the scan must abort); otherwise the PFN of the first page that was not scanned, which may be less than, equal to, or greater than end_pfn.


**See also [#d55440ec]


*Implementation [#p25b927c]
 /**
  * isolate_migratepages_block() - isolate all migrate-able pages within
  *				  a single pageblock
  * @cc:		Compaction control structure.
  * @low_pfn:	The first PFN to isolate
  * @end_pfn:	The one-past-the-last PFN to isolate, within same pageblock
  * @isolate_mode: Isolation mode to be used.
  *
  * Isolate all pages that can be migrated from the range specified by
  * [low_pfn, end_pfn). The range is expected to be within same pageblock.
  * Returns zero if there is a fatal signal pending, otherwise PFN of the
  * first page that was not scanned (which may be both less, equal to or more
  * than end_pfn).
  *
  * The pages are isolated on cc->migratepages list (not required to be empty),
  * and cc->nr_migratepages is updated accordingly. The cc->migrate_pfn field
  * is neither read nor updated.
  */
 static unsigned long
 isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 			unsigned long end_pfn, isolate_mode_t isolate_mode)
 {
 	struct zone *zone = cc->zone;
 	unsigned long nr_scanned = 0, nr_isolated = 0;
 	struct list_head *migratelist = &cc->migratepages;
 	struct lruvec *lruvec;
 	unsigned long flags = 0;
 	bool locked = false;
 	struct page *page = NULL, *valid_page = NULL;
 	unsigned long start_pfn = low_pfn;
 
-
--[[linux-4.4.1/zone]]
--[[linux-4.4.1/list_head]]
--[[linux-4.4.1/lruvec]]
--[[linux-4.4.1/page]]

 	/*
 	 * Ensure that there are not too many pages isolated from the LRU
 	 * list by either parallel reclaimers or compaction. If there are,
 	 * delay for some time until fewer pages are isolated
 	 */
 	while (unlikely(too_many_isolated(zone))) {
 		/* async migration should just abort */
 		if (cc->mode == MIGRATE_ASYNC)
 			return 0;
 
 		congestion_wait(BLK_RW_ASYNC, HZ/10);
 
 		if (fatal_signal_pending(current))
 			return 0;
 	}
 
-
--[[linux-4.4.1/unlikely()]]
--[[linux-4.4.1/too_many_isolated()]]
--[[linux-4.4.1/congestion_wait()]]
--[[linux-4.4.1/fatal_signal_pending()]]
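
-The too_many_isolated() check above throttles the migration scanner when parallel reclaim or compaction has already isolated a large share of the zone's LRU pages. The sketch below is reconstructed from memory of mm/compaction.c and may differ in detail from the actual 4.4.1 source; the idea is that scanning is delayed once the isolated pages exceed half of the LRU pages.

 /* Sketch of the too_many_isolated() heuristic (reconstructed, may differ). */
 static bool too_many_isolated(struct zone *zone)
 {
 	unsigned long active, inactive, isolated;
 
 	inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
 					zone_page_state(zone, NR_INACTIVE_ANON);
 	active = zone_page_state(zone, NR_ACTIVE_FILE) +
 					zone_page_state(zone, NR_ACTIVE_ANON);
 	isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
 					zone_page_state(zone, NR_ISOLATED_ANON);
 
 	/* Throttle once isolated pages exceed half of the LRU pages. */
 	return isolated > (inactive + active) / 2;
 }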

 	if (compact_should_abort(cc))
 		return 0;
 
-
--[[linux-4.4.1/compact_should_abort()]]

 	/* Time to isolate some pages for migration */
 	for (; low_pfn < end_pfn; low_pfn++) {
 		bool is_lru;
 
 		/*
 		 * Periodically drop the lock (if held) regardless of its
 		 * contention, to give chance to IRQs. Abort async compaction
 		 * if contended.
 		 */
 		if (!(low_pfn % SWAP_CLUSTER_MAX)
 		    && compact_unlock_should_abort(&zone->lru_lock, flags,
 								&locked, cc))
 			break;
 
-
--[[linux-4.4.1/compact_unlock_should_abort()]]
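
-compact_unlock_should_abort() above periodically releases zone->lru_lock (if held) so that IRQs get a chance to run, and decides whether the scan should abort. The sketch below is reconstructed for illustration and may differ from the actual 4.4.1 source.

 /* Sketch of compact_unlock_should_abort() (reconstructed, may differ). */
 static bool compact_unlock_should_abort(spinlock_t *lock,
 		unsigned long flags, bool *locked, struct compact_control *cc)
 {
 	/* Always drop the lock here if we are holding it. */
 	if (*locked) {
 		spin_unlock_irqrestore(lock, flags);
 		*locked = false;
 	}
 
 	/* A fatal signal aborts the scan unconditionally. */
 	if (fatal_signal_pending(current)) {
 		cc->contended = COMPACT_CONTENDED_SCHED;
 		return true;
 	}
 
 	/* Async compaction aborts rather than sleeping when resched is needed. */
 	if (need_resched()) {
 		if (cc->mode == MIGRATE_ASYNC) {
 			cc->contended = COMPACT_CONTENDED_SCHED;
 			return true;
 		}
 		cond_resched();
 	}
 
 	return false;
 }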

 		if (!pfn_valid_within(low_pfn))
 			continue;
 		nr_scanned++;
 
-
--[[linux-4.4.1/pfn_valid_within()]]

 		page = pfn_to_page(low_pfn);
 
 		if (!valid_page)
 			valid_page = page;
 
-
--[[linux-4.4.1/pfn_to_page()]]

 		/*
 		 * Skip if free. We read page order here without zone lock
 		 * which is generally unsafe, but the race window is small and
 		 * the worst thing that can happen is that we skip some
 		 * potential isolation targets.
 		 */
 		if (PageBuddy(page)) {
 			unsigned long freepage_order = page_order_unsafe(page);
 
 			/*
 			 * Without lock, we cannot be sure that what we got is
 			 * a valid page order. Consider only values in the
 			 * valid order range to prevent low_pfn overflow.
 			 */
 			if (freepage_order > 0 && freepage_order < MAX_ORDER)
 				low_pfn += (1UL << freepage_order) - 1;
 			continue;
 		}
 
-
--[[linux-4.4.1/PageBuddy()]]
--[[linux-4.4.1/page_order_unsafe()]]
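
-page_order_unsafe() reads the buddy order without holding the zone lock, so the value may be stale; the scanner therefore only uses it, clamped to the valid range, to skip over a free block. For example, a free order-3 block advances low_pfn by 7 here, and the for-loop increment then lands on the first PFN past the block. The sketch below reflects the helper as defined in mm/internal.h, reconstructed for illustration.

 /* Sketch of page_order_unsafe() from mm/internal.h (reconstructed). */
 static inline unsigned int page_order_unsafe(struct page *page)
 {
 	/*
 	 * Like page_order(), but tolerant of the page being concurrently
 	 * removed from the buddy allocator: READ_ONCE() may return a
 	 * stale or bogus value, which the caller must range-check.
 	 */
 	return READ_ONCE(page_private(page));
 }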

 		/*
 		 * Check may be lockless but that's ok as we recheck later.
 		 * It's possible to migrate LRU pages and balloon pages
 		 * Skip any other type of page
 		 */
 		is_lru = PageLRU(page);
 		if (!is_lru) {
 			if (unlikely(balloon_page_movable(page))) {
 				if (balloon_page_isolate(page)) {
 					/* Successfully isolated */
 					goto isolate_success;
 				}
 			}
 		}
 
-
--[[linux-4.4.1/PageLRU()]]
--[[linux-4.4.1/balloon_page_movable()]]
--[[linux-4.4.1/balloon_page_isolate()]]

 		/*
 		 * Regardless of being on LRU, compound pages such as THP and
 		 * hugetlbfs are not to be compacted. We can potentially save
 		 * a lot of iterations if we skip them at once. The check is
 		 * racy, but we can consider only valid values and the only
 		 * danger is skipping too much.
 		 */
 		if (PageCompound(page)) {
 			unsigned int comp_order = compound_order(page);
 
 			if (likely(comp_order < MAX_ORDER))
 				low_pfn += (1UL << comp_order) - 1;
 
 			continue;
 		}
 
-
--[[linux-4.4.1/PageCompound()]]
--[[linux-4.4.1/compound_order()]]

 		if (!is_lru)
 			continue;
 
 		/*
 		 * Migration will fail if an anonymous page is pinned in memory,
 		 * so avoid taking lru_lock and isolating it unnecessarily in an
 		 * admittedly racy check.
 		 */
 		if (!page_mapping(page) &&
 		    page_count(page) > page_mapcount(page))
 			continue;
 
-
--[[linux-4.4.1/page_mapping()]]
--[[linux-4.4.1/page_count()]]
--[[linux-4.4.1/page_mapcount()]]
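
-The check above relies on the fact that an anonymous page (page_mapping() is NULL) referenced only through page tables has page_count() equal to page_mapcount(); any surplus reference (for example a get_user_pages() pin or direct I/O in flight) would make migration fail later, so such pages are skipped without taking the LRU lock. The helper below is purely hypothetical (not in the kernel source) and only restates that condition.

 /* Hypothetical helper (not in the kernel source), for illustration only. */
 static inline bool anon_page_probably_pinned(struct page *page)
 {
 	/* References beyond the page-table mappings suggest a pin. */
 	return !page_mapping(page) &&
 	       page_count(page) > page_mapcount(page);
 }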

 		/* If we already hold the lock, we can skip some rechecking */
 		if (!locked) {
 			locked = compact_trylock_irqsave(&zone->lru_lock,
 								&flags, cc);
 			if (!locked)
 				break;
 
 			/* Recheck PageLRU and PageCompound under lock */
 			if (!PageLRU(page))
 				continue;
 
 			/*
 			 * Page become compound since the non-locked check,
 			 * and it's on LRU. It can only be a THP so the order
 			 * is safe to read and it's 0 for tail pages.
 			 */
 			if (unlikely(PageCompound(page))) {
 				low_pfn += (1UL << compound_order(page)) - 1;
 				continue;
 			}
 		}
 
-
--[[linux-4.4.1/compact_trylock_irqsave()]]
--[[linux-4.4.1/PageLRU()]]
--[[linux-4.4.1/PageCompound()]]
--[[linux-4.4.1/compound_order()]]
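
-compact_trylock_irqsave() takes zone->lru_lock, but for MIGRATE_ASYNC compaction it only try-locks and records contention instead of spinning. The sketch below is reconstructed for illustration and may differ from the actual 4.4.1 source.

 /* Sketch of compact_trylock_irqsave() (reconstructed, may differ). */
 static bool compact_trylock_irqsave(spinlock_t *lock, unsigned long *flags,
 						struct compact_control *cc)
 {
 	if (cc->mode == MIGRATE_ASYNC) {
 		/* Async mode must not spin on a contended lock. */
 		if (!spin_trylock_irqsave(lock, *flags)) {
 			cc->contended = COMPACT_CONTENDED_LOCK;
 			return false;
 		}
 	} else {
 		spin_lock_irqsave(lock, *flags);
 	}
 
 	return true;
 }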

 		lruvec = mem_cgroup_page_lruvec(page, zone);
 
-
--[[linux-4.4.1/mem_cgroup_page_lruvec()]]

 		/* Try isolate the page */
 		if (__isolate_lru_page(page, isolate_mode) != 0)
 			continue;
 
 		VM_BUG_ON_PAGE(PageCompound(page), page);
 
-
--[[linux-4.4.1/__isolate_lru_page()]]
--[[linux-4.4.1/VM_BUG_ON_PAGE()]]
--[[linux-4.4.1/PageCompound()]]

 		/* Successfully isolated */
 		del_page_from_lru_list(page, lruvec, page_lru(page));
 
-
--[[linux-4.4.1/del_page_from_lru_list()]]
--[[linux-4.4.1/page_lru()]]

 isolate_success:
 		list_add(&page->lru, migratelist);
 		cc->nr_migratepages++;
 		nr_isolated++;
 
-
--[[linux-4.4.1/list_add()]]

 		/* Avoid isolating too much */
 		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
 			++low_pfn;
 			break;
 		}
 	}
 
 	/*
 	 * The PageBuddy() check could have potentially brought us outside
 	 * the range to be scanned.
 	 */
 	if (unlikely(low_pfn > end_pfn))
 		low_pfn = end_pfn;
 
 	if (locked)
 		spin_unlock_irqrestore(&zone->lru_lock, flags);
 
-
--[[linux-4.4.1/spin_unlock_irqrestore()]]

 	/*
 	 * Update the pageblock-skip information and cached scanner pfn,
 	 * if the whole pageblock was scanned without isolating any page.
 	 */
 	if (low_pfn == end_pfn)
 		update_pageblock_skip(cc, valid_page, nr_isolated, true);
 
 	trace_mm_compaction_isolate_migratepages(start_pfn, low_pfn,
 						nr_scanned, nr_isolated);
 
 	count_compact_events(COMPACTMIGRATE_SCANNED, nr_scanned);
 	if (nr_isolated)
 		count_compact_events(COMPACTISOLATED, nr_isolated);
 
-
--[[linux-4.4.1/update_pageblock_skip()]]
--[[linux-4.4.1/trace_mm_compaction_isolate_migratepages()]]
--[[linux-4.4.1/count_compact_events()]]

 	return low_pfn;
 }


*Comments [#w5343847]

