*Referenced by [#gebe066b]
#backlinks

*Description [#s0781f90]
-Path: [[linux-4.4.1/mm/huge_memory.c]]

-__split_huge_page_refcount() splits a transparent hugepage into HPAGE_PMD_NR regular pages by distributing the head page's reference counts, mapcounts, page flags and LRU state to the tail pages.
--Called from __split_huge_page() (the split_huge_page() path) after __split_huge_page_splitting() has marked every pmd mapping the hugepage as splitting, so the mapcount cannot change while it is transferred to the tails.


**Arguments [#y8d6f11f]
-struct page *page
--The head page of the hugepage to be split.
--[[linux-4.4.1/page]]
-struct list_head *list
--The list that the split-out tail pages are added to (passed through to lru_add_page_tail()); if NULL, they are placed back on the zone's LRU.
--[[linux-4.4.1/list_head]]


**Return value [#nae9a977]
-void


**References [#j40918bd]


*Implementation [#o1fdf8b4]
 static void __split_huge_page_refcount(struct page *page,
 				       struct list_head *list)
 {
 	int i;
 	struct zone *zone = page_zone(page);
 	struct lruvec *lruvec;
 	int tail_count = 0;
 
-zone is the zone the page belongs to; lruvec is looked up below; tail_count accumulates the references that will be moved off the head page.
--[[linux-4.4.1/zone]]
--[[linux-4.4.1/lruvec]]

 	/* prevent PageLRU to go away from under us, and freeze lru stats */
 	spin_lock_irq(&zone->lru_lock);
 	lruvec = mem_cgroup_page_lruvec(page, zone);
 
-Take zone->lru_lock with interrupts disabled so that PageLRU and the LRU statistics cannot change under us, then look up the memcg-aware LRU vector for the page.
--[[linux-4.4.1/spin_lock_irq()]]
--[[linux-4.4.1/mem_cgroup_page_lruvec()]]

 	compound_lock(page);
 	/* complete memcg works before add pages to LRU */
 	mem_cgroup_split_huge_fixup(page);
 
-Take the compound lock on the head page, and let memcg fix up its per-hugepage accounting before the tail pages appear on the LRU.
--[[linux-4.4.1/compound_lock()]]
--[[linux-4.4.1/mem_cgroup_split_huge_fixup()]]
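
In linux-4.4, compound_lock() takes a bit spinlock on the head page's flags (bit_spin_lock() on PG_compound_lock). A minimal userspace sketch of the bit-spinlock technique using C11 atomics (the names and the bit position are hypothetical, not the kernel's):

 #include <stdatomic.h>
 
 #define LOCK_BIT (1UL << 0)		/* stand-in for PG_compound_lock */
 
 /* Spin until we are the one who flips the bit from 0 to 1. */
 static void bit_spin_lock_model(atomic_ulong *flags)
 {
 	while (atomic_fetch_or(flags, LOCK_BIT) & LOCK_BIT)
 		;			/* someone else holds the bit */
 }
 
 /* Clearing the bit releases the lock. */
 static void bit_spin_unlock_model(atomic_ulong *flags)
 {
 	atomic_fetch_and(flags, ~LOCK_BIT);
 }
 
 int main(void)
 {
 	atomic_ulong flags;
 
 	atomic_init(&flags, 0);
 	bit_spin_lock_model(&flags);
 	/* ... modify the compound page's metadata ... */
 	bit_spin_unlock_model(&flags);
 	return 0;
 }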

 	for (i = HPAGE_PMD_NR - 1; i >= 1; i--) {
 		struct page *page_tail = page + i;
 
 		/* tail_page->_mapcount cannot change */
 		BUG_ON(page_mapcount(page_tail) < 0);
 		tail_count += page_mapcount(page_tail);
 		/* check for overflow */
 		BUG_ON(tail_count < 0);
 		BUG_ON(atomic_read(&page_tail->_count) != 0);
-Walk the tail pages from last to first, checking the invariants: each tail's mapcount is sane, tail_count does not overflow, and each tail's _count is still zero (all references are held by the head while the page is compound).
--[[linux-4.4.1/HPAGE_PMD_NR]]
--[[linux-4.4.1/BUG_ON()]]
--[[linux-4.4.1/page_mapcount()]]
--[[linux-4.4.1/atomic_read()]]

 		/*
 		 * tail_page->_count is zero and not changing from
 		 * under us. But get_page_unless_zero() may be running
 		 * from under us on the tail_page. If we used
 		 * atomic_set() below instead of atomic_add(), we
 		 * would then run atomic_set() concurrently with
 		 * get_page_unless_zero(), and atomic_set() is
 		 * implemented in C not using locked ops. spin_unlock
 		 * on x86 sometime uses locked ops because of PPro
 		 * errata 66, 92, so unless somebody can guarantee
 		 * atomic_set() here would be safe on all archs (and
 		 * not only on x86), it's safer to use atomic_add().
 		 */
 		atomic_add(page_mapcount(page) + page_mapcount(page_tail) + 1,
 			   &page_tail->_count);
 
-Seed the tail page's _count with the head's mapcount, the tail's own mapcount, and one extra pin that is dropped by put_page() after the split completes.
--[[linux-4.4.1/atomic_add()]]
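
The comment above this atomic_add() is about a race with speculative page references: get_page_unless_zero() can run an atomic read-modify-write on tail_page->_count at any time, and a plain atomic_set() store could overwrite (and so lose) that increment, while another atomic RMW cannot. A minimal userspace model of the two sides using C11 atomics (all names are hypothetical):

 #include <stdatomic.h>
 #include <stdio.h>
 
 /* Model of get_page_unless_zero(): take a reference only if the
  * count is non-zero, i.e. atomic_inc_not_zero() as a CAS loop. */
 static int get_page_unless_zero_model(atomic_int *count)
 {
 	int old = atomic_load(count);
 
 	while (old != 0)
 		if (atomic_compare_exchange_weak(count, &old, old + 1))
 			return 1;	/* got a reference */
 	return 0;			/* count was zero */
 }
 
 int main(void)
 {
 	atomic_int count;
 	int got;
 
 	atomic_init(&count, 0);
 	/* The splitter publishes the tail's new refcount with an atomic
 	 * RMW (atomic_add in the kernel source); a plain store could
 	 * race with the CAS loop above and lose its increment. */
 	atomic_fetch_add(&count, 2 + 2 + 1);	/* mapcounts + 1 */
 
 	got = get_page_unless_zero_model(&count);
 	printf("count=%d got=%d\n", atomic_load(&count), got);
 	return 0;
 }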

 		/* after clearing PageTail the gup refcount can be released */
 		smp_mb__after_atomic();
 
 		page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
 		page_tail->flags |= (page->flags &
 				     ((1L << PG_referenced) |
 				      (1L << PG_swapbacked) |
 				      (1L << PG_mlocked) |
 				      (1L << PG_uptodate) |
 				      (1L << PG_active) |
 				      (1L << PG_unevictable)));
 		page_tail->flags |= (1L << PG_dirty);
 
 		clear_compound_head(page_tail);
 
-Make the new _count visible before PageTail is cleared; then reset the prep-checked flags, inherit a chosen subset of flags from the head, set PG_dirty unconditionally, and clear the compound-head link so the tail becomes an ordinary page.
--[[linux-4.4.1/smp_mb__after_atomic()]]
--[[linux-4.4.1/clear_compound_head()]]
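
smp_mb__after_atomic() makes the preceding atomic_add() behave as a full memory barrier on architectures where it is not one already, so any CPU that sees PageTail cleared is also guaranteed to see the tail's new _count. A rough C11 analogue of this publish ordering (all names are hypothetical):

 #include <stdatomic.h>
 #include <stdbool.h>
 
 struct fake_page {			/* hypothetical stand-in for struct page */
 	atomic_int count;
 	atomic_bool is_tail;		/* models the PageTail state */
 };
 
 static void publish_split_tail(struct fake_page *p, int new_refs)
 {
 	atomic_fetch_add_explicit(&p->count, new_refs,
 				  memory_order_relaxed);
 	/* Corresponds to smp_mb__after_atomic(): the refcount store
 	 * above must be visible before the flag change below. */
 	atomic_thread_fence(memory_order_seq_cst);
 	/* Corresponds to clearing PageTail via clear_compound_head(). */
 	atomic_store_explicit(&p->is_tail, false, memory_order_relaxed);
 }
 
 int main(void)
 {
 	struct fake_page p;
 
 	atomic_init(&p.count, 0);
 	atomic_init(&p.is_tail, true);
 	publish_split_tail(&p, 2 + 2 + 1);
 	return 0;
 }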

 		if (page_is_young(page))
 			set_page_young(page_tail);
 		if (page_is_idle(page))
 			set_page_idle(page_tail);
 
-Carry the idle-page-tracking state (young/idle) over from the head page to the tail.
--[[linux-4.4.1/page_is_young()]]
--[[linux-4.4.1/set_page_young()]]
--[[linux-4.4.1/page_is_idle()]]
--[[linux-4.4.1/set_page_idle()]]

 		/*
 		 * __split_huge_page_splitting() already set the
 		 * splitting bit in all pmd that could map this
 		 * hugepage, that will ensure no CPU can alter the
 		 * mapcount on the head page. The mapcount is only
 		 * accounted in the head page and it has to be
 		 * transferred to all tail pages in the below code. So
 		 * for this code to be safe, the split the mapcount
 		 * can't change. But that doesn't mean userland can't
 		 * keep changing and reading the page contents while
 		 * we transfer the mapcount, so the pmd splitting
 		 * status is achieved setting a reserved bit in the
 		 * pmd, not by clearing the present bit.
 		*/
 		page_tail->_mapcount = page->_mapcount;
 
 		BUG_ON(page_tail->mapping);
 		page_tail->mapping = page->mapping;
 
 		page_tail->index = page->index + i;
 		page_cpupid_xchg_last(page_tail, page_cpupid_last(page));
 
-Transfer the head's _mapcount, mapping and a consecutive index to the tail, and copy the last CPU/PID data used by automatic NUMA balancing.
--[[linux-4.4.1/page_cpupid_xchg_last()]]
--[[linux-4.4.1/page_cpupid_last()]]
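
The long comment in the loop above explains how the splitting state is signaled to other CPUs: a spare software bit is set in each pmd while the present bit stays set, so userland keeps reading and writing the page contents while kernel fault/gup paths notice the split in progress and back off. A toy model of that bit layout (the bit positions are illustrative, not the real x86 definitions):

 #include <stdint.h>
 #include <stdio.h>
 
 #define FAKE_PMD_PRESENT	(1ULL << 0)	/* hardware present bit */
 #define FAKE_PMD_SPLITTING	(1ULL << 9)	/* software-available bit */
 
 int main(void)
 {
 	uint64_t pmd = FAKE_PMD_PRESENT | 0x200000ULL;	/* fake 2MiB mapping */
 
 	/* Mark the split: present stays set, so userland accesses keep
 	 * working, but kernel paths test the software bit and back off. */
 	pmd |= FAKE_PMD_SPLITTING;
 
 	printf("present=%d splitting=%d\n",
 	       !!(pmd & FAKE_PMD_PRESENT), !!(pmd & FAKE_PMD_SPLITTING));
 	return 0;
 }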

 		BUG_ON(!PageAnon(page_tail));
 		BUG_ON(!PageUptodate(page_tail));
 		BUG_ON(!PageDirty(page_tail));
 		BUG_ON(!PageSwapBacked(page_tail));
 
 		lru_add_page_tail(page, page_tail, lruvec, list);
-Verify that the now-independent tail looks like a normal anonymous page (anon, uptodate, dirty, swap-backed) and add it to the LRU next to the head page.
--[[linux-4.4.1/PageAnon()]]
--[[linux-4.4.1/PageUptodate()]]
--[[linux-4.4.1/PageDirty()]]
--[[linux-4.4.1/PageSwapBacked()]]
--[[linux-4.4.1/lru_add_page_tail()]]

 	}
 	atomic_sub(tail_count, &page->_count);
 	BUG_ON(atomic_read(&page->_count) <= 0);
 
-Remove the references handed over to the tails from the head's _count; the result must stay positive because the caller holds a pin on the head.
--[[linux-4.4.1/atomic_sub()]]
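
The bookkeeping balances out like this: every tail's _count was seeded with page_mapcount(page) + page_mapcount(page_tail) + 1, tail_count accumulated the page_mapcount(page_tail) terms, and that sum is now removed from the head's _count in a single atomic_sub(). A worked example with hypothetical numbers (the hugepage mapped by two processes, so every page_mapcount() is 2):

 #include <stdio.h>
 
 int main(void)
 {
 	int nr_tails = 512 - 1;		/* HPAGE_PMD_NR - 1 on x86_64 */
 	int map_head = 2, map_tail = 2;	/* hypothetical mapcounts */
 
 	/* Each tail's _count is seeded as in the atomic_add() call:
 	 * head mapcount + own mapcount + 1 temporary pin. */
 	int per_tail = map_head + map_tail + 1;
 
 	/* tail_count accumulated page_mapcount(page_tail) per tail and
 	 * is removed from the head's _count in one atomic_sub(). */
 	int tail_count = nr_tails * map_tail;
 
 	printf("per-tail _count: %d, head _count drops by %d\n",
 	       per_tail, tail_count);
 	return 0;
 }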

 	__mod_zone_page_state(zone, NR_ANON_TRANSPARENT_HUGEPAGES, -1);
 
-One transparent hugepage is gone: decrement the zone's NR_ANON_TRANSPARENT_HUGEPAGES counter.
--[[linux-4.4.1/__mod_zone_page_state()]]

 	ClearPageCompound(page);
 	compound_unlock(page);
 	spin_unlock_irq(&zone->lru_lock);
 
-Clear the compound state of the head page, then release the compound lock and the zone's LRU lock.
--[[linux-4.4.1/ClearPageCompound()]]
--[[linux-4.4.1/compound_unlock()]]
--[[linux-4.4.1/spin_unlock_irq()]]

 	for (i = 1; i < HPAGE_PMD_NR; i++) {
 		struct page *page_tail = page + i;
 		BUG_ON(page_count(page_tail) <= 0);
 		/*
 		 * Tail pages may be freed if there wasn't any mapping
 		 * like if add_to_swap() is running on a lru page that
 		 * had its mapping zapped. And freeing these pages
 		 * requires taking the lru_lock so we do the put_page
 		 * of the tail pages after the split is complete.
 		 */
 		put_page(page_tail);
 	}
 
-Drop the per-tail pin taken earlier; tails whose mapping was already zapped are actually freed here. Freeing takes lru_lock, which is why the put_page() is deferred until after the lock has been released.
--[[linux-4.4.1/put_page()]]
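
Each tail was seeded with one extra reference exactly so that it survives until this loop; put_page() drops that pin, and a tail whose mapping was already zapped hits zero and is freed here, safely after zone->lru_lock was released. A minimal model of the free-on-final-put behaviour (names hypothetical):

 #include <stdatomic.h>
 #include <stdio.h>
 #include <stdlib.h>
 
 struct fake_page {			/* hypothetical stand-in */
 	atomic_int count;
 };
 
 static void put_page_model(struct fake_page *p)
 {
 	/* Free on the final reference drop, as put_page() does. */
 	if (atomic_fetch_sub(&p->count, 1) == 1) {
 		printf("freeing %p\n", (void *)p);
 		free(p);
 	}
 }
 
 int main(void)
 {
 	struct fake_page *tail = malloc(sizeof(*tail));
 
 	if (!tail)
 		return 1;
 	/* A tail whose mapping was zapped keeps only the +1 pin the
 	 * split seeded, so this put is the final one. */
 	atomic_init(&tail->count, 1);
 	put_page_model(tail);
 	return 0;
 }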

 	/*
 	 * Only the head page (now become a regular page) is required
 	 * to be pinned by the caller.
 	 */
 	BUG_ON(page_count(page) <= 0);
-Only the head page (now a regular page) must still be pinned by the caller.
--[[linux-4.4.1/page_count()]]

 }


*Comments [#w1a7dac9]
