*Referenced from [#g507626a]
#backlinks

*Description [#f7dde913]
-Path: [[linux-4.4.1/mm/migrate.c]]

-Replaces page with newpage in the page cache (the radix tree of mapping) as one step of page migration. A sketch of a typical caller is shown just below.
--After confirming that the reference count of page matches the expected value, the references are frozen, the index / mapping / swap-backed state is copied to newpage, the radix tree slot is switched to newpage, and finally the per-zone statistics (NR_FILE_PAGES, NR_SHMEM, NR_FILE_DIRTY) are adjusted if the page moved to a different zone.
--For an anonymous page without a mapping, only the state is copied to newpage and the function returns immediately.
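
For orientation, the typical caller migrate_page() (defined in the same file) uses this function roughly as follows. This is a paraphrased sketch, not the verbatim kernel source; the function name example_migrate_page is hypothetical.

 /*
  * Rough sketch of the typical caller (cf. migrate_page() in
  * linux-4.4.1/mm/migrate.c): move the mapping first, then copy the
  * page contents only if the move succeeded.
  */
 static int example_migrate_page(struct address_space *mapping,
 		struct page *newpage, struct page *page,
 		enum migrate_mode mode)
 {
 	int rc;
 
 	/* Pages without buffers pass head = NULL and extra_count = 0. */
 	rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
 	if (rc != MIGRATEPAGE_SUCCESS)
 		return rc;	/* e.g. -EAGAIN: the caller retries later */
 
 	/* The mapping has moved; now transfer contents and page flags. */
 	migrate_page_copy(newpage, page);
 	return MIGRATEPAGE_SUCCESS;
 }

The point to note is the ordering: the page contents are copied only after migrate_page_move_mapping() has successfully moved the page cache entry over to newpage.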


**Arguments [#ybcb34f3]
-struct address_space *mapping
--Address space (page cache) that page belongs to; NULL for an anonymous page without a mapping.
--[[linux-4.4.1/address_space]]
-struct page *newpage
--Destination page that takes over the mapping from page.
--[[linux-4.4.1/page]]
-struct page *page
--Page to be migrated.
--[[linux-4.4.1/page]]
-struct buffer_head *head
--Buffer list of the page (locked with trylock during async migration); NULL if the page has no buffers.
--[[linux-4.4.1/buffer_head]]
-enum migrate_mode mode
--Migration mode (asynchronous or synchronous).
--[[linux-4.4.1/migrate_mode]]
-int extra_count
--Extra references added to the expected reference count (expected_count = 1 + extra_count).


**Return value [#i762c90b]
-int
--MIGRATEPAGE_SUCCESS if the mapping was moved to newpage. -EAGAIN if the page cannot be migrated right now (the reference count did not match the expected value, or the buffers could not be locked in async mode); the caller treats this as a transient failure and retries the migration later.


**See also [#l6b54a9f]


*Implementation [#r5a5690e]
 /*
  * Replace the page in the mapping.
  *
  * The number of remaining references must be:
  * 1 for anonymous pages without a mapping
  * 2 for pages with a mapping
  * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
  */
 int migrate_page_move_mapping(struct address_space *mapping,
 		struct page *newpage, struct page *page,
 		struct buffer_head *head, enum migrate_mode mode,
 		int extra_count)
 {
 	struct zone *oldzone, *newzone;
 	int dirty;
 	int expected_count = 1 + extra_count;
 	void **pslot;
 
-
--[[linux-4.4.1/zone]]

 	if (!mapping) {
 		/* Anonymous page without mapping */
 		if (page_count(page) != expected_count)
 			return -EAGAIN;
 
-
--[[linux-4.4.1/page_count()]]

 		/* No turning back from here */
 		set_page_memcg(newpage, page_memcg(page));
 		newpage->index = page->index;
 		newpage->mapping = page->mapping;
 		if (PageSwapBacked(page))
 			SetPageSwapBacked(newpage);
 
 		return MIGRATEPAGE_SUCCESS;
 	}
 
-
--[[linux-4.4.1/set_page_memcg()]]
--[[linux-4.4.1/page_memcg()]]
--[[linux-4.4.1/PageSwapBacked()]]
--[[linux-4.4.1/SetPageSwapBacked()]]

 	oldzone = page_zone(page);
 	newzone = page_zone(newpage);
 
-
--[[linux-4.4.1/page_zone()]]

 	spin_lock_irq(&mapping->tree_lock);
 
-
--[[linux-4.4.1/spin_lock_irq()]]

 	pslot = radix_tree_lookup_slot(&mapping->page_tree,
  					page_index(page));
 
-
--[[linux-4.4.1/radix_tree_lookup_slot()]]
--[[linux-4.4.1/page_index()]]

 	expected_count += 1 + page_has_private(page);
 	if (page_count(page) != expected_count ||
 		radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
 		spin_unlock_irq(&mapping->tree_lock);
 		return -EAGAIN;
 	}
 
-
--[[linux-4.4.1/page_has_private()]]
--[[linux-4.4.1/page_count()]]
--[[linux-4.4.1/radix_tree_deref_slot_protected()]]
--[[linux-4.4.1/spin_unlock_irq()]]

 	if (!page_freeze_refs(page, expected_count)) {
 		spin_unlock_irq(&mapping->tree_lock);
 		return -EAGAIN;
 	}
 
-
--[[linux-4.4.1/page_freeze_refs()]]
--[[linux-4.4.1/spin_unlock_irq()]]

 	/*
 	 * In the async migration case of moving a page with buffers, lock the
 	 * buffers using trylock before the mapping is moved. If the mapping
 	 * was moved, we later failed to lock the buffers and could not move
 	 * the mapping back due to an elevated page count, we would have to
 	 * block waiting on other references to be dropped.
 	 */
 	if (mode == MIGRATE_ASYNC && head &&
 			!buffer_migrate_lock_buffers(head, mode)) {
 		page_unfreeze_refs(page, expected_count);
 		spin_unlock_irq(&mapping->tree_lock);
 		return -EAGAIN;
 	}
 
-
--[[linux-4.4.1/buffer_migrate_lock_buffers()]]
--[[linux-4.4.1/page_unfreeze_refs()]]
--[[linux-4.4.1/spin_unlock_irq()]]

 	/*
 	 * Now we know that no one else is looking at the page:
 	 * no turning back from here.
 	 */
 	set_page_memcg(newpage, page_memcg(page));
 	newpage->index = page->index;
 	newpage->mapping = page->mapping;
 	if (PageSwapBacked(page))
 		SetPageSwapBacked(newpage);
 
-
--[[linux-4.4.1/set_page_memcg()]]
--[[linux-4.4.1/page_memcg()]]
--[[linux-4.4.1/PageSwapBacked()]]
--[[linux-4.4.1/SetPageSwapBacked()]]

 	get_page(newpage);	/* add cache reference */
 	if (PageSwapCache(page)) {
 		SetPageSwapCache(newpage);
 		set_page_private(newpage, page_private(page));
 	}
 
-
--[[linux-4.4.1/get_page()]]
--[[linux-4.4.1/PageSwapCache()]]
--[[linux-4.4.1/SetPageSwapCache()]]
--[[linux-4.4.1/set_page_private()]]
--[[linux-4.4.1/page_private()]]

 	/* Move dirty while page refs frozen and newpage not yet exposed */
 	dirty = PageDirty(page);
 	if (dirty) {
 		ClearPageDirty(page);
 		SetPageDirty(newpage);
 	}
 
-
--[[linux-4.4.1/PageDirty()]]
--[[linux-4.4.1/ClearPageDirty()]]
--[[linux-4.4.1/SetPageDirty()]]

 	radix_tree_replace_slot(pslot, newpage);
 
-
--[[linux-4.4.1/radix_tree_replace_slot()]]

 	/*
 	 * Drop cache reference from old page by unfreezing
 	 * to one less reference.
 	 * We know this isn't the last reference.
 	 */
 	page_unfreeze_refs(page, expected_count - 1);
 
-
--[[linux-4.4.1/page_unfreeze_refs()]]

 	spin_unlock(&mapping->tree_lock);
 	/* Leave irq disabled to prevent preemption while updating stats */
 
-
--[[linux-4.4.1/spin_unlock()]]

 	/*
 	 * If moved to a different zone then also account
 	 * the page for that zone. Other VM counters will be
 	 * taken care of when we establish references to the
 	 * new page and drop references to the old page.
 	 *
 	 * Note that anonymous pages are accounted for
 	 * via NR_FILE_PAGES and NR_ANON_PAGES if they
 	 * are mapped to swap space.
 	 */
 	if (newzone != oldzone) {
 		__dec_zone_state(oldzone, NR_FILE_PAGES);
 		__inc_zone_state(newzone, NR_FILE_PAGES);
 		if (PageSwapBacked(page) && !PageSwapCache(page)) {
 			__dec_zone_state(oldzone, NR_SHMEM);
 			__inc_zone_state(newzone, NR_SHMEM);
 		}
 		if (dirty && mapping_cap_account_dirty(mapping)) {
 			__dec_zone_state(oldzone, NR_FILE_DIRTY);
 			__inc_zone_state(newzone, NR_FILE_DIRTY);
 		}
 	}
 	local_irq_enable();
 
-
--[[linux-4.4.1/__dec_zone_state()]]
--[[linux-4.4.1/__inc_zone_state()]]
--[[linux-4.4.1/PageSwapBacked()]]
--[[linux-4.4.1/PageSwapCache()]]
--[[linux-4.4.1/mapping_cap_account_dirty()]]

 	return MIGRATEPAGE_SUCCESS;
 }


*Comments [#l0c3c578]

