*Referenced from [#oafea5f0]
#backlinks

*Description [#b50a9d0c]
-Path: [[linux-4.4.1/mm/migrate.c]]

-Unmaps page and moves its contents to newpage. This is the core of single-page migration: lock both pages, handle writeback according to mode, replace the old page's PTEs with migration entries via try_to_unmap(), copy data and page state with move_to_new_page(), and finally rewrite the migration entries with remove_migration_ptes().
-Called from unmap_and_move(), which migrate_pages() drives in a retry loop.
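
The call path is migrate_pages() -> unmap_and_move() -> __unmap_and_move(). A rough, runnable userspace sketch of the caller's retry policy (not kernel code; fake_unmap_and_move() is a stub standing in for the real function):

 #include <stdio.h>
 
 enum migrate_mode { MIGRATE_ASYNC, MIGRATE_SYNC_LIGHT, MIGRATE_SYNC };
 #define EAGAIN 11
 
 /* Stub for __unmap_and_move(): pretend it succeeds on the 4th try. */
 static int fake_unmap_and_move(int force, enum migrate_mode mode)
 {
 	static int calls;
 	(void)force; (void)mode;
 	return (++calls < 4) ? -EAGAIN : 0;
 }
 
 int main(void)
 {
 	int pass, rc = -EAGAIN;
 
 	/* Like migrate_pages(): up to 10 passes per page, and
 	 * force = (pass > 2), so only later passes may block. */
 	for (pass = 0; pass < 10 && rc == -EAGAIN; pass++)
 		rc = fake_unmap_and_move(pass > 2, MIGRATE_SYNC);
 
 	printf("done after %d passes, rc=%d\n", pass, rc);
 	return 0;
 }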


**Arguments [#m80800a0]
-struct page *page
--The page to be migrated (source).
--[[linux-4.4.1/page]]
-struct page *newpage
--The already-allocated destination page.
--[[linux-4.4.1/page]]
-int force
--Non-zero permits blocking (on the page lock, and on writeback in MIGRATE_SYNC mode). migrate_pages() passes force only from the third retry pass onwards.
-enum migrate_mode mode
--How much the migration may block: MIGRATE_ASYNC, MIGRATE_SYNC_LIGHT, or MIGRATE_SYNC; see the definition quoted just below.
--[[linux-4.4.1/migrate_mode]]
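
For reference, the three modes are defined in include/linux/migrate_mode.h; the definition in linux-4.4.1 is along these lines (comments abridged):

 /* MIGRATE_ASYNC: never block.
  * MIGRATE_SYNC_LIGHT: may block, but not on ->writepage, as the
  * potential stall time is too significant.
  * MIGRATE_SYNC: fully synchronous, may block on anything. */
 enum migrate_mode {
 	MIGRATE_ASYNC,
 	MIGRATE_SYNC_LIGHT,
 	MIGRATE_SYNC,
 };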


**Return value [#hdf102b3]
-int
--MIGRATEPAGE_SUCCESS on success; -EAGAIN if this page should be retried on a later pass; -EBUSY if the page is under writeback and mode is not MIGRATE_SYNC; otherwise the value returned by balloon_page_migrate() or move_to_new_page().


**See also [#n757ff62]


*Implementation [#s2357c7c]
 static int __unmap_and_move(struct page *page, struct page *newpage,
 				int force, enum migrate_mode mode)
 {
 	int rc = -EAGAIN;
 	int page_was_mapped = 0;
 	struct anon_vma *anon_vma = NULL;
 
-rc is initialised to -EAGAIN so that every early exit asks the caller to retry; anon_vma will be pinned below only for anonymous pages.
--[[linux-4.4.1/anon_vma]]

 	if (!trylock_page(page)) {
 		if (!force || mode == MIGRATE_ASYNC)
 			goto out;
 
-trylock_page() attempts to take the page lock without sleeping; if that fails and blocking is not allowed (no force, or asynchronous mode), give up and let the caller retry.
--[[linux-4.4.1/trylock_page()]]

 		/*
 		 * It's not safe for direct compaction to call lock_page.
 		 * For example, during page readahead pages are added locked
 		 * to the LRU. Later, when the IO completes the pages are
 		 * marked uptodate and unlocked. However, the queueing
 		 * could be merging multiple pages for one bio (e.g.
 		 * mpage_readpages). If an allocation happens for the
 		 * second or third page, the process can end up locking
 		 * the same page twice and deadlocking. Rather than
 		 * trying to be clever about what pages can be locked,
 		 * avoid the use of lock_page for direct compaction
 		 * altogether.
 		 */
 		if (current->flags & PF_MEMALLOC)
 			goto out;
 
 		lock_page(page);
-lock_page() sleeps until the page lock is acquired; direct compaction (PF_MEMALLOC) never reaches this point, for the deadlock reasons given in the comment above. A userspace analogue of this whole pattern follows below.
--[[linux-4.4.1/lock_page()]]
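
A minimal userspace analogue of this take-it-without-blocking-first pattern, using a pthread mutex in place of the page lock (a sketch, not kernel code):

 #include <pthread.h>
 #include <stdio.h>
 
 enum migrate_mode { MIGRATE_ASYNC, MIGRATE_SYNC_LIGHT, MIGRATE_SYNC };
 
 /* Try the lock without sleeping; fall back to a blocking lock only
  * when the caller allows it (force) and the mode is not async. */
 static int lock_or_bail(pthread_mutex_t *lock, int force,
 			enum migrate_mode mode)
 {
 	if (pthread_mutex_trylock(lock) == 0)
 		return 0;		/* got it without blocking */
 	if (!force || mode == MIGRATE_ASYNC)
 		return -1;		/* bail out; retry on a later pass */
 	pthread_mutex_lock(lock);	/* the lock_page() path */
 	return 0;
 }
 
 int main(void)
 {
 	pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
 	printf("rc=%d\n", lock_or_bail(&m, 1, MIGRATE_SYNC));
 	pthread_mutex_unlock(&m);
 	return 0;
 }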

 	}
 
 	if (PageWriteback(page)) {
 		/*
 		 * Only in the case of a full synchronous migration is it
 		 * necessary to wait for PageWriteback. In the async case,
 		 * the retry loop is too short and in the sync-light case,
 		 * the overhead of stalling is too much
 		 */
 		if (mode != MIGRATE_SYNC) {
 			rc = -EBUSY;
 			goto out_unlock;
 		}
 		if (!force)
 			goto out_unlock;
 		wait_on_page_writeback(page);
-A page under writeback is waited for only in MIGRATE_SYNC mode with force set; in the async and sync-light modes the stall would cost more than failing this page.
--[[linux-4.4.1/PageWriteback()]]
--[[linux-4.4.1/wait_on_page_writeback()]]
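
The writeback decision above, condensed into a runnable truth table (userspace sketch, not kernel code):

 #include <stdio.h>
 
 enum migrate_mode { MIGRATE_ASYNC, MIGRATE_SYNC_LIGHT, MIGRATE_SYNC };
 
 /* What the code above does when PageWriteback() is true. */
 static const char *writeback_action(enum migrate_mode mode, int force)
 {
 	if (mode != MIGRATE_SYNC)
 		return "-EBUSY: stalling costs more than failing";
 	if (!force)
 		return "-EAGAIN: give up, retry on a later pass";
 	return "wait_on_page_writeback(): sleep until I/O completes";
 }
 
 int main(void)
 {
 	printf("async       : %s\n", writeback_action(MIGRATE_ASYNC, 1));
 	printf("sync, !force: %s\n", writeback_action(MIGRATE_SYNC, 0));
 	printf("sync,  force: %s\n", writeback_action(MIGRATE_SYNC, 1));
 	return 0;
 }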

 	}
 
 	/*
 	 * By try_to_unmap(), page->mapcount goes down to 0 here. In this case,
 	 * we cannot notice that anon_vma is freed while we migrates a page.
 	 * This get_anon_vma() delays freeing anon_vma pointer until the end
 	 * of migration. File cache pages are no problem because of page_lock()
 	 * File Caches may use write_page() or lock_page() in migration, then,
 	 * just care Anon page here.
 	 *
 	 * Only page_get_anon_vma() understands the subtleties of
 	 * getting a hold on an anon_vma from outside one of its mms.
 	 * But if we cannot get anon_vma, then we won't need it anyway,
 	 * because that implies that the anon page is no longer mapped
 	 * (and cannot be remapped so long as we hold the page lock).
 	 */
 	if (PageAnon(page) && !PageKsm(page))
 		anon_vma = page_get_anon_vma(page);
 
-For an anonymous, non-KSM page, take a reference on its anon_vma so that it cannot be freed while the page is temporarily unmapped; the reference is dropped at out_unlock (a sketch of this pinning pattern follows below).
--[[linux-4.4.1/PageAnon()]]
--[[linux-4.4.1/PageKsm()]]
--[[linux-4.4.1/page_get_anon_vma()]]
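
A userspace analogue of the pinning pattern (a sketch, not kernel code): take an extra reference before the page is unmapped so the object cannot disappear mid-migration, then drop it at the end, as put_anon_vma() does at out_unlock:

 #include <stdatomic.h>
 #include <stdio.h>
 #include <stdlib.h>
 
 struct obj {
 	atomic_int refcount;
 };
 
 static struct obj *obj_get(struct obj *o)
 {
 	atomic_fetch_add(&o->refcount, 1);
 	return o;
 }
 
 static void obj_put(struct obj *o)
 {
 	if (atomic_fetch_sub(&o->refcount, 1) == 1)
 		free(o);	/* last reference frees the object */
 }
 
 int main(void)
 {
 	struct obj *o = malloc(sizeof(*o));
 	if (!o)
 		return 1;
 	atomic_init(&o->refcount, 1);
 
 	struct obj *pinned = obj_get(o);	/* pin across "migration" */
 	obj_put(o);		/* original owner drops its reference */
 	printf("still valid: refcount=%d\n",
 	       atomic_load(&pinned->refcount));
 	obj_put(pinned);	/* end of migration: object freed here */
 	return 0;
 }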

 	/*
 	 * Block others from accessing the new page when we get around to
 	 * establishing additional references. We are usually the only one
 	 * holding a reference to newpage at this point. We used to have a BUG
 	 * here if trylock_page(newpage) fails, but would like to allow for
 	 * cases where there might be a race with the previous use of newpage.
 	 * This is much like races on refcount of oldpage: just don't BUG().
 	 */
 	if (unlikely(!trylock_page(newpage)))
 		goto out_unlock;
 
 	if (unlikely(isolated_balloon_page(page))) {
 		/*
 		 * A ballooned page does not need any special attention from
 		 * physical to virtual reverse mapping procedures.
 		 * Skip any attempt to unmap PTEs or to remap swap cache,
 		 * in order to avoid burning cycles at rmap level, and perform
 		 * the page migration right away (proteced by page lock).
 		 */
 		rc = balloon_page_migrate(newpage, page, mode);
 		goto out_unlock_both;
 	}
 
-An isolated balloon page has no rmap to fix up, so it is handed straight to balloon_page_migrate() under the page locks.
--[[linux-4.4.1/isolated_balloon_page()]]
--[[linux-4.4.1/balloon_page_migrate()]]

 	/*
 	 * Corner case handling:
 	 * 1. When a new swap-cache page is read into, it is added to the LRU
 	 * and treated as swapcache but it has no rmap yet.
 	 * Calling try_to_unmap() against a page->mapping==NULL page will
 	 * trigger a BUG.  So handle it here.
 	 * 2. An orphaned page (see truncate_complete_page) might have
 	 * fs-private metadata. The page can be picked up due to memory
 	 * offlining.  Everywhere else except page reclaim, the page is
 	 * invisible to the vm, so the page can not be migrated.  So try to
 	 * free the metadata, so the page can be freed.
 	 */
 	if (!page->mapping) {
 		VM_BUG_ON_PAGE(PageAnon(page), page);
 		if (page_has_private(page)) {
 			try_to_free_buffers(page);
 			goto out_unlock_both;
 		}
-A page whose mapping is gone cannot be migrated; if it still carries fs-private metadata (buffers), free that so the page itself can be freed. Such a page must never be anonymous, hence the VM_BUG_ON_PAGE().
--[[linux-4.4.1/VM_BUG_ON_PAGE()]]
--[[linux-4.4.1/PageAnon()]]
--[[linux-4.4.1/page_has_private()]]
--[[linux-4.4.1/try_to_free_buffers()]]

 	} else if (page_mapped(page)) {
 		/* Establish migration ptes */
 		VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma,
 				page);
 		try_to_unmap(page,
 			TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
 		page_was_mapped = 1;
-try_to_unmap() replaces every PTE that maps the page with a migration entry; a thread faulting on such an entry sleeps until the migration finishes and the entry has been rewritten.
--[[linux-4.4.1/VM_BUG_ON_PAGE()]]
--[[linux-4.4.1/PageAnon()]]
--[[linux-4.4.1/PageKsm()]]
--[[linux-4.4.1/try_to_unmap()]]

 	}
 
 	if (!page_mapped(page))
 		rc = move_to_new_page(newpage, page, mode);
 
-Once no mapping remains, move_to_new_page() copies the data and page state over to newpage, returning MIGRATEPAGE_SUCCESS on success.
--[[linux-4.4.1/page_mapped()]]
--[[linux-4.4.1/move_to_new_page()]]

 	if (page_was_mapped)
 		remove_migration_ptes(page,
 			rc == MIGRATEPAGE_SUCCESS ? newpage : page);
 
-Rewrite the migration entries installed above: to newpage on success, back to the old page on failure (see the sketch below).
--[[linux-4.4.1/remove_migration_ptes()]]
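
The whole unmap/move/restore sequence, condensed into a runnable state machine (a sketch, not kernel code):

 #include <stdio.h>
 
 #define MIGRATEPAGE_SUCCESS 0
 
 enum pte_state { PTE_MAPS_OLD, PTE_MIGRATION_ENTRY, PTE_MAPS_NEW };
 
 int main(void)
 {
 	enum pte_state pte = PTE_MAPS_OLD;
 	int rc;
 
 	/* try_to_unmap(): PTEs become migration entries, so faulting
 	 * threads wait instead of touching the page mid-copy. */
 	pte = PTE_MIGRATION_ENTRY;
 
 	/* move_to_new_page(): copy data + state; pretend it worked. */
 	rc = MIGRATEPAGE_SUCCESS;
 
 	/* remove_migration_ptes(): newpage on success, old page on
 	 * failure, exactly the ternary in the code above. */
 	pte = (rc == MIGRATEPAGE_SUCCESS) ? PTE_MAPS_NEW : PTE_MAPS_OLD;
 
 	printf("final pte state=%d rc=%d\n", pte, rc);
 	return 0;
 }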

 out_unlock_both:
 	unlock_page(newpage);
-Release newpage's lock.
--[[linux-4.4.1/unlock_page()]]

 out_unlock:
 	/* Drop an anon_vma reference if we took one */
 	if (anon_vma)
 		put_anon_vma(anon_vma);
 	unlock_page(page);
-Drop the anon_vma reference taken by page_get_anon_vma() above, then release the old page's lock.
--[[linux-4.4.1/put_anon_vma()]]

 out:
 	return rc;
 }


*Comments [#v18242a2]

