*Referenced from [#af300681]
#backlinks

*Description [#wefc9caf]
-Path: more than one, depending on the configuration
--CONFIG_MMU disabled: [[linux-4.4.1/arch/arm/mm/nommu.c]]
--CONFIG_MMU enabled: [[linux-4.4.1/arch/arm/mm/flush.c]]
--[[linux-4.4.1/CONFIG_MMU]]

-FIXME: What is this?
--Description: flushes the dirty kernel D-cache lines for the given page so that the kernel mapping of the page is coherent. With CONFIG_MMU enabled it only acts on VIVT or aliasing VIPT caches and is otherwise a no-op; without an MMU it always flushes (see the flush.c comment below and the usage sketch at the end of this page).

**Arguments [#ve3b618b]
-struct page *page
--Page whose kernel mapping should be made coherent with the data cache.
--[[linux-4.4.1/page]]

**Return value [#x38cfde4]
-None

**References [#q8e3b023]

*Implementation [#e0d8a643]
**CONFIG_MMU disabled: arch/arm/mm/nommu.c [#v90e9ef0]
 void flush_kernel_dcache_page(struct page *page)
 {
         __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
-
--[[linux-4.4.1/__cpuc_flush_dcache_area()]]
--[[linux-4.4.1/page_address()]]
--[[linux-4.4.1/PAGE_SIZE]]

 }
 EXPORT_SYMBOL(flush_kernel_dcache_page);
-
--[[linux-4.4.1/EXPORT_SYMBOL()]]

**CONFIG_MMU enabled: arch/arm/mm/flush.c [#a53a55ad]
 /*
  * Ensure cache coherency for the kernel mapping of this page. We can
  * assume that the page is pinned via kmap.
  *
  * If the page only exists in the page cache and there are no user
  * space mappings, this is a no-op since the page was already marked
  * dirty at creation. Otherwise, we need to flush the dirty kernel
  * cache lines directly.
  */
 void flush_kernel_dcache_page(struct page *page)
 {
         if (cache_is_vivt() || cache_is_vipt_aliasing()) {
                 struct address_space *mapping;
-
--[[linux-4.4.1/cache_is_vivt()]]
--[[linux-4.4.1/cache_is_vipt_aliasing()]]
--[[linux-4.4.1/address_space]]

                 mapping = page_mapping(page);
-
--[[linux-4.4.1/page_mapping()]]

                 if (!mapping || mapping_mapped(mapping)) {
                         void *addr;

                         addr = page_address(page);
-
--[[linux-4.4.1/mapping_mapped()]]
--[[linux-4.4.1/page_address()]]

                         /*
                          * kmap_atomic() doesn't set the page virtual
                          * address for highmem pages, and
                          * kunmap_atomic() takes care of cache
                          * flushing already.
                          */
                         if (!IS_ENABLED(CONFIG_HIGHMEM) || addr)
                                 __cpuc_flush_dcache_area(addr, PAGE_SIZE);
-
--[[linux-4.4.1/IS_ENABLED()]]
--[[linux-4.4.1/CONFIG_HIGHMEM]]
--[[linux-4.4.1/__cpuc_flush_dcache_area()]]

                 }
         }
 }
 EXPORT_SYMBOL(flush_kernel_dcache_page);
-
--[[linux-4.4.1/EXPORT_SYMBOL()]]

*Comments [#e7a8ccc1]
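
*Usage sketch [#usage]
A minimal, hypothetical call site, not taken from the kernel tree: the helper name example_zero_page_range() is made up for illustration. It follows the pattern assumed by the flush.c comment above (the page is pinned via kmap): the kernel dirties a page-cache page through its kernel mapping, then calls flush_kernel_dcache_page() so that on VIVT or aliasing VIPT caches the dirty kernel D-cache lines are written back before a user-space mapping of the page reads the data.

 #include <linux/highmem.h>
 #include <linux/mm.h>
 #include <linux/string.h>

 /* Hypothetical helper: zero a byte range of a page through its kernel
  * mapping, then make the kernel D-cache coherent for that page. */
 static void example_zero_page_range(struct page *page, unsigned int off,
                                     unsigned int len)
 {
         void *kaddr = kmap(page);        /* pin and map the page */

         memset(kaddr + off, 0, len);     /* dirty the kernel mapping */
         flush_kernel_dcache_page(page);  /* write back dirty kernel lines */

         kunmap(page);
 }

The flush is issued before kunmap(), while the page is still pinned, matching the assumption stated in the flush.c comment. As the CONFIG_MMU-enabled implementation shows, on a non-aliasing VIPT cache the call is effectively a no-op.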