*参照元 [#mfe86fff] #backlinks *説明 [#wc78abef] -パス: [[linux-4.4.1/mm/page_alloc.c]] -指定されたゾーン (zone) からページを割り当てる関数。 --order が 0 の場合は per-CPU のページリスト (pcplists) から割り当て、それ以外の場合はゾーンのロックを取得してバディアロケータ (__rmqueue) から割り当てる。 **引数 [#v6e491d4] -struct zone *preferred_zone -- --[[linux-4.4.1/zone]] -struct zone *zone -- --[[linux-4.4.1/zone]] -unsigned int order -- -gfp_t gfp_flags -- --[[linux-4.4.1/gfp_t]] -int alloc_flags -- -int migratetype -- **返り値 [#r0956db3] -struct page * -- --[[linux-4.4.1/page]] **参考 [#k381a8c9] *実装 [#h66b6139] /* * Allocate a page from the given zone. Use pcplists for order-0 allocations. */ static inline struct page *buffered_rmqueue(struct zone *preferred_zone, struct zone *zone, unsigned int order, gfp_t gfp_flags, int alloc_flags, int migratetype) { unsigned long flags; struct page *page; bool cold = ((gfp_flags & __GFP_COLD) != 0); - --[[linux-4.4.1/__GFP_COLD]] if (likely(order == 0)) { struct per_cpu_pages *pcp; struct list_head *list; - --[[linux-4.4.1/likely()]] --[[linux-4.4.1/per_cpu_pages]] --[[linux-4.4.1/list_head]] local_irq_save(flags); pcp = &this_cpu_ptr(zone->pageset)->pcp; list = &pcp->lists[migratetype]; if (list_empty(list)) { pcp->count += rmqueue_bulk(zone, 0, pcp->batch, list, migratetype, cold); if (unlikely(list_empty(list))) goto failed; } - --[[linux-4.4.1/local_irq_save()]] --[[linux-4.4.1/this_cpu_ptr()]] --[[linux-4.4.1/rmqueue_bulk()]] --[[linux-4.4.1/unlikely()]] --[[linux-4.4.1/list_empty()]] if (cold) page = list_entry(list->prev, struct page, lru); else page = list_entry(list->next, struct page, lru); list_del(&page->lru); pcp->count--; -cold ならリストの終端から、そうでなければリストの先頭からページを取得する。 --[[linux-4.4.1/list_entry()]] --[[linux-4.4.1/list_del()]] } else { if (unlikely(gfp_flags & __GFP_NOFAIL)) { /* * __GFP_NOFAIL is not to be used in new code. * * All __GFP_NOFAIL callers should be fixed so that they * properly detect and handle allocation failures. * * We most definitely don't want callers attempting to * allocate greater than order-1 page units with * __GFP_NOFAIL. 
*/ WARN_ON_ONCE(order > 1); } spin_lock_irqsave(&zone->lock, flags); - --[[linux-4.4.1/__GFP_NOFAIL]] --[[linux-4.4.1/WARN_ON_ONCE()]] --[[linux-4.4.1/spin_lock_irqsave()]] page = NULL; if (alloc_flags & ALLOC_HARDER) { page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); if (page) trace_mm_page_alloc_zone_locked(page, order, migratetype); } - --[[linux-4.4.1/__rmqueue_smallest()]] --[[linux-4.4.1/trace_mm_page_alloc_zone_locked()]] --[[linux-4.4.1/MIGRATE_HIGHATOMIC]] if (!page) page = __rmqueue(zone, order, migratetype, gfp_flags); spin_unlock(&zone->lock); - --[[linux-4.4.1/__rmqueue()]] --[[linux-4.4.1/spin_unlock()]] if (!page) goto failed; __mod_zone_freepage_state(zone, -(1 << order), get_pcppage_migratetype(page)); - --[[linux-4.4.1/__mod_zone_freepage_state()]] --[[linux-4.4.1/get_pcppage_migratetype()]] } __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order)); if (atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 && !test_bit(ZONE_FAIR_DEPLETED, &zone->flags)) set_bit(ZONE_FAIR_DEPLETED, &zone->flags); - --[[linux-4.4.1/__mod_zone_page_state()]] --[[linux-4.4.1/atomic_long_read()]] --[[linux-4.4.1/test_bit()]] --[[linux-4.4.1/set_bit()]] __count_zone_vm_events(PGALLOC, zone, 1 << order); zone_statistics(preferred_zone, zone, gfp_flags); local_irq_restore(flags); - --[[linux-4.4.1/__count_zone_vm_events()]] --[[linux-4.4.1/zone_statistics()]] --[[linux-4.4.1/local_irq_restore()]] VM_BUG_ON_PAGE(bad_range(zone, page), page); return page; - --[[linux-4.4.1/VM_BUG_ON_PAGE()]] --[[linux-4.4.1/bad_range()]] failed: local_irq_restore(flags); return NULL; } *コメント [#r91da1a3]