author | Ingo Molnar <mingo@kernel.org> | 2019-12-30 08:10:51 +0100
---|---|---
committer | Ingo Molnar <mingo@kernel.org> | 2019-12-30 08:10:51 +0100
commit | 28336be568bb473d16ba80db0801276fb4f1bbe5 (patch) |
tree | cf2d7a56e6c3ea08139d8d9a5a58b296bd172136 /mm/memory_hotplug.c |
parent | 5cbaefe9743bf14c9d3106db0cc19f8cb0a3ca22 (diff) |
parent | fd6988496e79a6a4bdb514a4655d2920209eb85d (diff) |
download | linux-28336be568bb473d16ba80db0801276fb4f1bbe5.tar.gz |
Merge tag 'v5.5-rc4' into locking/kcsan, to resolve conflicts
Conflicts:
	init/main.c
	lib/Kconfig.debug
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'mm/memory_hotplug.c')
-rw-r--r-- | mm/memory_hotplug.c | 102 |
1 file changed, 69 insertions, 33 deletions
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 3b62a9ff8ea0..55ac23ef11c1 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -49,8 +49,6 @@
  * and restore_online_page_callback() for generic callback restore.
  */
 
-static void generic_online_page(struct page *page, unsigned int order);
-
 static online_page_callback_t online_page_callback = generic_online_page;
 static DEFINE_MUTEX(online_page_callback_lock);
 
@@ -278,6 +276,22 @@ static int check_pfn_span(unsigned long pfn, unsigned long nr_pages,
 	return 0;
 }
 
+static int check_hotplug_memory_addressable(unsigned long pfn,
+					    unsigned long nr_pages)
+{
+	const u64 max_addr = PFN_PHYS(pfn + nr_pages) - 1;
+
+	if (max_addr >> MAX_PHYSMEM_BITS) {
+		const u64 max_allowed = (1ull << (MAX_PHYSMEM_BITS + 1)) - 1;
+		WARN(1,
+		     "Hotplugged memory exceeds maximum addressable address, range=%#llx-%#llx, maximum=%#llx\n",
+		     (u64)PFN_PHYS(pfn), max_addr, max_allowed);
+		return -E2BIG;
+	}
+
+	return 0;
+}
+
 /*
  * Reasonably generic function for adding memory. It is
  * expected that archs that support memory hotplug will
@@ -291,6 +305,10 @@ int __ref __add_pages(int nid, unsigned long pfn, unsigned long nr_pages,
 	unsigned long nr, start_sec, end_sec;
 	struct vmem_altmap *altmap = restrictions->altmap;
 
+	err = check_hotplug_memory_addressable(pfn, nr_pages);
+	if (err)
+		return err;
+
 	if (altmap) {
 		/*
 		 * Validate altmap is within bounds of the total request
@@ -331,7 +349,7 @@ static unsigned long find_smallest_section_pfn(int nid, struct zone *zone,
 				     unsigned long end_pfn)
 {
 	for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SUBSECTION) {
-		if (unlikely(!pfn_valid(start_pfn)))
+		if (unlikely(!pfn_to_online_page(start_pfn)))
 			continue;
 
 		if (unlikely(pfn_to_nid(start_pfn) != nid))
@@ -356,7 +374,7 @@ static unsigned long find_biggest_section_pfn(int nid, struct zone *zone,
 	/* pfn is the end pfn of a memory section. */
 	pfn = end_pfn - 1;
 	for (; pfn >= start_pfn; pfn -= PAGES_PER_SUBSECTION) {
-		if (unlikely(!pfn_valid(pfn)))
+		if (unlikely(!pfn_to_online_page(pfn)))
 			continue;
 
 		if (unlikely(pfn_to_nid(pfn) != nid))
@@ -415,7 +433,7 @@ static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
 	 */
 	pfn = zone_start_pfn;
 	for (; pfn < zone_end_pfn; pfn += PAGES_PER_SUBSECTION) {
-		if (unlikely(!pfn_valid(pfn)))
+		if (unlikely(!pfn_to_online_page(pfn)))
 			continue;
 
 		if (page_zone(pfn_to_page(pfn)) != zone)
@@ -471,6 +489,16 @@ static void __remove_zone(struct zone *zone, unsigned long start_pfn,
 	struct pglist_data *pgdat = zone->zone_pgdat;
 	unsigned long flags;
 
+#ifdef CONFIG_ZONE_DEVICE
+	/*
+	 * Zone shrinking code cannot properly deal with ZONE_DEVICE. So
+	 * we will not try to shrink the zones - which is okay as
+	 * set_zone_contiguous() cannot deal with ZONE_DEVICE either way.
+	 */
+	if (zone_idx(zone) == ZONE_DEVICE)
+		return;
+#endif
+
 	pgdat_resize_lock(zone->zone_pgdat, &flags);
 	shrink_zone_span(zone, start_pfn, start_pfn + nr_pages);
 	update_pgdat_span(pgdat);
@@ -570,24 +598,7 @@ int restore_online_page_callback(online_page_callback_t callback)
 }
 EXPORT_SYMBOL_GPL(restore_online_page_callback);
 
-void __online_page_set_limits(struct page *page)
-{
-}
-EXPORT_SYMBOL_GPL(__online_page_set_limits);
-
-void __online_page_increment_counters(struct page *page)
-{
-	adjust_managed_page_count(page, 1);
-}
-EXPORT_SYMBOL_GPL(__online_page_increment_counters);
-
-void __online_page_free(struct page *page)
-{
-	__free_reserved_page(page);
-}
-EXPORT_SYMBOL_GPL(__online_page_free);
-
-static void generic_online_page(struct page *page, unsigned int order)
+void generic_online_page(struct page *page, unsigned int order)
 {
 	kernel_map_pages(page, 1 << order, 1);
 	__free_pages_core(page, order);
@@ -597,6 +608,7 @@ static void generic_online_page(struct page *page, unsigned int order)
 		totalhigh_pages_add(1UL << order);
 #endif
 }
+EXPORT_SYMBOL_GPL(generic_online_page);
 
 static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
 			void *arg)
@@ -1170,7 +1182,8 @@ static bool is_pageblock_removable_nolock(unsigned long pfn)
 	if (!zone_spans_pfn(zone, pfn))
 		return false;
 
-	return !has_unmovable_pages(zone, page, 0, MIGRATE_MOVABLE, SKIP_HWPOISON);
+	return !has_unmovable_pages(zone, page, 0, MIGRATE_MOVABLE,
+				    MEMORY_OFFLINE);
 }
 
 /* Checks if this range of memory is likely to be hot-removable. */
@@ -1367,9 +1380,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
 	return ret;
 }
 
-/*
- * remove from free_area[] and mark all as Reserved.
- */
+/* Mark all sections offline and remove all free pages from the buddy. */
 static int
 offline_isolated_pages_cb(unsigned long start, unsigned long nr_pages,
 			void *data)
@@ -1387,7 +1398,8 @@ static int
 check_pages_isolated_cb(unsigned long start_pfn, unsigned long nr_pages,
 			void *data)
 {
-	return test_pages_isolated(start_pfn, start_pfn + nr_pages, true);
+	return test_pages_isolated(start_pfn, start_pfn + nr_pages,
+				   MEMORY_OFFLINE);
 }
 
 static int __init cmdline_parse_movable_node(char *p)
@@ -1468,10 +1480,19 @@ static void node_states_clear_node(int node, struct memory_notify *arg)
 		node_clear_state(node, N_MEMORY);
 }
 
+static int count_system_ram_pages_cb(unsigned long start_pfn,
+				     unsigned long nr_pages, void *data)
+{
+	unsigned long *nr_system_ram_pages = data;
+
+	*nr_system_ram_pages += nr_pages;
+	return 0;
+}
+
 static int __ref __offline_pages(unsigned long start_pfn,
 		  unsigned long end_pfn)
 {
-	unsigned long pfn, nr_pages;
+	unsigned long pfn, nr_pages = 0;
 	unsigned long offlined_pages = 0;
 	int ret, node, nr_isolate_pageblock;
 	unsigned long flags;
@@ -1482,6 +1503,22 @@ static int __ref __offline_pages(unsigned long start_pfn,
 
 	mem_hotplug_begin();
 
+	/*
+	 * Don't allow to offline memory blocks that contain holes.
+	 * Consequently, memory blocks with holes can never get onlined
+	 * via the hotplug path - online_pages() - as hotplugged memory has
+	 * no holes. This way, we e.g., don't have to worry about marking
+	 * memory holes PG_reserved, don't need pfn_valid() checks, and can
+	 * avoid using walk_system_ram_range() later.
+	 */
+	walk_system_ram_range(start_pfn, end_pfn - start_pfn, &nr_pages,
+			      count_system_ram_pages_cb);
+	if (nr_pages != end_pfn - start_pfn) {
+		ret = -EINVAL;
+		reason = "memory holes";
+		goto failed_removal;
+	}
+
 	/* This makes hotplug much easier...and readable.
 	   we assume this for now. .*/
 	if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start,
@@ -1493,12 +1530,11 @@ static int __ref __offline_pages(unsigned long start_pfn,
 
 	zone = page_zone(pfn_to_page(valid_start));
 	node = zone_to_nid(zone);
-	nr_pages = end_pfn - start_pfn;
 
 	/* set above range as isolated */
 	ret = start_isolate_page_range(start_pfn, end_pfn,
 				       MIGRATE_MOVABLE,
-				       SKIP_HWPOISON | REPORT_FAILURE);
+				       MEMORY_OFFLINE | REPORT_FAILURE);
 	if (ret < 0) {
 		reason = "failure to isolate range";
 		goto failed_removal;
@@ -1740,13 +1776,13 @@ static int __ref try_remove_memory(int nid, u64 start, u64 size)
 
 	/* remove memmap entry */
 	firmware_map_remove(start, start + size, "System RAM");
-	memblock_free(start, size);
-	memblock_remove(start, size);
 
 	/* remove memory block devices before removing memory */
 	remove_memory_block_devices(start, size);
 
 	arch_remove_memory(nid, start, size, NULL);
+	memblock_free(start, size);
+	memblock_remove(start, size);
 	__release_memory_resource(start, size);
 
 	try_offline_node(nid);
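
One hunk above turns generic_online_page() from a static helper into an exported symbol. That lets memory providers (balloon-style drivers) keep their own online_page callback while still routing pages into the page allocator the generic way. The sketch below is illustrative only and not part of this merge: set_online_page_callback(), restore_online_page_callback() and generic_online_page() are the real mm/memory_hotplug.c interfaces, while my_defer_page(), my_online_page(), my_register() and my_unregister() are hypothetical driver names.

```c
/*
 * Illustrative sketch, not part of this commit: a driver-style
 * online_page callback built on the now-exported generic_online_page().
 */
#include <linux/memory_hotplug.h>
#include <linux/mm.h>

static bool my_defer_page(struct page *page, unsigned int order)
{
	/* Driver-specific policy: return true to hold the range back. */
	return false;
}

static void my_online_page(struct page *page, unsigned int order)
{
	/* Hand anything we do not defer straight to the page allocator. */
	if (!my_defer_page(page, order))
		generic_online_page(page, order);
}

static int my_register(void)
{
	/* Fails with an error if another callback is already registered. */
	return set_online_page_callback(&my_online_page);
}

static void my_unregister(void)
{
	restore_online_page_callback(&my_online_page);
}
```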
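The new check_hotplug_memory_addressable() rejects any hotplug range whose last byte lies above what MAX_PHYSMEM_BITS can address, before sections are created. The standalone sketch below mirrors that arithmetic in user space; MAX_PHYSMEM_BITS = 46 and 4 KiB pages are assumptions for illustration, whereas the kernel takes both from the architecture headers.

```c
/* Standalone illustration of the addressability check added above. */
#include <stdio.h>
#include <stdint.h>

#define MAX_PHYSMEM_BITS 46	/* assumption: x86-64, 4-level paging */
#define PAGE_SHIFT	 12	/* assumption: 4 KiB pages */
#define PFN_PHYS(pfn)	 ((uint64_t)(pfn) << PAGE_SHIFT)

static int check_addressable(uint64_t pfn, uint64_t nr_pages)
{
	const uint64_t max_addr = PFN_PHYS(pfn + nr_pages) - 1;

	/* Any bit at or above MAX_PHYSMEM_BITS: the range cannot be mapped. */
	return (max_addr >> MAX_PHYSMEM_BITS) ? -1 : 0;
}

int main(void)
{
	const uint64_t gib_of_pages = 1ULL << (30 - PAGE_SHIFT);
	const uint64_t limit_pfn = 1ULL << (MAX_PHYSMEM_BITS - PAGE_SHIFT);

	/* 1 GiB ending exactly at the 2^46 boundary is still addressable ... */
	printf("%d\n", check_addressable(limit_pfn - gib_of_pages, gib_of_pages));
	/* ... the same block starting at the boundary is rejected (-E2BIG in the kernel). */
	printf("%d\n", check_addressable(limit_pfn, gib_of_pages));
	return 0;
}
```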