author      Jakub Kicinski <kuba@kernel.org>    2021-12-16 16:13:19 -0800
committer   Jakub Kicinski <kuba@kernel.org>    2021-12-16 16:13:19 -0800
commit      7cd2802d7496c1fc76f42dc045b48cc16d11df39 (patch)
tree        41c33f5944bbc686a489801762eb126e07049b81 /mm
parent      0f473bb6ed2d0b8533a079ee133f625f83de5315 (diff)
parent      6441998e2e37131b0a4c310af9156d79d3351c16 (diff)
download    linux-7cd2802d7496c1fc76f42dc045b48cc16d11df39.tar.gz
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
No conflicts.
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Diffstat (limited to 'mm')
-rw-r--r--   mm/Kconfig            |   2
-rw-r--r--   mm/backing-dev.c      |   7
-rw-r--r--   mm/damon/core.c       |  20
-rw-r--r--   mm/damon/dbgfs.c      |   4
-rw-r--r--   mm/damon/vaddr-test.h |  79
-rw-r--r--   mm/damon/vaddr.c      |   1
-rw-r--r--   mm/filemap.c          |   2
-rw-r--r--   mm/hugetlb.c          |   2
-rw-r--r--   mm/memcontrol.c       | 106
-rw-r--r--   mm/slub.c             |  15
10 files changed, 116 insertions(+), 122 deletions(-)
diff --git a/mm/Kconfig b/mm/Kconfig
index 28edafc820ad..356f4f2c779e 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -428,7 +428,7 @@ config THP_SWAP
 # UP and nommu archs use km based percpu allocator
 #
 config NEED_PER_CPU_KM
-        depends on !SMP
+        depends on !SMP || !MMU
         bool
         default y
 
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 1eead4761011..eae96dfe0261 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -945,6 +945,13 @@ void bdi_unregister(struct backing_dev_info *bdi)
         wb_shutdown(&bdi->wb);
         cgwb_bdi_unregister(bdi);
 
+        /*
+         * If this BDI's min ratio has been set, use bdi_set_min_ratio() to
+         * update the global bdi_min_ratio.
+         */
+        if (bdi->min_ratio)
+                bdi_set_min_ratio(bdi, 0);
+
         if (bdi->dev) {
                 bdi_debug_unregister(bdi);
                 device_unregister(bdi->dev);
diff --git a/mm/damon/core.c b/mm/damon/core.c
index c381b3c525d0..e92497895202 100644
--- a/mm/damon/core.c
+++ b/mm/damon/core.c
@@ -282,7 +282,6 @@ int damon_set_targets(struct damon_ctx *ctx,
         for (i = 0; i < nr_ids; i++) {
                 t = damon_new_target(ids[i]);
                 if (!t) {
-                        pr_err("Failed to alloc damon_target\n");
                         /* The caller should do cleanup of the ids itself */
                         damon_for_each_target_safe(t, next, ctx)
                                 damon_destroy_target(t);
@@ -312,16 +311,10 @@ int damon_set_attrs(struct damon_ctx *ctx, unsigned long sample_int,
                 unsigned long aggr_int, unsigned long primitive_upd_int,
                 unsigned long min_nr_reg, unsigned long max_nr_reg)
 {
-        if (min_nr_reg < 3) {
-                pr_err("min_nr_regions (%lu) must be at least 3\n",
-                                min_nr_reg);
+        if (min_nr_reg < 3)
                 return -EINVAL;
-        }
-        if (min_nr_reg > max_nr_reg) {
-                pr_err("invalid nr_regions. min (%lu) > max (%lu)\n",
-                                min_nr_reg, max_nr_reg);
+        if (min_nr_reg > max_nr_reg)
                 return -EINVAL;
-        }
 
         ctx->sample_interval = sample_int;
         ctx->aggr_interval = aggr_int;
@@ -980,10 +973,11 @@ static unsigned long damos_wmark_wait_us(struct damos *scheme)
 
 static void kdamond_usleep(unsigned long usecs)
 {
-        if (usecs > 100 * 1000)
-                schedule_timeout_interruptible(usecs_to_jiffies(usecs));
+        /* See Documentation/timers/timers-howto.rst for the thresholds */
+        if (usecs > 20 * USEC_PER_MSEC)
+                schedule_timeout_idle(usecs_to_jiffies(usecs));
         else
-                usleep_range(usecs, usecs + 1);
+                usleep_idle_range(usecs, usecs + 1);
 }
 
 /* Returns negative error code if it's not activated but should return */
@@ -1038,7 +1032,7 @@ static int kdamond_fn(void *data)
                                 ctx->callback.after_sampling(ctx))
                         done = true;
 
-                usleep_range(ctx->sample_interval, ctx->sample_interval + 1);
+                kdamond_usleep(ctx->sample_interval);
 
                 if (ctx->primitive.check_accesses)
                         max_nr_accesses = ctx->primitive.check_accesses(ctx);
diff --git a/mm/damon/dbgfs.c b/mm/damon/dbgfs.c
index 9b520bb4a3e7..1efac0022e9a 100644
--- a/mm/damon/dbgfs.c
+++ b/mm/damon/dbgfs.c
@@ -210,10 +210,8 @@ static struct damos **str_to_schemes(const char *str, ssize_t len,
                                 &wmarks.low, &parsed);
                 if (ret != 18)
                         break;
-                if (!damos_action_valid(action)) {
-                        pr_err("wrong action %d\n", action);
+                if (!damos_action_valid(action))
                         goto fail;
-                }
 
                 pos += parsed;
                 scheme = damon_new_scheme(min_sz, max_sz, min_nr_a, max_nr_a,
diff --git a/mm/damon/vaddr-test.h b/mm/damon/vaddr-test.h
index ecfd0b2ed222..6a1b9272ea12 100644
--- a/mm/damon/vaddr-test.h
+++ b/mm/damon/vaddr-test.h
@@ -135,7 +135,6 @@ static void damon_do_test_apply_three_regions(struct kunit *test,
                 struct damon_addr_range *three_regions,
                 unsigned long *expected, int nr_expected)
 {
-        struct damon_ctx *ctx = damon_new_ctx();
         struct damon_target *t;
         struct damon_region *r;
         int i;
@@ -145,7 +144,6 @@ static void damon_do_test_apply_three_regions(struct kunit *test,
                 r = damon_new_region(regions[i * 2], regions[i * 2 + 1]);
                 damon_add_region(r, t);
         }
-        damon_add_target(ctx, t);
 
         damon_va_apply_three_regions(t, three_regions);
 
@@ -154,8 +152,6 @@ static void damon_do_test_apply_three_regions(struct kunit *test,
                 KUNIT_EXPECT_EQ(test, r->ar.start, expected[i * 2]);
                 KUNIT_EXPECT_EQ(test, r->ar.end, expected[i * 2 + 1]);
         }
-
-        damon_destroy_ctx(ctx);
 }
 
 /*
@@ -252,60 +248,59 @@ static void damon_test_apply_three_regions4(struct kunit *test)
                         new_three_regions, expected, ARRAY_SIZE(expected));
 }
 
-static void damon_test_split_evenly(struct kunit *test)
+static void damon_test_split_evenly_fail(struct kunit *test,
+                unsigned long start, unsigned long end, unsigned int nr_pieces)
 {
-        struct damon_ctx *c = damon_new_ctx();
-        struct damon_target *t;
-        struct damon_region *r;
-        unsigned long i;
-
-        KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(NULL, NULL, 5),
-                        -EINVAL);
-
-        t = damon_new_target(42);
-        r = damon_new_region(0, 100);
-        KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(t, r, 0), -EINVAL);
+        struct damon_target *t = damon_new_target(42);
+        struct damon_region *r = damon_new_region(start, end);
 
         damon_add_region(r, t);
-        KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(t, r, 10), 0);
-        KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 10u);
+        KUNIT_EXPECT_EQ(test,
+                        damon_va_evenly_split_region(t, r, nr_pieces), -EINVAL);
+        KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1u);
 
-        i = 0;
         damon_for_each_region(r, t) {
-                KUNIT_EXPECT_EQ(test, r->ar.start, i++ * 10);
-                KUNIT_EXPECT_EQ(test, r->ar.end, i * 10);
+                KUNIT_EXPECT_EQ(test, r->ar.start, start);
+                KUNIT_EXPECT_EQ(test, r->ar.end, end);
         }
+
         damon_free_target(t);
+}
+
+static void damon_test_split_evenly_succ(struct kunit *test,
+                unsigned long start, unsigned long end, unsigned int nr_pieces)
+{
+        struct damon_target *t = damon_new_target(42);
+        struct damon_region *r = damon_new_region(start, end);
+        unsigned long expected_width = (end - start) / nr_pieces;
+        unsigned long i = 0;
 
-        t = damon_new_target(42);
-        r = damon_new_region(5, 59);
         damon_add_region(r, t);
-        KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(t, r, 5), 0);
-        KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 5u);
+        KUNIT_EXPECT_EQ(test,
+                        damon_va_evenly_split_region(t, r, nr_pieces), 0);
+        KUNIT_EXPECT_EQ(test, damon_nr_regions(t), nr_pieces);
 
-        i = 0;
         damon_for_each_region(r, t) {
-                if (i == 4)
+                if (i == nr_pieces - 1)
                         break;
-                KUNIT_EXPECT_EQ(test, r->ar.start, 5 + 10 * i++);
-                KUNIT_EXPECT_EQ(test, r->ar.end, 5 + 10 * i);
+                KUNIT_EXPECT_EQ(test,
+                                r->ar.start, start + i++ * expected_width);
+                KUNIT_EXPECT_EQ(test, r->ar.end, start + i * expected_width);
         }
-        KUNIT_EXPECT_EQ(test, r->ar.start, 5 + 10 * i);
-        KUNIT_EXPECT_EQ(test, r->ar.end, 59ul);
+        KUNIT_EXPECT_EQ(test, r->ar.start, start + i * expected_width);
+        KUNIT_EXPECT_EQ(test, r->ar.end, end);
         damon_free_target(t);
+}
 
-        t = damon_new_target(42);
-        r = damon_new_region(5, 6);
-        damon_add_region(r, t);
-        KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(t, r, 2), -EINVAL);
-        KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1u);
+static void damon_test_split_evenly(struct kunit *test)
+{
+        KUNIT_EXPECT_EQ(test, damon_va_evenly_split_region(NULL, NULL, 5),
+                        -EINVAL);
 
-        damon_for_each_region(r, t) {
-                KUNIT_EXPECT_EQ(test, r->ar.start, 5ul);
-                KUNIT_EXPECT_EQ(test, r->ar.end, 6ul);
-        }
-        damon_free_target(t);
-        damon_destroy_ctx(c);
+        damon_test_split_evenly_fail(test, 0, 100, 0);
+        damon_test_split_evenly_succ(test, 0, 100, 10);
+        damon_test_split_evenly_succ(test, 5, 59, 5);
+        damon_test_split_evenly_fail(test, 5, 6, 2);
 }
 
 static struct kunit_case damon_test_cases[] = {
diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c
index 47f47f60440e..20a9a9d69eb1 100644
--- a/mm/damon/vaddr.c
+++ b/mm/damon/vaddr.c
@@ -627,7 +627,6 @@ int damon_va_apply_scheme(struct damon_ctx *ctx, struct damon_target *t,
         case DAMOS_STAT:
                 return 0;
         default:
-                pr_warn("Wrong action %d\n", scheme->action);
                 return -EINVAL;
         }
 
diff --git a/mm/filemap.c b/mm/filemap.c
index daa0e23a6ee6..39c4c46c6133 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -3253,8 +3253,6 @@ static struct page *next_uptodate_page(struct page *page,
                         goto skip;
                 if (!PageUptodate(page) || PageReadahead(page))
                         goto skip;
-                if (PageHWPoison(page))
-                        goto skip;
                 if (!trylock_page(page))
                         goto skip;
                 if (page->mapping != mapping)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index abcd1785c629..a1baa198519a 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2973,7 +2973,7 @@ int __alloc_bootmem_huge_page(struct hstate *h, int nid)
         struct huge_bootmem_page *m = NULL; /* initialize for clang */
         int nr_nodes, node;
 
-        if (nid >= nr_online_nodes)
+        if (nid != NUMA_NO_NODE && nid >= nr_online_nodes)
                 return 0;
         /* do node specific alloc */
         if (nid != NUMA_NO_NODE) {
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 6863a834ed42..2ed5f2a0879d 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -776,24 +776,6 @@ void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val)
         rcu_read_unlock();
 }
 
-/*
- * mod_objcg_mlstate() may be called with irq enabled, so
- * mod_memcg_lruvec_state() should be used.
- */
-static inline void mod_objcg_mlstate(struct obj_cgroup *objcg,
-                                     struct pglist_data *pgdat,
-                                     enum node_stat_item idx, int nr)
-{
-        struct mem_cgroup *memcg;
-        struct lruvec *lruvec;
-
-        rcu_read_lock();
-        memcg = obj_cgroup_memcg(objcg);
-        lruvec = mem_cgroup_lruvec(memcg, pgdat);
-        mod_memcg_lruvec_state(lruvec, idx, nr);
-        rcu_read_unlock();
-}
-
 /**
  * __count_memcg_events - account VM events in a cgroup
  * @memcg: the memory cgroup
@@ -2137,41 +2119,6 @@ static bool obj_stock_flush_required(struct memcg_stock_pcp *stock,
 }
 #endif
 
-/*
- * Most kmem_cache_alloc() calls are from user context. The irq disable/enable
- * sequence used in this case to access content from object stock is slow.
- * To optimize for user context access, there are now two object stocks for
- * task context and interrupt context access respectively.
- *
- * The task context object stock can be accessed by disabling preemption only
- * which is cheap in non-preempt kernel. The interrupt context object stock
- * can only be accessed after disabling interrupt. User context code can
- * access interrupt object stock, but not vice versa.
- */
-static inline struct obj_stock *get_obj_stock(unsigned long *pflags)
-{
-        struct memcg_stock_pcp *stock;
-
-        if (likely(in_task())) {
-                *pflags = 0UL;
-                preempt_disable();
-                stock = this_cpu_ptr(&memcg_stock);
-                return &stock->task_obj;
-        }
-
-        local_irq_save(*pflags);
-        stock = this_cpu_ptr(&memcg_stock);
-        return &stock->irq_obj;
-}
-
-static inline void put_obj_stock(unsigned long flags)
-{
-        if (likely(in_task()))
-                preempt_enable();
-        else
-                local_irq_restore(flags);
-}
-
 /**
  * consume_stock: Try to consume stocked charge on this cpu.
  * @memcg: memcg to consume from.
@@ -2816,6 +2763,59 @@ retry:
  */
 #define OBJCGS_CLEAR_MASK       (__GFP_DMA | __GFP_RECLAIMABLE | __GFP_ACCOUNT)
 
+/*
+ * Most kmem_cache_alloc() calls are from user context. The irq disable/enable
+ * sequence used in this case to access content from object stock is slow.
+ * To optimize for user context access, there are now two object stocks for
+ * task context and interrupt context access respectively.
+ *
+ * The task context object stock can be accessed by disabling preemption only
+ * which is cheap in non-preempt kernel. The interrupt context object stock
+ * can only be accessed after disabling interrupt. User context code can
+ * access interrupt object stock, but not vice versa.
+ */
+static inline struct obj_stock *get_obj_stock(unsigned long *pflags)
+{
+        struct memcg_stock_pcp *stock;
+
+        if (likely(in_task())) {
+                *pflags = 0UL;
+                preempt_disable();
+                stock = this_cpu_ptr(&memcg_stock);
+                return &stock->task_obj;
+        }
+
+        local_irq_save(*pflags);
+        stock = this_cpu_ptr(&memcg_stock);
+        return &stock->irq_obj;
+}
+
+static inline void put_obj_stock(unsigned long flags)
+{
+        if (likely(in_task()))
+                preempt_enable();
+        else
+                local_irq_restore(flags);
+}
+
+/*
+ * mod_objcg_mlstate() may be called with irq enabled, so
+ * mod_memcg_lruvec_state() should be used.
+ */
+static inline void mod_objcg_mlstate(struct obj_cgroup *objcg,
+                                     struct pglist_data *pgdat,
+                                     enum node_stat_item idx, int nr)
+{
+        struct mem_cgroup *memcg;
+        struct lruvec *lruvec;
+
+        rcu_read_lock();
+        memcg = obj_cgroup_memcg(objcg);
+        lruvec = mem_cgroup_lruvec(memcg, pgdat);
+        mod_memcg_lruvec_state(lruvec, idx, nr);
+        rcu_read_unlock();
+}
+
 int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s,
                                  gfp_t gfp, bool new_page)
 {
diff --git a/mm/slub.c b/mm/slub.c
index a8626825a829..abe7db581d68 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -5081,6 +5081,7 @@ struct loc_track {
         unsigned long max;
         unsigned long count;
         struct location *loc;
+        loff_t idx;
 };
 
 static struct dentry *slab_debugfs_root;
@@ -6052,11 +6053,11 @@ __initcall(slab_sysfs_init);
 #if defined(CONFIG_SLUB_DEBUG) && defined(CONFIG_DEBUG_FS)
 static int slab_debugfs_show(struct seq_file *seq, void *v)
 {
-
-        struct location *l;
-        unsigned int idx = *(unsigned int *)v;
         struct loc_track *t = seq->private;
+        struct location *l;
+        unsigned long idx;
 
+        idx = (unsigned long) t->idx;
         if (idx < t->count) {
                 l = &t->loc[idx];
 
@@ -6105,16 +6106,18 @@ static void *slab_debugfs_next(struct seq_file *seq, void *v, loff_t *ppos)
 {
         struct loc_track *t = seq->private;
 
-        v = ppos;
-        ++*ppos;
+        t->idx = ++(*ppos);
         if (*ppos <= t->count)
-                return v;
+                return ppos;
 
         return NULL;
 }
 
 static void *slab_debugfs_start(struct seq_file *seq, loff_t *ppos)
 {
+        struct loc_track *t = seq->private;
+
+        t->idx = *ppos;
         return ppos;
 }