Subject: Re: [PATCH v2 2/5] mm: Extends local cpu counter vm_diff_nodestat from s8 to s16


On 2017/12/19 20:38, Michal Hocko wrote:
> On Tue 19-12-17 14:39:23, Kemi Wang wrote:
>> The type s8 used for vm_diff_nodestat[] as local cpu counters has the
>> limitation of global counters update frequency, especially for those
>> monotone increasing type of counters like NUMA counters with more and more
>> cpus/nodes. This patch extends the type of vm_diff_nodestat from s8 to s16
>> without any functionality change.
>>
>>                                    before   after
>> sizeof(struct per_cpu_nodestat)    28       68
>
> So it is 40B * num_cpus * num_nodes. Nothing really catastrophic IMHO
> but the changelog is a bit silent about any numbers. This is a
> performance optimization so it should better give us some.
>
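(For scale: the struct grows by 68 - 28 = 40 bytes per cpu per node, so even a
hypothetical 64-cpu, 2-node box would pay roughly 40 * 64 * 2 = 5 KB in total.)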

This patch does not have any functional change, so no performance gain from it
alone, I suppose.
I guess you are talking about the performance gain from the third patch, which
increases the threshold size of the NUMA counters.

>> Signed-off-by: Kemi Wang <kemi.wang@intel.com>
>> ---
>>  include/linux/mmzone.h |  4 ++--
>>  mm/vmstat.c            | 16 ++++++++--------
>>  2 files changed, 10 insertions(+), 10 deletions(-)
>>
>> diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
>> index c06d880..2da6b6f 100644
>> --- a/include/linux/mmzone.h
>> +++ b/include/linux/mmzone.h
>> @@ -289,8 +289,8 @@ struct per_cpu_pageset {
>>  };
>>
>>  struct per_cpu_nodestat {
>> -        s8 stat_threshold;
>> -        s8 vm_node_stat_diff[NR_VM_NODE_STAT_ITEMS];
>> +        s16 stat_threshold;
>> +        s16 vm_node_stat_diff[NR_VM_NODE_STAT_ITEMS];
>>  };
>>
>>  #endif /* !__GENERATING_BOUNDS.H */
>> diff --git a/mm/vmstat.c b/mm/vmstat.c
>> index 1dd12ae..9c681cc 100644
>> --- a/mm/vmstat.c
>> +++ b/mm/vmstat.c
>> @@ -332,7 +332,7 @@ void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
>>                          long delta)
>>  {
>>          struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
>> -        s8 __percpu *p = pcp->vm_node_stat_diff + item;
>> +        s16 __percpu *p = pcp->vm_node_stat_diff + item;
>>          long x;
>>          long t;
>>
>> @@ -390,13 +390,13 @@ void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
>>  void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
>>  {
>>          struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
>> -        s8 __percpu *p = pcp->vm_node_stat_diff + item;
>> -        s8 v, t;
>> +        s16 __percpu *p = pcp->vm_node_stat_diff + item;
>> +        s16 v, t;
>>
>>          v = __this_cpu_inc_return(*p);
>>          t = __this_cpu_read(pcp->stat_threshold);
>>          if (unlikely(v > t)) {
>> -                s8 overstep = t >> 1;
>> +                s16 overstep = t >> 1;
>>
>>                  node_page_state_add(v + overstep, pgdat, item);
>>                  __this_cpu_write(*p, -overstep);
>> @@ -434,13 +434,13 @@ void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
>>  void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
>>  {
>>          struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
>> -        s8 __percpu *p = pcp->vm_node_stat_diff + item;
>> -        s8 v, t;
>> +        s16 __percpu *p = pcp->vm_node_stat_diff + item;
>> +        s16 v, t;
>>
>>          v = __this_cpu_dec_return(*p);
>>          t = __this_cpu_read(pcp->stat_threshold);
>>          if (unlikely(v < - t)) {
>> -                s8 overstep = t >> 1;
>> +                s16 overstep = t >> 1;
>>
>>                  node_page_state_add(v - overstep, pgdat, item);
>>                  __this_cpu_write(*p, overstep);
>> @@ -533,7 +533,7 @@ static inline void mod_node_state(struct pglist_data *pgdat,
>>                  enum node_stat_item item, int delta, int overstep_mode)
>>  {
>>          struct per_cpu_nodestat __percpu *pcp = pgdat->per_cpu_nodestats;
>> -        s8 __percpu *p = pcp->vm_node_stat_diff + item;
>> +        s16 __percpu *p = pcp->vm_node_stat_diff + item;
>>          long o, n, t, z;
>>
>>          do {
>> --
>> 2.7.4
>>
>
