From: Nick Desaulniers <>
Date: Mon, 1 Jun 2020 12:47:25 -0700
Subject: Re: [PATCH v2 06/10] x86/percpu: Clean up percpu_add_return_op()
On Sat, May 30, 2020 at 3:11 PM Brian Gerst <brgerst@gmail.com> wrote:
>
> The core percpu macros already have a switch on the data size, so the switch
> in the x86 code is redundant and produces more dead code.
>
> Also use appropriate types for the width of the instructions. This avoids
> errors when compiling with Clang.
>
> Signed-off-by: Brian Gerst <brgerst@gmail.com>
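For anyone skimming the series, here's a rough, self-contained user-space
sketch of the token-pasting idea (the my_* names are made up for illustration
and are not the kernel's actual helpers): the size argument selects the operand
type and the AT&T instruction suffix at preprocessing time, so the operation
macro itself no longer needs the sizeof() switch. It only builds on x86 and
uses GNU extensions (statement expressions, typeof), like the header it mimics.

/* Illustrative only: my_* helpers are stand-ins, not the kernel's. */
#include <stdint.h>
#include <stdio.h>

#define my_type_4 uint32_t
#define my_type_8 uint64_t
/* Paste the AT&T size suffix onto the mnemonic ("l" = 32-bit, "q" = 64-bit). */
#define my_op2_4(op, src, dst) op "l " src ", " dst
#define my_op2_8(op, src, dst) op "q " src ", " dst

#define my_add_return(size, _var, _val)                                 \
({                                                                      \
        my_type_##size tmp__ = (my_type_##size)(_val);                  \
        asm (my_op2_##size("xadd", "%[tmp]", "%[var]")                  \
             : [tmp] "+r" (tmp__), [var] "+m" (_var)                    \
             : : "memory");                                             \
        /* xadd left the old value in tmp__; add _val again for the new one. */ \
        (typeof(_var))(unsigned long)(tmp__ + (_val));                  \
})

int main(void)
{
        uint32_t counter = 40;
        printf("%u\n", my_add_return(4, counter, 2));   /* prints 42 */
        printf("%u\n", counter);                        /* also 42   */
        return 0;
}

And IIRC the generic layer in include/linux/percpu-defs.h already switches on
sizeof(pcp) to pick the _1/_2/_4/_8 variant, which is why repeating that switch
inside the arch macro only produced dead branches.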
I think it would have been OK to carry forward my Reviewed-by tag here from v1,
hidden at the bottom of
https://lore.kernel.org/lkml/CAKwvOdn7yC1GVA+6gtNewBSq2BK09y9iNWhv1dPFF5i4kT1+6A@mail.gmail.com/,
even though you split removing 'e' into its own patch.
Reviewed-by: Nick Desaulniers <ndesaulniers@google.com>
> ---
>  arch/x86/include/asm/percpu.h | 51 +++++++++++------------------------
>  1 file changed, 16 insertions(+), 35 deletions(-)
>
> diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
> index 9bb5440d98d3..0776a11e7e11 100644
> --- a/arch/x86/include/asm/percpu.h
> +++ b/arch/x86/include/asm/percpu.h
> @@ -199,34 +199,15 @@ do { \
>  /*
>   * Add return operation
>   */
> -#define percpu_add_return_op(qual, var, val) \
> +#define percpu_add_return_op(size, qual, _var, _val) \
>  ({ \
> -	typeof(var) paro_ret__ = val; \
> -	switch (sizeof(var)) { \
> -	case 1: \
> -		asm qual ("xaddb %0, "__percpu_arg(1) \
> -			  : "+q" (paro_ret__), "+m" (var) \
> -			  : : "memory"); \
> -		break; \
> -	case 2: \
> -		asm qual ("xaddw %0, "__percpu_arg(1) \
> -			  : "+r" (paro_ret__), "+m" (var) \
> -			  : : "memory"); \
> -		break; \
> -	case 4: \
> -		asm qual ("xaddl %0, "__percpu_arg(1) \
> -			  : "+r" (paro_ret__), "+m" (var) \
> -			  : : "memory"); \
> -		break; \
> -	case 8: \
> -		asm qual ("xaddq %0, "__percpu_arg(1) \
> -			  : "+r" (paro_ret__), "+m" (var) \
> -			  : : "memory"); \
> -		break; \
> -	default: __bad_percpu_size(); \
> -	} \
> -	paro_ret__ += val; \
> -	paro_ret__; \
> +	__pcpu_type_##size paro_tmp__ = __pcpu_cast_##size(_val); \
> +	asm qual (__pcpu_op2_##size("xadd", "%[tmp]", \
> +				    __percpu_arg([var])) \
> +		  : [tmp] __pcpu_reg_##size("+", paro_tmp__), \
> +		    [var] "+m" (_var) \
> +		  : : "memory"); \
> +	(typeof(_var))(unsigned long) (paro_tmp__ + _val); \
>  })
>
>  /*
> @@ -377,16 +358,16 @@ do { \
>  #define this_cpu_xchg_2(pcp, nval)	percpu_xchg_op(volatile, pcp, nval)
>  #define this_cpu_xchg_4(pcp, nval)	percpu_xchg_op(volatile, pcp, nval)
>
> -#define raw_cpu_add_return_1(pcp, val)	percpu_add_return_op(, pcp, val)
> -#define raw_cpu_add_return_2(pcp, val)	percpu_add_return_op(, pcp, val)
> -#define raw_cpu_add_return_4(pcp, val)	percpu_add_return_op(, pcp, val)
> +#define raw_cpu_add_return_1(pcp, val)	percpu_add_return_op(1, , pcp, val)
> +#define raw_cpu_add_return_2(pcp, val)	percpu_add_return_op(2, , pcp, val)
> +#define raw_cpu_add_return_4(pcp, val)	percpu_add_return_op(4, , pcp, val)
>  #define raw_cpu_cmpxchg_1(pcp, oval, nval)	percpu_cmpxchg_op(, pcp, oval, nval)
>  #define raw_cpu_cmpxchg_2(pcp, oval, nval)	percpu_cmpxchg_op(, pcp, oval, nval)
>  #define raw_cpu_cmpxchg_4(pcp, oval, nval)	percpu_cmpxchg_op(, pcp, oval, nval)
>
> -#define this_cpu_add_return_1(pcp, val)	percpu_add_return_op(volatile, pcp, val)
> -#define this_cpu_add_return_2(pcp, val)	percpu_add_return_op(volatile, pcp, val)
> -#define this_cpu_add_return_4(pcp, val)	percpu_add_return_op(volatile, pcp, val)
> +#define this_cpu_add_return_1(pcp, val)	percpu_add_return_op(1, volatile, pcp, val)
> +#define this_cpu_add_return_2(pcp, val)	percpu_add_return_op(2, volatile, pcp, val)
> +#define this_cpu_add_return_4(pcp, val)	percpu_add_return_op(4, volatile, pcp, val)
>  #define this_cpu_cmpxchg_1(pcp, oval, nval)	percpu_cmpxchg_op(volatile, pcp, oval, nval)
>  #define this_cpu_cmpxchg_2(pcp, oval, nval)	percpu_cmpxchg_op(volatile, pcp, oval, nval)
>  #define this_cpu_cmpxchg_4(pcp, oval, nval)	percpu_cmpxchg_op(volatile, pcp, oval, nval)
>
> @@ -418,7 +399,7 @@ do { \
>  #define raw_cpu_add_8(pcp, val)	percpu_add_op(8, , (pcp), val)
>  #define raw_cpu_and_8(pcp, val)	percpu_to_op(8, , "and", (pcp), val)
>  #define raw_cpu_or_8(pcp, val)	percpu_to_op(8, , "or", (pcp), val)
> -#define raw_cpu_add_return_8(pcp, val)	percpu_add_return_op(, pcp, val)
> +#define raw_cpu_add_return_8(pcp, val)	percpu_add_return_op(8, , pcp, val)
>  #define raw_cpu_xchg_8(pcp, nval)	raw_percpu_xchg_op(pcp, nval)
>  #define raw_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(, pcp, oval, nval)
>
> @@ -427,7 +408,7 @@ do { \
>  #define this_cpu_add_8(pcp, val)	percpu_add_op(8, volatile, (pcp), val)
>  #define this_cpu_and_8(pcp, val)	percpu_to_op(8, volatile, "and", (pcp), val)
>  #define this_cpu_or_8(pcp, val)	percpu_to_op(8, volatile, "or", (pcp), val)
> -#define this_cpu_add_return_8(pcp, val)	percpu_add_return_op(volatile, pcp, val)
> +#define this_cpu_add_return_8(pcp, val)	percpu_add_return_op(8, volatile, pcp, val)
>  #define this_cpu_xchg_8(pcp, nval)	percpu_xchg_op(volatile, pcp, nval)
>  #define this_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(volatile, pcp, oval, nval)
>
> --
> 2.25.4
>
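And to spell out what the result looks like: hand-expanding the 4-byte case
(approximate, written from memory rather than taken from preprocessor output,
and assuming a 64-bit SMP build where __percpu_arg() produces a %gs-prefixed
operand), raw_cpu_add_return_4(pcp, 2) now becomes roughly:

/* Approximate hand expansion, not actual preprocessor output. */
({
        u32 paro_tmp__ = (u32)(2);
        asm ("xaddl %[tmp], %%gs:%[var]"        /* mem += tmp; tmp = old mem */
             : [tmp] "+r" (paro_tmp__),
               [var] "+m" (pcp)
             : : "memory");
        (typeof(pcp))(unsigned long)(paro_tmp__ + 2);   /* new value */
})

The temporary now always has the fixed-width u32/u64/u8/u16 type matching the
mnemonic, rather than typeof(var), which I believe is what was tripping Clang
up in the old dead switch arms.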
--
Thanks,
~Nick Desaulniers