Subject: Re: [PATCH v2] mm/mempool: Add 'else' to split mutually exclusive case
On Thu, 24 Sep 2020 07:16:41 -0400 Miaohe Lin <linmiaohe@huawei.com> wrote:

> Add an 'else' to split the mutually exclusive cases and avoid an unnecessary
> check. It doesn't seem to change code generation (the compiler is smart), but
> I think it helps readability.
>
> ...
>
> --- a/mm/mempool.c
> +++ b/mm/mempool.c
> @@ -58,11 +58,10 @@ static void __check_element(mempool_t *pool, void *element, size_t size)
>  static void check_element(mempool_t *pool, void *element)
>  {
>  	/* Mempools backed by slab allocator */
> -	if (pool->free == mempool_free_slab || pool->free == mempool_kfree)
> +	if (pool->free == mempool_free_slab || pool->free == mempool_kfree) {
>  		__check_element(pool, element, ksize(element));
> -
>  	/* Mempools backed by page allocator */
> -	if (pool->free == mempool_free_pages) {
> +	} else if (pool->free == mempool_free_pages) {
>  		int order = (int)(long)pool->pool_data;
>  		void *addr = kmap_atomic((struct page *)element);
>
> @@ -82,11 +81,10 @@ static void __poison_element(void *element, size_t size)
>  static void poison_element(mempool_t *pool, void *element)
>  {
>  	/* Mempools backed by slab allocator */
> -	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
> +	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc) {
>  		__poison_element(element, ksize(element));
> -
>  	/* Mempools backed by page allocator */
> -	if (pool->alloc == mempool_alloc_pages) {
> +	} else if (pool->alloc == mempool_alloc_pages) {
>  		int order = (int)(long)pool->pool_data;
>  		void *addr = kmap_atomic((struct page *)element);
>

OK, I guess. But the comments are now in the wrong place.

--- a/mm/mempool.c~mm-mempool-add-else-to-split-mutually-exclusive-case-fix
+++ a/mm/mempool.c
@@ -60,8 +60,8 @@ static void check_element(mempool_t *poo
 	/* Mempools backed by slab allocator */
 	if (pool->free == mempool_free_slab || pool->free == mempool_kfree) {
 		__check_element(pool, element, ksize(element));
-	/* Mempools backed by page allocator */
 	} else if (pool->free == mempool_free_pages) {
+		/* Mempools backed by page allocator */
 		int order = (int)(long)pool->pool_data;
 		void *addr = kmap_atomic((struct page *)element);

@@ -83,8 +83,8 @@ static void poison_element(mempool_t *po
 	/* Mempools backed by slab allocator */
 	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc) {
 		__poison_element(element, ksize(element));
-	/* Mempools backed by page allocator */
 	} else if (pool->alloc == mempool_alloc_pages) {
+		/* Mempools backed by page allocator */
 		int order = (int)(long)pool->pool_data;
 		void *addr = kmap_atomic((struct page *)element);

_
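
For reference, a sketch of how check_element() would read with both the patch
and the fixlet above applied. The tail of the function (the __check_element()
call on the mapped page and the kunmap_atomic()) is not quoted in the hunks
above; it is filled in here from the surrounding mm/mempool.c code, so treat
those lines as assumed context rather than part of the patch:

static void check_element(mempool_t *pool, void *element)
{
	/* Mempools backed by slab allocator */
	if (pool->free == mempool_free_slab || pool->free == mempool_kfree) {
		__check_element(pool, element, ksize(element));
	} else if (pool->free == mempool_free_pages) {
		/* Mempools backed by page allocator */
		int order = (int)(long)pool->pool_data;
		void *addr = kmap_atomic((struct page *)element);

		/* Check the whole high-order page, then drop the mapping. */
		__check_element(pool, addr, 1UL << (PAGE_SHIFT + order));
		kunmap_atomic(addr);
	}
}

poison_element() ends up with the same shape, with the alloc-side callbacks
(mempool_alloc_slab, mempool_kmalloc, mempool_alloc_pages) and
__poison_element() in place of the free-side ones.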