    Subject: Re: [PATCH v7 09/12] mm/demotion: Demote pages according to allocation fallback order

    "Aneesh Kumar K.V" <aneesh.kumar@linux.ibm.com> writes:

    > From: Jagdish Gediya <jvgediya@linux.ibm.com>

    [...]

    > -static struct page *alloc_demote_page(struct page *page, unsigned long node)
    > +static struct page *alloc_demote_page(struct page *page, unsigned long private)
    > {
    > -	struct migration_target_control mtc = {
    > -		/*
    > -		 * Allocate from 'node', or fail quickly and quietly.
    > -		 * When this happens, 'page' will likely just be discarded
    > -		 * instead of migrated.
    > -		 */
    > -		.gfp_mask = (GFP_HIGHUSER_MOVABLE & ~__GFP_RECLAIM) |
    > -			__GFP_THISNODE | __GFP_NOWARN |
    > -			__GFP_NOMEMALLOC | GFP_NOWAIT,
    > -		.nid = node
    > -	};
    > +	struct page *target_page;
    > +	nodemask_t *allowed_mask;
    > +	struct migration_target_control *mtc;
    > +
    > +	mtc = (struct migration_target_control *)private;
    > +
    > +	allowed_mask = mtc->nmask;
    > +	/*
    > +	 * Make sure we allocate from the target node first, also trying to
    > +	 * reclaim pages from the target node via kswapd if we are low on
    > +	 * free memory on the target node. If we don't do this and we have
    > +	 * low free memory on the target memtier, we would start allocating
    > +	 * pages from higher memory tiers without even forcing a demotion of
    > +	 * cold pages from the target memtier. This can result in the kernel
    > +	 * placing hot pages in higher memory tiers.
    > +	 */
    > +	mtc->nmask = NULL;
    > +	mtc->gfp_mask |= __GFP_THISNODE;
    > +	target_page = alloc_migration_target(page, (unsigned long)&mtc);

    I finally managed to get a system set up to start testing some of this
    out. However, it quickly crashed due to the bad pointer in the above call:
    mtc is already a struct migration_target_control *, so you need mtc, not
    &mtc, here.
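
    That is:

    	target_page = alloc_migration_target(page, (unsigned long)mtc);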

    > +	if (target_page)
    > +		return target_page;
    > +
    > +	mtc->gfp_mask &= ~__GFP_THISNODE;
    > +	mtc->nmask = allowed_mask;
    >
    > 	return alloc_migration_target(page, (unsigned long)&mtc);

    And here.
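
    Same fix:

    	return alloc_migration_target(page, (unsigned long)mtc);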

    > }
    > @@ -1487,6 +1500,19 @@ static unsigned int demote_page_list(struct list_head *demote_pages,
    > {
    > 	int target_nid = next_demotion_node(pgdat->node_id);
    > 	unsigned int nr_succeeded;
    > +	nodemask_t allowed_mask;
    > +
    > +	struct migration_target_control mtc = {
    > +		/*
    > +		 * Allocate from 'node', or fail quickly and quietly.
    > +		 * When this happens, 'page' will likely just be discarded
    > +		 * instead of migrated.
    > +		 */
    > +		.gfp_mask = (GFP_HIGHUSER_MOVABLE & ~__GFP_RECLAIM) | __GFP_NOWARN |
    > +			__GFP_NOMEMALLOC | GFP_NOWAIT,
    > +		.nid = target_nid,
    > +		.nmask = &allowed_mask
    > +	};
    >
    > 	if (list_empty(demote_pages))
    > 		return 0;
    > @@ -1494,10 +1520,12 @@ static unsigned int demote_page_list(struct list_head *demote_pages,
    > 	if (target_nid == NUMA_NO_NODE)
    > 		return 0;
    >
    > +	node_get_allowed_targets(pgdat, &allowed_mask);
    > +
    > 	/* Demotion ignores all cpuset and mempolicy settings */
    > 	migrate_pages(demote_pages, alloc_demote_page, NULL,
    > -		      target_nid, MIGRATE_ASYNC, MR_DEMOTION,
    > -		      &nr_succeeded);
    > +		      (unsigned long)&mtc, MIGRATE_ASYNC, MR_DEMOTION,
    > +		      &nr_succeeded);
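
    For reference, the helper with the pointer fixed in both places would look
    something like this (just the quoted hunk re-assembled; untested beyond
    getting past the crash):

    static struct page *alloc_demote_page(struct page *page, unsigned long private)
    {
    	struct page *target_page;
    	nodemask_t *allowed_mask;
    	struct migration_target_control *mtc;

    	mtc = (struct migration_target_control *)private;

    	allowed_mask = mtc->nmask;

    	/*
    	 * Try the target node first (waking kswapd on it if needed) so
    	 * that demotion of its cold pages is forced before falling back
    	 * to other tiers; see the comment in the hunk above.
    	 */
    	mtc->nmask = NULL;
    	mtc->gfp_mask |= __GFP_THISNODE;
    	target_page = alloc_migration_target(page, (unsigned long)mtc);
    	if (target_page)
    		return target_page;

    	/* Fall back to the other nodes in the allowed demotion mask. */
    	mtc->gfp_mask &= ~__GFP_THISNODE;
    	mtc->nmask = allowed_mask;

    	return alloc_migration_target(page, (unsigned long)mtc);
    }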
