Hi Sasha,
On Wed, Jun 26, 2024 at 03:07:08PM -0400, Sasha Levin wrote:
This is a note to let you know that I've just added the patch titled
mm: memblock: replace dereferences of memblock_region.nid with API calls
to the 5.4-stable tree which can be found at: http://www.kernel.org/git/?p=linux/kernel/git/stable/stable-queue.git;a=summary
The filename of the patch is: mm-memblock-replace-dereferences-of-memblock_region..patch and it can be found in the queue-5.4 subdirectory.
If you, or anyone else, feels it should not be added to the stable tree, please let stable@vger.kernel.org know about it.
commit dd8d9169375a725cadd5e3635342a6e2d483cf4c
Author: Mike Rapoport <rppt@kernel.org>
Date:   Wed Jun 3 15:56:53 2020 -0700
mm: memblock: replace dereferences of memblock_region.nid with API calls
Stable-dep-of: 3ac36aa73073 ("x86/mm/numa: Use NUMA_NO_NODE when calling memblock_set_node()")
The commit 3ac36aa73073 shouldn't be backported to 5.4 — or anything before 6.8, for that matter — so I don't see a need to bring this dependency in either.
Signed-off-by: Sasha Levin <sashal@kernel.org>
diff --git a/arch/arm64/mm/numa.c b/arch/arm64/mm/numa.c
index 53ebb4babf3a7..58c83c2b8748f 100644
--- a/arch/arm64/mm/numa.c
+++ b/arch/arm64/mm/numa.c
@@ -354,13 +354,16 @@ static int __init numa_register_nodes(void)
 	struct memblock_region *mblk;
 
 	/* Check that valid nid is set to memblks */
-	for_each_memblock(memory, mblk)
-		if (mblk->nid == NUMA_NO_NODE || mblk->nid >= MAX_NUMNODES) {
+	for_each_memblock(memory, mblk) {
+		int mblk_nid = memblock_get_region_node(mblk);
+
+		if (mblk_nid == NUMA_NO_NODE || mblk_nid >= MAX_NUMNODES) {
 			pr_warn("Warning: invalid memblk node %d [mem %#010Lx-%#010Lx]\n",
-				mblk->nid, mblk->base,
+				mblk_nid, mblk->base,
 				mblk->base + mblk->size - 1);
 			return -EINVAL;
 		}
+	}
 
 	/* Finally register nodes. */
 	for_each_node_mask(nid, numa_nodes_parsed) {
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index 7316dca7e846a..bd52ce954d59a 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -502,8 +502,10 @@ static void __init numa_clear_kernel_node_hotplug(void)
 	 *   reserve specific pages for Sandy Bridge graphics. ]
 	 */
 	for_each_memblock(reserved, mb_region) {
-		if (mb_region->nid != MAX_NUMNODES)
-			node_set(mb_region->nid, reserved_nodemask);
+		int nid = memblock_get_region_node(mb_region);
+
+		if (nid != MAX_NUMNODES)
+			node_set(nid, reserved_nodemask);
 	}
 
 	/*
diff --git a/mm/memblock.c b/mm/memblock.c
index a75cc65f03307..d2d85d4d16b74 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -1170,13 +1170,15 @@ void __init_memblock __next_mem_pfn_range(int *idx, int nid,
 {
 	struct memblock_type *type = &memblock.memory;
 	struct memblock_region *r;
+	int r_nid;
 
 	while (++*idx < type->cnt) {
 		r = &type->regions[*idx];
+		r_nid = memblock_get_region_node(r);
 
 		if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
 			continue;
-		if (nid == MAX_NUMNODES || nid == r->nid)
+		if (nid == MAX_NUMNODES || nid == r_nid)
 			break;
 	}
 	if (*idx >= type->cnt) {
@@ -1189,7 +1191,7 @@ void __init_memblock __next_mem_pfn_range(int *idx, int nid,
 	if (out_end_pfn)
 		*out_end_pfn = PFN_DOWN(r->base + r->size);
 	if (out_nid)
-		*out_nid = r->nid;
+		*out_nid = r_nid;
 }
 
 /**
@@ -1730,7 +1732,7 @@ int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
 	*start_pfn = PFN_DOWN(type->regions[mid].base);
 	*end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size);
 
-	return type->regions[mid].nid;
+	return memblock_get_region_node(&type->regions[mid]);
 }
 #endif
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 0ad582945f54d..4a649111178cc 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -7214,7 +7214,7 @@ static void __init find_zone_movable_pfns_for_nodes(void)
 			if (!memblock_is_hotpluggable(r))
 				continue;
 
-			nid = r->nid;
+			nid = memblock_get_region_node(r);
 
 			usable_startpfn = PFN_DOWN(r->base);
 			zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
@@ -7235,7 +7235,7 @@ static void __init find_zone_movable_pfns_for_nodes(void)
 			if (memblock_is_mirror(r))
 				continue;
 
-			nid = r->nid;
+			nid = memblock_get_region_node(r);
 
 			usable_startpfn = memblock_region_memory_base_pfn(r);