On Thu 17-12-20 13:52:36, Pavel Tatashin wrote: [..]
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index 469016222cdb..d9546f5897f4 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -3234,11 +3234,12 @@ static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
>  unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
>  				gfp_t gfp_mask, nodemask_t *nodemask)
>  {
> +	gfp_t current_gfp_mask = current_gfp_context(gfp_mask);
>  	unsigned long nr_reclaimed;
>  	struct scan_control sc = {
>  		.nr_to_reclaim = SWAP_CLUSTER_MAX,
> -		.gfp_mask = current_gfp_context(gfp_mask),
> -		.reclaim_idx = gfp_zone(gfp_mask),
> +		.gfp_mask = current_gfp_mask,
> +		.reclaim_idx = gfp_zone(current_gfp_mask),
>  		.order = order,
>  		.nodemask = nodemask,
>  		.priority = DEF_PRIORITY,
> @@ -4158,17 +4159,18 @@ static int __node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order)
>  {
>  	/* Minimum pages needed in order to stay on node */
>  	const unsigned long nr_pages = 1 << order;
> +	gfp_t current_gfp_mask = current_gfp_context(gfp_mask);
>  	struct task_struct *p = current;
>  	unsigned int noreclaim_flag;
>  	struct scan_control sc = {
>  		.nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX),
> -		.gfp_mask = current_gfp_context(gfp_mask),
> +		.gfp_mask = current_gfp_mask,
>  		.order = order,
>  		.priority = NODE_RECLAIM_PRIORITY,
>  		.may_writepage = !!(node_reclaim_mode & RECLAIM_WRITE),
>  		.may_unmap = !!(node_reclaim_mode & RECLAIM_UNMAP),
>  		.may_swap = 1,
> -		.reclaim_idx = gfp_zone(gfp_mask),
> +		.reclaim_idx = gfp_zone(current_gfp_mask),
>  	};
> 
>  	trace_mm_vmscan_node_reclaim_begin(pgdat->node_id, order,

I was hoping we had agreed these are not necessary and they shouldn't be touched in the patch.
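
For reference, what is being hoisted into the local variable here is just the
per-task PF_MEMALLOC_* filtering. A rough sketch, paraphrased from
include/linux/sched/mm.h around v5.10 rather than quoted verbatim:

	static inline gfp_t current_gfp_context(gfp_t flags)
	{
		unsigned int pflags = READ_ONCE(current->flags);

		if (unlikely(pflags & (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS))) {
			/* NOIO also implies NOFS, so check it first. */
			if (pflags & PF_MEMALLOC_NOIO)
				flags &= ~(__GFP_IO | __GFP_FS);
			else if (pflags & PF_MEMALLOC_NOFS)
				flags &= ~__GFP_FS;
		}
		return flags;
	}

As I read the series, it teaches this helper to also clear __GFP_MOVABLE when
PF_MEMALLOC_PIN is set, which is what the gfp_zone() changes in the hunks
above are wiring through.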