On Mon, Jan 13, 2020 at 4:55 PM Mike Kravetz mike.kravetz@oracle.com wrote:
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index dea6143aa0685..e6ab499ba2086 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -46,6 +46,15 @@ struct resv_map { long adds_in_progress; struct list_head region_cache; long region_cache_count; +#ifdef CONFIG_CGROUP_HUGETLB
/*
* On private mappings, the counter to uncharge reservations is stored
* here. If these fields are 0, then the mapping is shared.
Will *reservation_counter ALWAYS be non-NULL for private mappings?
*/
struct page_counter *reservation_counter;
unsigned long pages_per_hpage;
struct cgroup_subsys_state *css;
+#endif }; extern struct resv_map *resv_map_alloc(void); void resv_map_release(struct kref *ref); diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h index eab8a70d5bcb5..8c320accefe87 100644 --- a/include/linux/hugetlb_cgroup.h +++ b/include/linux/hugetlb_cgroup.h @@ -25,6 +25,33 @@ struct hugetlb_cgroup; #define HUGETLB_CGROUP_MIN_ORDER 2
#ifdef CONFIG_CGROUP_HUGETLB +enum hugetlb_memory_event {
HUGETLB_MAX,
HUGETLB_NR_MEMORY_EVENTS,
+};
+struct hugetlb_cgroup {
struct cgroup_subsys_state css;
/*
* the counter to account for hugepages from hugetlb.
*/
struct page_counter hugepage[HUGE_MAX_HSTATE];
/*
* the counter to account for hugepage reservations from hugetlb.
*/
struct page_counter reserved_hugepage[HUGE_MAX_HSTATE];
atomic_long_t events[HUGE_MAX_HSTATE][HUGETLB_NR_MEMORY_EVENTS];
atomic_long_t events_local[HUGE_MAX_HSTATE][HUGETLB_NR_MEMORY_EVENTS];
/* Handle for "hugetlb.events" */
struct cgroup_file events_file[HUGE_MAX_HSTATE];
/* Handle for "hugetlb.events.local" */
struct cgroup_file events_local_file[HUGE_MAX_HSTATE];
+};
static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page, bool reserved) diff --git a/mm/hugetlb.c b/mm/hugetlb.c index e6e8240f1718c..7782977970301 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -665,6 +665,17 @@ struct resv_map *resv_map_alloc(void) INIT_LIST_HEAD(&resv_map->regions);
resv_map->adds_in_progress = 0;
+#ifdef CONFIG_CGROUP_HUGETLB
/*
* Initialize these to 0. On shared mappings, 0's here indicate these
* fields don't do cgroup accounting. On private mappings, these will be
* re-initialized to the proper values, to indicate that hugetlb cgroup
* reservations are to be un-charged from here.
*/
resv_map->reservation_counter = NULL;
resv_map->pages_per_hpage = 0;
resv_map->css = NULL;
+#endif
INIT_LIST_HEAD(&resv_map->region_cache); list_add(&rg->link, &resv_map->region_cache);
@@ -3145,7 +3156,20 @@ static void hugetlb_vm_op_close(struct vm_area_struct *vma)
reserve = (end - start) - region_count(resv, start, end);
kref_put(&resv->refs, resv_map_release);
+#ifdef CONFIG_CGROUP_HUGETLB
/*
* Since we check for HPAGE_RESV_OWNER above, this must be a private
* mapping, and these values should be non-zero, and should point to
* the hugetlb_cgroup counter to uncharge for this reservation.
*/
WARN_ON(!resv->reservation_counter);
WARN_ON(!resv->pages_per_hpage);
WARN_ON(!resv->css);
I was once again wondering if these were always non-NULL for private mappings. It seems that reservation_counter (h_cg) would be NULL in the cases caught by these early checks in hugetlb_cgroup_charge_cgroup().
You are right. I'm fixing the code and comments in v10 to account for h_cg potentially being NULL, but I'm having trouble testing. Looking at the code, I'm a bit confused by the checks. Seems to me hugetlb_cgroup_disabled() is the same as #ifdef CONFIG_CGROUP_HUGETLB; I can't find a way to enable the Kconfig but have that return false unless I hack the code. Also seems to me checking huge_page_order is just super defensive; I skimmed the hugepage sizes allowed code and I can't find an arch that allows you to configure hugetlb page size to < 2^HUGETLB_CGROUP_MIN_ORDER pages. So in reality these will never fire, IIUC.
int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages, struct hugetlb_cgroup **ptr, bool reserved) { int ret = 0; struct page_counter *counter; struct hugetlb_cgroup *h_cg = NULL;
if (hugetlb_cgroup_disabled()) goto done; /* * We don't charge any cgroup if the compound page have less * than 3 pages. */ if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER) goto done; ...
It seems like the following hugetlb_cgroup_uncharge_counter() guards against reservation_counter being NULL (for some of the same reasons).
hugetlb_cgroup_uncharge_counter(resv->reservation_counter,
(end - start) * resv->pages_per_hpage,
resv->css);
+#endif
if (reserve) { /*
@@ -3155,6 +3179,8 @@ static void hugetlb_vm_op_close(struct vm_area_struct *vma) gbl_reserve = hugepage_subpool_put_pages(spool, reserve); hugetlb_acct_memory(h, -gbl_reserve); }
kref_put(&resv->refs, resv_map_release);
}
static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr) @@ -4501,6 +4527,7 @@ int hugetlb_reserve_pages(struct inode *inode, struct hstate *h = hstate_inode(inode); struct hugepage_subpool *spool = subpool_inode(inode); struct resv_map *resv_map;
struct hugetlb_cgroup *h_cg; long gbl_reserve; /* This should never happen */
@@ -4534,12 +4561,30 @@ int hugetlb_reserve_pages(struct inode *inode, chg = region_chg(resv_map, from, to);
} else {
/* Private mapping. */ resv_map = resv_map_alloc(); if (!resv_map) return -ENOMEM; chg = to - from;
if (hugetlb_cgroup_charge_cgroup(hstate_index(h),
chg * pages_per_huge_page(h),
&h_cg, true)) {
kref_put(&resv_map->refs, resv_map_release);
return -ENOMEM;
}
Shouldn't this code be in the #ifdef CONFIG_CGROUP_HUGETLB block?
Not necessary AFAICT, hugetlb_cgroup_charge_cgroup stub returns 0 (no-op).