On (22/06/08 11:45), Sergey Senozhatsky wrote:
Something like this?
Maybe even something like below. Move the static initializer to the cpu-up hook.
---
 mm/zsmalloc.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 9152fbde33b5..6d3789d834e2 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -453,9 +453,7 @@ MODULE_ALIAS("zpool-zsmalloc");
 #endif /* CONFIG_ZPOOL */
 
 /* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
-static DEFINE_PER_CPU(struct mapping_area, zs_map_area) = {
-	.lock	= INIT_LOCAL_LOCK(lock),
-};
+static DEFINE_PER_CPU(struct mapping_area, zs_map_area);
 
 static __maybe_unused int is_first_page(struct page *page)
 {
@@ -1113,6 +1111,7 @@ static inline int __zs_cpu_up(struct mapping_area *area)
 	area->vm_buf = kmalloc(ZS_MAX_ALLOC_SIZE, GFP_KERNEL);
 	if (!area->vm_buf)
 		return -ENOMEM;
+	local_lock_init(&area->lock);
 	return 0;
 }