Hi Amit,
I fixed some conflicts when merging lsk 4.1 into lsk 4.1-android. Would you like to review the solutions?
After the merge resolution I also picked your fix patch: 703920 'cgroup: refactor allow_attach handler for 4.4' for the allow_attach compiler issue.
Here is the merge conflict resolution:
---
commit c70c579d3c4abeacd9da1383b6b8d2e0817f91e8 Merge: b92a381 7c0ca54 Author: Alex Shi alex.shi@linaro.org Date: Fri Jun 17 15:12:36 2016 +0800
Merge branch 'linux-linaro-lsk-v4.1' into linux-linaro-lsk-v4.1-android
Conflicts: include/linux/cgroup-defs.h kernel/sched/core.c mm/memcontrol.c in kernel/cgroup.c, rewrite to compatible with commit: d09dd4117 cgroup: Add generic cgroup subsystem permission
diff --cc include/linux/cgroup-defs.h index bdb4354,f7dbca9..72e4d67 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@@ -403,21 -427,16 +427,18 @@@ struct cgroup_subsys void (*css_reset)(struct cgroup_subsys_state *css); void (*css_e_css_changed)(struct cgroup_subsys_state *css);
+ int (*allow_attach)(struct cgroup_subsys_state *css, + struct cgroup_taskset *tset); - int (*can_attach)(struct cgroup_subsys_state *css, - struct cgroup_taskset *tset); - void (*cancel_attach)(struct cgroup_subsys_state *css, - struct cgroup_taskset *tset); - void (*attach)(struct cgroup_subsys_state *css, - struct cgroup_taskset *tset); - void (*fork)(struct task_struct *task); - void (*exit)(struct cgroup_subsys_state *css, - struct cgroup_subsys_state *old_css, - struct task_struct *task); + int (*can_attach)(struct cgroup_taskset *tset); + void (*cancel_attach)(struct cgroup_taskset *tset); + void (*attach)(struct cgroup_taskset *tset); + int (*can_fork)(struct task_struct *task, void **priv_p); + void (*cancel_fork)(struct task_struct *task, void *priv); + void (*fork)(struct task_struct *task, void *priv); + void (*exit)(struct task_struct *task); + void (*free)(struct task_struct *task); void (*bind)(struct cgroup_subsys_state *root_css);
- int disabled; int early_init;
/* diff --cc include/linux/cgroup.h index f5da7ba,56e7af9..c96e20e --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@@ -367,195 -469,87 +469,103 @@@ static inline struct cgroup *task_cgrou return task_css(task, subsys_id)->cgroup; }
- struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos, - struct cgroup_subsys_state *parent); - - struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss); + /* no synchronization, the result can only be used as a hint */ + static inline bool cgroup_is_populated(struct cgroup *cgrp) + { + return cgrp->populated_cnt; + }
- /** - * css_for_each_child - iterate through children of a css - * @pos: the css * to use as the loop cursor - * @parent: css whose children to walk - * - * Walk @parent's children. Must be called under rcu_read_lock(). - * - * If a subsystem synchronizes ->css_online() and the start of iteration, a - * css which finished ->css_online() is guaranteed to be visible in the - * future iterations and will stay visible until the last reference is put. - * A css which hasn't finished ->css_online() or already finished - * ->css_offline() may show up during traversal. It's each subsystem's - * responsibility to synchronize against on/offlining. - * - * It is allowed to temporarily drop RCU read lock during iteration. The - * caller is responsible for ensuring that @pos remains accessible until - * the start of the next iteration by, for example, bumping the css refcnt. - */ - #define css_for_each_child(pos, parent) \ - for ((pos) = css_next_child(NULL, (parent)); (pos); \ - (pos) = css_next_child((pos), (parent))) + /* returns ino associated with a cgroup */ + static inline ino_t cgroup_ino(struct cgroup *cgrp) + { + return cgrp->kn->ino; + }
- struct cgroup_subsys_state * - css_next_descendant_pre(struct cgroup_subsys_state *pos, - struct cgroup_subsys_state *css); + /* cft/css accessors for cftype->write() operation */ + static inline struct cftype *of_cft(struct kernfs_open_file *of) + { + return of->kn->priv; + }
- struct cgroup_subsys_state * - css_rightmost_descendant(struct cgroup_subsys_state *pos); + struct cgroup_subsys_state *of_css(struct kernfs_open_file *of);
- /** - * css_for_each_descendant_pre - pre-order walk of a css's descendants - * @pos: the css * to use as the loop cursor - * @root: css whose descendants to walk - * - * Walk @root's descendants. @root is included in the iteration and the - * first node to be visited. Must be called under rcu_read_lock(). - * - * If a subsystem synchronizes ->css_online() and the start of iteration, a - * css which finished ->css_online() is guaranteed to be visible in the - * future iterations and will stay visible until the last reference is put. - * A css which hasn't finished ->css_online() or already finished - * ->css_offline() may show up during traversal. It's each subsystem's - * responsibility to synchronize against on/offlining. - * - * For example, the following guarantees that a descendant can't escape - * state updates of its ancestors. - * - * my_online(@css) - * { - * Lock @css's parent and @css; - * Inherit state from the parent; - * Unlock both. - * } - * - * my_update_state(@css) - * { - * css_for_each_descendant_pre(@pos, @css) { - * Lock @pos; - * if (@pos == @css) - * Update @css's state; - * else - * Verify @pos is alive and inherit state from its parent; - * Unlock @pos; - * } - * } - * - * As long as the inheriting step, including checking the parent state, is - * enclosed inside @pos locking, double-locking the parent isn't necessary - * while inheriting. The state update to the parent is guaranteed to be - * visible by walking order and, as long as inheriting operations to the - * same @pos are atomic to each other, multiple updates racing each other - * still result in the correct state. It's guaranateed that at least one - * inheritance happens for any css after the latest update to its parent. - * - * If checking parent's state requires locking the parent, each inheriting - * iteration should lock and unlock both @pos->parent and @pos. 
- * - * Alternatively, a subsystem may choose to use a single global lock to - * synchronize ->css_online() and ->css_offline() against tree-walking - * operations. - * - * It is allowed to temporarily drop RCU read lock during iteration. The - * caller is responsible for ensuring that @pos remains accessible until - * the start of the next iteration by, for example, bumping the css refcnt. - */ - #define css_for_each_descendant_pre(pos, css) \ - for ((pos) = css_next_descendant_pre(NULL, (css)); (pos); \ - (pos) = css_next_descendant_pre((pos), (css))) + /* cft/css accessors for cftype->seq_*() operations */ + static inline struct cftype *seq_cft(struct seq_file *seq) + { + return of_cft(seq->private); + }
- struct cgroup_subsys_state * - css_next_descendant_post(struct cgroup_subsys_state *pos, - struct cgroup_subsys_state *css); + static inline struct cgroup_subsys_state *seq_css(struct seq_file *seq) + { + return of_css(seq->private); + }
- /** - * css_for_each_descendant_post - post-order walk of a css's descendants - * @pos: the css * to use as the loop cursor - * @css: css whose descendants to walk - * - * Similar to css_for_each_descendant_pre() but performs post-order - * traversal instead. @root is included in the iteration and the last - * node to be visited. - * - * If a subsystem synchronizes ->css_online() and the start of iteration, a - * css which finished ->css_online() is guaranteed to be visible in the - * future iterations and will stay visible until the last reference is put. - * A css which hasn't finished ->css_online() or already finished - * ->css_offline() may show up during traversal. It's each subsystem's - * responsibility to synchronize against on/offlining. - * - * Note that the walk visibility guarantee example described in pre-order - * walk doesn't apply the same to post-order walks. + /* + * Name / path handling functions. All are thin wrappers around the kernfs + * counterparts and can be called under any context. */ - #define css_for_each_descendant_post(pos, css) \ - for ((pos) = css_next_descendant_post(NULL, (css)); (pos); \ - (pos) = css_next_descendant_post((pos), (css))) - - bool css_has_online_children(struct cgroup_subsys_state *css); - - /* A css_task_iter should be treated as an opaque object */ - struct css_task_iter { - struct cgroup_subsys *ss;
- struct list_head *cset_pos; - struct list_head *cset_head; - - struct list_head *task_pos; - struct list_head *tasks_head; - struct list_head *mg_tasks_head; - }; + static inline int cgroup_name(struct cgroup *cgrp, char *buf, size_t buflen) + { + return kernfs_name(cgrp->kn, buf, buflen); + }
- void css_task_iter_start(struct cgroup_subsys_state *css, - struct css_task_iter *it); - struct task_struct *css_task_iter_next(struct css_task_iter *it); - void css_task_iter_end(struct css_task_iter *it); + static inline char * __must_check cgroup_path(struct cgroup *cgrp, char *buf, + size_t buflen) + { + return kernfs_path(cgrp->kn, buf, buflen); + }
- int cgroup_attach_task_all(struct task_struct *from, struct task_struct *); - int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from); + static inline void pr_cont_cgroup_name(struct cgroup *cgrp) + { + pr_cont_kernfs_name(cgrp->kn); + }
- struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup, - struct cgroup_subsys *ss); - struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry, - struct cgroup_subsys *ss); + static inline void pr_cont_cgroup_path(struct cgroup *cgrp) + { + pr_cont_kernfs_path(cgrp->kn); + }
+/* + * Default Android check for whether the current process is allowed to move a + * task across cgroups, either because CAP_SYS_NICE is set or because the uid + * of the calling process is the same as the moved task or because we are + * running as root. + * Returns 0 if this is allowed, or -EACCES otherwise. + */ +int subsys_cgroup_allow_attach(struct cgroup_subsys_state *css, + struct cgroup_taskset *tset); + + #else /* !CONFIG_CGROUPS */
struct cgroup_subsys_state;
- static inline int cgroup_init_early(void) { return 0; } - static inline int cgroup_init(void) { return 0; } - static inline void cgroup_fork(struct task_struct *p) {} - static inline void cgroup_post_fork(struct task_struct *p) {} - static inline void cgroup_exit(struct task_struct *p) {} - + static inline void css_put(struct cgroup_subsys_state *css) {} + static inline int cgroup_attach_task_all(struct task_struct *from, + struct task_struct *t) { return 0; } static inline int cgroupstats_build(struct cgroupstats *stats, - struct dentry *dentry) - { - return -EINVAL; - } + struct dentry *dentry) { return -EINVAL; }
- static inline void css_put(struct cgroup_subsys_state *css) {} + static inline void cgroup_fork(struct task_struct *p) {} + static inline int cgroup_can_fork(struct task_struct *p, + void *ss_priv[CGROUP_CANFORK_COUNT]) + { return 0; } + static inline void cgroup_cancel_fork(struct task_struct *p, + void *ss_priv[CGROUP_CANFORK_COUNT]) {} + static inline void cgroup_post_fork(struct task_struct *p, + void *ss_priv[CGROUP_CANFORK_COUNT]) {} + static inline void cgroup_exit(struct task_struct *p) {} + static inline void cgroup_free(struct task_struct *p) {}
- /* No cgroups - nothing to do */ - static inline int cgroup_attach_task_all(struct task_struct *from, - struct task_struct *t) - { - return 0; - } + static inline int cgroup_init_early(void) { return 0; } + static inline int cgroup_init(void) { return 0; }
+static inline int subsys_cgroup_allow_attach(struct cgroup_subsys_state *css, + void *tset) +{ + return -EINVAL; +} #endif /* !CONFIG_CGROUPS */
#endif /* _LINUX_CGROUP_H */ diff --cc include/net/sock.h index 4e6159f,e96b2e8..1903243 --- a/include/net/sock.h +++ b/include/net/sock.h @@@ -1050,37 -1050,8 +1050,9 @@@ struct proto void (*destroy_cgroup)(struct mem_cgroup *memcg); struct cg_proto *(*proto_cgroup)(struct mem_cgroup *memcg); #endif + int (*diag_destroy)(struct sock *sk, int err); };
- /* - * Bits in struct cg_proto.flags - */ - enum cg_proto_flags { - /* Currently active and new sockets should be assigned to cgroups */ - MEMCG_SOCK_ACTIVE, - /* It was ever activated; we must disarm static keys on destruction */ - MEMCG_SOCK_ACTIVATED, - }; - - struct cg_proto { - struct page_counter memory_allocated; /* Current allocated memory. */ - struct percpu_counter sockets_allocated; /* Current number of sockets. */ - int memory_pressure; - long sysctl_mem[3]; - unsigned long flags; - /* - * memcg field is used to find which memcg we belong directly - * Each memcg struct can hold more than one cg_proto, so container_of - * won't really cut. - * - * The elegant solution would be having an inverse function to - * proto_cgroup in struct proto, but that means polluting the structure - * for everybody, instead of just for memcg users. - */ - struct mem_cgroup *memcg; - }; - int proto_register(struct proto *prot, int alloc_slab); void proto_unregister(struct proto *prot);
diff --cc include/uapi/linux/magic.h index 23de409,1dd008c..4ae8ef3 --- a/include/uapi/linux/magic.h +++ b/include/uapi/linux/magic.h @@@ -52,10 -52,9 +52,11 @@@ #define REISER2FS_SUPER_MAGIC_STRING "ReIsEr2Fs" #define REISER2FS_JR_SUPER_MAGIC_STRING "ReIsEr3Fs"
+#define SDCARDFS_SUPER_MAGIC 0xb550ca10 + #define SMB_SUPER_MAGIC 0x517B #define CGROUP_SUPER_MAGIC 0x27e0eb + #define CGROUP2_SUPER_MAGIC 0x63677270
#define STACK_END_MAGIC 0x57AC6E9D diff --cc kernel/cgroup.c index 0e98a35,862402d..721419e --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@@ -2375,44 -2661,47 +2661,101 @@@ static int cgroup_attach_task(struct cg return ret; }
+int subsys_cgroup_allow_attach(struct cgroup_subsys_state *css, struct cgroup_taskset *tset) +{ + const struct cred *cred = current_cred(), *tcred; + struct task_struct *task; + + if (capable(CAP_SYS_NICE)) + return 0; + + cgroup_taskset_for_each(task, tset) { + tcred = __task_cred(task); + + if (current != task && !uid_eq(cred->euid, tcred->uid) && + !uid_eq(cred->euid, tcred->suid)) + return -EACCES; + } + + return 0; +} + +static int cgroup_allow_attach(struct cgroup *cgrp, struct cgroup_taskset *tset) +{ + struct cgroup_subsys_state *css; + int i; + int ret; + + for_each_css(css, i, cgrp) { + if (css->ss->allow_attach) { + ret = css->ss->allow_attach(css, tset); + if (ret) + return ret; + } else { + return -EACCES; + } + } + + return 0; +} + + static int cgroup_procs_write_permission(struct task_struct *task, + struct cgroup *dst_cgrp, + struct kernfs_open_file *of) + { + const struct cred *cred = current_cred(); + const struct cred *tcred = get_task_cred(task); + int ret = 0; + + /* + * even if we're attaching all tasks in the thread group, we only + * need to check permissions on one of them. 
+ */ + if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) && - !uid_eq(cred->euid, tcred->uid) && - !uid_eq(cred->euid, tcred->suid)) - ret = -EACCES; ++ !uid_eq(cred->euid, tcred->uid) && ++ !uid_eq(cred->euid, tcred->suid)) { ++ /* ++ * if the default permission check fails, give each ++ * cgroup a chance to extend the permission check ++ */ ++ struct cgroup_taskset tset = { ++ .src_csets = LIST_HEAD_INIT(tset.src_csets), ++ .dst_csets = LIST_HEAD_INIT(tset.dst_csets), ++ .csets = &tset.src_csets, ++ }; ++ struct css_set *cset; ++ cset = task_css_set(task); ++ list_add(&cset->mg_node, &tset.src_csets); ++ ret = cgroup_allow_attach(dst_cgrp, &tset); ++ list_del(&tset.src_csets); ++ if (ret) ++ ret = -EACCES; ++ } + + if (!ret && cgroup_on_dfl(dst_cgrp)) { + struct super_block *sb = of->file->f_path.dentry->d_sb; + struct cgroup *cgrp; + struct inode *inode; + + spin_lock_bh(&css_set_lock); + cgrp = task_cgroup_from_root(task, &cgrp_dfl_root); + spin_unlock_bh(&css_set_lock); + + while (!cgroup_is_descendant(dst_cgrp, cgrp)) + cgrp = cgroup_parent(cgrp); + + ret = -ENOMEM; + inode = kernfs_get_inode(sb, cgrp->procs_file.kn); + if (inode) { + ret = inode_permission(inode, MAY_WRITE); + iput(inode); + } + } + + put_cred(tcred); + return ret; + } + /* * Find the task_struct of the task to attach by vpid and pass it along to the * function to attach either it or all tasks in its threadgroup. Will lock diff --cc kernel/sched/core.c index cc328d3,121af05..6ac7147 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@@ -8395,8 -8368,6 +8378,7 @@@ struct cgroup_subsys cpu_cgrp_subsys = .fork = cpu_cgroup_fork, .can_attach = cpu_cgroup_can_attach, .attach = cpu_cgroup_attach, + .allow_attach = subsys_cgroup_allow_attach, - .exit = cpu_cgroup_exit, .legacy_cftypes = cpu_files, .early_init = 1, }; diff --cc mm/memcontrol.c index 02e5bab,31623c7..20a2a21 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@@ -5164,14 -4896,7 +4896,13 @@@ static int mem_cgroup_can_attach(struc return ret; }
+static int mem_cgroup_allow_attach(struct cgroup_subsys_state *css, + struct cgroup_taskset *tset) +{ + return subsys_cgroup_allow_attach(css, tset); +} + - static void mem_cgroup_cancel_attach(struct cgroup_subsys_state *css, - struct cgroup_taskset *tset) + static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset) { if (mc.to) mem_cgroup_clear_mc(); @@@ -5333,17 -5057,10 +5063,15 @@@ static int mem_cgroup_can_attach(struc { return 0; } +static int mem_cgroup_allow_attach(struct cgroup_subsys_state *css, + struct cgroup_taskset *tset) +{ + return 0; +} - static void mem_cgroup_cancel_attach(struct cgroup_subsys_state *css, - struct cgroup_taskset *tset) + static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset) { } - static void mem_cgroup_move_task(struct cgroup_subsys_state *css, - struct cgroup_taskset *tset) + static void mem_cgroup_move_task(struct cgroup_taskset *tset) { } #endif
Hi Alex,
On 17 June 2016 at 14:29, Alex Shi alex.shi@linaro.org wrote:
Hi Amit,
I fixed some conflicts when merging lsk 4.1 into lsk 4.1-android. Would you like to review the solutions?
After the merge resolution I also picked your fix patch: 703920 'cgroup: refactor allow_attach handler for 4.4' for the allow_attach compiler issue.
Here is the merge conflict resolution:
Conflict resolutions look good to me.
Regards, Amit Pundir
commit c70c579d3c4abeacd9da1383b6b8d2e0817f91e8 Merge: b92a381 7c0ca54 Author: Alex Shi alex.shi@linaro.org Date: Fri Jun 17 15:12:36 2016 +0800
Merge branch 'linux-linaro-lsk-v4.1' into linux-linaro-lsk-v4.1-android Conflicts: include/linux/cgroup-defs.h kernel/sched/core.c mm/memcontrol.c in kernel/cgroup.c, rewrite to compatible with commit: d09dd4117 cgroup: Add generic cgroup subsystem permission
diff --cc include/linux/cgroup-defs.h index bdb4354,f7dbca9..72e4d67 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@@ -403,21 -427,16 +427,18 @@@ struct cgroup_subsys void (*css_reset)(struct cgroup_subsys_state *css); void (*css_e_css_changed)(struct cgroup_subsys_state *css);
int (*allow_attach)(struct cgroup_subsys_state *css,
struct cgroup_taskset *tset);
int (*can_attach)(struct cgroup_subsys_state *css,
struct cgroup_taskset *tset);
void (*cancel_attach)(struct cgroup_subsys_state *css,
struct cgroup_taskset *tset);
void (*attach)(struct cgroup_subsys_state *css,
struct cgroup_taskset *tset);
void (*fork)(struct task_struct *task);
void (*exit)(struct cgroup_subsys_state *css,
struct cgroup_subsys_state *old_css,
struct task_struct *task);
int (*can_attach)(struct cgroup_taskset *tset);
void (*cancel_attach)(struct cgroup_taskset *tset);
void (*attach)(struct cgroup_taskset *tset);
int (*can_fork)(struct task_struct *task, void **priv_p);
void (*cancel_fork)(struct task_struct *task, void *priv);
void (*fork)(struct task_struct *task, void *priv);
void (*exit)(struct task_struct *task);
void (*free)(struct task_struct *task); void (*bind)(struct cgroup_subsys_state *root_css);
int disabled; int early_init; /*
diff --cc include/linux/cgroup.h index f5da7ba,56e7af9..c96e20e --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@@ -367,195 -469,87 +469,103 @@@ static inline struct cgroup *task_cgrou return task_css(task, subsys_id)->cgroup; }
- struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos,
struct cgroup_subsys_state *parent);
- struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss);
- /* no synchronization, the result can only be used as a hint */
- static inline bool cgroup_is_populated(struct cgroup *cgrp)
- {
return cgrp->populated_cnt;
- }
- /**
- css_for_each_child - iterate through children of a css
- @pos: the css * to use as the loop cursor
- @parent: css whose children to walk
- Walk @parent's children. Must be called under rcu_read_lock().
- If a subsystem synchronizes ->css_online() and the start of iteration, a
- css which finished ->css_online() is guaranteed to be visible in the
- future iterations and will stay visible until the last reference is put.
- A css which hasn't finished ->css_online() or already finished
- ->css_offline() may show up during traversal. It's each subsystem's
- responsibility to synchronize against on/offlining.
- It is allowed to temporarily drop RCU read lock during iteration. The
- caller is responsible for ensuring that @pos remains accessible until
- the start of the next iteration by, for example, bumping the css refcnt.
- */
- #define css_for_each_child(pos, parent) \
for ((pos) = css_next_child(NULL, (parent)); (pos); \
(pos) = css_next_child((pos), (parent)))
- /* returns ino associated with a cgroup */
- static inline ino_t cgroup_ino(struct cgroup *cgrp)
- {
return cgrp->kn->ino;
- }
- struct cgroup_subsys_state *
- css_next_descendant_pre(struct cgroup_subsys_state *pos,
struct cgroup_subsys_state *css);
- /* cft/css accessors for cftype->write() operation */
- static inline struct cftype *of_cft(struct kernfs_open_file *of)
- {
return of->kn->priv;
- }
- struct cgroup_subsys_state *
- css_rightmost_descendant(struct cgroup_subsys_state *pos);
- struct cgroup_subsys_state *of_css(struct kernfs_open_file *of);
- /**
- css_for_each_descendant_pre - pre-order walk of a css's descendants
- @pos: the css * to use as the loop cursor
- @root: css whose descendants to walk
- Walk @root's descendants. @root is included in the iteration and the
- first node to be visited. Must be called under rcu_read_lock().
- If a subsystem synchronizes ->css_online() and the start of iteration, a
- css which finished ->css_online() is guaranteed to be visible in the
- future iterations and will stay visible until the last reference is put.
- A css which hasn't finished ->css_online() or already finished
- ->css_offline() may show up during traversal. It's each subsystem's
- responsibility to synchronize against on/offlining.
- For example, the following guarantees that a descendant can't escape
- state updates of its ancestors.
- my_online(@css)
- {
- Lock @css's parent and @css;
- Inherit state from the parent;
- Unlock both.
- }
- my_update_state(@css)
- {
- css_for_each_descendant_pre(@pos, @css) {
Lock @pos;
if (@pos == @css)
Update @css's state;
else
Verify @pos is alive and inherit state from its parent;
Unlock @pos;
- }
- }
- As long as the inheriting step, including checking the parent state, is
- enclosed inside @pos locking, double-locking the parent isn't necessary
- while inheriting. The state update to the parent is guaranteed to be
- visible by walking order and, as long as inheriting operations to the
- same @pos are atomic to each other, multiple updates racing each other
- still result in the correct state. It's guaranateed that at least one
- inheritance happens for any css after the latest update to its parent.
- If checking parent's state requires locking the parent, each inheriting
- iteration should lock and unlock both @pos->parent and @pos.
- Alternatively, a subsystem may choose to use a single global lock to
- synchronize ->css_online() and ->css_offline() against tree-walking
- operations.
- It is allowed to temporarily drop RCU read lock during iteration. The
- caller is responsible for ensuring that @pos remains accessible until
- the start of the next iteration by, for example, bumping the css refcnt.
- */
- #define css_for_each_descendant_pre(pos, css) \
for ((pos) = css_next_descendant_pre(NULL, (css)); (pos); \
(pos) = css_next_descendant_pre((pos), (css)))
- /* cft/css accessors for cftype->seq_*() operations */
- static inline struct cftype *seq_cft(struct seq_file *seq)
- {
return of_cft(seq->private);
- }
- struct cgroup_subsys_state *
- css_next_descendant_post(struct cgroup_subsys_state *pos,
struct cgroup_subsys_state *css);
- static inline struct cgroup_subsys_state *seq_css(struct seq_file *seq)
- {
return of_css(seq->private);
- }
- /**
- css_for_each_descendant_post - post-order walk of a css's descendants
- @pos: the css * to use as the loop cursor
- @css: css whose descendants to walk
- Similar to css_for_each_descendant_pre() but performs post-order
- traversal instead. @root is included in the iteration and the last
- node to be visited.
- If a subsystem synchronizes ->css_online() and the start of iteration, a
- css which finished ->css_online() is guaranteed to be visible in the
- future iterations and will stay visible until the last reference is put.
- A css which hasn't finished ->css_online() or already finished
- ->css_offline() may show up during traversal. It's each subsystem's
- responsibility to synchronize against on/offlining.
- Note that the walk visibility guarantee example described in pre-order
- walk doesn't apply the same to post-order walks.
- /*
- Name / path handling functions. All are thin wrappers around the kernfs
*/
- counterparts and can be called under any context.
#define css_for_each_descendant_post(pos, css) \
for ((pos) = css_next_descendant_post(NULL, (css)); (pos); \
(pos) = css_next_descendant_post((pos), (css)))
bool css_has_online_children(struct cgroup_subsys_state *css);
/* A css_task_iter should be treated as an opaque object */
struct css_task_iter {
struct cgroup_subsys *ss;
struct list_head *cset_pos;
struct list_head *cset_head;
struct list_head *task_pos;
struct list_head *tasks_head;
struct list_head *mg_tasks_head;
};
- static inline int cgroup_name(struct cgroup *cgrp, char *buf, size_t buflen)
- {
return kernfs_name(cgrp->kn, buf, buflen);
- }
- void css_task_iter_start(struct cgroup_subsys_state *css,
struct css_task_iter *it);
- struct task_struct *css_task_iter_next(struct css_task_iter *it);
- void css_task_iter_end(struct css_task_iter *it);
- static inline char * __must_check cgroup_path(struct cgroup *cgrp, char *buf,
size_t buflen)
- {
return kernfs_path(cgrp->kn, buf, buflen);
- }
- int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
- int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);
- static inline void pr_cont_cgroup_name(struct cgroup *cgrp)
- {
pr_cont_kernfs_name(cgrp->kn);
- }
- struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup,
struct cgroup_subsys *ss);
- struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
struct cgroup_subsys *ss);
- static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
- {
pr_cont_kernfs_path(cgrp->kn);
- }
+/*
- Default Android check for whether the current process is allowed to move a
- task across cgroups, either because CAP_SYS_NICE is set or because the uid
- of the calling process is the same as the moved task or because we are
- running as root.
- Returns 0 if this is allowed, or -EACCES otherwise.
- */
+int subsys_cgroup_allow_attach(struct cgroup_subsys_state *css,
struct cgroup_taskset *tset);
#else /* !CONFIG_CGROUPS */
struct cgroup_subsys_state;
- static inline int cgroup_init_early(void) { return 0; }
- static inline int cgroup_init(void) { return 0; }
- static inline void cgroup_fork(struct task_struct *p) {}
- static inline void cgroup_post_fork(struct task_struct *p) {}
- static inline void cgroup_exit(struct task_struct *p) {}
- static inline void css_put(struct cgroup_subsys_state *css) {}
- static inline int cgroup_attach_task_all(struct task_struct *from,
static inline int cgroupstats_build(struct cgroupstats *stats,struct task_struct *t) { return 0; }
struct dentry *dentry)
- {
return -EINVAL;
- }
struct dentry *dentry) { return -EINVAL; }
- static inline void css_put(struct cgroup_subsys_state *css) {}
- static inline void cgroup_fork(struct task_struct *p) {}
- static inline int cgroup_can_fork(struct task_struct *p,
void *ss_priv[CGROUP_CANFORK_COUNT])
- { return 0; }
- static inline void cgroup_cancel_fork(struct task_struct *p,
void *ss_priv[CGROUP_CANFORK_COUNT]) {}
- static inline void cgroup_post_fork(struct task_struct *p,
void *ss_priv[CGROUP_CANFORK_COUNT]) {}
- static inline void cgroup_exit(struct task_struct *p) {}
- static inline void cgroup_free(struct task_struct *p) {}
- /* No cgroups - nothing to do */
- static inline int cgroup_attach_task_all(struct task_struct *from,
struct task_struct *t)
- {
return 0;
- }
- static inline int cgroup_init_early(void) { return 0; }
- static inline int cgroup_init(void) { return 0; }
+static inline int subsys_cgroup_allow_attach(struct cgroup_subsys_state *css,
void *tset)
+{
return -EINVAL;
+} #endif /* !CONFIG_CGROUPS */
#endif /* _LINUX_CGROUP_H */ diff --cc include/net/sock.h index 4e6159f,e96b2e8..1903243 --- a/include/net/sock.h +++ b/include/net/sock.h @@@ -1050,37 -1050,8 +1050,9 @@@ struct proto void (*destroy_cgroup)(struct mem_cgroup *memcg); struct cg_proto *(*proto_cgroup)(struct mem_cgroup *memcg); #endif
int (*diag_destroy)(struct sock *sk, int err);
};
- /*
- Bits in struct cg_proto.flags
- */
- enum cg_proto_flags {
/* Currently active and new sockets should be assigned to cgroups */
MEMCG_SOCK_ACTIVE,
/* It was ever activated; we must disarm static keys on destruction */
MEMCG_SOCK_ACTIVATED,
- };
- struct cg_proto {
struct page_counter memory_allocated; /* Current allocated memory. */
struct percpu_counter sockets_allocated; /* Current number of sockets. */
int memory_pressure;
long sysctl_mem[3];
unsigned long flags;
/*
* memcg field is used to find which memcg we belong directly
* Each memcg struct can hold more than one cg_proto, so container_of
* won't really cut.
*
* The elegant solution would be having an inverse function to
* proto_cgroup in struct proto, but that means polluting the structure
* for everybody, instead of just for memcg users.
*/
struct mem_cgroup *memcg;
- };
- int proto_register(struct proto *prot, int alloc_slab); void proto_unregister(struct proto *prot);
diff --cc include/uapi/linux/magic.h index 23de409,1dd008c..4ae8ef3 --- a/include/uapi/linux/magic.h +++ b/include/uapi/linux/magic.h @@@ -52,10 -52,9 +52,11 @@@ #define REISER2FS_SUPER_MAGIC_STRING "ReIsEr2Fs" #define REISER2FS_JR_SUPER_MAGIC_STRING "ReIsEr3Fs"
+#define SDCARDFS_SUPER_MAGIC 0xb550ca10
#define SMB_SUPER_MAGIC 0x517B #define CGROUP_SUPER_MAGIC 0x27e0eb
#define CGROUP2_SUPER_MAGIC 0x63677270
#define STACK_END_MAGIC 0x57AC6E9D
diff --cc kernel/cgroup.c index 0e98a35,862402d..721419e --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@@ -2375,44 -2661,47 +2661,101 @@@ static int cgroup_attach_task(struct cg return ret; }
+int subsys_cgroup_allow_attach(struct cgroup_subsys_state *css, struct cgroup_taskset *tset) +{
const struct cred *cred = current_cred(), *tcred;
struct task_struct *task;
if (capable(CAP_SYS_NICE))
return 0;
cgroup_taskset_for_each(task, tset) {
tcred = __task_cred(task);
if (current != task && !uid_eq(cred->euid, tcred->uid) &&
!uid_eq(cred->euid, tcred->suid))
return -EACCES;
}
return 0;
+}
+static int cgroup_allow_attach(struct cgroup *cgrp, struct cgroup_taskset *tset) +{
struct cgroup_subsys_state *css;
int i;
int ret;
for_each_css(css, i, cgrp) {
if (css->ss->allow_attach) {
ret = css->ss->allow_attach(css, tset);
if (ret)
return ret;
} else {
return -EACCES;
}
}
return 0;
+}
- static int cgroup_procs_write_permission(struct task_struct *task,
struct cgroup *dst_cgrp,
struct kernfs_open_file *of)
- {
const struct cred *cred = current_cred();
const struct cred *tcred = get_task_cred(task);
int ret = 0;
/*
* even if we're attaching all tasks in the thread group, we only
* need to check permissions on one of them.
*/
if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
!uid_eq(cred->euid, tcred->uid) &&
!uid_eq(cred->euid, tcred->suid))
ret = -EACCES;
++ !uid_eq(cred->euid, tcred->uid) && ++ !uid_eq(cred->euid, tcred->suid)) { ++ /* ++ * if the default permission check fails, give each ++ * cgroup a chance to extend the permission check ++ */ ++ struct cgroup_taskset tset = { ++ .src_csets = LIST_HEAD_INIT(tset.src_csets), ++ .dst_csets = LIST_HEAD_INIT(tset.dst_csets), ++ .csets = &tset.src_csets, ++ }; ++ struct css_set *cset; ++ cset = task_css_set(task); ++ list_add(&cset->mg_node, &tset.src_csets); ++ ret = cgroup_allow_attach(dst_cgrp, &tset); ++ list_del(&tset.src_csets); ++ if (ret) ++ ret = -EACCES; ++ }
if (!ret && cgroup_on_dfl(dst_cgrp)) {
struct super_block *sb = of->file->f_path.dentry->d_sb;
struct cgroup *cgrp;
struct inode *inode;
spin_lock_bh(&css_set_lock);
cgrp = task_cgroup_from_root(task, &cgrp_dfl_root);
spin_unlock_bh(&css_set_lock);
while (!cgroup_is_descendant(dst_cgrp, cgrp))
cgrp = cgroup_parent(cgrp);
ret = -ENOMEM;
inode = kernfs_get_inode(sb, cgrp->procs_file.kn);
if (inode) {
ret = inode_permission(inode, MAY_WRITE);
iput(inode);
}
}
put_cred(tcred);
return ret;
- }
- /*
- Find the task_struct of the task to attach by vpid and pass it along to the
- function to attach either it or all tasks in its threadgroup. Will lock
diff --cc kernel/sched/core.c index cc328d3,121af05..6ac7147 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@@ -8395,8 -8368,6 +8378,7 @@@ struct cgroup_subsys cpu_cgrp_subsys = .fork = cpu_cgroup_fork, .can_attach = cpu_cgroup_can_attach, .attach = cpu_cgroup_attach,
.allow_attach = subsys_cgroup_allow_attach,
};.exit = cpu_cgroup_exit, .legacy_cftypes = cpu_files, .early_init = 1,
diff --cc mm/memcontrol.c index 02e5bab,31623c7..20a2a21 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@@ -5164,14 -4896,7 +4896,13 @@@ static int mem_cgroup_can_attach(struc return ret; }
+static int mem_cgroup_allow_attach(struct cgroup_subsys_state *css,
struct cgroup_taskset *tset)
+{
return subsys_cgroup_allow_attach(css, tset);
+}
- static void mem_cgroup_cancel_attach(struct cgroup_subsys_state *css,
struct cgroup_taskset *tset)
- static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset) { if (mc.to) mem_cgroup_clear_mc();
@@@ -5333,17 -5057,10 +5063,15 @@@ static int mem_cgroup_can_attach(struc { return 0; } +static int mem_cgroup_allow_attach(struct cgroup_subsys_state *css,
struct cgroup_taskset *tset)
+{
return 0;
+}
- static void mem_cgroup_cancel_attach(struct cgroup_subsys_state *css,
struct cgroup_taskset *tset)
- static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset) { }
- static void mem_cgroup_move_task(struct cgroup_subsys_state *css,
struct cgroup_taskset *tset)
- static void mem_cgroup_move_task(struct cgroup_taskset *tset) { } #endif
On 06/20/2016 02:01 PM, Amit Pundir wrote:
Hi Alex,
On 17 June 2016 at 14:29, Alex Shi alex.shi@linaro.org wrote:
Hi Amit,
I fixed some conflicts when merging lsk 4.1 into lsk 4.1-android. Would you like to review the solutions?
After the merge resolution I also picked your fix patch: 703920 'cgroup: refactor allow_attach handler for 4.4' for the allow_attach compiler issue.
Here is the merge conflict resolution:
Conflict resolutions look good to me.
pushed. thanks!
linaro-kernel@lists.linaro.org