#include "cgroup-internal.h"

#include <linux/ctype.h>
#include <linux/kmod.h>
#include <linux/sort.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/magic.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/delayacct.h>
#include <linux/pid_namespace.h>
#include <linux/cgroupstats.h>

#include <trace/events/cgroup.h>

/*
 * pidlists linger the following amount before being destroyed.  The goal
 * is avoiding frequent destruction in the middle of consecutive read calls.
 * Expiring in the middle is a performance problem not a correctness one.
 * 1 sec should be enough.
 */
#define CGROUP_PIDLIST_DESTROY_DELAY	HZ

/* Controllers blocked by the commandline in v1 */
static u16 cgroup_no_v1_mask;

/*
 * pidlist destructions need to be flushed on cgroup destruction.  Use a
 * separate workqueue as flush domain.
 */
static struct workqueue_struct *cgroup_pidlist_destroy_wq;

/*
 * Protects cgroup_subsys->release_agent_path.  Modifying it also requires
 * cgroup_mutex.  Reading requires either cgroup_mutex or this spinlock.
 */
static DEFINE_SPINLOCK(release_agent_path_lock);

/* True if subsystem @ssid was disabled on the v1 hierarchy via cgroup_no_v1= */
bool cgroup1_ssid_disabled(int ssid)
{
	return cgroup_no_v1_mask & (1 << ssid);
}

/**
 * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
 * @from: attach to all cgroups of a given task
 * @tsk: the task to be attached
 *
 * Walks every mounted hierarchy except the default (v2) one and moves
 * @tsk into @from's cgroup on each.  Stops at the first failure and
 * returns that error; 0 on success.  The whole walk runs under
 * cgroup_mutex with the threadgroup rwsem write-held so no tasks can
 * fork/exit their way between cgroups while we migrate.
 */
int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
{
	struct cgroup_root *root;
	int retval = 0;

	mutex_lock(&cgroup_mutex);
	percpu_down_write(&cgroup_threadgroup_rwsem);
	for_each_root(root) {
		struct cgroup *from_cgrp;

		/* the default (v2) hierarchy is handled separately; skip it */
		if (root == &cgrp_dfl_root)
			continue;

		/* css_set_lock guards the task->css_set mapping we read here */
		spin_lock_irq(&css_set_lock);
		from_cgrp = task_cgroup_from_root(from, root);
		spin_unlock_irq(&css_set_lock);

		retval = cgroup_attach_task(from_cgrp, tsk, false);
		if (retval)
			break;	/* earlier hierarchies stay attached on error */
	}
	percpu_up_write(&cgroup_threadgroup_rwsem);
	mutex_unlock(&cgroup_mutex);

	return retval;
}
EXPORT_SYMBOL_GPL(cgroup_attach_task_all);

/**