How to kill a user process from the kernel
demo code:
<code class="language-plaintext hljs">#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stat.h>
#include <linux/fs.h>
#include <linux/kdev_t.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/seq_file.h>
#include <linux/sched/signal.h>
#include <linux/proc_fs.h>
#include <linux/pid.h>
#include <linux/pci.h>
#include <linux/usb.h>
#include <linux/kobject.h>
#include <linux/sched/mm.h>
#include <linux/platform_device.h>
MODULE_AUTHOR("zlcao");
MODULE_LICENSE("GPL");
int seqfile_debug_mode = 0;
EXPORT_SYMBOL(seqfile_debug_mode);
module_param(seqfile_debug_mode, int, 0664);
int pid_number = -1;
EXPORT_SYMBOL(pid_number);
module_param(pid_number, int, 0664);
static void kill_processes(int pid_nr);
// Start iterating the task list
// The value returned by my_seq_ops_start() is passed to my_seq_ops_next() as the v argument
static void *my_seq_ops_start(struct seq_file *m, loff_t *pos)
{
loff_t index = *pos;
struct task_struct *task;
printk("%s line %d, index %lld.count %ld, size %ld here.\n", __func__, __LINE__, index, m->count, m->size);
if(seqfile_debug_mode == 0) {
// If the output buffer is too small, seq_file may call start() again,
// passing in the pos that the iteration had already reached,
// so the starting task has to be recomputed from pos here
for_each_process(task) {
if (index-- == 0) {
return task;
}
}
} else {
return NULL + (*pos == 0);
}
return NULL;
}
// Keep iterating until my_seq_ops_next() returns NULL or an error
static void *my_seq_ops_next(struct seq_file *m, void *v, loff_t *pos)
{
struct task_struct *task = NULL;
if(seqfile_debug_mode == 0) {
task = next_task((struct task_struct *)v);
// Incrementing pos here does not seem to make any difference
++ *pos;
// Returning NULL ends the iteration
if(task == &init_task) {
return NULL;
}
} else {
++ *pos;
}
return task;
}
// seq_file calls stop() when the iteration finishes or an error occurs
static void my_seq_ops_stop(struct seq_file *m, void *v)
{
}
static int lookup_pci_devices(struct device *dev, void *data)
{
struct seq_file *m = (struct seq_file *)data;
struct pci_dev *pdev = to_pci_dev(dev);
seq_printf(m, "vendor id 0x%x, device id 0x%x, devname %s.\n", pdev->vendor, pdev->device, dev_name(&pdev->dev));
return 0;
}
static int lookup_pci_drivers(struct device_driver *drv, void *data)
{
struct seq_file *m = (struct seq_file *)data;
seq_printf(m, "driver name %s.\n", drv->name);
return 0;
}
static int lookup_platform_devices(struct device *dev, void *data)
{
struct seq_file *m = (struct seq_file *)data;
struct platform_device *platdev = to_platform_device(dev);
seq_printf(m, "devpath %s.\n", platdev->name);
return 0;
}
static int lookup_platform_drivers(struct device_driver *drv, void *data)
{
struct seq_file *m = (struct seq_file *)data;
seq_printf(m, "driver name %s.\n", drv->name);
return 0;
}
static int list_device_belongs_todriver_pci(struct device *dev, void *p)
{
struct seq_file *m = (struct seq_file *)p;
struct pci_dev *pdev = to_pci_dev(dev);
seq_printf(m, "vendor id 0x%x, device id 0x%x, devname %s.\n", pdev->vendor, pdev->device, dev_name(&pdev->dev));
return 0;
}
static int list_device_belongs_todriver_platform(struct device *dev, void *p)
{
struct seq_file *m = (struct seq_file *)p;
struct platform_device *platdev = to_platform_device(dev);
seq_printf(m, "platdevname %s.\n", platdev->name);
return 0;
}
static int pcie_device_info(struct pci_dev *pdev, void *data)
{
struct seq_file *m = (struct seq_file *)data;
seq_printf(m, "vendor id 0x%04x, device id 0x%04x, devname %s, belongs to bus %16s, parent bus name %6s subordinate 0x%p.\n", \
pdev->vendor, pdev->device, dev_name(&pdev->dev), pdev->bus->name, pdev->bus->parent? pdev->bus->parent->name : "null", pdev->subordinate);
if(pdev->subordinate) {
seq_printf(m, " subordinate have bus name %s.\n", pdev->subordinate->name);
if(pdev->subordinate->self) {
seq_printf(m, " subordinate have dev name %s.\n", dev_name(&pdev->subordinate->self->dev));
if(pdev->subordinate->self != pdev) {
seq_printf(m, " cant happend!\n");
} else {
seq_printf(m, " surely!\n");
}
}
} else {
seq_printf(m, " subordinate not have.\n");
}
if(pdev->bus->self) {
seq_printf(m, " device belongs to child pci bus %s.\n", dev_name(&pdev->bus->self->dev));
} else {
seq_printf(m, " device belongs to top lvl pci bus.\n");
}
seq_printf(m, "\n");
return 0;
}
static ssize_t zilong_attr_show(struct kobject *kobj, struct attribute *attr,char *buf)
{
return 0;
}
static ssize_t zilong_attr_store(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count)
{
return 0;
}
static const struct sysfs_ops zilong_sysfs_ops = {
.show = zilong_attr_show,
.store = zilong_attr_store,
};
static struct kobj_type zilong_ktype = {
.release = NULL,
.sysfs_ops = &zilong_sysfs_ops,
.namespace = NULL,
.get_ownership = NULL,
};
// This function writes data into seq_file's internal buffer;
// seq_file copies the buffered data to user space at the appropriate time.
// The parameter @v is the value returned by start()/next()
static int my_seq_ops_show(struct seq_file *m, void *v)
{
struct task_struct *task = NULL;
struct task_struct *tsk = NULL;
struct task_struct *p = NULL;
struct file *file = m->private;
struct pid *session = NULL;
if(seqfile_debug_mode == 0) {
seq_puts(m, " file=");
seq_file_path(m, file, "\n");
seq_putc(m, ' ');
task = (struct task_struct *)v;
session = task_session(task);
tsk = pid_task(session, PIDTYPE_PID);
if(task->flags & PF_KTHREAD) {
seq_printf(m, "Kernel thread: PID=%u, task: %s, index=%lld, read_pos=%lld, %s.\n", task->tgid, task->comm, m->index, m->read_pos, tsk? "has session" : "no session");
} else {
seq_printf(m, "User thread: PID=%u, task: %s, index=%lld, read_pos=%lld %s.\n", task->tgid, task->comm, m->index, m->read_pos, tsk? "has session" : "no session");
}
} else if(seqfile_debug_mode == 1) {
struct task_struct *g, *p;
static int oldcount = 0;
static int entercount = 0;
char *str;
printk("%s line %d here enter %d times.\n", __func__, __LINE__, ++ entercount);
seq_printf(m, "%s line %d here enter %d times.\n", __func__, __LINE__, ++ entercount);
rcu_read_lock();
for_each_process_thread(g, p) {
struct task_struct *session = pid_task(task_session(g), PIDTYPE_PID);
struct task_struct *thread = pid_task(task_session(p), PIDTYPE_PID);
struct task_struct *ggroup = pid_task(task_pgrp(g), PIDTYPE_PID);
struct task_struct *pgroup = pid_task(task_pgrp(p), PIDTYPE_PID);
struct pid * pid = task_session(g);
if(list_empty(&p->tasks)) {
str = "empty";
} else {
str = "not empty";
}
seq_printf(m, "process %s(pid %d tgid %d,cpu%d) thread %s(pid %d tgid %d,cpu%d),threadnum %d, %d. tasks->prev = %p, tasks->next = %p, p->tasks=%p, %s, process parent %s(pid %d tgid %d), thread parent%s(pid %d, tgid %d, files %p\n)",
g->comm, task_pid_nr(g), task_tgid_nr(g), task_cpu(g), \
p->comm, task_pid_nr(p), task_tgid_nr(p), task_cpu(p), \
get_nr_threads(g), get_nr_threads(p), p->tasks.prev, p->tasks.next, &p->tasks, str, g->real_parent->comm, \
task_pid_nr(g->real_parent),task_tgid_nr(g->real_parent), p->real_parent->comm, task_pid_nr(p->real_parent), task_tgid_nr(p->real_parent), p->files);
if(ggroup) {
seq_printf(m, "ggroup(pid %d tgid %d).", task_pid_nr(ggroup),task_tgid_nr(ggroup));
}
if(pgroup) {
seq_printf(m, "pgroup(pid %d tgid %d).", task_pid_nr(pgroup),task_tgid_nr(pgroup));
}
seq_printf(m, "current smp processor id %d.", smp_processor_id());
if(thread) {
seq_printf(m, "thread session %s(%d).", thread->comm, task_pid_nr(thread));
}
if(session) {
seq_printf(m, "process session %s(%d).", session->comm, task_pid_nr(session));
}
if(oldcount == 0 || oldcount != m->size) {
printk("%s line %d, m->count %ld, m->size %ld.", __func__, __LINE__, m->count, m->size);
oldcount = m->size;
}
if(pid){
seq_printf(m, "pid task %p,pgid task %p, psid_task %p", pid_task(pid, PIDTYPE_PID), pid_task(pid, PIDTYPE_PGID), pid_task(pid, PIDTYPE_SID));
seq_printf(m, "pid task %s,pgid task %s, psid_task %s", pid_task(pid, PIDTYPE_PID)->comm, pid_task(pid, PIDTYPE_PGID)->comm, pid_task(pid, PIDTYPE_SID)->comm);
}
seq_printf(m, "\n");
}
rcu_read_unlock();
} else if(seqfile_debug_mode == 2) {
for_each_process(task) {
struct pid *pgrp = task_pgrp(task);
seq_printf(m, "Group Header %s(%d,cpu%d):\n", task->comm, task_pid_nr(task), task_cpu(task));
do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
seq_printf(m, " process %s(%d,cpu%d) thread %s(%d,cpu%d),threadnum %d, %d.\n",
task->comm, task_pid_nr(task), task_cpu(task), \
p->comm, task_pid_nr(p), task_cpu(p), \
get_nr_threads(task), get_nr_threads(p));
} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
}
} else if (seqfile_debug_mode == 3) {
for_each_process(task) {
struct pid *session = task_session(task);
struct task_struct *tsk = pid_task(session, PIDTYPE_PID);
if(tsk) {
seq_printf(m, "session task %s(%d,cpu%d):", tsk->comm, task_pid_nr(tsk), task_cpu(tsk));
} else {
seq_printf(m, "process %s(%d,cpu%d) has no session task.", task->comm, task_pid_nr(task), task_cpu(task));
}
seq_printf(m, "session header %s(%d,cpu%d):\n", task->comm, task_pid_nr(task), task_cpu(task));
do_each_pid_task(session, PIDTYPE_SID, p) {
seq_printf(m, " process %s(%d,cpu%d) thread %s(%d,cpu%d),threadnum %d, %d, spidtask %s(%d,%d).\n",
task->comm, task_pid_nr(task), task_cpu(task), \
p->comm, task_pid_nr(p), task_cpu(p), \
get_nr_threads(task), get_nr_threads(p), pid_task(session, PIDTYPE_SID)->comm, pid_task(session, PIDTYPE_SID)->tgid, pid_task(session, PIDTYPE_SID)->pid);
if(pid_task(session, PIDTYPE_PID)) {
seq_printf(m, "pidtask %s(%d,%d).\n", pid_task(session, PIDTYPE_PID)->comm, pid_task(session, PIDTYPE_PID)->tgid, pid_task(session, PIDTYPE_PID)->pid);
}
} while_each_pid_task(session, PIDTYPE_SID, p);
}
} else if(seqfile_debug_mode == 4) {
struct task_struct *thread, *child;
for_each_process(task) {
seq_printf(m, "process %s(%d,cpu%d):\n", task->comm, task_pid_nr(task), task_cpu(task));
for_each_thread(task, thread) {
list_for_each_entry(child, &thread->children, sibling) {
seq_printf(m, " thread %s(%d,cpu%d) child %s(%d,cpu%d),threadnum %d, %d.\n",
thread->comm, task_pid_nr(thread), task_cpu(thread), \
child->comm, task_pid_nr(child), task_cpu(child), \
get_nr_threads(thread), get_nr_threads(child));
}
}
}
} else if(seqfile_debug_mode == 5) {
struct task_struct *g, *t;
do_each_thread (g, t) {
seq_printf(m, "Process %s(%d cpu%d), thread %s(%d cpu%d), threadnum %d.\n", g->comm, task_pid_nr(g), task_cpu(g), t->comm, task_pid_nr(t), task_cpu(t), get_nr_threads(g));
} while_each_thread (g, t);
} else if(seqfile_debug_mode == 6) {
for_each_process(task) {
struct pid *pid = task_pid(task);
seq_printf(m, "Process %s(%d,cpu%d) pid %d, tgid %d:\n", task->comm, task_pid_nr(task), task_cpu(task), task_pid_vnr(task), task_tgid_vnr(task));
do_each_pid_task(pid, PIDTYPE_TGID, p) {
seq_printf(m, " process %s(%d,cpu%d) thread %s(%d,cpu%d),threadnum %d, %d. pid %d, tgid %d\n",
task->comm, task_pid_nr(task), task_cpu(task), \
p->comm, task_pid_nr(p), task_cpu(p), \
get_nr_threads(task), get_nr_threads(p), task_pid_vnr(p), task_tgid_vnr(p));
} while_each_pid_task(pid, PIDTYPE_TGID, p);
}
} else if(seqfile_debug_mode == 7) {
for_each_process(task) {
struct pid *pid = task_pid(task);
seq_printf(m, "Process %s(%d,cpu%d) pid %d, tgid %d:\n", task->comm, task_pid_nr(task), task_cpu(task), task_pid_vnr(task), task_tgid_vnr(task));
do_each_pid_task(pid, PIDTYPE_PID, p) {
seq_printf(m, " process %s(%d,cpu%d) thread %s(%d,cpu%d),threadnum %d, %d. pid %d, tgid %d\n",
task->comm, task_pid_nr(task), task_cpu(task), \
p->comm, task_pid_nr(p), task_cpu(p), \
get_nr_threads(task), get_nr_threads(p), task_pid_vnr(p), task_tgid_vnr(p));
} while_each_pid_task(pid, PIDTYPE_PID, p);
}
} else if(seqfile_debug_mode == 8) {
bus_for_each_dev(&pci_bus_type, NULL, (void*)m, lookup_pci_devices);
bus_for_each_drv(&pci_bus_type, NULL, (void*)m, lookup_pci_drivers);
} else if(seqfile_debug_mode == 9) {
struct device_driver *drv;
drv = driver_find("pcieport", &pci_bus_type);
driver_for_each_device(drv, NULL, (void*)m, list_device_belongs_todriver_pci);
} else if(seqfile_debug_mode == 10) {
for_each_process(task) {
seq_printf(m, "Process %s(%d),state 0x%08lx, exit_state 0x%08x, refcount %d, usage %d rcucount %d.", \
task->comm, task->tgid, task->state, task->exit_state, refcount_read(&task->stack_refcount), refcount_read(&task->usage), refcount_read(&task->rcu_users));
if(task->parent) {
seq_printf(m, "parent name %s pid %d.\n", task->parent->comm, task->parent->tgid);
} else {
seq_printf(m, "no parent.\n");
}
}
} else if(seqfile_debug_mode == 11) {
struct pci_bus *bus;
list_for_each_entry(bus, &pci_root_buses, node) {
seq_printf(m, "pcibus name %s.\n", bus->name);
pci_walk_bus(bus, pcie_device_info, (void*)m);
}
} else if(seqfile_debug_mode == 12) {
struct device_driver *drv;
// EXPORT_SYMBOL(usb_bus_type);
// bus_for_each_dev(&usb_bus_type, NULL, (void*)m, lookup_usb_devices);
// bus_for_each_drv(&usb_bus_type, NULL, (void*)m, lookup_usb_drivers);
bus_for_each_dev(&platform_bus_type, NULL, (void*)m, lookup_platform_devices);
bus_for_each_drv(&platform_bus_type, NULL, (void*)m, lookup_platform_drivers);
drv = driver_find("demo_platform", &platform_bus_type);
driver_for_each_device(drv, NULL, (void*)m, list_device_belongs_todriver_platform);
} else if(seqfile_debug_mode == 13) {
static struct kset *class_zilong;
static struct kobject kobj;
int ret;
class_zilong = kset_create_and_add("zilong_class", NULL, NULL);
if (!class_zilong) {
printk("%s line %d, fatal error, create class failure.\n", __func__, __LINE__);
return -ENOMEM;
}
ret = kobject_init_and_add(&kobj, &zilong_ktype, &class_zilong->kobj, "%s-%d", "zilong", 1);
if(ret < 0) {
printk("%s line %d, fatal error, create class failure.\n", __func__, __LINE__);
return -ENOMEM;
}
} else if(seqfile_debug_mode == 14) {
// cad pid is process 1 pid.
int ret = kill_cad_pid(SIGINT, 1);
printk("%s lne %d ret %d.\n", __func__, __LINE__, ret);
} else if(seqfile_debug_mode == 15) {
kill_processes(pid_number);
} else {
printk("%s line %d,cant be here, seqfile_debug_mode = %d.\n", __func__, __LINE__, seqfile_debug_mode);
}
return 0;
}
static struct task_struct *find_lock_task_mm(struct task_struct *p)
{
struct task_struct *t;
rcu_read_lock();
for_each_thread(p, t) {
task_lock(t);
if (likely(t->mm))
goto found;
task_unlock(t);
}
t = NULL;
found:
rcu_read_unlock();
return t;
}
static bool process_shares_task_mm(struct task_struct *p, struct mm_struct *mm)
{
struct task_struct *t;
for_each_thread(p, t) {
struct mm_struct *t_mm = READ_ONCE(t->mm);
if (t_mm)
return t_mm == mm;
}
return false;
}
static void kill_processes(int pid_nr)
{
struct task_struct *victim;
struct task_struct *p;
struct mm_struct *mm;
victim = get_pid_task(find_vpid(pid_nr), PIDTYPE_PID);
if(victim == NULL) {
printk("%s line %d,return.\n", __func__, __LINE__);
return;
}
printk("%s line %d, task has live %d threads total.\n", __func__, __LINE__, atomic_read(&victim->signal->live));
p = find_lock_task_mm(victim);
if (!p) {
put_task_struct(victim);
return;
} else {
get_task_struct(p);
put_task_struct(victim);
victim = p;
}
mm = victim->mm;
mmgrab(mm);
kill_pid(find_vpid(pid_nr), SIGKILL, 1);
task_unlock(victim);
rcu_read_lock();
for_each_process(p) {
if (!process_shares_task_mm(p, mm))
continue;
if (same_thread_group(p, victim))
continue;
if (unlikely(p->flags & PF_KTHREAD))
continue;
kill_pid(get_pid(task_pid(p)), SIGKILL, 1);
}
rcu_read_unlock();
mmdrop(mm);
while(atomic_read(&victim->signal->live)) {
printk("%s line %d, live %d.\n", __func__, __LINE__, atomic_read(&victim->signal->live));
}
put_task_struct(victim);
}
static const struct seq_operations my_seq_ops = {
.start = my_seq_ops_start,
.next = my_seq_ops_next,
.stop = my_seq_ops_stop,
.show = my_seq_ops_show,
};
static int proc_seq_open(struct inode *inode, struct file *file)
{
int ret;
struct seq_file *m;
ret = seq_open(file, &my_seq_ops);
if(!ret) {
m = file->private_data;
m->private = file;
}
return ret;
}
static ssize_t proc_seq_write(struct file *file, const char __user *buffer, size_t count, loff_t *pos)
{
char debug_string[16];
int debug_no;
memset(debug_string, 0x00, sizeof(debug_string));
if (count >= sizeof(debug_string)) {
printk("%s line %d, fata error, write count exceed max buffer size.\n", __func__, __LINE__);
return -EINVAL;
}
if (copy_from_user(debug_string, buffer, count)) {
printk("%s line %d, fata error, copy from user failure.\n", __func__, __LINE__);
return -EFAULT;
}
if (sscanf(debug_string, "%d", &debug_no) <= 0) {
printk("%s line %d, fata error, read debugno failure.\n", __func__, __LINE__);
return -EFAULT;
}
seqfile_debug_mode = debug_no;
//printk("%s line %d, debug_no %d.\n", __func__, __LINE__, debug_no);
return count;
}
static ssize_t proc_seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
{
ssize_t ret;
printk("%s line %d enter, fuck size %lld size %ld.\n", __func__, __LINE__, *ppos, size);
ret = seq_read(file, buf, size, ppos);
printk("%s line %d exit, fuck size %lld size %ld,ret = %ld.\n", __func__, __LINE__, *ppos, size, ret);
return ret;
}
static struct file_operations seq_proc_ops = {
.owner = THIS_MODULE,
.open = proc_seq_open,
.release = seq_release,
.read = proc_seq_read,
.write = proc_seq_write,
.llseek = seq_lseek,
.unlocked_ioctl = NULL,
};
static struct proc_dir_entry * entry;
static int proc_hook_init(void)
{
printk("%s line %d, init. seqfile_debug_mode = %d.\n", __func__, __LINE__, seqfile_debug_mode);
entry = proc_create("dumptask", 0644, NULL, &seq_proc_ops);
//entry = proc_create_seq("dumptask", 0644, NULL, &my_seq_ops);
return 0;
}
static void proc_hook_exit(void)
{
proc_remove(entry);
printk("%s line %d, exit.\n", __func__, __LINE__);
return;
}
module_init(proc_hook_init);
module_exit(proc_hook_exit);</code>
get the pid of the target process

insmod the ko and pass the pid via the pid_number module parameter

run and verify: write 15 to /proc/dumptask to select the kill mode, then read /proc/dumptask to trigger kill_processes() (a user-space sketch of this is shown below)
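One plausible way to drive the module's /proc/dumptask interface from user space is the hypothetical helper below; it assumes the module has already been loaded with pid_number set to the target PID, writes "15" so that proc_seq_write() switches seqfile_debug_mode to the kill mode, and then reads the file so that my_seq_ops_show() calls kill_processes(pid_number).
<code class="language-plaintext hljs">/* Hypothetical user-space helper: select debug mode 15 and trigger the kill.
 * Assumes the kill module above is loaded with pid_number=<target pid>. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	int fd = open("/proc/dumptask", O_RDWR);

	if (fd < 0) {
		perror("open /proc/dumptask");
		return 1;
	}
	/* proc_seq_write() parses this integer into seqfile_debug_mode */
	if (write(fd, "15", 2) != 2) {
		perror("write");
		return 1;
	}
	/* seq_read() ends up in my_seq_ops_show(), which calls kill_processes(pid_number) */
	while (read(fd, buf, sizeof(buf)) > 0)
		;
	close(fd);
	return 0;
}</code>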


the grouped thread status of the process shows that its threads are live.

dmesg

from the output above, we can see that the threads of a multi-threaded process are killed one by one.
next, kill a process with 100 threads:
<code class="language-plaintext hljs">#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
static pthread_mutex_t mutex;
static pthread_mutexattr_t attr;
void* func(void* arg)
{
while(1) {
pthread_mutex_lock(&mutex);
printf("%s line %d, get pid %d\n", __func__, __LINE__, getpid());
pthread_mutex_unlock(&mutex);
sleep(1);
}
return NULL;
}
#define thread_count 100
int main(void)
{
int ret;
pthread_t p[thread_count] = {0};
int i = 0;
if(( ret = pthread_mutexattr_init(&attr)) != 0)
{
fprintf(stderr, "create mutex attribute error. msg:%s", strerror(ret));
exit(1);
}
pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
pthread_mutex_init(&mutex, &attr);
char str1[8], str2[8];
sprintf(str1, "abcdefg");
sprintf(str2, "1234567");
for( i = 0; i < thread_count / 2; i ++) {
if((ret = pthread_create(&p[2*i + 0], NULL, func, str1)) != 0) {
fprintf(stderr, "create thread error. msg:%s", strerror(ret));
exit(1);
}
if((ret = pthread_create(&p[2*i + 1], NULL, func, str2)) != 0) {
fprintf(stderr, "create thread error. msg:%s", strerror(ret));
exit(1);
}
}
for(i = 0; i < thread_count; i ++) {
pthread_join(p[i], NULL);
}
return 0;
}</code>

kill result

there are 101 threads in total, including the thread group leader.


kill order
the killing order is random.





it seems the thread group leader is always the first thread to be killed; I don't know whether this is a defined rule

this may need further investigation.
confirm:
the guess above is not correct: in the test below, the first threads killed are not the thread group leader.
this time we use kill -9 #PID, where the PID is just one of the threads rather than the thread group leader; we can see that the first threads to die are not 5894


<code class="language-plaintext hljs">[ 1012.047685] proc_seq_read line 547 exit, fuck size 0 size 131072,ret = 0.
[ 1955.330332] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5981.
[ 1955.330333] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5903.
[ 1955.330334] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5939.
[ 1955.330338] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5894.
[ 1955.330374] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5982.
[ 1955.330375] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5940.
[ 1955.330380] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5983.
[ 1955.330382] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5909.
[ 1955.330386] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5941.
[ 1955.330391] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5984.
[ 1955.330391] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5911.
[ 1955.330394] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5942.
[ 1955.330398] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5985.
[ 1955.330399] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5908.
[ 1955.330402] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5943.
[ 1955.330405] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5986.
[ 1955.330407] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5910.
[ 1955.330409] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5944.
[ 1955.330413] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5955.
[ 1955.330414] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5948.
[ 1955.330416] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5945.
[ 1955.330420] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5954.
[ 1955.330421] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5947.
[ 1955.330423] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5946.
[ 1955.330427] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5952.
[ 1955.330429] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5918.
[ 1955.330429] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5994.
[ 1955.330431] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5921.
[ 1955.330435] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5958.
[ 1955.330437] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5898.
[ 1955.330439] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5922.
[ 1955.330442] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5953.
[ 1955.330443] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5979.
[ 1955.330444] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5913.
[ 1955.330446] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5923.
[ 1955.330448] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5957.
[ 1955.330452] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5897.
[ 1955.330454] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5899.
[ 1955.330454] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5925.
[ 1955.330456] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5959.
[ 1955.330459] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5951.
[ 1955.330461] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5924.
[ 1955.330464] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5960.
[ 1955.330464] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5978.
[ 1955.330466] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5992.
[ 1955.330468] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5895.
[ 1955.330471] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5961.
[ 1955.330473] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5916.
[ 1955.330487] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5987.
[ 1955.330487] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5962.
[ 1955.330489] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5926.
[ 1955.330490] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5975.
[ 1955.330495] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5966.
[ 1955.330496] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5912.
[ 1955.330499] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5927.
[ 1955.330500] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5919.
[ 1955.330502] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5967.
[ 1955.330503] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5988.
[ 1955.330507] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5928.
[ 1955.330509] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5974.
[ 1955.330509] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5968.
[ 1955.330511] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5991.
[ 1955.330515] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5929.
[ 1955.330518] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5969.
[ 1955.330519] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5989.
[ 1955.330522] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5930.
[ 1955.330525] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5970.
[ 1955.330525] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5914.
[ 1955.330526] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5949.
[ 1955.330530] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5931.
[ 1955.330534] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5915.
[ 1955.330534] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5993.
[ 1955.330538] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5932.
[ 1955.330539] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5971.
[ 1955.330542] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5990.
[ 1955.330544] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5905.
[ 1955.330545] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5933.
[ 1955.330546] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5963.
[ 1955.330549] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5950.
[ 1955.330553] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5934.
[ 1955.330554] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5907.
[ 1955.330554] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5956.
[ 1955.330557] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5902.
[ 1955.330562] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5980.
[ 1955.330562] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5935.
[ 1955.330563] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5965.
[ 1955.330564] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5901.
[ 1955.330570] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5936.
[ 1955.330577] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5937.
[ 1955.330578] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5973.
[ 1955.330579] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5900.
[ 1955.330581] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5977.
[ 1955.330584] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5938.
[ 1955.330587] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5906.
[ 1955.330589] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5976.
[ 1955.330589] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5920.
[ 1955.330594] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5972.
[ 1955.330597] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5964.
[ 1955.330600] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5904.
[ 1955.330601] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5917.
[ 1955.330610] do_exit line 775, tsk->tgid = 5894, tsk->pid = 5896.
[ 1985.229336] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6007.
[ 1985.229337] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6060.
[ 1985.229338] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6033.
[ 1985.229339] do_exit line 775, tsk->tgid = 5995, tsk->pid = 5995.
[ 1985.229376] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6003.
[ 1985.229377] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6031.
[ 1985.229377] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6049.
[ 1985.229382] do_exit line 775, tsk->tgid = 5995, tsk->pid = 5997.
[ 1985.229383] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6044.
[ 1985.229389] do_exit line 775, tsk->tgid = 5995, tsk->pid = 5996.
[ 1985.229415] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6008.
[ 1985.229421] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6004.
[ 1985.229437] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6001.
[ 1985.229442] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6087.
[ 1985.229444] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6025.
[ 1985.229450] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6002.
[ 1985.229452] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6050.
[ 1985.229458] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6038.
[ 1985.229466] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6018.
[ 1985.229471] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6006.
[ 1985.229474] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6017.
[ 1985.229479] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6063.
[ 1985.229481] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6035.
[ 1985.229486] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6037.
[ 1985.229493] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6013.
[ 1985.229498] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6021.
[ 1985.229502] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6053.
[ 1985.229507] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6009.
[ 1985.229508] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6030.
[ 1985.229514] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6055.
[ 1985.229525] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6088.
[ 1985.229530] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6065.
[ 1985.229531] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6095.
[ 1985.229536] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6062.
[ 1985.229537] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6056.
[ 1985.229542] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6045.
[ 1985.229550] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6061.
[ 1985.229554] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6085.
[ 1985.229558] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6084.
[ 1985.229566] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6057.
[ 1985.229570] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6040.
[ 1985.229573] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6024.
[ 1985.229578] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6010.
[ 1985.229580] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6000.
[ 1985.229584] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6091.
[ 1985.229593] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6051.
[ 1985.229598] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6047.
[ 1985.229600] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6015.
[ 1985.229605] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6023.
[ 1985.229607] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6093.
[ 1985.229611] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6020.
[ 1985.229621] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6046.
[ 1985.229625] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6034.
[ 1985.229628] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6064.
[ 1985.229633] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6014.
[ 1985.229633] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6022.
[ 1985.229638] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6012.
[ 1985.229649] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6059.
[ 1985.229654] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6036.
[ 1985.229658] do_exit line 775, tsk->tgid = 5995, tsk->pid = 5998.
[ 1985.229659] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6011.
[ 1985.229674] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6005.
[ 1985.229676] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6026.
[ 1985.229687] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6039.
[ 1985.229691] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6054.
[ 1985.229697] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6019.
[ 1985.229701] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6016.
[ 1985.229701] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6086.
[ 1985.229706] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6027.
[ 1985.229719] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6048.
[ 1985.229724] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6052.
[ 1985.229727] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6032.
[ 1985.229729] do_exit line 775, tsk->tgid = 5995, tsk->pid = 5999.
[ 1985.229732] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6029.
[ 1985.229743] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6028.
[ 1985.229755] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6058.
[ 1985.229777] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6042.
[ 1985.229782] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6068.
[ 1985.229786] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6074.
[ 1985.229789] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6080.
[ 1985.229791] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6090.
[ 1985.229794] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6081.
[ 1985.229803] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6094.
[ 1985.229807] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6082.
[ 1985.229814] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6076.
[ 1985.229817] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6070.
[ 1985.229818] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6075.
[ 1985.229822] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6083.
[ 1985.229830] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6043.
[ 1985.229834] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6066.
[ 1985.229840] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6069.
[ 1985.229844] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6072.
[ 1985.229845] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6077.
[ 1985.229855] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6067.
[ 1985.229863] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6089.
[ 1985.229870] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6041.
[ 1985.229872] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6079.
[ 1985.229886] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6092.
[ 1985.229894] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6073.
[ 1985.229898] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6071.
[ 1985.230219] do_exit line 775, tsk->tgid = 5995, tsk->pid = 6078.</code>
How are all the threads killed?
The call path is:
do_group_exit->zap_other_threads->while_each_thread(current, t) { sigaddset(&t->pending.signal, SIGKILL);}->signal_wake_up.
A SIGKILL is queued to every other thread in the group, and each one is woken up so it can exit.
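Below is a minimal C sketch of that fan-out; it is illustrative only and not the verbatim kernel implementation (the real zap_other_threads() also clears job-control state and skips threads that are already exiting):
<code class="language-plaintext hljs">#include <linux/sched/signal.h>

/* Conceptual sketch of how zap_other_threads() delivers SIGKILL to every
 * sibling thread of the exiting task; illustrative only, not kernel source. */
static void zap_other_threads_sketch(struct task_struct *p)
{
	struct task_struct *t = p;

	/* while_each_thread() visits every thread in p's group except p itself */
	while_each_thread(p, t) {
		/* queue SIGKILL directly in the per-thread pending set ... */
		sigaddset(&t->pending.signal, SIGKILL);
		/* ... and wake the thread so it sees the signal and enters do_exit() */
		signal_wake_up(t, 1);
	}
}</code>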

which thread will close the opened device if a multi-threaded process is killed by a signal?
demo char device:
<code class="language-plaintext hljs">#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stat.h>
#include <linux/fs.h>
#include <linux/kdev_t.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/device.h>
MODULE_AUTHOR("czl");
MODULE_LICENSE("GPL");
#define DEVICE_NAME "char_demo"
#define DRIVER_NAME "char_demo"
#define DEVICE_MAJOR 0
#define DRIVER_MINOR 0
#define DEVICE_NUM 2
#define DEVICE_SIZE 3000
int major_num = DEVICE_MAJOR;
int minor_num = DRIVER_MINOR;
module_param(major_num, int , 0664);
module_param(minor_num, int, 0664);
static struct class *myclass;
struct reg_dev
{
char *data; // device data
unsigned long size; // size of the device data
struct cdev cdev; // char device structure
};
struct reg_dev *my_devices;
static int char_demo_open(struct inode *inode, struct file *file){
printk(KERN_EMERG "chardevnode_open is success!imajor %d, iminor %d.\n", imajor(inode), iminor(inode));
return 0;
}
static int char_demo_close(struct inode *inode, struct file *file){
printk(KERN_EMERG "chardevnode_release is success, pid %d, tgid %d.!\n", current->pid, current->tgid);
dump_stack();
return 0;
}
static long char_demo_ioctl(struct file *file, unsigned int cmd, unsigned long arg){
printk(KERN_EMERG "chardevnode_ioctl is success! cmd is %d,arg is %ld \n",cmd,arg);
return 0;
}
ssize_t char_demo_read(struct file *file, char __user *buf, size_t count, loff_t *f_ops){
return 0;
}
ssize_t char_demo_write(struct file *file, const char __user *buf, size_t count, loff_t *f_ops){
return 0;
}
loff_t char_demo_llseek(struct file *file, loff_t offset, int ence){
return 0;
}
struct file_operations char_demo_ops = {
.owner = THIS_MODULE,
.open = char_demo_open,
.release = char_demo_close,
.unlocked_ioctl = char_demo_ioctl,
.read = char_demo_read,
.write = char_demo_write,
.llseek = char_demo_llseek,
};
static int char_demo_init(void)
{
int ret = 0, i; // return value and loop index
dev_t num_dev; // device number
printk(KERN_INFO"char_demo_init\n");
printk(KERN_INFO"major is %d \n",major_num);
printk(KERN_INFO"minor is %d \n",minor_num);
/* If the major number is nonzero, register the device numbers statically, otherwise allocate them dynamically */
if(major_num){
num_dev = MKDEV(major_num, minor_num);
ret = register_chrdev_region(num_dev,DEVICE_NUM, DEVICE_NAME);
if(ret != 0) {
printk(KERN_EMERG"register chrdev region failure.\n");
return -1;
}
} else {
ret = alloc_chrdev_region(&num_dev, minor_num, DEVICE_NUM, DEVICE_NAME);
major_num = MAJOR(num_dev); // get the major number
minor_num = MINOR(num_dev); // get the minor number
printk(KERN_INFO"alloc_char_demo , the major is %d ,the minor is %d.\n",major_num,minor_num);
}
if(ret < 0){
printk(KERN_EMERG"char_demo failed\n");
return -1;
}
/* Create a device class */
myclass = class_create(THIS_MODULE, DEVICE_NAME);
/* Allocate memory for the devices */
my_devices = kmalloc(sizeof(struct reg_dev) * DEVICE_NUM, GFP_KERNEL);
if(!my_devices){
ret = -ENOMEM;
goto fail;
}
memset(my_devices, 0, DEVICE_NUM * sizeof(struct reg_dev));
/* Initialize each device in turn */
for( i = 0 ; i < DEVICE_NUM ; i++){
my_devices[i].data = kmalloc(DEVICE_SIZE, GFP_KERNEL);
memset(my_devices[i].data, 0, sizeof(char) * DEVICE_SIZE);
/* Register the device with the system */
cdev_init(&my_devices[i].cdev, &char_demo_ops);
my_devices[i].cdev.owner = THIS_MODULE;
my_devices[i].cdev.ops = &char_demo_ops;
ret = cdev_add(&my_devices[i].cdev, MKDEV(major_num, minor_num +i), 1);
if(ret) {
printk(KERN_EMERG"char_demo %d is fail ! %d\n",i,ret);
} else {
printk(KERN_EMERG"char_demo add %d is success !\n",minor_num + i);
}
/* Create the device node */
device_create(myclass, NULL, MKDEV(major_num, minor_num +i), NULL, DEVICE_NAME"%d",i);
}
printk(KERN_EMERG"char_demo init ok !\n");
return 0;
fail:
/* Unregister the device numbers */
unregister_chrdev_region(MKDEV(major_num,minor_num),DEVICE_NUM);
printk(KERN_EMERG "kmalloc is fail!\n");
return ret;
}
/* Module exit */
static void char_demo_exit(void)
{
int i;
/* Remove the char devices */
for(i = 0 ; i < DEVICE_NUM ; i++) {
cdev_del(&(my_devices[i].cdev));
device_destroy(myclass, MKDEV(major_num, minor_num +i));
}
/* Destroy the device class */
class_destroy(myclass);
/* Free the allocated memory */
kfree(my_devices);
/* Release the device numbers */
unregister_chrdev_region(MKDEV(major_num, minor_num), DEVICE_NUM);
}
module_init(char_demo_init);
module_exit(char_demo_exit);</code>
demo of a 101-thread process that opens the char device:
<code class="language-plaintext hljs">#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <sys/ioctl.h>
static pthread_mutex_t mutex;
static pthread_mutexattr_t attr;
static void open_device(void)
{
int fd;
char *char_demo_node0 = "/dev/char_demo0";
fd = open(char_demo_node0,O_RDWR|O_NONBLOCK);
if(fd < 0)
{
printf("%s open fail!!!\n",char_demo_node0);
exit(-1);
}
else
{
printf("%s open success !!!\n",char_demo_node0);
}
}
void* func(void* arg)
{
while(1) {
pthread_mutex_lock(&mutex);
printf("%s line %d, get pid %d\n", __func__, __LINE__, getpid());
pthread_mutex_unlock(&mutex);
sleep(1);
}
return NULL;
}
#define thread_count 100
int main(void)
{
int ret;
pthread_t p[thread_count] = {0};
int i = 0;
open_device();
if(( ret = pthread_mutexattr_init(&attr)) != 0)
{
fprintf(stderr, "create mutex attribute error. msg:%s", strerror(ret));
exit(1);
}
pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
pthread_mutex_init(&mutex, &attr);
char str1[8], str2[8];
sprintf(str1, "abcdefg");
sprintf(str2, "1234567");
for( i = 0; i < thread_count / 2; i ++) {
if((ret = pthread_create(&p[2*i + 0], NULL, func, str1)) != 0) {
fprintf(stderr, "create thread error. msg:%s", strerror(ret));
exit(1);
}
if((ret = pthread_create(&p[2*i + 1], NULL, func, str2)) != 0) {
fprintf(stderr, "create thread error. msg:%s", strerror(ret));
exit(1);
}
}
for(i = 0; i < thread_count; i ++) {
pthread_join(p[i], NULL);
}
return 0;
}</code>
kill process module (an updated version of the module above):
<code class="language-plaintext hljs">#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stat.h>
#include <linux/fs.h>
#include <linux/kdev_t.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/seq_file.h>
#include <linux/sched/signal.h>
#include <linux/proc_fs.h>
#include <linux/pid.h>
#include <linux/pci.h>
#include <linux/usb.h>
#include <linux/kobject.h>
#include <linux/sched/mm.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <linux/sched/debug.h>
#include <linux/delay.h> /* for msleep()/ssleep() used in pci_reset_sbr() */
MODULE_AUTHOR("zlcao");
MODULE_LICENSE("GPL");
int seqfile_debug_mode = 0;
EXPORT_SYMBOL(seqfile_debug_mode);
module_param(seqfile_debug_mode, int, 0664);
int pid_number = -1;
EXPORT_SYMBOL(pid_number);
module_param(pid_number, int, 0664);
static int kobj_created = 0;
static struct kset *class_zilong;
static struct kobject kobj;
static void kill_processes(int pid_nr);
// Start iterating the task list
// The value returned by my_seq_ops_start() is passed to my_seq_ops_next() as the v argument
static void *my_seq_ops_start(struct seq_file *m, loff_t *pos)
{
loff_t index = *pos;
struct task_struct *task;
printk("%s line %d, index %lld.count %ld, size %ld here.\n", __func__, __LINE__, index, m->count, m->size);
if(seqfile_debug_mode == 0) {
// If the output buffer is too small, seq_file may call start() again,
// passing in the pos that the iteration had already reached,
// so the starting task has to be recomputed from pos here
for_each_process(task) {
if (index-- == 0) {
return task;
}
}
} else {
return NULL + (*pos == 0);
}
return NULL;
}
// Keep iterating until my_seq_ops_next() returns NULL or an error
static void *my_seq_ops_next(struct seq_file *m, void *v, loff_t *pos)
{
struct task_struct *task = NULL;
if(seqfile_debug_mode == 0) {
task = next_task((struct task_struct *)v);
// Incrementing pos here does not seem to make any difference
++ *pos;
// Returning NULL ends the iteration
if(task == &init_task) {
return NULL;
}
} else {
++ *pos;
}
return task;
}
// seq_file calls stop() when the iteration finishes or an error occurs
static void my_seq_ops_stop(struct seq_file *m, void *v)
{
}
static void pci_reset_sbr(struct pci_dev *dev)
{
uint16_t ctrl;
pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
msleep(2);
ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
ssleep(1);
}
static struct pci_dev *g_pci_dev = NULL;
static int lookup_pci_devices_reset(struct device *dev, void *data)
{
struct seq_file *m = (struct seq_file *)data;
struct pci_dev *pdev = to_pci_dev(dev);
seq_printf(m, "%s line %d vendor id 0x%x, device id 0x%x, devname %s.\n", __func__, __LINE__, pdev->vendor, pdev->device, dev_name(&pdev->dev));
// find the EHCI device.8086:a12f
//if ((pdev->vendor == 0x1ee0) && (pdev->device == 0xf)) {
if ((pdev->vendor == 0x168c) && (pdev->device == 0x36)) {
//if ((pdev->vendor == 0x8086) && (pdev->device == 0xa12f)) {
seq_printf(m, "%s line %d, do the reset of wireless device, domain 0x%04x, device PCI:%d:%d:%d\n.", \
__func__, __LINE__, pci_domain_nr(pdev->bus), pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
g_pci_dev = pdev;
}
return 0;
}
static unsigned int pci_rescan_bus_bridge_resize_priv(struct pci_dev *bridge)
{
unsigned int max;
struct pci_bus *bus = bridge->subordinate;
max = pci_scan_child_bus(bus);
pci_assign_unassigned_bridge_resources(bridge);
pci_bus_add_devices(bus);
return max;
}
static int lookup_pci_devices(struct device *dev, void *data)
{
struct seq_file *m = (struct seq_file *)data;
struct pci_dev *pdev = to_pci_dev(dev);
seq_printf(m, "vendor id 0x%x, device id 0x%x, devname %s.domain: 0x%04x, BDF: %x:%x:%x. sub 0x%p.\n", \
pdev->vendor, pdev->device, dev_name(&pdev->dev),pci_domain_nr(pdev->bus), pdev->bus->number, \
PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn), pdev->subordinate);
return 0;
}
static int lookup_pci_drivers(struct device_driver *drv, void *data)
{
struct seq_file *m = (struct seq_file *)data;
seq_printf(m, "driver name %s.\n", drv->name);
return 0;
}
static int lookup_platform_devices(struct device *dev, void *data)
{
struct seq_file *m = (struct seq_file *)data;
struct platform_device *platdev = to_platform_device(dev);
seq_printf(m, "devpath %s.\n", platdev->name);
return 0;
}
static int lookup_platform_drivers(struct device_driver *drv, void *data)
{
struct seq_file *m = (struct seq_file *)data;
seq_printf(m, "driver name %s.\n", drv->name);
return 0;
}
static int list_device_belongs_todriver_pci(struct device *dev, void *p)
{
struct seq_file *m = (struct seq_file *)p;
struct pci_dev *pdev = to_pci_dev(dev);
seq_printf(m, "vendor id 0x%x, device id 0x%x, devname %s.\n", pdev->vendor, pdev->device, dev_name(&pdev->dev));
return 0;
}
static int list_device_belongs_todriver_platform(struct device *dev, void *p)
{
struct seq_file *m = (struct seq_file *)p;
struct platform_device *platdev = to_platform_device(dev);
seq_printf(m, "platdevname %s.\n", platdev->name);
return 0;
}
static int pcie_device_info_find_bridge(struct pci_dev *pdev, void *data)
{
struct seq_file *m = (struct seq_file *)data;
struct pci_bus *rootbus = NULL;
rootbus = pci_find_bus(0, 0);
if(pci_is_bridge(pdev)){
seq_printf(m, "bridge find.vendor id 0x%x, device id 0x%x, devname %s. domain:0x%04x, BDF: %x:%x:%x, subordinate 0x%p, busid %d, rootbus %d rootself 0x%p.\n", \
pdev->vendor, pdev->device, dev_name(&pdev->dev), pci_domain_nr(pdev->bus), pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn), pdev->subordinate,
pdev->subordinate->number, rootbus->number, rootbus->self);
}
return 0;
}
static int pcie_device_info(struct pci_dev *pdev, void *data)
{
struct seq_file *m = (struct seq_file *)data;
seq_printf(m, "vendor id 0x%04x, device id 0x%04x, devname %s, belongs to bus %16s, parent bus name %6s subordinate 0x%p.\n", \
pdev->vendor, pdev->device, dev_name(&pdev->dev), pdev->bus->name, pdev->bus->parent? pdev->bus->parent->name : "null", pdev->subordinate);
if(pdev->subordinate) {
seq_printf(m, " subordinate have bus name %s.\n", pdev->subordinate->name);
if(pdev->subordinate->self) {
seq_printf(m, " subordinate have dev name %s.\n", dev_name(&pdev->subordinate->self->dev));
if(pdev->subordinate->self != pdev) {
seq_printf(m, " cant happend!\n");
} else {
seq_printf(m, " surely!\n");
}
}
} else {
seq_printf(m, " subordinate not have.\n");
}
if(pdev->bus->self) {
seq_printf(m, " device belongs to child pci bus %s.\n", dev_name(&pdev->bus->self->dev));
} else {
seq_printf(m, " device belongs to top lvl pci bus.\n");
}
seq_printf(m, "\n");
return 0;
}
struct zilong_attribute {
struct attribute attr;
ssize_t (*show)(struct kobject *kobj, struct zilong_attribute *attr, char *buf);
ssize_t (*store)(struct kobject *dev, struct zilong_attribute *attr,const char *buf, size_t count);
};
#define to_zilong_attr(_attr) container_of(_attr, struct zilong_attribute, attr)
static ssize_t zilong_attr_show(struct kobject *kobj, struct attribute *attr, char *buf)
{
struct zilong_attribute *zlattr = to_zilong_attr(attr);
return zlattr->show(kobj, zlattr, buf);
}
static ssize_t zilong_attr_store(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count)
{
ssize_t size;
struct zilong_attribute *zlattr = to_zilong_attr(attr);
size = zlattr->store(kobj, zlattr, buf, count);
return size;
}
static const struct sysfs_ops zilong_sysfs_ops = {
.show = zilong_attr_show,
.store = zilong_attr_store,
};
static struct kobj_type zilong_ktype = {
.release = NULL,
.sysfs_ops = &zilong_sysfs_ops,
.namespace = NULL,
.get_ownership = NULL,
};
ssize_t height_show(struct kobject *kobj, struct zilong_attribute *attr, char *buf)
{
char *p = buf;
p += sprintf(p, "I am 180 cm tall.\n");
printk("%s line %d output %ld.\n", __func__, __LINE__, p - buf);
return p - buf;
}
ssize_t height_store(struct kobject *dev, struct zilong_attribute *attr,const char *buf, size_t count)
{
unsigned long height = 0;
sscanf(buf, "%lx", &height);
if (printk_ratelimit())
printk("%s line %d, height %ld.\n", __func__, __LINE__, height);
return count;
}
struct zilong_attribute height = {
.attr = {
.name = "height",
.mode = 0777,
},
.show = height_show,
.store = height_store,
};
// This function writes data into seq_file's internal buffer;
// seq_file copies the buffered data to user space at the appropriate time.
// The parameter @v is the value returned by start()/next()
static int my_seq_ops_show(struct seq_file *m, void *v)
{
struct task_struct *task = NULL;
struct task_struct *tsk = NULL;
struct task_struct *p = NULL;
struct file *file = m->private;
struct pid *session = NULL;
if(seqfile_debug_mode == 0) {
seq_puts(m, " file=");
seq_file_path(m, file, "\n");
seq_putc(m, ' ');
task = (struct task_struct *)v;
session = task_session(task);
tsk = pid_task(session, PIDTYPE_PID);
if(task->flags & PF_KTHREAD) {
seq_printf(m, "Kernel thread output: PID=%u, task: %s, index=%lld, read_pos=%lld, %s.\n", task_tgid_nr(task),/* task->tgid,*/
task->comm, m->index, m->read_pos, tsk? "has session" : "no session");
} else {
seq_printf(m, "User thread: PID=%u, task: %s, index=%lld, read_pos=%lld %s.\n", task_tgid_nr(task), /* task->tgid,*/
task->comm, m->index, m->read_pos, tsk? "has session" : "no session");
}
seq_printf(m, "==================\n");
sched_show_task(task);
} else if(seqfile_debug_mode == 1) {
struct task_struct *g, *p;
static int oldcount = 0;
static int entercount = 0;
char *str;
printk("%s line %d here enter %d times.\n", __func__, __LINE__, ++ entercount);
seq_printf(m, "%s line %d here enter %d times.\n", __func__, __LINE__, ++ entercount);
rcu_read_lock();
for_each_process_thread(g, p) {
struct task_struct *session = pid_task(task_session(g), PIDTYPE_PID);
struct task_struct *thread = pid_task(task_session(p), PIDTYPE_PID);
struct task_struct *ggroup = pid_task(task_pgrp(g), PIDTYPE_PID);
struct task_struct *pgroup = pid_task(task_pgrp(p), PIDTYPE_PID);
struct pid * pid = task_session(g);
if(list_empty(&p->tasks)) {
str = "empty";
} else {
str = "not empty";
}
seq_printf(m, "process %s(pid %d tgid %d,cpu%d) thread %s(pid %d tgid %d,cpu%d),threadnum %d, %d. tasks->prev = %p, \
tasks->next = %p, p->tasks=%p, %s, process parent %s(pid %d tgid %d), thread parent%s(pid %d, tgid %d, files %p\n)",
g->comm, task_pid_nr(g), task_tgid_nr(g), task_cpu(g), \
p->comm, task_pid_nr(p), task_tgid_nr(p), task_cpu(p), \
get_nr_threads(g), get_nr_threads(p), p->tasks.prev, p->tasks.next, &p->tasks, str, g->real_parent->comm, \
task_pid_nr(g->real_parent),task_tgid_nr(g->real_parent), p->real_parent->comm, task_pid_nr(p->real_parent), task_tgid_nr(p->real_parent), p->files);
if(ggroup) {
seq_printf(m, "ggroup(pid %d tgid %d).", task_pid_nr(ggroup),task_tgid_nr(ggroup));
}
if(pgroup) {
seq_printf(m, "pgroup(pid %d tgid %d).", task_pid_nr(pgroup),task_tgid_nr(pgroup));
}
seq_printf(m, "current smp processor id %d.", smp_processor_id());
if(thread) {
seq_printf(m, "thread session %s(%d).", thread->comm, task_pid_nr(thread));
}
if(session) {
seq_printf(m, "process session %s(%d).", session->comm, task_pid_nr(session));
}
if(oldcount == 0 || oldcount != m->size) {
printk("%s line %d, m->count %ld, m->size %ld.", __func__, __LINE__, m->count, m->size);
oldcount = m->size;
}
if(pid){
seq_printf(m, "pid task %p,pgid task %p, psid_task %p", pid_task(pid, PIDTYPE_PID), pid_task(pid, PIDTYPE_PGID), pid_task(pid, PIDTYPE_SID));
seq_printf(m, "pid task %s,pgid task %s, psid_task %s", pid_task(pid, PIDTYPE_PID)->comm, pid_task(pid, PIDTYPE_PGID)->comm, pid_task(pid, PIDTYPE_SID)->comm);
}
seq_printf(m, "\n");
}
rcu_read_unlock();
} else if(seqfile_debug_mode == 2) {
for_each_process(task) {
struct pid *pgrp = task_pgrp(task);
seq_printf(m, "Group Header %s(%d,cpu%d):\n", task->comm, task_pid_nr(task), task_cpu(task));
do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
seq_printf(m, " process %s(%d,cpu%d) thread %s(%d,cpu%d),threadnum %d, %d.\n",
task->comm, task_pid_nr(task), task_cpu(task), \
p->comm, task_pid_nr(p), task_cpu(p), \
get_nr_threads(task), get_nr_threads(p));
} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
}
} else if (seqfile_debug_mode == 3) {
for_each_process(task) {
struct pid *session = task_session(task);
struct task_struct *tsk = pid_task(session, PIDTYPE_PID);
if(tsk) {
seq_printf(m, "session task %s(%d,cpu%d):", tsk->comm, task_pid_nr(tsk), task_cpu(tsk));
} else {
seq_printf(m, "process %s(%d,cpu%d) has no session task.", task->comm, task_pid_nr(task), task_cpu(task));
}
seq_printf(m, "session header %s(%d,cpu%d):\n", task->comm, task_pid_nr(task), task_cpu(task));
do_each_pid_task(session, PIDTYPE_SID, p) {
seq_printf(m, " process %s(%d,cpu%d) thread %s(%d,cpu%d),threadnum %d, %d, spidtask %s(%d,%d).\n",
task->comm, task_pid_nr(task), task_cpu(task), \
p->comm, task_pid_nr(p), task_cpu(p), \
get_nr_threads(task), get_nr_threads(p), pid_task(session, PIDTYPE_SID)->comm, pid_task(session, PIDTYPE_SID)->tgid, pid_task(session, PIDTYPE_SID)->pid);
if(pid_task(session, PIDTYPE_PID)) {
seq_printf(m, "pidtask %s(%d,%d).\n", pid_task(session, PIDTYPE_PID)->comm, pid_task(session, PIDTYPE_PID)->tgid, pid_task(session, PIDTYPE_PID)->pid);
}
} while_each_pid_task(session, PIDTYPE_SID, p);
}
} else if(seqfile_debug_mode == 4) {
struct task_struct *thread, *child;
for_each_process(task) {
seq_printf(m, "process %s(%d,cpu%d):\n", task->comm, task_pid_nr(task), task_cpu(task));
for_each_thread(task, thread) {
list_for_each_entry(child, &thread->children, sibling) {
seq_printf(m, " thread %s(%d,cpu%d) child %s(%d,cpu%d),threadnum %d, %d.\n",
thread->comm, task_pid_nr(thread), task_cpu(thread), \
child->comm, task_pid_nr(child), task_cpu(child), \
get_nr_threads(thread), get_nr_threads(child));
}
}
}
} else if(seqfile_debug_mode == 5) {
struct task_struct *g, *t;
do_each_thread (g, t) {
seq_printf(m, "Process %s(%d cpu%d), thread %s(%d cpu%d), threadnum %d.\n", g->comm, task_pid_nr(g), task_cpu(g), t->comm, task_pid_nr(t), task_cpu(t), get_nr_threads(g));
} while_each_thread (g, t);
} else if(seqfile_debug_mode == 6) {
for_each_process(task) {
struct pid *pid = task_pid(task);
seq_printf(m, "Process %s(%d,cpu%d) pid %d, tgid %d:\n", task->comm, task_pid_nr(task), task_cpu(task), task_pid_vnr(task), task_tgid_vnr(task));
do_each_pid_task(pid, PIDTYPE_TGID, p) {
seq_printf(m, " process %s(%d,cpu%d) thread %s(%d,cpu%d),threadnum %d, %d. pid %d, tgid %d\n",
task->comm, task_pid_nr(task), task_cpu(task), \
p->comm, task_pid_nr(p), task_cpu(p), \
get_nr_threads(task), get_nr_threads(p), task_pid_vnr(p), task_tgid_vnr(p));
} while_each_pid_task(pid, PIDTYPE_TGID, p);
}
} else if(seqfile_debug_mode == 7) {
for_each_process(task) {
struct pid *pid = task_pid(task);
seq_printf(m, "Process %s(%d,cpu%d) pid %d, tgid %d:\n", task->comm, task_pid_nr(task), task_cpu(task), task_pid_vnr(task), task_tgid_vnr(task));
do_each_pid_task(pid, PIDTYPE_PID, p) {
seq_printf(m, " process %s(%d,cpu%d) thread %s(%d,cpu%d),threadnum %d, %d. pid %d, tgid %d\n",
task->comm, task_pid_nr(task), task_cpu(task), \
p->comm, task_pid_nr(p), task_cpu(p), \
get_nr_threads(task), get_nr_threads(p), task_pid_vnr(p), task_tgid_vnr(p));
} while_each_pid_task(pid, PIDTYPE_PID, p);
}
} else if(seqfile_debug_mode == 8) {
bus_for_each_dev(&pci_bus_type, NULL, (void*)m, lookup_pci_devices);
bus_for_each_drv(&pci_bus_type, NULL, (void*)m, lookup_pci_drivers);
// class_find_device.
// class_find_device_by_name.
// class_for_each_device.
} else if(seqfile_debug_mode == 9) {
struct device_driver *drv;
drv = driver_find("pcieport", &pci_bus_type);
driver_for_each_device(drv, NULL, (void*)m, list_device_belongs_todriver_pci);
} else if(seqfile_debug_mode == 10) {
for_each_process(task) {
seq_printf(m, "Process %s(%d),state 0x%08lx, exit_state 0x%08x, refcount %d, usage %d rcucount %d.", \
task->comm, task->tgid, task->state, task->exit_state, refcount_read(&task->stack_refcount), refcount_read(&task->usage), refcount_read(&task->rcu_users));
if(task->parent) {
seq_printf(m, "parent name %s pid %d.\n", task->parent->comm, task->parent->tgid);
} else {
seq_printf(m, "no parent.\n");
}
}
} else if(seqfile_debug_mode == 11) {
struct pci_bus *bus;
list_for_each_entry(bus, &pci_root_buses, node) {
seq_printf(m, "pcibus name %s.\n", bus->name);
pci_walk_bus(bus, pcie_device_info, (void*)m);
}
} else if(seqfile_debug_mode == 12) {
struct device_driver *drv;
// EXPORT_SYMBOL(usb_bus_type);
// bus_for_each_dev(&usb_bus_type, NULL, (void*)m, lookup_usb_devices);
// bus_for_each_drv(&usb_bus_type, NULL, (void*)m, lookup_usb_drivers);
bus_for_each_dev(&platform_bus_type, NULL, (void*)m, lookup_platform_devices);
bus_for_each_drv(&platform_bus_type, NULL, (void*)m, lookup_platform_drivers);
drv = driver_find("demo_platform", &platform_bus_type);
driver_for_each_device(drv, NULL, (void*)m, list_device_belongs_todriver_platform);
} else if(seqfile_debug_mode == 13) {
int ret;
class_zilong = kset_create_and_add("zilong_class", NULL, NULL);
if (!class_zilong) {
printk("%s line %d, fatal error, create class failure.\n", __func__, __LINE__);
return -ENOMEM;
}
ret = kobject_init_and_add(&kobj, &zilong_ktype, &class_zilong->kobj, "%s-%d", "zilong", 1);
if(ret < 0) {
printk("%s line %d, fatal error, create kobject failure.\n", __func__, __LINE__);
return -ENOMEM;
}
kobj_created = 1;
ret = sysfs_create_file(&kobj, &height.attr);
if(ret != 0){
printk("%s line %d, fatal error, create sysfs attribute failure.\n", __func__, __LINE__);
return -ENOMEM;
}
kobj_created = 1;
} else if(seqfile_debug_mode == 14) {
// cad pid is process 1 pid.
int ret = kill_cad_pid(SIGINT, 1);
printk("%s lne %d ret %d.\n", __func__, __LINE__, ret);
} else if(seqfile_debug_mode == 15) {
kill_processes(pid_number);
} else if(seqfile_debug_mode == 16) {
struct pci_dev *pdev = NULL;
struct pci_dev *pparent = NULL;
struct pci_bus *bus = NULL;
struct pci_bus *rootbus = NULL;
struct pci_bus *findbus = NULL;
findbus = pci_find_bus(0, 0);
list_for_each_entry(rootbus, &pci_root_buses, node) {
seq_printf(m, "pcibus name %s bus %p, findbus %p.\n", rootbus->name, rootbus, findbus);
break;
}
bus_for_each_dev(&pci_bus_type, NULL, (void*)m, lookup_pci_devices_reset);
pdev = g_pci_dev;
if(pdev == NULL) {
printk("%s line %d, return null.\n", __func__, __LINE__);
return -1;
}
pci_reset_sbr(pdev);
pci_lock_rescan_remove();
pci_stop_and_remove_bus_device(pdev);
pci_unlock_rescan_remove();
bus = pdev->bus;
//if (!pci_is_root_bus(bus) && list_empty(&bus->devices))
if (0)
{
printk("%s line %d, lightweight rescan.\n", __func__, __LINE__);
pci_lock_rescan_remove();
pci_rescan_bus_bridge_resize_priv(bus->self);
pci_unlock_rescan_remove();
} else {
printk("%s line %d, rescan.\n", __func__, __LINE__);
if(bus->self) {
bus = bus->self->bus;
pparent = bus->self;
}
seq_printf(m, "%s line %d, do the reset of wireless device, device PCI BUS No.:%d. busname %s, bus %p.\n.", \
__func__, __LINE__, bus->number, bus->name, bus);
if(pparent && pparent->bus) {
seq_printf(m, "%s line %d, do the reset of wireless device, device PCI:0x%04x:%d:%d.%d\n.", \
__func__, __LINE__, pci_domain_nr(pparent->bus), pparent->bus->number, PCI_SLOT(pparent->devfn), PCI_FUNC(pparent->devfn));
}
if(bus == rootbus) {
seq_printf(m, "warnings: reset root bus.\n");
}
pci_lock_rescan_remove();
pci_rescan_bus(bus);
pci_unlock_rescan_remove();
}
} else if(seqfile_debug_mode == 17) {
struct pci_bus *pbus = NULL;
while ((pbus = pci_find_next_bus(pbus)) != NULL) {
seq_printf(m, "find bus %s.\n", pbus->name);
}
list_for_each_entry(pbus, &pci_root_buses, node) {
seq_printf(m, "pcibus name %s.\n", pbus->name);
pci_walk_bus(pbus, pcie_device_info_find_bridge, (void*)m);
}
bus_for_each_dev(&pci_bus_type, NULL, (void*)m, lookup_pci_devices);
} else if(seqfile_debug_mode == 18) {
struct net *net;
struct net_device *ndev;
for_each_net(net)
for_each_netdev(net, ndev) {
seq_printf(m, "%s line %d, ndev->name %s. net 0x%p.\n", __func__, __LINE__, ndev->name, net);
}
} else {
printk("%s line %d,cant be here, seqfile_debug_mode = %d.\n", __func__, __LINE__, seqfile_debug_mode);
}
return 0;
}
static struct task_struct *find_lock_task_mm(struct task_struct *p)
{
struct task_struct *t;
rcu_read_lock();
for_each_thread(p, t) {
task_lock(t);
if (likely(t->mm))
goto found;
task_unlock(t);
}
t = NULL;
found:
rcu_read_unlock();
return t;
}
static bool process_shares_task_mm(struct task_struct *p, struct mm_struct *mm)
{
struct task_struct *t;
for_each_thread(p, t) {
struct mm_struct *t_mm = READ_ONCE(t->mm);
if (t_mm)
return t_mm == mm;
}
return false;
}
static void kill_processes(int pid_nr)
{
struct task_struct *victim;
struct task_struct *p;
struct mm_struct *mm;
int old_cnt,new_cnt;
victim = get_pid_task(find_vpid(pid_nr), PIDTYPE_PID);
if(victim == NULL) {
printk("%s line %d,return.\n", __func__, __LINE__);
return;
}
printk("%s line %d, task has live %d threads total.\n", __func__, __LINE__, atomic_read(&victim->signal->live));
p = find_lock_task_mm(victim);
if (!p) {
put_task_struct(victim);
return;
} else {
get_task_struct(p);
put_task_struct(victim);
victim = p;
}
mm = victim->mm;
mmgrab(mm);
kill_pid(find_vpid(pid_nr), SIGKILL, 1);
task_unlock(victim);
rcu_read_lock();
for_each_process(p) {
if (!process_shares_task_mm(p, mm))
continue;
if (same_thread_group(p, victim))
continue;
if (unlikely(p->flags & PF_KTHREAD))
continue;
kill_pid(task_pid(p), SIGKILL, 1); /* no extra get_pid(): the pid stays valid under rcu_read_lock */
}
rcu_read_unlock();
mmdrop(mm);
old_cnt = atomic_read(&victim->signal->live);
while((new_cnt=atomic_read(&victim->signal->live))) {
if(new_cnt != old_cnt) {
printk("%s line %d, live %d.\n", __func__, __LINE__, atomic_read(&victim->signal->live));
old_cnt = new_cnt;
}
}
put_task_struct(victim);
}
static const struct seq_operations my_seq_ops = {
.start = my_seq_ops_start,
.next = my_seq_ops_next,
.stop = my_seq_ops_stop,
.show = my_seq_ops_show,
};
static int proc_seq_open(struct inode *inode, struct file *file)
{
int ret;
struct seq_file *m;
ret = seq_open(file, &my_seq_ops);
if(!ret) {
m = file->private_data;
m->private = file;
}
return ret;
}
static ssize_t proc_seq_write(struct file *file, const char __user *buffer, size_t count, loff_t *pos)
{
char debug_string[16];
int debug_no;
memset(debug_string, 0x00, sizeof(debug_string));
if (count >= sizeof(debug_string)) {
printk("%s line %d, fata error, write count exceed max buffer size.\n", __func__, __LINE__);
return -EINVAL;
}
if (copy_from_user(debug_string, buffer, count)) {
printk("%s line %d, fata error, copy from user failure.\n", __func__, __LINE__);
return -EFAULT;
}
if (sscanf(debug_string, "%d", &debug_no) <= 0) {
printk("%s line %d, fata error, read debugno failure.\n", __func__, __LINE__);
return -EFAULT;
}
seqfile_debug_mode = debug_no;
//printk("%s line %d, debug_no %d.\n", __func__, __LINE__, debug_no);
return count;
}
static ssize_t proc_seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
{
ssize_t ret;
printk("%s line %d enter, fuck size %lld size %ld.\n", __func__, __LINE__, *ppos, size);
ret = seq_read(file, buf, size, ppos);
printk("%s line %d exit, fuck size %lld size %ld,ret = %ld.\n", __func__, __LINE__, *ppos, size, ret);
return ret;
}
static struct file_operations seq_proc_ops = {
.owner = THIS_MODULE,
.open = proc_seq_open,
.release = seq_release,
.read = proc_seq_read,
.write = proc_seq_write,
.llseek = seq_lseek,
.unlocked_ioctl = NULL,
};
static struct proc_dir_entry * entry;
static int proc_hook_init(void)
{
printk("%s line %d, init. seqfile_debug_mode = %d.\n", __func__, __LINE__, seqfile_debug_mode);
entry = proc_create("dumptask", 0644, NULL, &seq_proc_ops);
//entry = proc_create_seq("dumptask", 0644, NULL, &my_seq_ops);
return 0;
}
static void proc_hook_exit(void)
{
if(kobj_created) {
kobject_del(&kobj);
kset_unregister(class_zilong);
}
proc_remove(entry);
printk("%s line %d, exit.\n", __func__, __LINE__);
return;
}
module_init(proc_hook_init);
module_exit(proc_hook_exit);</code>
kernel debug

So you can see that which of the 101 threads ends up closing the device is random.


Although which thread closes the fd is random, one thing is certain: only the last thread to exit performs the actual char device release. That is because every thread holds a reference on struct task_struct->files->count, so only the last thread to drop that reference cleans up the file resources.

File resource cleanup
The experiments above show that when a multi-threaded process is killed, the order in which its threads exit is random; there is no strict ordering. Releasing and cleaning up file resources is an important task performed during exit, and it has two main execution paths.
do_exit->exit_files->put_files_struct->atomic_dec_and_test(&files->count) {close_files(files); }
->filp_close(file, files);->fput(filp);->atomic_long_sub_and_test(refs, &file->f_count);{fput_many(file, 1);}->init_task_work(&file->f_u.fu_rcuhead, ____fput);task_work_add(task, &file->f_u.fu_rcuhead, true)->...
The kernel keeps an independent reference count for every resource object, including struct file and struct files_struct, so each can be released on its own; this is a key feature of kernel resource management. During exit_files the counts are checked: if no thread of the current process still references files->count, and the struct file object is no longer referenced by any other process, the release function ____fput is queued as task work on the exiting task.
That queue is drained in the next step:
do_exit->exit_task_work->task_work_run-> work->func(work);
where __fput runs and actually releases the struct file object.
The reason for the two levels of counting is that it keeps the lifetimes of struct files_struct and struct file independent of each other: you cannot know whether some other place currently holds (get) this process's file resources. Whoever holds them, this scheme guarantees that the last owner to put the resource is the one that frees it.
For example, if a third place has called get_files_struct(task) on some process's struct files_struct, then when that process exits the count does not reach zero, so the process does not free the resource itself; it is freed later, when that third place calls put_files_struct.
In a kernel module we hack the process's struct files_struct and bump its count by one, so the module can use it safely without worrying about the memory being freed. Printing it from the module shows that the reference count matches the number of threads in the process: the files_struct is only released when the last thread of the process exits.
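A minimal sketch of that hack (hypothetical helper name, my addition; it assumes a kernel where struct files_struct still exposes its count field this way, and put_files_struct() for the matching unpin is not exported to modules on every kernel version):
<code class="language-plaintext hljs">#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/signal.h>
#include <linux/fdtable.h>
/* Pin a task's files_struct by bumping files->count under task_lock(), then
 * print the count next to the task's thread count. With the process alive,
 * the two numbers should match, as described above. */
static struct files_struct *inspect_task_files(struct task_struct *task)
{
    struct files_struct *files;
    task_lock(task);
    files = task->files;
    if (files)
        atomic_inc(&files->count); /* extra reference: files_struct cannot vanish under us */
    task_unlock(task);
    if (files)
        printk("%s: comm %s, files->count %d, threads %d.\n", __func__,
               task->comm, atomic_read(&files->count), get_nr_threads(task));
    /* dropping the pin cleanly would need put_files_struct(files) */
    return files;
}</code>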


As can be seen, the struct files_struct->count value and the number of threads correspond one to one.

For a process that started 100 threads, comparing its LIVE count with its struct files_struct count while idle shows that the two numbers are exactly equal.


Because, during process exit, the file-closing work (and therefore the release callback) runs only after COUNT has dropped to zero, and by that point task_struct->files has already been set to NULL, the release callback cannot obtain the struct files_struct pointer through that field.

File reference counting
Not every close system call causes the release method to be invoked; release is called only when the device's data structures are really being torn down. The kernel keeps a counter in struct file recording how many times the structure is in use; neither fork nor dup creates a new struct file, they only increment the reference count of the existing one.
The close system call executes the release method only when the counter of the file structure drops to zero, which happens when the structure is destroyed. This relationship between release and close guarantees that the module usage count stays consistent: the number of open calls always equals the number of release calls.
Although a forked child inherits the parent's struct file and increments its reference count, that count ensures the real release is not called when the child merely closes the file. In other words, one open always corresponds to exactly one release; a forked child performed no open of its own, so no release corresponds to it.
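To see the counting in user space, here is a small illustration (my addition, not the article's test case; it assumes the /dev/miscdriver node from the module below is present): dup() shares the same struct file, so release only fires after the last descriptor referring to it is closed.
<code class="language-plaintext hljs">#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
int main(void)
{
    int fd = open("/dev/miscdriver", O_RDWR); /* f_count = 1 */
    if (fd < 0) {
        perror("open");
        return -1;
    }
    int fd2 = dup(fd); /* same struct file, f_count = 2 */
    close(fd);         /* f_count = 1: flush may run, release does not */
    close(fd2);        /* f_count = 0: release is finally called */
    return 0;
}</code>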
The flush method is called every time the application calls close, but very few drivers implement flush, because there is usually nothing to do at close time unless a real release is involved.
These rules hold even if the application terminates without explicitly closing the files it opened: when a process exits, the kernel automatically closes any remaining open files by invoking the close system call internally.

flush module
<code class="language-plaintext hljs">#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/cdev.h>
#include <linux/miscdevice.h>
#include <linux/sched/signal.h>
#define MISC_NAME "miscdriver"
static int temp_data = 0;
static int misc_open(struct inode *inode, struct file *file)
{
printk("misc_open.\n");
return 0;
}
static long misc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
switch (cmd) {
case 0x100:
if (copy_from_user(&temp_data, (int *)arg, sizeof(int)))
return -EFAULT;
break;
case 0x101:
if (copy_to_user((int *)arg, &temp_data, sizeof(int)))
return -EFAULT;
break;
}
return 0;
}
static int misc_release(struct inode *inode, struct file *file)
{
printk("%s line %d, normal release flow.\n", __func__, __LINE__);
dump_stack();
return 0;
}
int misc_flush(struct file *file, fl_owner_t id)
{
printk("%s line %d, file->f_count = %ld, id = 0x%px.\n",
__func__, __LINE__, file_count(file), id);
return 0;
}
static const struct file_operations misc_fops = {
.owner = THIS_MODULE,
.open = misc_open,
.unlocked_ioctl = misc_ioctl,
.flush = misc_flush,
.release = misc_release,
};
static struct miscdevice misc_dev = {
.minor = MISC_DYNAMIC_MINOR,
.name = MISC_NAME,
.fops = &misc_fops,
};
static int __init misc_init(void)
{
int ret;
ret = misc_register(&misc_dev);
if (ret) {
printk("misc_register error.\n");
return ret;
}
return 0;
}
static void __exit misc_exit(void)
{
misc_deregister(&misc_dev);
}
module_init(misc_init);
module_exit(misc_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("czl");</code>
After fork, when the child process exits, flush is called but release is not, because the reference count has not yet dropped to zero.


Reference counting logic
A character device actually involves two reference counts: one lives in struct cdev, the other comes through struct file_operations (via its owner module):


The reference counts are taken in chrdev_open and dropped again in __fput:


Depending on how the reference count ties the lifetimes of objects at different levels together, refcount schemes fall roughly into two classes. In the first, the lower-level and upper-level lifetimes are independent: each object manages its own lifetime with its own count, and the lower-level object can exist without the upper-level one. struct files_struct and struct file work this way; the two objects are loosely coupled. In the second class, a subordinate object cannot exist without its parent; directory structure is of this kind. To avoid recursively decrementing the refcounts of all subdirectories when removing a parent directory, the kernel simply refuses to remove a non-empty directory. Take ext2_rmdir as an example (see the sketch below):
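For reference, ext2_rmdir is roughly the following (abridged from fs/ext2/namei.c; check your kernel tree for the exact version): the directory is unlinked only if it is already empty, otherwise -ENOTEMPTY is returned, so no recursive refcount walk is ever needed.
<code class="language-plaintext hljs">static int ext2_rmdir(struct inode *dir, struct dentry *dentry)
{
    struct inode *inode = d_inode(dentry);
    int err = -ENOTEMPTY;
    /* refuse to remove a directory that still has entries */
    if (ext2_empty_dir(inode)) {
        err = ext2_unlink(dir, dentry);
        if (!err) {
            inode->i_size = 0;
            inode_dec_link_count(inode); /* account for the removed "." entry */
            inode_dec_link_count(dir);   /* parent loses the link from "..": */
        }
    }
    return err;
}</code>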

So how does rm -fr manage it? It works recursively, deleting from the bottom up, starting with the deepest empty directories (a user-space sketch follows).
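A user-space illustration of the same bottom-up idea (my sketch, not how coreutils rm is literally implemented): nftw() with FTW_DEPTH visits children before their parent, so rmdir() only ever sees directories that are already empty.
<code class="language-plaintext hljs">#define _XOPEN_SOURCE 500
#include <ftw.h>
#include <stdio.h>
#include <sys/stat.h>
static int rm_one(const char *path, const struct stat *sb, int typeflag, struct FTW *ftwbuf)
{
    int ret = remove(path); /* unlink() for files, rmdir() for (now empty) dirs */
    if (ret)
        perror(path);
    return ret;
}
int main(int argc, char *argv[])
{
    if (argc != 2) {
        fprintf(stderr, "usage: %s <dir>\n", argv[0]);
        return 1;
    }
    /* FTW_DEPTH: post-order traversal, deepest entries first */
    return nftw(argv[1], rm_one, 64, FTW_DEPTH | FTW_PHYS) ? 1 : 0;
}</code>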

This distinction is similar to the difference between aggregation and composition in C++ design patterns.

File cleanup in the fork case:
Although the forked child allocates its own struct files_struct, it shares the parent's struct file objects and only increments their reference counts. The test module and test case are as follows:
<code class="language-plaintext hljs">#include <fcntl.h>
#include <stdio.h>
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <sys/ioctl.h>
int main(int argc, const char *argv[])
{
int fd;
fd = open("/dev/miscdriver", O_RDWR);
if(fd < 0) {
printf("%s line %d. fatal error.\n", __func__, __LINE__);
return -1;
}
pid_t pid = fork();
if(pid > 0) {
while(1) {
printf("this is parent ppid:%d child:%d\n",getpid(),pid);
sleep(1);
}
} else if(pid == 0) {
printf("this is child ppid:%d child:%d\n",getppid(),getpid());
sleep(1);
close(fd);
return 0;
} else {
perror("fork");
}
close(fd);
return 0;
}</code>
<code class="language-plaintext hljs">#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/cdev.h>
#include <linux/miscdevice.h>
#include <linux/sched/signal.h>
#define MISC_NAME "miscdriver"
static int temp_data = 0;
static int misc_open(struct inode *inode, struct file *file)
{
printk("misc_open.\n");
return 0;
}
static long misc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
switch (cmd) {
case 0x100:
if (copy_from_user(&temp_data, (int *)arg, sizeof(int)))
return -EFAULT;
break;
case 0x101:
if (copy_to_user((int *)arg, &temp_data, sizeof(int)))
return -EFAULT;
break;
}
return 0;
}
static int misc_release(struct inode *inode, struct file *file)
{
printk("%s line %d, normal release flow.\n", __func__, __LINE__);
dump_stack();
return 0;
}
static const struct file_operations misc_fops = {
.owner = THIS_MODULE,
.open = misc_open,
.unlocked_ioctl = misc_ioctl,
.release = misc_release,
};
static struct miscdevice misc_dev = {
.minor = MISC_DYNAMIC_MINOR,
.name = MISC_NAME,
.fops = &misc_fops,
};
static int __init misc_init(void)
{
int ret;
ret = misc_register(&misc_dev);
if (ret) {
printk("misc_register error.\n");
return ret;
}
return 0;
}
static void __exit misc_exit(void)
{
misc_deregister(&misc_dev);
}
module_init(misc_init);
module_exit(misc_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("czl");</code>
After fork, the child shares the struct file objects opened by the parent and increments their reference counts; the driver's release is called only after both parent and child have closed the file.


If we modify the test code so that the child does not exit, and then hack into the kernel's per-task file metadata to inspect the struct file object and its reference count, we find that the miscdriver struct file pointers seen by parent and child are identical and the count is 2, meaning both parent and child hold a reference.


What happens after the child calls execve to start a new application?
<code class="language-plaintext hljs">#include <fcntl.h>
#include <stdio.h>
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <sys/ioctl.h>
int main(int argc, const char *argv[])
{
int fd;
fd = open("/dev/miscdriver", O_RDWR);
if(fd < 0) {
printf("%s line %d. fatal error.\n", __func__, __LINE__);
return -1;
}
pid_t pid = fork();
if(pid > 0) {
while(1) {
printf("this is parent ppid:%d child:%d\n",getpid(),pid);
sleep(1);
}
} else if(pid == 0) {
char * envp[ ]={"PATH=/bin",0};
char * argv[ ]={"helloword",(char *)0};
printf("this is child ppid:%d child:%d\n",getppid(),getpid());
execve("./helloword", argv, envp);
while(1) {
sleep(1);
}
close(fd);
return 0;
} else {
perror("fork");
}
close(fd);
return 0;
}</code>
The process started by execve:


After execve, the file descriptors the child had open are unaffected:

With the CLOSE_ON_EXEC flag set, the child no longer shares the miscdriver file with the parent (a sketch of setting the flag follows):
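Setting the flag could look like this in the test program above (a hypothetical fragment, not the article's exact code); either variant marks the descriptor in the fdtable's close_on_exec bitmap discussed next.
<code class="language-plaintext hljs">#include <fcntl.h>
static int open_cloexec(const char *path)
{
    /* variant 1: ask for the flag at open() time */
    int fd = open(path, O_RDWR | O_CLOEXEC);
    /* variant 2: set it later on an already-open descriptor */
    if (fd >= 0) {
        int flags = fcntl(fd, F_GETFD);
        if (flags >= 0)
            fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
    }
    return fd;
}</code>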


How close_on_exec is recorded:

FDs marked close-on-exec are recorded separately in the close_on_exec bitmap of the fdtable.

For a single-threaded process, CLOSE_ON_EXEC causes the file to be fput while the execve loader is loading the new application, and the file's release runs when the kernel returns to user space.


do_close_on_exec call stack
filp_close calls fput directly; if the reference count drops to zero, the file's release operation is triggered on the way back to user space (abridged sketch below).
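For context, do_close_on_exec is roughly the following (abridged and paraphrased from fs/file.c of a ~5.x kernel, not a verbatim copy): it walks the close_on_exec bitmap mentioned above, detaches each marked descriptor, and hands the file to filp_close().
<code class="language-plaintext hljs">void do_close_on_exec(struct files_struct *files)
{
    unsigned i;
    struct fdtable *fdt;
    spin_lock(&files->file_lock);
    for (i = 0; ; i++) {
        unsigned long set;
        unsigned fd = i * BITS_PER_LONG;
        fdt = files_fdtable(files);
        if (fd >= fdt->max_fds)
            break;
        set = fdt->close_on_exec[i]; /* one bitmap word at a time */
        fdt->close_on_exec[i] = 0;
        for (; set; fd++, set >>= 1) {
            struct file *file;
            if (!(set & 1))
                continue;
            file = fdt->fd[fd];
            if (!file)
                continue;
            rcu_assign_pointer(fdt->fd[fd], NULL);
            __put_unused_fd(files, fd);
            spin_unlock(&files->file_lock);
            filp_close(file, files); /* fput(): release may be scheduled via task work */
            spin_lock(&files->file_lock);
        }
    }
    spin_unlock(&files->file_lock);
}</code>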

When a file is first created, its reference count is initialized to 1.

If a process is multi-threaded, what happens to its other threads after execve is called?
<code class="language-plaintext hljs">#include<stdio.h>
#include<unistd.h>
#include<stdlib.h>
#include<string.h>
#include<sys/wait.h>
#include<pthread.h>
static pthread_t pt1, pt2, pt3;
void *thr_fn(void *arg)
{
pid_t pid;
int counter = 600;
pid = fork();
if (pid == 0) {
printf("%s line %d, child.\n", __func__, __LINE__);
execve("./execve.out", NULL, NULL);
} else {
printf("%s line %d, father.\n", __func__, __LINE__);
}
while (counter) {
printf("%s line %d pid %d.\n", __func__, __LINE__, getpid());
sleep(1);
counter --;
}
return NULL;
}
void create_thread(void)
{
pthread_create(&pt1, NULL, thr_fn, NULL);
pthread_create(&pt2, NULL, thr_fn, NULL);
pthread_create(&pt3, NULL, thr_fn, NULL);
return;
}
int main(void)
{
pid_t pid;
int status;
pid = fork();
if (pid == 0) {
printf("%s line %d, childpid %d.\n", __func__, __LINE__, getpid());
create_thread();
execve("./execve.out", NULL, NULL);
pthread_join(pt1, NULL);
pthread_join(pt2, NULL);
pthread_join(pt3, NULL);
return 99;
} else {
printf("%s line %d, parent.\n", __func__, __LINE__);
create_thread();
wait(&status);
printf("%s line %d, report child exit status %d.\n", __func__, __LINE__, WEXITSTATUS(status));
}
return 0;
}</code>

From the test, the threads created by the child process are killed by the execve call, and the process is effectively replaced. The kernel call path is:
load_elf_binary->flush_old_exec->de_thread->zap_other_threads....
So after execve a multi-threaded process becomes a single-threaded process with only the main thread left, and the process name changes to the name of the file passed to execve.
After the main thread exits via pthread_exit, what happens to the other threads?
<code class="language-plaintext hljs">#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <signal.h>
#include <sys/ioctl.h>
static pthread_mutex_t mutex;
static pthread_mutexattr_t attr;
static void open_device(void)
{
int fd;
char *char_demo_node0 = "/dev/char_demo0";
fd = open(char_demo_node0,O_RDWR|O_NONBLOCK);
if(fd < 0)
{
printf("%s open fail!!!\n",char_demo_node0);
exit(-1);
}
else
{
printf("%s open success !!!\n",char_demo_node0);
}
}
void* func(void* arg)
{
while(1) {
pthread_mutex_lock(&mutex);
printf("%s line %d, get pid %d\n", __func__, __LINE__, getpid());
pthread_mutex_unlock(&mutex);
sleep(1);
}
return NULL;
}
#define thread_count 100
int main(void)
{
int ret;
pthread_t p[thread_count] = {0};
int i = 0;
signal(SIGCHLD,SIG_IGN);
//open_device();
if(( ret = pthread_mutexattr_init(&attr)) != 0)
{
fprintf(stderr, "create mutex attribute error. msg:%s", strerror(ret));
exit(1);
}
pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
pthread_mutex_init(&mutex, &attr);
char str1[8], str2[8];
sprintf(str1, "abcdefg");
sprintf(str2, "1234567");
for( i = 0; i < thread_count / 2; i ++) {
if((ret = pthread_create(&p[2*i + 0], NULL, func, str1)) != 0) {
fprintf(stderr, "create thread error. msg:%s", strerror(ret));
exit(1);
}
if((ret = pthread_create(&p[2*i + 1], NULL, func, str2)) != 0) {
fprintf(stderr, "create thread error. msg:%s", strerror(ret));
exit(1);
}
}
pthread_exit(NULL);
return 0;
}</code>
The main thread becomes a zombie,

while the other 100 threads keep running normally.


A clever use of fput
If a developer manages to get hold of some struct file object, they can even release the file in place of its owner process, calling ops->release from a context other than the owner's and dropping the file's hold on the driver (a hypothetical sketch follows).
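A hypothetical sketch of the idea (illustration only: fget_task() exists in recent kernels but is not exported to modules, and the second fput() is precisely the behind-the-owner's-back part; it is only safe while the owner really still holds its own reference):
<code class="language-plaintext hljs">#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/sched/task.h>
/* Drop the owner's reference to fd on its behalf: when f_count reaches zero,
 * ops->release runs from our task-work context instead of the owner's. */
static void steal_and_release_fd(struct task_struct *owner, unsigned int fd)
{
    struct file *filp = fget_task(owner, fd); /* takes a reference of our own */
    if (!filp)
        return;
    fput(filp); /* drop the reference we just took */
    fput(filp); /* also drop the owner's reference: this is the hack */
}</code>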






Since this is done behind the owner's back, there are consequences: when the owner later goes through its exit path and finds that the file has already been released, the kernel prints two error messages complaining that the file no longer exists, so why is it being removed again?

Besides calling task_work_run directly to release a file, there is another function that releases a file synchronously, __fput_sync; essentially both paths end up calling __fput to do the actual release. Note, however, that __fput_sync may only be used from kernel threads, for example from a workqueue; calling it from a user-space task context triggers a BUG_ON.

__fput_sync was added to the kernel in 3.5:

Also, looking at how struct file is released, we can see that struct file is not deeply bound to any particular process or thread. A struct file is created in the context of some thread and can then be shared across the whole system, through thread creation, process creation, or even APIs such as sendfile, but it carries no owner attribute of its own, and the context that releases it does not have to be the one that created it. struct file has no field pointing back to process resources; its lifetime is managed by its own counter, f_count, and it is referenced at least once by every process that uses it. So, unless it is released deliberately, a struct file can outlive the processes or threads that reference it. In design-pattern terms, the relationship between struct file and a process is aggregation, not composition.
The context in which files are closed
There are two contexts in which a file can be closed. One is a process coming down from user space, which takes the right-hand branch in the figure below; its distinguishing feature is that the task-work queue in the process structure can be run on the way back to user space.
For interrupt context and kernel threads, either there is no task work to attach to or there is no return to user space, so a delayed work item is created instead and the release is handled centrally in a kernel-thread context (a sketch of the two branches follows).
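Put together, the core of fput() is roughly the following (paraphrased and abridged from fs/file_table.c of a ~5.x kernel, matching the call chain quoted earlier; not a verbatim copy):
<code class="language-plaintext hljs">void fput(struct file *file)
{
    if (atomic_long_dec_and_test(&file->f_count)) {
        struct task_struct *task = current;
        if (likely(!in_interrupt() && !(task->flags & PF_KTHREAD))) {
            /* user task: ____fput runs as task work on return to user space */
            init_task_work(&file->f_u.fu_rcuhead, ____fput);
            if (!task_work_add(task, &file->f_u.fu_rcuhead, true))
                return;
            /* the task is already exiting and cannot take more work: fall through */
        }
        /* interrupt context or kernel thread: defer to the delayed_fput worker */
        if (llist_add(&file->f_u.fu_llist, &delayed_fput_list))
            schedule_delayed_work(&delayed_fput_work, 1);
    }
}</code>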

References
Linux内核进程,线程,进程组,会话组织模型以及进程管理 (papaofdoudou's blog)
open and release - Linux Device Drivers, Second Edition [Book]
Initialization and Shutdown - Linux Device Drivers, Second Edition [Book]
