实验采用阿里云主机完成,操作系统是Ubuntu18.04,下载好linux-5.4.34.tar.xz 、mykernel-2.0_for_linux-5.4.34.patch相关文件。
相关配置:
xz -d linux-5.4.34.tar.xz
tar -xvf linux-5.4.34.tar
cd linux-5.4.34
patch -p1 < ../mykernel-2.0_for_linux-5.4.34.patch
sudo apt install build-essential libncurses-dev bison flex libssl-dev libelf-dev
make defconfig
make -j4
sudo apt install qemu
qemu-system-x86_64 -curses -kernel arch/x86/boot/bzImage
运行结果:
在mykernel文件夹下面,可以看到mymain.c 和 myinterrupt.c,代码如下所示:
1 void __init my_start_kernel(void) 2 { 3 int i = 0; 4 while(1) 5 { 6 i++; 7 if(i%100000 == 0) 8 pr_notice("my_start_kernel here %d \n",i); 9 } 10 }
/*
 * Invoked on each timer interrupt (pre-scheduling version):
 * only logs a marker to show the periodic interrupt fires.
 */
void my_timer_handler(void)
{
    pr_notice("\n>>>>>>>>>>>>>>>>>my_timer_handler here<<<<<<<<<<<<<<<<<<\n\n");
}
可以看到在my_start_kernel里面程序循环打印"my_start_kernel here %d \n",然后mykernel能够周期性的产生时钟中断,中断处理程序就会调用my_timer_handler函数,此时就会打印"\n>>>>>>>>>>>>>>>>>my_timer_handler here<<<<<<<<<<<<<<<<<<\n\n"。
首先添加自己的进程控制块,用于进程的管理,可以看到进程拥有三种状态:unrunnable、runnable和stopped,每个进程都拥有自己的堆栈,并由ip、sp(对应eip寄存器和esp寄存器)进行控制。pcb块间以链表的形式串联起来。
#ifndef MYPCB_H
#define MYPCB_H

#define MAX_TASK_NUM        4            /* number of PCBs created at boot */
#define KERNEL_STACK_SIZE   (1024 * 2)   /* per-process stack, in unsigned longs */

/* CPU-specific state of this task */
struct Thread {
    unsigned long ip;  /* saved instruction pointer (rip) */
    unsigned long sp;  /* saved stack pointer (rsp) */
};

/* Process control block; PCBs are chained into a circular list via `next`. */
typedef struct PCB {
    int pid;
    volatile long state;  /* -1 unrunnable, 0 runnable, >0 stopped */
    unsigned long stack[KERNEL_STACK_SIZE];  /* private kernel stack */
    /* CPU-specific state of this task */
    struct Thread thread;
    unsigned long task_entry;  /* entry point of the process body */
    struct PCB *next;          /* next PCB in the circular run list */
} tPCB;

void my_schedule(void);

#endif /* MYPCB_H */
修改mymain.c的my_start_kernel函数,此函数初始化了MAX_TASK_NUM个进程控制块,然后内嵌了汇编代码,这段代码会把pid为0的进程的sp和ip写入寄存器。
1 #include <linux/types.h> 2 #include <linux/string.h> 3 #include <linux/ctype.h> 4 #include <linux/tty.h> 5 #include <linux/vmalloc.h> 6 7 8 #include "mypcb.h" 9 10 tPCB task[MAX_TASK_NUM]; 11 tPCB * my_current_task = NULL; 12 volatile int my_need_sched = 0; 13 14 void my_process(void); 15 16 17 void __init my_start_kernel(void) 18 { 19 int pid = 0; 20 int i; 21 /* Initialize process 0*/ 22 task[pid].pid = pid; 23 task[pid].state = 0;/* -1 unrunnable, 0 runnable, >0 stopped */ 24 task[pid].task_entry = task[pid].thread.ip = (unsigned long)my_process; 25 task[pid].thread.sp = (unsigned long)&task[pid].stack[KERNEL_STACK_SIZE-1]; 26 task[pid].next = &task[pid]; 27 /*fork more process */ 28 for(i=1;i<MAX_TASK_NUM;i++) 29 { 30 memcpy(&task[i],&task[0],sizeof(tPCB)); 31 task[i].pid = i; 32 task[i].thread.sp = (unsigned long)(&task[i].stack[KERNEL_STACK_SIZE-1]); 33 task[i].next = task[i-1].next; 34 task[i-1].next = &task[i]; 35 } 36 /* start process 0 by task[0] */ 37 pid = 0; 38 my_current_task = &task[pid]; 39 asm volatile( 40 "movq %1,%%rsp\n\t" /* set task[pid].thread.sp to rsp */ 41 "pushq %1\n\t" /* push rbp */ 42 "pushq %0\n\t" /* push task[pid].thread.ip */ 43 "ret\n\t" /* pop task[pid].thread.ip to rip */ 44 : 45 : "c" (task[pid].thread.ip),"d" (task[pid].thread.sp) /* input c or d mean %ecx/%edx*/ 46 ); 47 } 48 49 int i = 0; 50 51 void my_process(void) 52 { 53 while(1) 54 { 55 i++; 56 if(i%10000000 == 0) 57 { 58 printk(KERN_NOTICE "this is process %d -\n",my_current_task->pid); 59 if(my_need_sched == 1) 60 { 61 my_need_sched = 0; 62 my_schedule(); 63 } 64 printk(KERN_NOTICE "this is process %d +\n",my_current_task->pid); 65 } 66 } 67 }
修改my_timer_handler函数,实现my_schedule调度函数,其中my_timer_handler循环的将my_need_sched置为1,然后进行进程调度。my_schedule函数选择进程链表中的下一个就绪进程进行切换,该函数执行的具体任务为保存当前进程(prev)的上下文,并调出下一个进程(next)的上下文。
1 #include <linux/types.h> 2 #include <linux/string.h> 3 #include <linux/ctype.h> 4 #include <linux/tty.h> 5 #include <linux/vmalloc.h> 6 7 #include "mypcb.h" 8 9 extern tPCB task[MAX_TASK_NUM]; 10 extern tPCB * my_current_task; 11 extern volatile int my_need_sched; 12 volatile int time_count = 0; 13 14 /* 15 * Called by timer interrupt. 16 * it runs in the name of current running process, 17 * so it use kernel stack of current running process 18 */ 19 void my_timer_handler(void) 20 { 21 if(time_count%1000 == 0 && my_need_sched != 1) 22 { 23 printk(KERN_NOTICE ">>>my_timer_handler here<<<\n"); 24 my_need_sched = 1; 25 } 26 time_count ++ ; 27 return; 28 } 29 30 void my_schedule(void) 31 { 32 tPCB * next; 33 tPCB * prev; 34 35 if(my_current_task == NULL 36 || my_current_task->next == NULL) 37 { 38 return; 39 } 40 printk(KERN_NOTICE ">>>my_schedule<<<\n"); 41 /* schedule */ 42 next = my_current_task->next; 43 prev = my_current_task; 44 if(next->state == 0)/* -1 unrunnable, 0 runnable, >0 stopped */ 45 { 46 my_current_task = next; 47 printk(KERN_NOTICE ">>>switch %d to %d<<<\n",prev->pid,next->pid); 48 /* switch to next process */ 49 asm volatile( 50 "pushq %%rbp\n\t" /* save rbp of prev */ 51 "movq %%rsp,%0\n\t" /* save rsp of prev */ 52 "movq %2,%%rsp\n\t" /* restore rsp of next */ 53 "movq $1f,%1\n\t" /* save rip of prev */ 54 "pushq %3\n\t" 55 "ret\n\t" /* restore rip of next */ 56 "1:\t" /* next process start here */ 57 "popq %%rbp\n\t" 58 : "=m" (prev->thread.sp),"=m" (prev->thread.ip) 59 : "m" (next->thread.sp),"m" (next->thread.ip) 60 ); 61 } 62 return; 63 }
重新编译运行,结果如下:
首先操作系统会启动0号进程,相关代码分析如下:
movq %1,%%rsp:将task[pid].thread.sp(即%1)写入rsp寄存器,使rsp指向0号进程自己堆栈的栈顶
pushq %1:将task[pid].thread.sp再次入栈,充当新进程的"已保存rbp"(此时rbp与rsp相同,作为初始栈帧基址)
pushq %0:压栈即将执行的指令地址,%0指task[pid].thread.ip
ret:将栈顶位置task[0].thread.ip,也就是my_process(void)函数的地址放入rip寄存器
接下来是my_timer_handler中断处理程序,该函数每隔1000次时钟中断判断 my_need_sched 是否不等于1,如果是则将其置为1,使 my_process 执行 my_schedule() ,然后进行进程切换,相关代码分析如下:
pushq %%rbp:保存prev进程的rbp的值,入栈
movq %%rsp,%0:将当前rsp寄存器的值保存到prev->thread.sp,这时rsp寄存器指向进程的栈顶地址,也就是保存prev进程栈顶地址
movq %2,%%rsp:将next进程的栈顶地址next->thread.sp放入rsp寄存器,完成了进程0和进程1的堆栈切换
movq $1f,%1:保存prev进程当前rip寄存器的值到prev->thread.ip,$1f指标号1
pushq %3:把即将执行的next进程的指令地址next->thread.ip入栈
ret:将压入栈的next->thread.ip放入rip寄存器
popq %%rbp:将next进程堆栈基地址从堆栈中恢复到rbp寄存器中
原文:https://www.cnblogs.com/cgsilent/p/12871351.html