The components of the scheduling system relate to each other as follows; each is described in turn below:
1 task_struct members

struct task_struct {
	...
	int prio, static_prio, normal_prio;	/* dynamic, static and normal priority */

	const struct sched_class *sched_class;	/* scheduler class the task belongs to */
	struct sched_entity se;			/* scheduling entity used by CFS */
	unsigned int rt_priority;		/* real-time priority */

	unsigned int policy;			/* scheduling policy (SCHED_NORMAL, ...) */
	cpumask_t cpus_allowed;			/* CPUs the task may run on */
	unsigned int time_slice;		/* remaining time slice (round-robin tasks) */
	...
};
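These fields can be observed from user space through the standard POSIX scheduling calls. A minimal sketch using the glibc wrappers (error handling omitted):

#include <stdio.h>
#include <sched.h>
#include <sys/resource.h>

int main(void)
{
	struct sched_param sp;
	int policy = sched_getscheduler(0);	/* 0 = the calling process */

	sched_getparam(0, &sp);			/* fills sp.sched_priority (rt_priority) */
	printf("policy=%d rt_priority=%d nice=%d\n",
	       policy, sp.sched_priority,
	       getpriority(PRIO_PROCESS, 0));	/* nice value feeds static_prio */
	return 0;
}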
2 Scheduler classes

struct sched_class {
	const struct sched_class *next;

	void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup);
	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep);
	void (*yield_task) (struct rq *rq);

	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p);

	struct task_struct * (*pick_next_task) (struct rq *rq);
	void (*put_prev_task) (struct rq *rq, struct task_struct *p);

#ifdef CONFIG_SMP
	unsigned long (*load_balance) (struct rq *this_rq, int this_cpu,
			struct rq *busiest, unsigned long max_load_move,
			struct sched_domain *sd, enum cpu_idle_type idle,
			int *all_pinned, int *this_best_prio);

	int (*move_one_task) (struct rq *this_rq, int this_cpu,
			      struct rq *busiest, struct sched_domain *sd,
			      enum cpu_idle_type idle);
#endif

	void (*set_curr_task) (struct rq *rq);
	void (*task_tick) (struct rq *rq, struct task_struct *p);
	void (*task_new) (struct rq *rq, struct task_struct *p);
};
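Each scheduler fills such a sched_class instance with its own hook functions, and the next pointers chain the classes in priority order (real-time before fair before idle). As an abridged sketch, the CFS instance in kernel/sched_fair.c of this kernel generation looks roughly as follows; exact members vary between versions:

static const struct sched_class fair_sched_class = {
	.next			= &idle_sched_class,	/* classes form a priority-ordered list */
	.enqueue_task		= enqueue_task_fair,
	.dequeue_task		= dequeue_task_fair,
	.yield_task		= yield_task_fair,
	.pick_next_task		= pick_next_task_fair,
	.put_prev_task		= put_prev_task_fair,
	.task_tick		= task_tick_fair,
	.task_new		= task_new_fair,
	...
};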
Processes cannot interact with the scheduler classes directly; instead, each scheduling policy constant SCHED_XXX is mapped onto one of the classes:
SCHED_NORMAL, SCHED_BATCH, SCHED_IDLE  ->  fair_sched_class  (completely fair scheduler)
SCHED_RR, SCHED_FIFO                   ->  rt_sched_class    (real-time scheduler)
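From user space the policy, and with it the scheduler class, is selected with sched_setscheduler(). A minimal sketch (the real-time policies require root or CAP_SYS_NICE):

#include <sched.h>
#include <stdio.h>

int main(void)
{
	struct sched_param sp = { .sched_priority = 10 };

	/* request the round-robin real-time policy; inside the kernel
	 * this moves the task onto rt_sched_class */
	if (sched_setscheduler(0, SCHED_RR, &sp) == -1) {
		perror("sched_setscheduler");
		return 1;
	}
	return 0;
}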
3 Run queues
/*
 * This is the main, per-CPU runqueue data structure.
 *
 * Locking rule: those places that want to lock multiple runqueues
 * (such as the load balancing or the thread migration code), lock
 * acquire operations must be ordered by ascending &runqueue.
 */
struct rq {
	/* runqueue lock: */
	spinlock_t lock;

	/*
	 * nr_running and cpu_load should be in the same cacheline because
	 * remote CPUs use both these fields when doing load calculation.
	 */
	unsigned long nr_running;
	#define CPU_LOAD_IDX_MAX 5
	unsigned long cpu_load[CPU_LOAD_IDX_MAX];
	unsigned char idle_at_tick;
#ifdef CONFIG_NO_HZ
	unsigned char in_nohz_recently;
#endif
	/* capture load from *all* tasks on this cpu: */
	struct load_weight load;
	unsigned long nr_load_updates;
	u64 nr_switches;

	struct cfs_rq cfs;
#ifdef CONFIG_FAIR_GROUP_SCHED
	/* list of leaf cfs_rq on this cpu: */
	struct list_head leaf_cfs_rq_list;
#endif
	struct rt_rq rt;

	/*
	 * This is part of a global counter where only the total sum
	 * over all CPUs matters. A task can increase this counter on
	 * one CPU and if it got migrated afterwards it may decrease
	 * it on another CPU. Always updated under the runqueue lock:
	 */
	unsigned long nr_uninterruptible;

	struct task_struct *curr, *idle;
	unsigned long next_balance;
	struct mm_struct *prev_mm;

	u64 clock, prev_clock_raw;
	s64 clock_max_delta;

	unsigned int clock_warps, clock_overflows;
	u64 idle_clock;
	unsigned int clock_deep_idle_events;
	u64 tick_timestamp;

	atomic_t nr_iowait;

	...
	struct lock_class_key rq_lock_key;
};
Every CPU has its own run queue; all of them are collected in the per-CPU array runqueues (kernel/sched.c):

static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
The following macros give access to a specific run queue:

#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
#define this_rq()		(&__get_cpu_var(runqueues))
#define task_rq(p)		cpu_rq(task_cpu(p))
#define cpu_curr(cpu)		(cpu_rq(cpu)->curr)
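A typical user of these macros is the scheduler's own lookup-and-lock helper: because a task may migrate between CPUs, the queue must be re-checked after taking the lock. A simplified sketch modeled on task_rq_lock() in kernel/sched.c (lockdep annotations omitted, the retry label replaced by a loop):

static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
{
	struct rq *rq;

	for (;;) {
		local_irq_save(*flags);
		rq = task_rq(p);	/* queue of the CPU p currently sits on */
		spin_lock(&rq->lock);
		if (likely(rq == task_rq(p)))
			return rq;	/* still the right queue: keep the lock */
		/* p migrated to another CPU in the meantime: retry */
		spin_unlock_irqrestore(&rq->lock, *flags);
	}
}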
4 Scheduling entities
/*
 * CFS stats for a schedulable entity (task, task-group etc)
 *
 * Current field usage histogram:
 *
 *     4 se->block_start
 *     4 se->run_node
 *     4 se->sleep_start
 *     6 se->load.weight
 */
struct sched_entity {
	struct load_weight load;	/* for load-balancing */
	struct rb_node run_node;
	unsigned int on_rq;

	u64 exec_start;
	u64 sum_exec_runtime;
	u64 vruntime;
	u64 prev_sum_exec_runtime;

};
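These fields carry CFS's accounting: exec_start records when the entity last began running, sum_exec_runtime the real CPU time consumed, and vruntime the weighted virtual runtime that determines the entity's position in the CFS red-black tree (via run_node). A simplified sketch of the accounting step, modeled on __update_curr() in kernel/sched_fair.c (the fast path for nice-0 tasks is elided):

static void __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
			  unsigned long delta_exec)
{
	/* real time the entity has spent on the CPU */
	curr->sum_exec_runtime += delta_exec;

	/* virtual time is scaled by the entity's load weight, so heavier
	 * (higher-priority) entities gather vruntime more slowly and
	 * therefore stay further left in the red-black tree */
	curr->vruntime += calc_delta_fair(delta_exec, &curr->load);
}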
Original post: http://www.cnblogs.com/songbingyu/p/3696414.html