
The Basic Principle of the Linux Kernel Preemption Patch

Date: 2017/2/27 9:44:04   Category: Linux Kernel
Author: jkl

A CPU running in the kernel is not unpreemptible everywhere: the kernel contains gaps in which preemption is safe. The basic idea of the kernel preemption patch is to treat the code regions that may run in parallel under SMP as regions in which kernel preemption is also allowed. The 2.4 kernel had just refined the kernel synchronization machinery for multiple CPUs, marking every block of instructions that must not run in parallel explicitly with spinlock and rwlock, so the patch's implementation follows naturally from that work. Concretely, a preempt_count variable is added to the process's task structure as a kernel preemption lock; it is incremented and decremented together with every spinlock and rwlock lock and unlock operation, and kernel rescheduling is permitted only while preempt_count is 0. The entry point of the preemption scheduler is preempt_schedule(), which marks the current process with the TASK_PREEMPTED state and then calls schedule(); for a process in the TASK_PREEMPTED state, schedule() does not remove it from the run queue. The main code of the kernel preemption patch is sketched below.
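Before looking at the patch code, the mechanism can be illustrated with a minimal, self-contained C sketch. This is not the patch itself: struct task, my_spin_lock() and the other names here are invented for illustration. Each task carries a preemption count that is raised on every lock acquisition and lowered on every release; only when the count drops back to zero, with a reschedule pending, is the preemption scheduler invoked.

#include <stdio.h>

/* Illustrative stand-ins for the task_struct fields the patch relies on. */
struct task {
	int preempt_count;	/* kernel preemption lock: non-zero means "do not preempt" */
	int need_resched;	/* set asynchronously when a reschedule is wanted */
};

static struct task current_task;

static void preempt_schedule_sketch(void)
{
	/* The real patch marks the task TASK_PREEMPTED and calls schedule() here. */
	printf("preempting the current task\n");
	current_task.need_resched = 0;
}

/* Analogue of ctx_sw_off(): taking any spinlock disables kernel preemption. */
static void my_spin_lock(void)
{
	current_task.preempt_count++;
	/* ... acquire the raw lock ... */
}

/* Analogue of ctx_sw_on(): releasing a lock re-enables preemption, and if the
 * count reaches zero while a reschedule is pending, preempts immediately. */
static void my_spin_unlock(void)
{
	/* ... release the raw lock ... */
	if (--current_task.preempt_count == 0 && current_task.need_resched)
		preempt_schedule_sketch();
}

int main(void)
{
	my_spin_lock();
	current_task.need_resched = 1;	/* e.g. an interrupt woke a higher-priority task */
	my_spin_unlock();		/* the deferred preemption happens here, at the first safe point */
	return 0;
}

The patch code itself, starting with the modified interrupt-return path, follows.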
arch/i386/kernel/entry.S:

preempt_count = 4		# the flags field of task_struct (offset 4) is reused as preempt_count; flags itself has been moved elsewhere

ret_from_exception:			# return from an exception
#ifdef CONFIG_SMP
	GET_CURRENT(%ebx)
	movl processor(%ebx),%eax
	shll $CONFIG_X86_L1_CACHE_SHIFT,%eax
	movl SYMBOL_NAME(irq_stat)(,%eax),%ecx		# softirq_active
	testl SYMBOL_NAME(irq_stat)+4(,%eax),%ecx	# softirq_mask
#else
	movl SYMBOL_NAME(irq_stat),%ecx		# softirq_active
	testl SYMBOL_NAME(irq_stat)+4,%ecx	# softirq_mask
#endif
	jne handle_softirq
#ifdef CONFIG_PREEMPT
	cli
	incl preempt_count(%ebx)	# the exception entry has no instruction that disables kernel preemption, so bump the count here to pair with ret_from_intr
#endif
ENTRY(ret_from_intr)			# return from a hardware interrupt
	GET_CURRENT(%ebx)
#ifdef CONFIG_PREEMPT
	cli
	decl preempt_count(%ebx)	# restore the kernel preemption count
#endif
	movl EFLAGS(%esp),%eax		# mix EFLAGS and CS
	movb CS(%esp),%al
	testl $(VM_MASK | 3),%eax	# return to VM86 mode or non-supervisor?
	jne ret_with_reschedule
#ifdef CONFIG_PREEMPT
	cmpl $0,preempt_count(%ebx)
	jnz restore_all			# a non-zero preempt_count means kernel preemption is disabled
	cmpl $0,need_resched(%ebx)
	jz restore_all
	movl SYMBOL_NAME(irq_stat)+irq_stat_local_bh_count CPU_INDX,%ecx
	addl SYMBOL_NAME(irq_stat)+irq_stat_local_irq_count CPU_INDX,%ecx
	jnz restore_all
	incl preempt_count(%ebx)
	sti
	call SYMBOL_NAME(preempt_schedule)
	jmp ret_from_intr		# the newly scheduled process returns through ret_from_intr, restoring the preemption count before returning
#else
	jmp restore_all
#endif

	ALIGN
handle_softirq:
#ifdef CONFIG_PREEMPT
	cli
	GET_CURRENT(%ebx)
	incl preempt_count(%ebx)
	sti
#endif
	call SYMBOL_NAME(do_softirq)
	jmp ret_from_intr

	ALIGN
reschedule:
	call SYMBOL_NAME(schedule)	# test
	jmp ret_from_sys_call

include/asm/hw_irq.h:

...
#ifdef CONFIG_PREEMPT
#define BUMP_CONTEX_SWITCH_LOCK \
	GET_CURRENT \
	"incl 4(%ebx)\n\t"
#else
#define BUMP_CONTEX_SWITCH_LOCK
#endif

/* SAVE_ALL: save the register context at hardware interrupt entry */
#define SAVE_ALL \
	"cld\n\t" \
	"pushl %es\n\t" \
	"pushl %ds\n\t" \
	"pushl %eax\n\t" \
	"pushl %ebp\n\t" \
	"pushl %edi\n\t" \
	"pushl %esi\n\t" \
	"pushl %edx\n\t" \
	"pushl %ecx\n\t" \
	"pushl %ebx\n\t" \
	"movl $" STR(__KERNEL_DS) ",%edx\n\t" \
	"movl %edx,%ds\n\t" \
	"movl %edx,%es\n\t" \
	BUMP_CONTEX_SWITCH_LOCK		/* hardware interrupt entry disables kernel preemption */

include/linux/spinlock.h:

#ifdef CONFIG_PREEMPT
#define switch_lock_count()		current->preempt_count
#define in_ctx_sw_off()			(switch_lock_count().counter)	/* is the current process's preemption count non-zero? */
#define atomic_ptr_in_ctx_sw_off()	(&switch_lock_count())

/* disable kernel preemption */
#define ctx_sw_off() \
do { \
	atomic_inc(atomic_ptr_in_ctx_sw_off()); /* increment the current process's preemption count */ \
} while (0)

/* allow kernel preemption again, without rescheduling */
#define ctx_sw_on_no_preempt() \
do { \
	atomic_dec(atomic_ptr_in_ctx_sw_off()); /* decrement the current process's preemption count */ \
} while (0)

/* allow kernel preemption and carry it out if one is pending */
#define ctx_sw_on() \
do { \
	if (atomic_dec_and_test(atomic_ptr_in_ctx_sw_off()) && \
			current->need_resched) \
		preempt_schedule(); \
} while (0)

#define spin_lock(lock) \
do { \
	ctx_sw_off(); /* entering a spinlock disables preemption */ \
	_raw_spin_lock(lock); \
} while (0)

/* try to take the lock; on failure re-enable preemption and return 0 */
#define spin_trylock(lock)	({ctx_sw_off(); _raw_spin_trylock(lock) ? \
				1 : ({ctx_sw_on(); 0;});})

#define spin_unlock(lock) \
do { \
	_raw_spin_unlock(lock); \
	ctx_sw_on(); /* leaving the spinlock re-enables, and possibly performs, kernel preemption */ \
} while (0)

#define read_lock(lock)		({ctx_sw_off(); _raw_read_lock(lock);})
#define read_unlock(lock)	({_raw_read_unlock(lock); ctx_sw_on();})
#define write_lock(lock)	({ctx_sw_off(); _raw_write_lock(lock);})
#define write_unlock(lock)	({_raw_write_unlock(lock); ctx_sw_on();})
#define write_trylock(lock)	({ctx_sw_off(); _raw_write_trylock(lock) ? \
				1 : ({ctx_sw_on(); 0;});})
...

include/asm/softirq.h:

#define cpu_bh_disable(cpu)	do { ctx_sw_off(); local_bh_count(cpu)++; barrier(); } while (0)
#define cpu_bh_enable(cpu)	do { barrier(); local_bh_count(cpu)--; ctx_sw_on(); } while (0)

kernel/sched.c:

#ifdef CONFIG_PREEMPT
asmlinkage void preempt_schedule(void)
{
	while (current->need_resched) {
		ctx_sw_off();
		current->state = TASK_PREEMPTED;
		schedule();
		current->state &= ~TASK_PREEMPTED;
		ctx_sw_on_no_preempt();
	}
}
#endif
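The assembly in ret_from_intr above is compact; the decision it makes before returning to a preempted kernel context can be restated in C roughly as follows. This is a hedged sketch, not code from the patch: the helper names and the returning_to_kernel flag are invented for illustration.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative per-task and per-CPU state mirroring what the assembly reads. */
static int preempt_count;	/* task_struct preemption count */
static int need_resched;	/* reschedule requested for the current task */
static int local_bh_count;	/* softirqs being handled on this CPU */
static int local_irq_count;	/* nested hardware interrupts on this CPU */

static void preempt_schedule_sketch(void)
{
	printf("calling preempt_schedule()\n");
	need_resched = 0;
}

/* Roughly what ret_from_intr does under CONFIG_PREEMPT before restoring registers. */
static void maybe_preempt_on_interrupt_return(bool returning_to_kernel)
{
	if (!returning_to_kernel)
		return;			/* user-mode return takes the ordinary ret_with_reschedule path */
	if (preempt_count != 0)
		return;			/* preemption is locked out (a spinlock is held) */
	if (!need_resched)
		return;			/* nothing wants to run */
	if (local_bh_count + local_irq_count != 0)
		return;			/* still inside softirq/irq handling */
	preempt_count++;		/* block recursive preemption while rescheduling */
	preempt_schedule_sketch();
	preempt_count--;		/* the real code jumps back to ret_from_intr, which decrements it */
}

int main(void)
{
	need_resched = 1;
	maybe_preempt_on_interrupt_return(true);	/* preempts: count is 0 and nothing is nested */
	return 0;
}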


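schedule() itself then needs one adjustment: a task that was preempted in the middle of kernel code must stay on the run queue even though its state is no longer plain TASK_RUNNING. The fragment below isolates that decision as a sketch; the names and the TASK_PREEMPTED value are illustrative, and the sleeping-state handling is simplified compared with the real scheduler, which mirrors the check prev->state & TASK_PREEMPTED shown further down.

#include <stdio.h>

#define TASK_RUNNING		0
#define TASK_INTERRUPTIBLE	1
#define TASK_PREEMPTED		4	/* illustrative bit value; the patch defines its own */

/* Decide, as schedule() does, whether the previous task stays on the run queue. */
static int should_stay_on_runqueue(long state)
{
	if (state == TASK_RUNNING)
		return 1;		/* voluntarily rescheduling while runnable */
	if (state & TASK_PREEMPTED)
		return 1;		/* preempted inside the kernel: keep it runnable */
	return 0;			/* sleeping states are removed as usual */
}

int main(void)
{
	printf("TASK_RUNNING stays: %d\n", should_stay_on_runqueue(TASK_RUNNING));
	printf("TASK_INTERRUPTIBLE stays: %d\n", should_stay_on_runqueue(TASK_INTERRUPTIBLE));
	printf("preempted while running stays: %d\n",
	       should_stay_on_runqueue(TASK_RUNNING | TASK_PREEMPTED));
	return 0;
}

With that in mind, here is schedule() as modified by the patch: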
asmlinkage void schedule(void)
{
	struct schedule_data * sched_data;
	struct task_struct *prev, *next, *p;
	struct list_head *tmp;
	int this_cpu, c;

#ifdef CONFIG_PREEMPT
	ctx_sw_off();
#endif
	if (!current->active_mm) BUG();
need_resched_back:
	prev = current;
	this_cpu = prev->processor;

	if (in_interrupt())
		goto scheduling_in_interrupt;

	release_kernel_lock(prev, this_cpu);

	/* Do "administrative" work here while we don't hold any locks */
	if (softirq_active(this_cpu) & softirq_mask(this_cpu))
		goto handle_softirq;
handle_softirq_back:

	/*
	 * 'sched_data' is protected by the fact that we can run
	 * only one process per CPU.
	 */
	sched_data = & aligned_data[this_cpu].schedule_data;

	spin_lock_irq(&runqueue_lock);

	/* move an exhausted RR process to be last.. */
	if (prev->policy == SCHED_RR)
		goto move_rr_last;
move_rr_back:

	switch (prev->state) {
		case TASK_INTERRUPTIBLE:
			if (signal_pending(prev)) {
				prev->state = TASK_RUNNING;
				break;
			}
		default:
#ifdef CONFIG_PREEMPT
			if (prev->state & TASK_PREEMPTED)
				break;		/* if this is a kernel preemption, keep the task on the run queue */
#endif
			del_from_runqueue(prev);
#ifdef CONFIG_PREEMPT
		case TASK_PREEMPTED:
#endif
		case TASK_RUNNING:
	}
	prev->need_resched = 0;

	/*
	 * this is the scheduler proper:
	 */

repeat_schedule:
	/*
	 * Default process to select..
	 */
	next = idle_task(this_cpu);
	c = -1000;
	if (task_on_runqueue(prev))
		goto still_running;

still_running_back:
	list_for_each(tmp, &runqueue_head) {
		p = list_entry(tmp, struct task_struct, run_list);
		if (can_schedule(p, this_cpu)) {
			int weight = goodness(p, this_cpu, prev->active_mm);
			if (weight > c)
				c = weight, next = p;
		}
	}

	/* Do we need to re-calculate counters? */
	if (!c)
		goto recalculate;

	/*
	 * from this point on nothing can prevent us from
	 * switching to the next task, save this fact in
	 * sched_data.
	 */
	sched_data->curr = next;
#ifdef CONFIG_SMP
	next->has_cpu = 1;
	next->processor = this_cpu;
#endif
	spin_unlock_irq(&runqueue_lock);

	if (prev == next)
		goto same_process;

#ifdef CONFIG_SMP
	/*
	 * maintain the per-process 'last schedule' value.
	 * (this has to be recalculated even if we reschedule to
	 * the same process) Currently this is only used on SMP,
	 * and it's approximate, so we do not have to maintain
	 * it while holding the runqueue spinlock.
	 */
	sched_data->last_schedule = get_cycles();

	/*
	 * We drop the scheduler lock early (it's a global spinlock),
	 * thus we have to lock the previous process from getting
	 * rescheduled during switch_to().
	 */
#endif /* CONFIG_SMP */

	kstat.context_swtch++;
	/*
	 * there are 3 processes which are affected by a context switch:
	 *
	 * prev == .... ==> (last => next)
	 *
	 * It's the 'much more previous' 'prev' that is on next's stack,
	 * but prev is set to (the just run) 'last' process by switch_to().
	 * This might sound slightly confusing but makes tons of sense.
	 */
	prepare_to_switch();
	{
		struct mm_struct *mm = next->mm;
		struct mm_struct *oldmm = prev->active_mm;
		if (!mm) {
			if (next->active_mm) BUG();
			next->active_mm = oldmm;
			atomic_inc(&oldmm->mm_count);
			enter_lazy_tlb(oldmm, next, this_cpu);
		} else {
			if (next->active_mm != mm) BUG();
			switch_mm(oldmm, mm, next, this_cpu);
		}

		if (!prev->mm) {
			prev->active_mm = NULL;
			mmdrop(oldmm);
		}
	}

	/*
	 * This just switches the register state and the
	 * stack.
	 */
	switch_to(prev, next, prev);
	__schedule_tail(prev);

same_process:
	reacquire_kernel_lock(current);
	if (current->need_resched)
		goto need_resched_back;

#ifdef CONFIG_PREEMPT
	ctx_sw_on_no_preempt();
#endif
	return;

recalculate:
	{
		struct task_struct *p;
		spin_unlock_irq(&runqueue_lock);
		read_lock(&tasklist_lock);
		for_each_task(p)
			p->counter = (p->counter >> 1) + NICE_TO_TICKS(p->nice);
		read_unlock(&tasklist_lock);
		spin_lock_irq(&runqueue_lock);
	}
	goto repeat_schedule;

still_running:
	c = goodness(prev, this_cpu, prev->active_mm);
	next = prev;
	goto still_running_back;

handle_softirq:
	do_softirq();
	goto handle_softirq_back;

move_rr_last:
	if (!prev->counter) {
		prev->counter = NICE_TO_TICKS(prev->nice);
		move_last_runqueue(prev);
	}
	goto move_rr_back;

scheduling_in_interrupt:
	printk("Scheduling in interrupt\n");
	BUG();
	return;
}

void schedule_tail(struct task_struct *prev)
{
	__schedule_tail(prev);
#ifdef CONFIG_PREEMPT
	ctx_sw_on();
#endif
}
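Finally, note that the ctx_sw_off()/ctx_sw_on() pairs nest: every lock acquisition raises the per-task count and every release lowers it, so preemption only becomes possible again at the outermost unlock. The small self-contained demonstration below shows that property; the _sketch names are invented and this is not the patch's code.

#include <assert.h>
#include <stdio.h>

static int preempt_count;		/* per-task preemption count, as in the patch */
static int need_resched = 1;		/* a reschedule is pending the whole time */
static int preemptions;

static void ctx_sw_off_sketch(void)  { preempt_count++; }

static void ctx_sw_on_sketch(void)
{
	if (--preempt_count == 0 && need_resched)
		preemptions++;		/* the patch would call preempt_schedule() here */
}

int main(void)
{
	ctx_sw_off_sketch();		/* outer spin_lock() */
	ctx_sw_off_sketch();		/* inner spin_lock() on another lock */
	ctx_sw_on_sketch();		/* inner spin_unlock(): count is still 1, no preemption */
	assert(preemptions == 0);
	ctx_sw_on_sketch();		/* outer spin_unlock(): count hits 0, preemption happens */
	assert(preemptions == 1);
	printf("preempted %d time(s), at the outermost unlock only\n", preemptions);
	return 0;
}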


