歡迎來到Linux教程網
Linux教程網
Linux教程網
Linux教程網
Linux教程網 >> Linux編程 >> Linux編程 >> Linux啟動過程中init/main.c中的start_kernel()函數中的lock_kernel()函數

Linux啟動過程中init/main.c中的start_kernel()函數中的lock_kernel()函數

日期:2017/3/1 11:10:19   編輯:Linux編程
  1. #ifndef __LINUX_SMPLOCK_H
  2. #define __LINUX_SMPLOCK_H
  3. #ifdef CONFIG_LOCK_KERNEL

//Checks whether the kernel is configured with Big Kernel Lock support (CONFIG_LOCK_KERNEL).
//On s3c2410 it is not defined under "Code maturity level options" in arch/arm/configs/s3c2410,
//so lock_kernel() expands to the no-op in the #else branch below and does nothing.

  1. #include <linux/sched.h>
  2. #include <linux/spinlock.h>
  3. #define kernel_locked() (current->lock_depth >= 0)
  4. extern int __lockfunc __reacquire_kernel_lock(void);
  5. extern void __lockfunc __release_kernel_lock(void);
  6. /*
  7. * Release/re-acquire global kernel lock for the scheduler
  8. */
  9. #define release_kernel_lock(tsk) do { \
  10. if (unlikely((tsk)->lock_depth >= 0)) \
  11. __release_kernel_lock(); \
  12. } while (0)
  13. /*
  14. * Non-SMP kernels will never block on the kernel lock,
  15. * so we are better off returning a constant zero from
  16. * reacquire_kernel_lock() so that the compiler can see
  17. * it at compile-time.
  18. */
  19. #if defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_BKL)
  20. # define return_value_on_smp return
  21. #else
  22. # define return_value_on_smp
  23. #endif
  24. static inline int reacquire_kernel_lock(struct task_struct *task)
  25. {
  26. if (unlikely(task->lock_depth >= 0))
  27. return_value_on_smp __reacquire_kernel_lock();
  28. return 0;
  29. }
  30. extern void __lockfunc lock_kernel(void) __acquires(kernel_lock);
  31. extern void __lockfunc unlock_kernel(void) __releases(kernel_lock);
  32. #else
  33. #define lock_kernel() do { } while(0)
  34. #define unlock_kernel() do { } while(0)
  35. #define release_kernel_lock(task) do { } while(0)
  36. #define reacquire_kernel_lock(task) 0
  37. #define kernel_locked() 1
  38. #endif /* CONFIG_LOCK_KERNEL */
  39. #endif /* __LINUX_SMPLOCK_H */

//如果定義了CONFIG_LOCK_KERNEL,則編譯下面的實現代碼

//接著判斷是使用big kernel semaphore(大內核信號量)還是big kernel lock(大內核自旋鎖)
//而s3c2410中arch/arm/configs/s3c2410中沒有定義CONFIG_PREEMPT_BKL

  1. #ifdef CONFIG_PREEMPT_BKL // choose between the big kernel semaphore and the big kernel spinlock
  2. // not set in arch/arm/configs/s3c2410, so the spinlock branch (#else below) is compiled instead
  3. /*
  4. * The 'big kernel semaphore'
  5. *
  6. * This mutex is taken and released recursively by lock_kernel()
  7. * and unlock_kernel(). It is transparently dropped and reacquired
  8. * over schedule(). It is used to protect legacy code that hasn't
  9. * been migrated to a proper locking design yet.
  10. *
  11. * Note: code locked by this semaphore will only be serialized against
  12. * other code using the same locking facility. The code guarantees that
  13. * the task remains on the same CPU.
  14. *
  15. * Don't use in new code.
  16. */
  17. static DECLARE_MUTEX(kernel_sem);
  18. /*
  19. * Re-acquire the kernel semaphore.
  20. *
  21. * This function is called with preemption off.
  22. *
  23. * We are executing in schedule() so the code must be extremely careful
  24. * about recursion, both due to the down() and due to the enabling of
  25. * preemption. schedule() will re-check the preemption flag after
  26. * reacquiring the semaphore.
  27. */
  28. int __lockfunc __reacquire_kernel_lock(void)
  29. {
  30. struct task_struct *task = current;
  31. int saved_lock_depth = task->lock_depth;
  32. BUG_ON(saved_lock_depth < 0);
  33. task->lock_depth = -1; /* appear unlocked so the down() below cannot recurse through schedule() */
  34. preempt_enable_no_resched();
  35. down(&kernel_sem);
  36. preempt_disable();
  37. task->lock_depth = saved_lock_depth; /* restore the caller's recursion depth */
  38. return 0;
  39. }
  40. void __lockfunc __release_kernel_lock(void)
  41. {
  42. up(&kernel_sem);
  43. }
  44. /*
  45. * Getting the big kernel semaphore.
  46. */
  47. void __lockfunc lock_kernel(void)
  48. {
  49. struct task_struct *task = current;
  50. int depth = task->lock_depth + 1; /* lock_depth starts at -1, so depth == 0 means first acquisition */
  51. if (likely(!depth))
  52. /*
  53. * No recursion worries - we set up lock_depth _after_
  54. */
  55. down(&kernel_sem);
  56. task->lock_depth = depth;
  57. }
  58. void __lockfunc unlock_kernel(void)
  59. {
  60. struct task_struct *task = current;
  61. BUG_ON(task->lock_depth < 0);
  62. if (likely(--task->lock_depth < 0)) /* only the outermost unlock releases the semaphore */
  63. up(&kernel_sem);
  64. }
  65. #else
  66. /*
  67. * The 'big kernel lock'
  68. *
  69. * This spinlock is taken and released recursively by lock_kernel()
  70. * and unlock_kernel(). It is transparently dropped and reacquired
  71. * over schedule(). It is used to protect legacy code that hasn't
  72. * been migrated to a proper locking design yet.
  73. *
  74. * Don't use in new code.
  75. */
  76. static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag);
  77. /*
  78. * Acquire/release the underlying lock from the scheduler.
  79. *
  80. * This is called with preemption disabled, and should
  81. * return an error value if it cannot get the lock and
  82. * TIF_NEED_RESCHED gets set.
  83. *
  84. * If it successfully gets the lock, it should increment
  85. * the preemption count like any spinlock does.
  86. *
  87. * (This works on UP too - _raw_spin_trylock will never
  88. * return false in that case)
  89. */
  90. int __lockfunc __reacquire_kernel_lock(void)
  91. {
  92. while (!_raw_spin_trylock(&kernel_flag)) {
  93. if (test_thread_flag(TIF_NEED_RESCHED))
  94. return -EAGAIN; /* give up and let schedule() run again rather than spin */
  95. cpu_relax();
  96. }
  97. preempt_disable();
  98. return 0;
  99. }
  100. void __lockfunc __release_kernel_lock(void)
  101. {
  102. _raw_spin_unlock(&kernel_flag);
  103. preempt_enable_no_resched();
  104. }
  105. /*
  106. * These are the BKL spinlocks - we try to be polite about preemption.
  107. * If SMP is not on (ie UP preemption), this all goes away because the
  108. * _raw_spin_trylock() will always succeed.
  109. */
  110. #ifdef CONFIG_PREEMPT

//使用big kernel lock的情況下,判斷內核是否支持搶占式調度,支持則執行下面的代碼
//而我們使用的s3c2410是單處理器的,不存在多個CPU競爭資源的情況,所以不需要用大內核鎖/信號量來解決資源競爭的問題
//CONFIG_PREEMPT在arch/arm/configs/s3c2410_defconfig中Kernel Features下,在s3c2410中定義為# CONFIG_PREEMPT is not set

Copyright © Linux教程網 All Rights Reserved