歡迎來到Linux教程網
Linux教程網
Linux教程網
Linux教程網
Linux教程網 >> Unix知識 >> Unix基礎知識 >> kernel學習:copy_process

kernel學習:copy_process

日期:2017/3/3 14:55:08   編輯:Unix基礎知識

在do_fork中調用了copy_process,該函數極其重要。該函數創建進程描述符和子進程需要的其他數據結構。它定義在linux2.6.xxx/kernel/fork.c。

只對關鍵部分進行了注釋如下:

/*
 * This creates a new process as a copy of the old one,
 * but does not actually start it yet.
 *
 * It copies the registers, and all the appropriate
 * parts of the process environment (as per the clone
 * flags). The actual kick-off is left to the caller.
 */


/* This listing is the kernel 2.6.38 implementation. */
static struct task_struct *copy_process(unsigned long clone_flags,
                    unsigned long stack_start,
                    struct pt_regs *regs,
                    unsigned long stack_size,
                    int __user *child_tidptr,
                    struct pid *pid,
                    int trace)
{
    int retval;
    struct task_struct *p; /* will hold the new child's process descriptor */
    int cgroup_callbacks_done = 0;

    /* CLONE_NEWNS and CLONE_FS conflict; both set at once is an error. */
    if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
        return ERR_PTR(-EINVAL);

    /*
     * Thread groups must share signals as well, and detached threads
     * can only be started up within the thread group.
     */
    /* CLONE_THREAD without CLONE_SIGHAND is invalid. */
    if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
        return ERR_PTR(-EINVAL);

    /*
     * Shared signal handlers imply shared VM. By way of the above,
     * thread groups also imply shared VM. Blocking this case allows
     * for various simplifications in other code.
     */
    /* CLONE_SIGHAND without CLONE_VM is invalid. */
    if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
        return ERR_PTR(-EINVAL);

    /*
     * Siblings of global init remain as zombies on exit since they are
     * not reaped by their parent (swapper). To solve this and to avoid
     * multi-rooted process trees, prevent global and container-inits
     * from creating siblings.
     */
    if ((clone_flags & CLONE_PARENT) &&
                current->signal->flags & SIGNAL_UNKILLABLE)
        return ERR_PTR(-EINVAL);

    /*
     * Ask the security framework (LSM) whether this task may be created.
     * With CONFIG_SECURITY unset this hook is an empty stub.
     */
    retval = security_task_create(clone_flags);
    if (retval)
        goto fork_out;

    retval = -ENOMEM;
    /*
     * Duplicate the current process descriptor as the starting point for
     * the child: allocate a fresh kernel stack plus new task_struct and
     * thread_info structures, then copy the parent's task_struct and
     * thread_info into them. At this point the child's descriptor is an
     * exact copy of the parent's; it is differentiated below.
     */
    p = dup_task_struct(current);
    if (!p)
        goto fork_out;

    ftrace_graph_init_task(p);

    rt_mutex_init_task(p);

#ifdef CONFIG_PROVE_LOCKING
    DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
    DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
#endif

    /* Enforce the per-user process limit (RLIMIT_NPROC). */
    retval = -EAGAIN;
    if (atomic_read(&p->real_cred->user->processes) >=
            task_rlimit(p, RLIMIT_NPROC)) {
        if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
            p->real_cred->user != INIT_USER)
            goto bad_fork_free;
    }

    retval = copy_creds(p, clone_flags);
    if (retval < 0)
        goto bad_fork_free;

    /*
     * If multiple threads are within copy_process(), then this check
     * triggers too late. This doesn't hurt, the check is only there
     * to stop root fork bombs.
     */
    /*
     * Bail out (releasing what was acquired so far) if the system-wide
     * thread count would exceed the limit. max_threads is set up in
     * fork_init() and scales with the amount of system memory.
     */
    retval = -EAGAIN;
    if (nr_threads >= max_threads)
        goto bad_fork_cleanup_count;


    /*
     * From here on: initialize the child's descriptor fields and copy
     * (or share, per clone_flags) the parent's resources into the child.
     */


    /* Pin the module providing the task's execution domain. */
    if (!try_module_get(task_thread_info(p)->exec_domain->module))
        goto bad_fork_cleanup_count;
    /* The child has not called execve() yet. */
    p->did_exec = 0;
    delayacct_tsk_init(p);  /* Must remain after dup_task_struct() */
    /* Reset the flags that were blindly copied from the parent. */
    copy_flags(clone_flags, p);
    INIT_LIST_HEAD(&p->children);
    INIT_LIST_HEAD(&p->sibling);
    rcu_copy_process(p);
    p->vfork_done = NULL;
    spin_lock_init(&p->alloc_lock);

    init_sigpending(&p->pending);

    /* The child starts with zeroed CPU-time accounting. */
    p->utime = cputime_zero;
    p->stime = cputime_zero;
    p->gtime = cputime_zero;
    p->utimescaled = cputime_zero;
    p->stimescaled = cputime_zero;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
    p->prev_utime = cputime_zero;
    p->prev_stime = cputime_zero;
#endif
#if defined(SPLIT_RSS_COUNTING)
    memset(&p->rss_stat, 0, sizeof(p->rss_stat));
#endif

    p->default_timer_slack_ns = current->timer_slack_ns;

    task_io_accounting_init(&p->ioac);
    acct_clear_integrals(p);

    posix_cpu_timers_init(p);

    p->lock_depth = -1;      /* -1 = no lock */
    do_posix_clock_monotonic_gettime(&p->start_time);
    p->real_start_time = p->start_time;
    monotonic_to_bootbased(&p->real_start_time);
    p->io_context = NULL;
    p->audit_context = NULL;
    cgroup_fork(p);
#ifdef CONFIG_NUMA
    p->mempolicy = mpol_dup(p->mempolicy);
    if (IS_ERR(p->mempolicy)) {
        retval = PTR_ERR(p->mempolicy);
        p->mempolicy = NULL;
        goto bad_fork_cleanup_cgroup;
    }
    mpol_fix_fork_child_flag(p);
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
    p->irq_events = 0;
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
    p->hardirqs_enabled = 1;
#else
    p->hardirqs_enabled = 0;
#endif
    p->hardirq_enable_ip = 0;
    p->hardirq_enable_event = 0;
    p->hardirq_disable_ip = _THIS_IP_;
    p->hardirq_disable_event = 0;
    p->softirqs_enabled = 1;
    p->softirq_enable_ip = _THIS_IP_;
    p->softirq_enable_event = 0;
    p->softirq_disable_ip = 0;
    p->softirq_disable_event = 0;
    p->hardirq_context = 0;
    p->softirq_context = 0;
#endif
#ifdef CONFIG_LOCKDEP
    p->lockdep_depth = 0; /* no locks held yet */
    p->curr_chain_key = 0;
    p->lockdep_recursion = 0;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
    p->blocked_on = NULL; /* not blocked yet */
#endif
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
    p->memcg_batch.do_batch = 0;
    p->memcg_batch.memcg = NULL;
#endif

    /* Perform scheduler related setup. Assign this task to a CPU. */
    sched_fork(p, clone_flags);

    retval = perf_event_init_task(p);
    if (retval)
        goto bad_fork_cleanup_policy;

    if ((retval = audit_alloc(p)))
        goto bad_fork_cleanup_policy;
    /* copy all the process information */
    if ((retval = copy_semundo(clone_flags, p)))
        goto bad_fork_cleanup_audit;
    if ((retval = copy_files(clone_flags, p)))
        goto bad_fork_cleanup_semundo;
    if ((retval = copy_fs(clone_flags, p)))
        goto bad_fork_cleanup_files;
    if ((retval = copy_sighand(clone_flags, p)))
        goto bad_fork_cleanup_fs;
    if ((retval = copy_signal(clone_flags, p)))
        goto bad_fork_cleanup_sighand;
    if ((retval = copy_mm(clone_flags, p)))
        goto bad_fork_cleanup_signal;
    if ((retval = copy_namespaces(clone_flags, p)))
        goto bad_fork_cleanup_mm;
    if ((retval = copy_io(clone_flags, p)))
        goto bad_fork_cleanup_namespaces;
    retval = copy_thread(clone_flags, stack_start, stack_size, p, regs);
    if (retval)
        goto bad_fork_cleanup_io;

    if (pid != &init_struct_pid) {
        retval = -ENOMEM;
        pid = alloc_pid(p->nsproxy->pid_ns);
        if (!pid)
            goto bad_fork_cleanup_io;

        if (clone_flags & CLONE_NEWPID) {
            retval = pid_ns_prepare_proc(p->nsproxy->pid_ns);
            if (retval < 0)
                goto bad_fork_free_pid;
        }
    }

    p->pid = pid_nr(pid);
    p->tgid = p->pid;
    if (clone_flags & CLONE_THREAD)
        p->tgid = current->tgid;

    if (current->nsproxy != p->nsproxy) {
        retval = ns_cgroup_clone(p, pid);
        if (retval)
            goto bad_fork_free_pid;
    }

    p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
    /*
     * Clear TID on mm_release()?
     */
    p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr :
NULL;
#ifdef CONFIG_FUTEX
    p->robust_list = NULL;
#ifdef CONFIG_COMPAT
    p->compat_robust_list = NULL;
#endif
    INIT_LIST_HEAD(&p->pi_state_list);
    p->pi_state_cache = NULL;
#endif
    /*
     * sigaltstack should be cleared when sharing the same VM
     */
    if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
        p->sas_ss_sp = p->sas_ss_size = 0;

    /*
     * Syscall tracing and stepping should be turned off in the
     * child regardless of CLONE_PTRACE.
     */
    user_disable_single_step(p);
    clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
#ifdef TIF_SYSCALL_EMU
    clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
#endif
    clear_all_latency_tracing(p);

    /* ok, now we should be set up.. */
    p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL);
    p->pdeath_signal = 0;
    p->exit_state = 0;

    /*
     * Ok, make it visible to the rest of the system.
     * We dont wake it up yet.
     */
    p->group_leader = p;
    INIT_LIST_HEAD(&p->thread_group);

    /* Now that the task is set up, run cgroup callbacks if
     * necessary. We need to run them before the task is visible
     * on the tasklist. */
    cgroup_fork_callbacks(p);
    cgroup_callbacks_done = 1;

    /* Need tasklist lock for parent etc handling! */
    write_lock_irq(&tasklist_lock);

    /* CLONE_PARENT re-uses the old parent */
    if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
        p->real_parent = current->real_parent;
        p->parent_exec_id = current->parent_exec_id;
    } else {
        p->real_parent = current;
        p->parent_exec_id = current->self_exec_id;
    }

    spin_lock(&current->sighand->siglock);

    /*
     * Process group and session signals need to be delivered to just the
     * parent before the fork or both the parent and the child after the
     * fork. Restart if a signal comes in before we add the new process to
     * it's process group.
     * A fatal signal pending means that current will exit, so the new
     * thread can't slip out of an OOM kill (or normal SIGKILL).
     */
    recalc_sigpending();
    if (signal_pending(current)) {
        spin_unlock(&current->sighand->siglock);
        write_unlock_irq(&tasklist_lock);
        retval = -ERESTARTNOINTR;
        goto bad_fork_free_pid;
    }

    if (clone_flags & CLONE_THREAD) {
        current->signal->nr_threads++;
        atomic_inc(&current->signal->live);
        atomic_inc(&current->signal->sigcnt);
        p->group_leader = current->group_leader;
        list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
    }

    if (likely(p->pid)) {
        tracehook_finish_clone(p, clone_flags, trace);

        if (thread_group_leader(p)) {
            if (clone_flags & CLONE_NEWPID)
                p->nsproxy->pid_ns->child_reaper = p;

            p->signal->leader_pid = pid;
            p->signal->tty = tty_kref_get(current->signal->tty);
            attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
            attach_pid(p, PIDTYPE_SID, task_session(current));
            list_add_tail(&p->sibling, &p->real_parent->children);
            list_add_tail_rcu(&p->tasks, &init_task.tasks);
            __get_cpu_var(process_counts)++;
        }
        attach_pid(p, PIDTYPE_PID, pid);
        nr_threads++;
    }

    total_forks++;
    spin_unlock(&current->sighand->siglock);
    write_unlock_irq(&tasklist_lock);
    proc_fork_connector(p);
    cgroup_post_fork(p);
    perf_event_fork(p);
    /* Success: return a pointer to the child's process descriptor. */
    return p;

/*
 * Error paths: each label undoes one more stage of the setup above,
 * in reverse order of acquisition (classic goto-cleanup chain).
 */
bad_fork_free_pid:
    if (pid != &init_struct_pid)
        free_pid(pid);
bad_fork_cleanup_io:
    if (p->io_context)
        exit_io_context(p);
bad_fork_cleanup_namespaces:
    exit_task_namespaces(p);
bad_fork_cleanup_mm:
    if (p->mm) {
        task_lock(p);
        if (p->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
            atomic_dec(&p->mm->oom_disable_count);
        task_unlock(p);
        mmput(p->mm);
    }
bad_fork_cleanup_signal:
    if (!(clone_flags & CLONE_THREAD))
        free_signal_struct(p->signal);
bad_fork_cleanup_sighand:
    __cleanup_sighand(p->sighand);
bad_fork_cleanup_fs:
    exit_fs(p); /* blocking */
bad_fork_cleanup_files:
    exit_files(p); /* blocking */
bad_fork_cleanup_semundo:
    exit_sem(p);
bad_fork_cleanup_audit:
    audit_free(p);
bad_fork_cleanup_policy:
    perf_event_free_task(p);
#ifdef CONFIG_NUMA
    mpol_put(p->mempolicy);
bad_fork_cleanup_cgroup:
#endif
    cgroup_exit(p, cgroup_callbacks_done);
    delayacct_tsk_free(p);
    module_put(task_thread_info(p)->exec_domain->module);
bad_fork_cleanup_count:
    atomic_dec(&p->cred->user->processes);
    exit_creds(p);
bad_fork_free:
    free_task(p);
fork_out:
    return ERR_PTR(retval);
}
Copyright © Linux教程網 All Rights Reserved