Linux Source Code Analysis: Understanding completion

Published: 2014-11-23 21:58:24  Source: linux website  Author: sg131971
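
For reference, the completion object that all of the code below manipulates is defined in <linux/completion.h>. Roughly as in the 2.6.3x kernels these listings appear to come from, it pairs a done counter with a wait queue head:

struct completion {
    unsigned int done;        /* complete() events not yet consumed by a waiter */
    wait_queue_head_t wait;   /* tasks sleeping in wait_for_completion() */
};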

/**
* wait_for_completion: - waits for completion of a task
* @x:  holds the state of this particular completion
*
* This waits to be signaled for completion of a specific task. It is NOT
* interruptible and there is no timeout.
*
* See also similar routines (i.e. wait_for_completion_timeout()) with timeout
* and interrupt capability. Also see complete().
*/ 
void __sched wait_for_completion(struct completion *x)
{
    wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
}

static long __sched
wait_for_common(struct completion *x, long timeout, int state)
{
    might_sleep();

    spin_lock_irq(&x->wait.lock);
    timeout = do_wait_for_common(x, timeout, state);
    spin_unlock_irq(&x->wait.lock);
    return timeout;
}


Note: the spin_lock_irq(&x->wait.lock) and spin_unlock_irq(&x->wait.lock) calls here are not a genuinely matched lock/unlock pair, because sleeping and rescheduling are not allowed while a spinlock is held. The unlock and lock operations that actually pair with them are inside do_wait_for_common(x, timeout, state), which drops the lock before sleeping and re-takes it after waking up.
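
Before going deeper into the wait side, a minimal usage sketch may help (a hypothetical example, not taken from the kernel sources quoted here): one context initializes the completion and blocks on it, another context finishes the work and signals it with complete():

#include <linux/completion.h>
#include <linux/kthread.h>

static struct completion setup_done;      /* hypothetical example object */

static int worker_fn(void *data)
{
    /* ... do the actual work ... */
    complete(&setup_done);                /* wakes the waiter, increments done */
    return 0;
}

static void start_and_wait(void)
{
    init_completion(&setup_done);
    kthread_run(worker_fn, NULL, "worker");
    wait_for_completion(&setup_done);     /* sleeps uninterruptibly until complete() */
}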


static inline long __sched
do_wait_for_common(struct completion *x, long timeout, int state)
{
    if (!x->done) {
        DECLARE_WAITQUEUE(wait, current);

        wait.flags |= WQ_FLAG_EXCLUSIVE;
        __add_wait_queue_tail(&x->wait, &wait);
        do {
            if (signal_pending_state(state, current)) {
                timeout = -ERESTARTSYS;
                break;
            }
            __set_current_state(state);
            spin_unlock_irq(&x->wait.lock);
            timeout = schedule_timeout(timeout);
            spin_lock_irq(&x->wait.lock);
        } while (!x->done && timeout);
        __remove_wait_queue(&x->wait, &wait);
        if (!x->done)
            return timeout;
    }
    x->done--;
    return timeout ?: 1;
}


What the function does: declare a wait-queue entry for the current task, add it (marked exclusive) to the tail of the completion's wait queue, set the current task to TASK_UNINTERRUPTIBLE, release the spinlock, call the scheduler, re-acquire the spinlock once the task is woken up, and remove the entry from the wait queue. It then returns to the caller, where the spinlock is released.
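
The x->done-- above balances an increment made on the signalling side. For comparison, complete() in the same kernel generation looks roughly like this (a sketch from memory, not quoted from the article): it takes the same wait.lock, bumps done, and wakes one exclusive waiter:

void complete(struct completion *x)
{
    unsigned long flags;

    spin_lock_irqsave(&x->wait.lock, flags);
    x->done++;                                            /* record the event */
    __wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL);  /* wake one exclusive waiter */
    spin_unlock_irqrestore(&x->wait.lock, flags);
}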


/**
* schedule_timeout - sleep until timeout
* @timeout: timeout value in jiffies
*
* Make the current task sleep until @timeout jiffies have
* elapsed. The routine will return immediately unless
* the current task state has been set (see set_current_state()).
*
* You can set the task state as follows -
*
* %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
* pass before the routine returns. The routine will return 0
*
* %TASK_INTERRUPTIBLE - the routine may return early if a signal is
* delivered to the current task. In this case the remaining time
* in jiffies will be returned, or 0 if the timer expired in time
*
* The current task state is guaranteed to be TASK_RUNNING when this
* routine returns.
*
* Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
* the CPU away without a bound on the timeout. In this case the return
* value will be %MAX_SCHEDULE_TIMEOUT.
*
* In all cases the return value is guaranteed to be non-negative.
*/ 
signed long __sched schedule_timeout(signed long timeout)
{
    struct timer_list timer;
    unsigned long expire;

    switch (timeout)
    {
    case MAX_SCHEDULE_TIMEOUT:
        /*
         * These two special cases are useful to be comfortable
         * in the caller. Nothing more. We could take
         * MAX_SCHEDULE_TIMEOUT from one of the negative value
         * but I'd like to return a valid offset (>=0) to allow
         * the caller to do everything it want with the retval.
         */
        schedule();
        goto out;
    default:
        /*
         * Another bit of PARANOID. Note that the retval will be
         * 0 since no piece of kernel is supposed to do a check
         * for a negative retval of schedule_timeout() (since it
         * should never happens anyway). You just have the printk()
         * that will tell you if something is gone wrong and where.
         */
        if (timeout < 0) {
            printk(KERN_ERR "schedule_timeout: wrong timeout "
                "value %lx\n", timeout);
            dump_stack();
            current->state = TASK_RUNNING;
            goto out;
        }
    }

    expire = timeout + jiffies;

    setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
    __mod_timer(&timer, expire, false, TIMER_NOT_PINNED);
    schedule();
    del_singleshot_timer_sync(&timer);

    /* Remove the timer from the object tracker */
    destroy_timer_on_stack(&timer);

    timeout = expire - jiffies;

out:
    return timeout < 0 ? 0 : timeout;
}


Since the timeout passed in here is MAX_SCHEDULE_TIMEOUT, the function goes straight into that case and simply calls schedule().
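
When the timeout is finite (for example via wait_for_completion_timeout()), control falls into the default: branch instead, arming a per-task timer before sleeping. The usual calling pattern, as the comment above requires, is to set the task state first; a minimal sketch (hypothetical snippet, not from the article):

#include <linux/sched.h>
#include <linux/jiffies.h>

static long sleep_up_to_100ms(void)
{
    long remaining;

    set_current_state(TASK_INTERRUPTIBLE);   /* must be set before schedule_timeout() */
    remaining = schedule_timeout(msecs_to_jiffies(100));

    /* 0: the full 100 ms elapsed; >0: woken early, this many jiffies were left */
    return remaining;
}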


/*
* schedule() is the main scheduler function.
*/ 
asmlinkage void __sched schedule(void)
{
    struct task_struct *prev, *next;
    unsigned long *switch_count;
    struct rq *rq;
    int cpu;

need_resched:
    preempt_disable();
    cpu = smp_processor_id();
    rq = cpu_rq(cpu);
    rcu_sched_qs(cpu);
    prev = rq->curr;
    switch_count = &prev->nivcsw;

    release_kernel_lock(prev);
need_resched_nonpreemptible:

    schedule_debug(prev);

    if (sched_feat(HRTICK))
        hrtick_clear(rq);

    spin_lock_irq(&rq->lock);
    update_rq_clock(rq);
    clear_tsk_need_resched(prev);

    if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
        if (unlikely(signal_pending_state(prev->state, prev)))
            prev->state = TASK_RUNNING;
        else
            deactivate_task(rq, prev, 1);
        switch_count = &prev->nvcsw;
    }

    pre_schedule(rq, prev);

    if (unlikely(!rq->nr_running))
        idle_balance(cpu, rq);

    put_prev_task(rq, prev);
    next = pick_next_task(rq);

    if (likely(prev != next)) {
        sched_info_switch(prev, next);
        perf_event_task_sched_out(prev, next, cpu);

        rq->nr_switches++;
        rq->curr = next;
        ++*switch_count;

        context_switch(rq, prev, next); /* unlocks the rq */
        /*
         * the context switch might have flipped the stack from under
         * us, hence refresh the local variables.
         */
        cpu = smp_processor_id();
        rq = cpu_rq(cpu);
    } else
        spin_unlock_irq(&rq->lock);

    post_schedule(rq);

    if (unlikely(reacquire_kernel_lock(current) < 0))
        goto need_resched_nonpreemptible;

    preempt_enable_no_resched();
    if (need_resched())
        goto need_resched;
}