Annotation of prex-old/sys/kern/sched.c, Revision 1.1.1.1
1.1 nbrk 1: /*-
2: * Copyright (c) 2005-2007, Kohsuke Ohtani
3: * All rights reserved.
4: *
5: * Redistribution and use in source and binary forms, with or without
6: * modification, are permitted provided that the following conditions
7: * are met:
8: * 1. Redistributions of source code must retain the above copyright
9: * notice, this list of conditions and the following disclaimer.
10: * 2. Redistributions in binary form must reproduce the above copyright
11: * notice, this list of conditions and the following disclaimer in the
12: * documentation and/or other materials provided with the distribution.
13: * 3. Neither the name of the author nor the names of any co-contributors
14: * may be used to endorse or promote products derived from this software
15: * without specific prior written permission.
16: *
17: * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18: * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19: * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20: * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21: * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22: * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23: * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24: * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25: * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26: * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27: * SUCH DAMAGE.
28: */
29:
30: /*
31: * sched.c - scheduler
32: */
33:
34: /**
35: * General design:
36: *
37: * The Prex scheduler is based on a priority-based multi-level
38: * queue algorithm. Each thread is assigned a priority between
39: * 0 and 255; a lower number means a higher priority, as in BSD Unix.
40: * The scheduler maintains one run queue for each of the 256 priorities.
41: * The lowest priority (=255) is used only by the idle thread.
42: *
43: * Each thread has two types of priority:
44: *
45: * - Base priority
46: * A static priority used as the basis for priority computation.
47: * A user mode program can change this value via a system call.
48: *
49: * - Current priority
50: * The actual scheduling priority. The kernel may adjust this
51: * priority dynamically when needed.
52: *
53: * Each thread is in one of the following states:
54: *
55: * - TH_RUN      Running or ready to run
56: * - TH_SLEEP    Sleeping, waiting for some event
57: * - TH_SUSPEND  Suspend count is not 0
58: * - TH_EXIT     Terminated
59: *
60: * Threads are always preemptible, even in kernel mode.
61: * A thread switch occurs for one of the following four reasons:
62: *
63: * 1) Block
64: * Thread is blocked for sleep or suspend.
65: * It is put on the tail of the run queue when it becomes
66: * runnable again.
67: *
68: * 2) Preemption
69: * If a higher-priority thread becomes runnable, the current
70: * thread is put on the _head_ of the run queue.
71: *
72: * 3) Quantum expiration
73: * If the thread consumes its time quantum, it is put on
74: * the tail of the run queue.
75: *
76: * 4) Yield
77: * If the thread releases the CPU by itself, it is put on
78: * the tail of the run queue.
79: *
80: * The following three scheduling policies are defined:
81: *
82: * - SCHED_FIFO   First-in, first-out
83: * - SCHED_RR     Round robin (SCHED_FIFO + time slice)
84: * - SCHED_OTHER  Other policy (not supported)
85: */
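
/*
 * For reference, a minimal sketch of the struct thread fields that this
 * file relies on, as inferred from their use below. The authoritative
 * definition lives in thread.h; the field types shown here are guesses
 * and the real structure contains additional members.
 *
 *	struct thread {
 *		int		prio;		current priority (0-255)
 *		int		base_prio;	base priority
 *		int		policy;		SCHED_FIFO or SCHED_RR
 *		int		state;		TH_RUN/TH_SLEEP/TH_SUSPEND/TH_EXIT
 *		int		ticks_left;	remaining quantum (SCHED_RR)
 *		u_long		total_ticks;	accumulated CPU ticks
 *		int		need_resched;	rescheduling requested
 *		int		lock_count;	scheduler lock nesting level
 *		struct queue	link;		run/sleep/wake queue linkage
 *		struct event	*sleep_event;	event this thread sleeps on
 *		int		sleep_result;	result returned by sched_tsleep()
 *		struct timer	timeout;	sleep timeout timer
 *		struct task	*task;		owning task (for vm_switch())
 *		struct context	context;	machine-dependent context
 *	};
 */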
86:
87: #include <kernel.h>
88: #include <queue.h>
89: #include <event.h>
90: #include <irq.h>
91: #include <thread.h>
92: #include <timer.h>
93: #include <vm.h>
94: #include <task.h>
95: #include <system.h>
96: #include <sched.h>
97:
98: static struct queue runq[NR_PRIOS]; /* run queues */
99: static struct queue wakeq; /* queue for waking threads */
100: static struct queue dpcq; /* queue for DPC threads */
101: static int top_prio; /* highest priority in runq */
102:
103: static struct event dpc_event; /* event for dpc */
104:
105: /*
106: * Search for the highest-priority runnable thread.
107: */
108: static int
109: runq_top(void)
110: {
111: int prio;
112:
113: for (prio = 0; prio < MIN_PRIO; prio++)
114: if (!queue_empty(&runq[prio]))
115: break;
116: return prio;
117: }
118:
119: /*
120: * Put a thread on the tail of the run queue.
121: * The rescheduling flag is set if the priority is higher than
122: * that of the currently running thread.
123: */
124: static void
125: runq_enqueue(thread_t th)
126: {
127:
128: enqueue(&runq[th->prio], &th->link);
129: if (th->prio < top_prio) {
130: top_prio = th->prio;
131: cur_thread->need_resched = 1;
132: }
133: }
134:
135: /*
136: * Insert a thread at the head of the run queue.
137: * We don't set the rescheduling flag here because this is called
138: * during a thread switch.
139: */
140: static void
141: runq_insert(thread_t th)
142: {
143:
144: queue_insert(&runq[th->prio], &th->link);
145: if (th->prio < top_prio)
146: top_prio = th->prio;
147: }
148:
149: /*
150: * Pick up and remove the highest-priority thread from the run
151: * queue. At worst, the idle thread will be returned because it
152: * always resides in the lowest-priority queue.
153: */
154: static thread_t
155: runq_dequeue(void)
156: {
157: queue_t q;
158: thread_t th;
159:
160: q = dequeue(&runq[top_prio]);
161: th = queue_entry(q, struct thread, link);
162: if (queue_empty(&runq[top_prio]))
163: top_prio = runq_top();
164: return th;
165: }
166:
167: /*
168: * Remove the specified thread from the run queue.
169: */
170: static void
171: runq_remove(thread_t th)
172: {
173:
174: queue_remove(&th->link);
175: top_prio = runq_top();
176: }
177:
178: /*
179: * Process all pending woken threads.
180: * The rescheduling flag may be set.
181: * Note: a thread may still be in a suspended state after wakeup.
182: */
183: static void
184: wakeq_flush(void)
185: {
186: queue_t q;
187: thread_t th;
188:
189: while (!queue_empty(&wakeq)) {
190: /*
191: * Set a thread runnable.
192: */
193: q = dequeue(&wakeq);
194: th = queue_entry(q, struct thread, link);
195: th->sleep_event = 0;
196: th->state &= ~TH_SLEEP;
197:
198: if (th != cur_thread && th->state == TH_RUN)
199: runq_enqueue(th);
200: }
201: }
202:
203: /*
204: * sleep_expire - the sleep timer has expired.
205: * @arg: thread to unsleep.
206: *
207: * Wake up the specified thread that is sleeping in sched_tsleep().
208: */
209: static void
210: sleep_expire(void *arg)
211: {
212:
213: sched_unsleep((thread_t)arg, SLP_TIMEOUT);
214: }
215:
216: /*
217: * sched_switch - This routine is called to reschedule the CPU.
218: *
219: * If the scheduling reason is preemption, the current thread
220: * is put back at the head of the run queue, so it keeps the
221: * right to run first among threads of the same priority. For
222: * any other scheduling reason, the current thread is inserted
223: * at the tail of the run queue.
224: */
225: static void
226: sched_switch(void)
227: {
228: thread_t prev, next;
229:
230: ASSERT(irq_level == 0);
231:
232: prev = cur_thread;
233: if (prev->state == TH_RUN) {
234: if (prev->prio > top_prio)
235: runq_insert(prev); /* preemption */
236: else
237: runq_enqueue(prev);
238: }
239: prev->need_resched = 0;
240:
241: /*
242: * This is the scheduler proper.
243: */
244: next = runq_dequeue();
245: if (next == prev)
246: return;
247: cur_thread = next;
248:
249: if (prev->task != next->task)
250: vm_switch(next->task->map);
251:
252: context_switch(&prev->context, &next->context);
253: }
254:
255: /*
256: * sched_tsleep - put the current thread to sleep until a wakeup
257: * is performed on the specified event.
258: * @timeout: timeout value in msec. (0 means no timeout)
259: *
260: * This routine returns a sleep result. If the thread is woken
261: * by sched_wakeup()/sched_wakeone(), it returns 0. Otherwise,
262: * it returns the result value passed to sched_unsleep().
263: * Calling sched_sleep() with interrupts disabled is allowed.
264: *
265: * sched_sleep() is also defined as a wrapper macro for sched_tsleep()
266: * without a timeout.
267: * Note that all sleep requests are interruptible in this kernel.
268: */
269: int
270: sched_tsleep(struct event *evt, u_long timeout)
271: {
272: int s;
273:
274: ASSERT(irq_level == 0);
275: ASSERT(evt);
276:
277: sched_lock();
278: interrupt_save(&s);
279: interrupt_disable();
280:
281: cur_thread->sleep_event = evt;
282: cur_thread->state |= TH_SLEEP;
283: enqueue(&evt->sleepq, &cur_thread->link);
284:
285: if (timeout != 0) {
286: timer_callout(&cur_thread->timeout, sleep_expire,
287: cur_thread, timeout);
288: }
289: wakeq_flush();
290: sched_switch(); /* Sleep here. Zzzz.. */
291:
292: interrupt_restore(s);
293: sched_unlock();
294: return cur_thread->sleep_result;
295: }
296:
297: /*
298: * sched_wakeup - wake up all threads sleeping on event.
299: *
300: * A thread can have sleep and suspend state simultaneously.
301: * So, the thread does not always run even if it woke up.
302: *
303: * Since this routine can be called from ISR at interrupt level, it
304: * should not touch any data of runq. Otherwise, we must frequently
305: * disable interrupts while accessing runq. Thus, this routine will
306: * temporary move the waking thread into wakeq, and the thread is
307: * moved to runq at more safer time in wakeq_flush().
308: *
309: * The woken thread will be put on the tail of runq regardless
310: * of its policy. If woken threads have same priority, next running
311: * thread is selected by FIFO order.
312: */
313: void
314: sched_wakeup(struct event *evt)
315: {
316: queue_t q;
317: thread_t th;
318:
319: irq_lock();
320: while (!queue_empty(&evt->sleepq)) {
321: /*
322: * Move a sleeping thread to the wake queue.
323: */
324: q = dequeue(&evt->sleepq);
325: th = queue_entry(q, struct thread, link);
326: th->sleep_result = 0;
327: enqueue(&wakeq, q);
328: timer_stop(&th->timeout);
329: }
330: irq_unlock();
331: }
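
/*
 * Typical sleep/wakeup usage, shown here only as an illustrative
 * sketch: mydev_event and the surrounding driver code are hypothetical
 * and not part of this kernel.
 *
 *	static struct event mydev_event;	initialized once with
 *						event_init(&mydev_event, "mydev")
 *
 *	Waiting side (thread context):
 *
 *		int rc;
 *
 *		rc = sched_tsleep(&mydev_event, 100);	wait up to 100 msec
 *		if (rc == SLP_TIMEOUT)
 *			...handle the timeout...
 *		else if (rc == 0)
 *			...woken by sched_wakeup()/sched_wakeone()...
 *
 *	Waking side (interrupt handler):
 *
 *		sched_wakeup(&mydev_event);	sleepers move to wakeq here and
 *						are made runnable later in
 *						wakeq_flush()
 */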
332:
333: /*
334: * sched_wakeone - wake up one thread sleeping on the event.
335: *
336: * The highest-priority thread among the sleeping threads is woken.
337: * sched_wakeone() returns the woken thread, or NULL if no thread
338: * is sleeping.
339: */
340: thread_t
341: sched_wakeone(struct event *evt)
342: {
343: queue_t head, q;
344: thread_t top = NULL, th;
345:
346: irq_lock();
347: head = &evt->sleepq;
348: if (!queue_empty(head)) {
349: /*
350: * Select the highest-priority thread among
351: * the sleeping threads, and move it to
352: * the wake queue.
353: */
354: q = queue_first(head);
355: top = queue_entry(q, struct thread, link);
356: while (!queue_end(head, q)) {
357: th = queue_entry(q, struct thread, link);
358: if (th->prio < top->prio)
359: top = th;
360: q = queue_next(q);
361: }
362: queue_remove(&top->link);
363: enqueue(&wakeq, &top->link);
364: timer_stop(&top->timeout);
365: }
366: irq_unlock();
367: return top;
368: }
369:
370: /*
371: * sched_unsleep - cancel sleep.
372: *
373: * sched_unsleep() removes the specified thread from its sleep
374: * queue. The specified sleep result will be passed to the sleeping
375: * thread as a return value of sched_tsleep().
376: */
377: void
378: sched_unsleep(thread_t th, int result)
379: {
380: sched_lock();
381: if (th->state & TH_SLEEP) {
382: irq_lock();
383: queue_remove(&th->link);
384: th->sleep_result = result;
385: enqueue(&wakeq, &th->link);
386: timer_stop(&th->timeout);
387: irq_unlock();
388:
389: }
390: sched_unlock();
391: }
392:
393: /*
394: * Yield the current processor to another thread.
395: *
396: * If a thread switch occurs, the current thread is moved to
397: * the tail of the run queue regardless of its policy.
398: * Note that the current thread may run again immediately if no
399: * other thread exists in the same priority queue.
400: */
401: void
402: sched_yield(void)
403: {
404: ASSERT(irq_level == 0);
405:
406: sched_lock();
407:
408: if (!queue_empty(&runq[cur_thread->prio]))
409: cur_thread->need_resched = 1;
410:
411: sched_unlock(); /* Switch current thread here */
412: }
413:
414: /*
415: * Suspend the specified thread.
416: * The scheduler must be locked before calling this routine.
417: * Note that the suspend count is handled in thread_suspend().
418: */
419: void
420: sched_suspend(thread_t th)
421: {
422: ASSERT(cur_thread->lock_count > 0);
423:
424: if (th->state == TH_RUN) {
425: if (th == cur_thread)
426: cur_thread->need_resched = 1;
427: else
428: runq_remove(th);
429: }
430: th->state |= TH_SUSPEND;
431: }
432:
433: /*
434: * Resume the specified thread.
435: * The scheduler must be locked before calling this routine.
436: */
437: void
438: sched_resume(thread_t th)
439: {
440: ASSERT(cur_thread->lock_count > 0);
441:
442: if (th->state & TH_SUSPEND) {
443: th->state &= ~TH_SUSPEND;
444: if (th->state == TH_RUN)
445: runq_enqueue(th);
446: }
447: }
448:
449: /*
450: * sched_tick() is called from timer_tick() once every tick.
451: * Check for quantum expiration, and set the rescheduling flag
452: * if needed. No locking is needed here.
453: */
454: void
455: sched_tick(void)
456: {
457:
458: cur_thread->total_ticks++;
459:
460: if (cur_thread->policy == SCHED_RR) {
461: if (--cur_thread->ticks_left <= 0) {
462: cur_thread->ticks_left = QUANTUM;
463: cur_thread->need_resched = 1;
464: }
465: }
466: }
467:
468: /*
469: * Set up the scheduling state for a thread.
470: */
471: void
472: sched_start(thread_t th)
473: {
474:
475: th->state = TH_RUN | TH_SUSPEND;
476: th->policy = SCHED_RR;
477: th->prio = PRIO_USER;
478: th->base_prio = PRIO_USER;
479: th->ticks_left = QUANTUM;
480: }
481:
482: /*
483: * Stop thread scheduling.
484: */
485: void
486: sched_stop(thread_t th)
487: {
488: ASSERT(irq_level == 0);
489: ASSERT(cur_thread->lock_count > 0);
490:
491: if (th == cur_thread) {
492: /*
493: * If the specified thread is the current thread,
494: * the scheduling lock count is forced to 1 to
495: * ensure a thread switch in the next
496: * sched_unlock().
497: */
498: cur_thread->lock_count = 1;
499: cur_thread->need_resched = 1;
500: } else {
501: if (th->state == TH_RUN)
502: runq_remove(th);
503: else if (th->state & TH_SLEEP)
504: queue_remove(&th->link);
505: }
506: timer_stop(&th->timeout);
507: th->state = TH_EXIT;
508: }
509:
510: /*
511: * sched_lock - lock the scheduler.
512: *
513: * Thread switching is disabled while the scheduler is locked.
514: * This is mainly used to synchronize thread execution in order
515: * to protect global resources. Even while the scheduler is
516: * locked, interrupt handlers can still run, so irq_lock() must
517: * be used to synchronize global data with ISRs.
518: *
519: * Since the scheduling lock can be nested any number of times,
520: * the caller is responsible for releasing the lock the same
521: * number of times.
522: */
523: void
524: sched_lock(void)
525: {
526:
527: cur_thread->lock_count++;
528: }
529:
530: /*
531: * sched_unlock - unlock scheduler.
532: *
533: * When the last lock is released, any pending woken threads are
534: * processed and the reschedule flag is checked. A thread switch
535: * is invoked if a rescheduling request is pending.
536: *
537: * Note that this routine is also called at the end of each
538: * interrupt handler.
539: */
540: void
541: sched_unlock(void)
542: {
543: int s;
544:
545: ASSERT(cur_thread->lock_count > 0);
546:
547: interrupt_save(&s);
548: interrupt_disable();
549:
550: if (cur_thread->lock_count == 1) {
551: wakeq_flush();
552: while (cur_thread->need_resched) {
553:
554: /* Kick scheduler */
555: sched_switch();
556:
557: /*
558: * Now we run any pending interrupts that fired
559: * during the thread switch, so we can catch
560: * rescheduling requests from those ISRs.
561: * Otherwise, the reschedule may be deferred
562: * until the _next_ sched_unlock() call.
563: */
564: interrupt_restore(s);
565: interrupt_disable();
566: wakeq_flush();
567: }
568: }
569: cur_thread->lock_count--;
570:
571: interrupt_restore(s);
572: }
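
/*
 * Typical locking usage, shown here only as an illustrative sketch;
 * my_list and my_list_add are hypothetical.
 *
 *	Protecting a resource shared only among threads (never touched
 *	from interrupt handlers):
 *
 *		sched_lock();
 *		my_list_add(&my_list, item);	no thread switch can occur here
 *		sched_unlock();			pending wakeups and any deferred
 *						reschedule are handled here
 *
 *	If the resource is also accessed from ISRs, irq_lock()/irq_unlock()
 *	must be used in addition, as noted in the sched_lock() comment above.
 */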
573:
574: int
575: sched_getprio(thread_t th)
576: {
577:
578: return th->prio;
579: }
580:
581: /*
582: * sched_setprio - set priority of thread.
583: * @base: Base priority
584: * @prio: Current priority
585: *
586: * A thread switch may be invoked here by the priority change.
587: * Called with the scheduler locked.
588: */
589: void
590: sched_setprio(thread_t th, int base, int prio)
591: {
592:
593: th->base_prio = base;
594:
595: if (th == cur_thread) {
596: /*
597: * If we change the current thread's priority,
598: * rescheduling may be needed.
599: */
600: th->prio = prio;
601: top_prio = runq_top();
602: if (prio != top_prio)
603: cur_thread->need_resched = 1;
604: } else {
605: if (th->state == TH_RUN) {
606: /*
607: * Update the thread priority and adjust the
608: * run queue position for the new priority.
609: */
610: runq_remove(th);
611: th->prio = prio;
612: runq_enqueue(th);
613: } else
614: th->prio = prio;
615: }
616: }
617:
618: int
619: sched_getpolicy(thread_t th)
620: {
621:
622: return th->policy;
623: }
624:
625: int
626: sched_setpolicy(thread_t th, int policy)
627: {
628: int err = 0;
629:
630: switch (policy) {
631: case SCHED_RR:
632: case SCHED_FIFO:
633: th->ticks_left = QUANTUM;
634: th->policy = policy;
635: break;
636: default:
637: err = -1;
638: break;
639: }
640: return err;
641: }
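
/*
 * Typical usage of the policy/priority setters, shown only as an
 * illustrative sketch; "th" is a thread obtained elsewhere and the
 * priority value 100 is an arbitrary example between 0 and 255.
 *
 *	sched_lock();
 *	sched_setpolicy(th, SCHED_FIFO);	disable time slicing for th
 *	sched_setprio(th, 100, 100);		set base and current priority
 *	sched_unlock();				a thread switch may happen here
 */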
642:
643: /*
644: * DPC thread
645: *
646: * This is a kernel thread that processes pending callback requests
647: * in the DPC queue. Each DPC routine is called under the following
648: * conditions:
649: * - Interrupts are enabled.
650: * - The scheduler is unlocked.
651: */
652: static void
653: dpc_thread(u_long unused)
654: {
655: queue_t q;
656: struct dpc *dpc;
657:
658: for (;;) {
659:
660: /* Wait until next DPC request. */
661: sched_sleep(&dpc_event);
662:
663: while (!queue_empty(&dpcq)) {
664: q = dequeue(&dpcq);
665: dpc = queue_entry(q, struct dpc, link);
666: dpc->state = DPC_FREE;
667:
668: interrupt_enable();
669: (*dpc->func)(dpc->arg);
670: interrupt_disable();
671: }
672: }
673: /* NOTREACHED */
674: }
675:
676: /*
677: * Queue a DPC (Deferred Procedure Call) request
678: *
679: * The function is called at some later time at DPC priority. This
680: * is typically used by device drivers to do low-priority jobs.
681: * This routine can be called from an ISR.
682: */
683: void
684: sched_dpc(struct dpc *dpc, void (*func)(void *), void *arg)
685: {
686: ASSERT(dpc);
687: ASSERT(func);
688:
689: irq_lock();
690: dpc->func = func;
691: dpc->arg = arg;
692: if (dpc->state != DPC_PENDING)
693: enqueue(&dpcq, &dpc->link);
694: dpc->state = DPC_PENDING;
695: sched_wakeup(&dpc_event);
696: irq_unlock();
697: }
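
/*
 * Typical DPC usage, shown only as an illustrative sketch; mydev_dpc,
 * mydev_softirq and driver_softc are hypothetical driver code.
 *
 *	static struct dpc mydev_dpc;
 *
 *	static void
 *	mydev_softirq(void *arg)
 *	{
 *		...lengthy work deferred from the ISR; runs in dpc_thread
 *		   with interrupts enabled and the scheduler unlocked...
 *	}
 *
 *	In the interrupt handler:
 *
 *		sched_dpc(&mydev_dpc, mydev_softirq, driver_softc);
 */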
698:
699: /*
700: * Initialize the global scheduler state.
701: */
702: void
703: sched_init(void)
704: {
705: int i;
706:
707: for (i = 0; i < NR_PRIOS; i++)
708: queue_init(&runq[i]);
709:
710: queue_init(&wakeq);
711: queue_init(&dpcq);
712: event_init(&dpc_event, "dpc");
713: top_prio = PRIO_IDLE;
714: cur_thread->need_resched = 1;
715:
716: /*
717: * Create a DPC thread.
718: */
719: if (kernel_thread(PRIO_DPC, dpc_thread, 0) == NULL)
720: panic("sched_init");
721:
722: printk("Time slice is %d msec\n", CONFIG_TIME_SLICE);
723: }