/*-
 * Copyright (c) 2005-2008, Kohsuke Ohtani
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * thread.c - thread management routines.
 */

#include <kernel.h>
#include <kmem.h>
#include <task.h>
#include <thread.h>
#include <ipc.h>
#include <sched.h>
#include <sync.h>
#include <system.h>

/* forward */
static void do_terminate(thread_t);

static struct thread idle_thread;
static thread_t zombie;

/* global */
thread_t cur_thread = &idle_thread;

/*
 * Allocate a new thread and attach a kernel stack to it.
 * Returns thread pointer on success, or NULL on failure.
 */
static thread_t
thread_alloc(void)
{
        thread_t th;
        void *stack;

        if ((th = kmem_alloc(sizeof(struct thread))) == NULL)
                return NULL;

        if ((stack = kmem_alloc(KSTACK_SIZE)) == NULL) {
                kmem_free(th);
                return NULL;
        }
        memset(th, 0, sizeof(struct thread));
        th->kstack = stack;
        th->magic = THREAD_MAGIC;
        list_init(&th->mutexes);
        return th;
}

static void
thread_free(thread_t th)
{

        kmem_free(th->kstack);
        kmem_free(th);
}

/*
 * Create a new thread.
 *
 * The context of the current thread is copied to the new
 * thread. The new thread starts from the return address of
 * the thread_create() call in user mode. Since the new
 * thread shares the user mode stack with the current
 * thread, user mode applications are responsible for
 * allocating a stack for it. The new thread is initially
 * suspended, so thread_resume() must be called to start it.
 */
int
thread_create(task_t task, thread_t *thp)
{
        thread_t th;
        int err = 0;
        vaddr_t sp;

        sched_lock();
        if (!task_valid(task)) {
                err = ESRCH;
                goto out;
        }
        if (!task_access(task)) {
                err = EPERM;
                goto out;
        }
        if ((th = thread_alloc()) == NULL) {
                err = ENOMEM;
                goto out;
        }
        /*
         * Copy the new thread ID out first, as the return
         * value; doing this here simplifies error recovery
         * in the subsequent code.
         */
        if (cur_task() == &kern_task)
                *thp = th;
        else {
                if (umem_copyout(&th, thp, sizeof(th))) {
                        thread_free(th);
                        err = EFAULT;
                        goto out;
                }
        }
        /*
         * Initialize thread state.
         */
        th->task = task;
        th->suscnt = task->suscnt + 1;
        memcpy(th->kstack, cur_thread->kstack, KSTACK_SIZE);
        sp = (vaddr_t)th->kstack + KSTACK_SIZE;
        context_set(&th->ctx, CTX_KSTACK, sp);
        context_set(&th->ctx, CTX_KENTRY, (vaddr_t)&syscall_ret);
        list_insert(&task->threads, &th->task_link);
        sched_start(th);
 out:
        sched_unlock();
        return err;
}
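
/*
 * Usage sketch (hypothetical kernel-mode caller; user_entry and
 * user_stack_top are placeholders, and error handling is elided).
 * The new thread is born suspended, so one thread_resume() makes
 * it runnable, assuming its task is not itself suspended:
 *
 *      thread_t th;
 *
 *      if (thread_create(task, &th) != 0)
 *              return;
 *      thread_load(th, user_entry, user_stack_top);
 *      thread_resume(th);      // clears the initial suspend count
 */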

/*
 * Permanently stop execution of the specified thread.
 * If the given thread is the current thread, this routine
 * never returns.
 */
int
thread_terminate(thread_t th)
{

        sched_lock();
        if (!thread_valid(th)) {
                sched_unlock();
                return ESRCH;
        }
        if (!task_access(th->task)) {
                sched_unlock();
                return EPERM;
        }
        do_terminate(th);
        sched_unlock();
        return 0;
}

/*
 * Terminate a thread: the internal version of thread_terminate().
 */
static void
do_terminate(thread_t th)
{
        /*
         * Clean up thread state.
         */
        msg_cleanup(th);
        timer_cleanup(th);
        mutex_cleanup(th);
        list_remove(&th->task_link);
        sched_stop(th);
        th->excbits = 0;
        th->magic = 0;

        /*
         * We cannot release the context of the "current"
         * thread because our thread switching always
         * requires the current context. So, the resource
         * deallocation is deferred until another thread
         * calls thread_terminate().
         */
        if (zombie != NULL) {
                /*
                 * Deallocate the zombie thread which was
                 * killed in a previous request.
                 */
                ASSERT(zombie != cur_thread);
                thread_free(zombie);
                zombie = NULL;
        }
        if (th == cur_thread) {
                /*
                 * If the current thread is being terminated,
                 * enter the zombie state and wait for somebody
                 * else to reap us.
                 */
                zombie = th;
        } else {
                thread_free(th);
        }
}

/*
 * Load the entry/stack address of the user mode context.
 *
 * Either the entry or the stack address can be NULL;
 * in that case, the previous value is kept.
 */
int
thread_load(thread_t th, void (*entry)(void), void *stack)
{

        if (entry != NULL && !user_area(entry))
                return EINVAL;
        if (stack != NULL && !user_area(stack))
                return EINVAL;

        sched_lock();
        if (!thread_valid(th)) {
                sched_unlock();
                return ESRCH;
        }
        if (!task_access(th->task)) {
                sched_unlock();
                return EPERM;
        }
        if (entry != NULL)
                context_set(&th->ctx, CTX_UENTRY, (vaddr_t)entry);
        if (stack != NULL)
                context_set(&th->ctx, CTX_USTACK, (vaddr_t)stack);

        sched_unlock();
        return 0;
}

thread_t
thread_self(void)
{

        return cur_thread;
}

/*
 * Release the processor, giving other runnable threads a
 * chance to run.
 */
void
thread_yield(void)
{

        sched_yield();
}

/*
 * Suspend a thread.
 *
 * A thread can be suspended any number of times; it does
 * not run again until it has been resumed as many times
 * as it was suspended.
 */
int
thread_suspend(thread_t th)
{

        sched_lock();
        if (!thread_valid(th)) {
                sched_unlock();
                return ESRCH;
        }
        if (!task_access(th->task)) {
                sched_unlock();
                return EPERM;
        }
        if (++th->suscnt == 1)
                sched_suspend(th);

        sched_unlock();
        return 0;
}

/*
 * Resume a thread.
 *
 * A thread does not begin to run unless both its own
 * suspend count and its task's suspend count are zero.
 */
int
thread_resume(thread_t th)
{
        int err = 0;

        ASSERT(th != cur_thread);

        sched_lock();
        if (!thread_valid(th)) {
                err = ESRCH;
                goto out;
        }
        if (!task_access(th->task)) {
                err = EPERM;
                goto out;
        }
        if (th->suscnt == 0) {
                err = EINVAL;
                goto out;
        }

        th->suscnt--;
        if (th->suscnt == 0 && th->task->suscnt == 0)
                sched_resume(th);
 out:
        sched_unlock();
        return err;
}
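
/*
 * Counting sketch (hypothetical sequence): suspend requests
 * nest, so a thread suspended twice needs two resumes before
 * it becomes runnable again (assuming its task is not
 * suspended):
 *
 *      thread_suspend(th);     // suscnt 0 -> 1, thread stops
 *      thread_suspend(th);     // suscnt 1 -> 2
 *      thread_resume(th);      // suscnt 2 -> 1, still stopped
 *      thread_resume(th);      // suscnt 1 -> 0, runnable again
 */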

/*
 * thread_schedparam - get/set a scheduling parameter.
 *
 * If the caller has the CAP_NICE capability, all operations
 * are allowed. Otherwise, the caller may only change the
 * parameters of threads in its own task, and it may not
 * raise a thread's priority.
 */
int
thread_schedparam(thread_t th, int op, int *param)
{
        int prio, policy, err = 0;

        sched_lock();
        if (!thread_valid(th)) {
                err = ESRCH;
                goto out;
        }
        if (th->task == &kern_task) {
                err = EPERM;
                goto out;
        }
        if (th->task != cur_task() && !task_capable(CAP_NICE)) {
                err = EPERM;
                goto out;
        }

        switch (op) {
        case OP_GETPRIO:
                prio = sched_getprio(th);
                err = umem_copyout(&prio, param, sizeof(prio));
                break;

        case OP_SETPRIO:
                if ((err = umem_copyin(param, &prio, sizeof(prio))))
                        break;
                if (prio < 0) {
                        prio = 0;
                } else if (prio >= PRIO_IDLE) {
                        prio = PRIO_IDLE - 1;
                } else {
                        /* DO NOTHING */
                }

                if (prio < th->prio && !task_capable(CAP_NICE)) {
                        err = EPERM;
                        break;
                }
                /*
                 * If the current priority has been boosted by
                 * priority inheritance for a mutex, we cannot
                 * lower it here. In that case, only the base
                 * priority is changed, and the current priority
                 * will be adjusted to the correct value later.
                 */
                if (th->prio != th->baseprio && prio > th->prio)
                        prio = th->prio;

                mutex_setprio(th, prio);
                sched_setprio(th, prio, prio);
                break;

        case OP_GETPOLICY:
                policy = sched_getpolicy(th);
                err = umem_copyout(&policy, param, sizeof(policy));
                break;

        case OP_SETPOLICY:
                if ((err = umem_copyin(param, &policy, sizeof(policy))))
                        break;
                if (sched_setpolicy(th, policy))
                        err = EINVAL;
                break;

        default:
                err = EINVAL;
                break;
        }
 out:
        sched_unlock();
        return err;
}
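
/*
 * Usage sketch (hypothetical user-mode caller; 200 is a
 * placeholder value). A smaller value means a higher priority,
 * so lowering the value is a raise and requires CAP_NICE:
 *
 *      int prio = 200;
 *      int err;
 *
 *      err = thread_schedparam(th, OP_SETPRIO, &prio);
 */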

/*
 * Idle thread.
 *
 * This routine is called only once, after kernel
 * initialization is completed. The idle thread exists to
 * cut down the power consumption of the system. It has the
 * FIFO scheduling policy because it needs no time quantum.
 */
void
thread_idle(void)
{

        for (;;) {
                machine_idle();
                sched_yield();
        }
        /* NOTREACHED */
}

/*
 * Create a thread running in the kernel address space.
 *
 * A kernel thread has no user mode context, and its
 * scheduling policy is set to SCHED_FIFO. kthread_create()
 * returns the thread pointer on success, or NULL on failure.
 *
 * Important: since sched_switch() disables CPU interrupts,
 * interrupts are always disabled at the entry point of a
 * kernel thread. The kernel thread must therefore enable
 * interrupts first when it gets control.
 *
 * This routine assumes the scheduler is already locked.
 */
thread_t
kthread_create(void (*entry)(void *), void *arg, int prio)
{
        thread_t th;
        vaddr_t sp;

        ASSERT(cur_thread->locks > 0);

        /*
         * If there is not enough core for the new thread,
         * just return NULL.
         */
        if ((th = thread_alloc()) == NULL)
                return NULL;

        th->task = &kern_task;
        memset(th->kstack, 0, KSTACK_SIZE);
        sp = (vaddr_t)th->kstack + KSTACK_SIZE;
        context_set(&th->ctx, CTX_KSTACK, sp);
        context_set(&th->ctx, CTX_KENTRY, (vaddr_t)entry);
        context_set(&th->ctx, CTX_KARG, (vaddr_t)arg);
        list_insert(&kern_task.threads, &th->task_link);

        /*
         * Start scheduling of this thread.
         */
        sched_start(th);
        sched_setpolicy(th, SCHED_FIFO);
        sched_setprio(th, prio, prio);
        sched_resume(th);
        return th;
}
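
/*
 * Usage sketch (hypothetical: flush_thread is a placeholder
 * entry function, 100 a placeholder priority, and the
 * enable-interrupts call stands in for the platform's actual
 * primitive). Interrupts arrive disabled, so they are enabled
 * first; the caller must hold the scheduler lock:
 *
 *      static void
 *      flush_thread(void *arg)
 *      {
 *              interrupt_enable();     // placeholder primitive
 *              for (;;) {
 *                      ...
 *              }
 *      }
 *
 *      sched_lock();
 *      th = kthread_create(flush_thread, NULL, 100);
 *      sched_unlock();
 */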

/*
 * Terminate kernel thread.
 */
void
kthread_terminate(thread_t th)
{

        ASSERT(th);
        ASSERT(th->task == &kern_task);

        sched_lock();
        do_terminate(th);
        sched_unlock();
}

/*
 * Return thread information for the ps command.
 */
int
thread_info(struct info_thread *info)
{
        u_long index, target = info->cookie;
        list_t i, j;
        thread_t th;
        task_t task;
        int err = 0, found = 0;

        sched_lock();

        /*
         * Search for the target thread, walking all threads
         * from the given index.
         */
        index = 0;
        i = &kern_task.link;
        do {
                task = list_entry(i, struct task, link);
                j = list_first(&task->threads);
                do {
                        th = list_entry(j, struct thread, task_link);
                        if (index++ == target) {
                                found = 1;
                                goto done;
                        }
                        j = list_next(j);
                } while (j != &task->threads);
                i = list_next(i);
        } while (i != &kern_task.link);
 done:
        if (found) {
                info->policy = th->policy;
                info->prio = th->prio;
                info->time = th->time;
                info->task = th->task;
                strlcpy(info->taskname, task->name, MAXTASKNAME);
                strlcpy(info->slpevt,
                        th->slpevt ? th->slpevt->name : "-", MAXEVTNAME);
        } else {
                err = ESRCH;
        }
        sched_unlock();
        return err;
}
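
/*
 * Enumeration sketch (hypothetical ps-style caller): the cookie
 * selects the n-th thread in the system, and ESRCH marks the end
 * of the list:
 *
 *      struct info_thread info;
 *      u_long n;
 *
 *      for (n = 0; ; n++) {
 *              info.cookie = n;
 *              if (thread_info(&info) != 0)
 *                      break;          // ESRCH: no more threads
 *              // report info.taskname, info.prio, info.time, ...
 *      }
 */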

#ifdef DEBUG
void
thread_dump(void)
{
        static const char state[][4] =
            { "RUN", "SLP", "SUS", "S&S", "EXT" };
        static const char pol[][5] = { "FIFO", "RR  " };
        list_t i, j;
        thread_t th;
        task_t task;

        printf("\nThread dump:\n");
        printf(" mod thread   task     stat pol  prio base time     "
               "susp sleep event\n");
        printf(" --- -------- -------- ---- ---- ---- ---- -------- "
               "---- ------------\n");

        i = &kern_task.link;
        do {
                task = list_entry(i, struct task, link);
                j = list_first(&task->threads);
                do {
                        th = list_entry(j, struct thread, task_link);

                        printf(" %s %08x %8s %s%c %s %3d  %3d %8d %4d %s\n",
                               (task == &kern_task) ? "Knl" : "Usr", th,
                               task->name, state[th->state],
                               (th == cur_thread) ? '*' : ' ',
                               pol[th->policy], th->prio, th->baseprio,
                               th->time, th->suscnt,
                               th->slpevt != NULL ? th->slpevt->name : "-");

                        j = list_next(j);
                } while (j != &task->threads);
                i = list_next(i);
        } while (i != &kern_task.link);
}
#endif

/*
 * The first thread in the system is created here by hand.
 * This thread becomes the idle thread when thread_idle()
 * is called later in main().
 */
void
thread_init(void)
{
        void *stack;
        vaddr_t sp;

        if ((stack = kmem_alloc(KSTACK_SIZE)) == NULL)
                panic("thread_init: out of memory");

        memset(stack, 0, KSTACK_SIZE);
        idle_thread.kstack = stack;
        idle_thread.magic = THREAD_MAGIC;
        idle_thread.task = &kern_task;
        idle_thread.state = TH_RUN;
        idle_thread.policy = SCHED_FIFO;
        idle_thread.prio = PRIO_IDLE;
        idle_thread.baseprio = PRIO_IDLE;
        idle_thread.locks = 1;

        sp = (vaddr_t)stack + KSTACK_SIZE;
        context_set(&idle_thread.ctx, CTX_KSTACK, sp);
        list_insert(&kern_task.threads, &idle_thread.task_link);
}