Annotation of prex-old/sys/kern/thread.c, Revision 1.1.1.1
1.1 nbrk 1: /*-
2: * Copyright (c) 2005-2007, Kohsuke Ohtani
3: * All rights reserved.
4: *
5: * Redistribution and use in source and binary forms, with or without
6: * modification, are permitted provided that the following conditions
7: * are met:
8: * 1. Redistributions of source code must retain the above copyright
9: * notice, this list of conditions and the following disclaimer.
10: * 2. Redistributions in binary form must reproduce the above copyright
11: * notice, this list of conditions and the following disclaimer in the
12: * documentation and/or other materials provided with the distribution.
13: * 3. Neither the name of the author nor the names of any co-contributors
14: * may be used to endorse or promote products derived from this software
15: * without specific prior written permission.
16: *
17: * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18: * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19: * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20: * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21: * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22: * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23: * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24: * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25: * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26: * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27: * SUCH DAMAGE.
28: */
29:
30: /*
31: * thread.c - thread management routines.
32: */
33:
34: #include <kernel.h>
35: #include <kmem.h>
36: #include <task.h>
37: #include <thread.h>
38: #include <ipc.h>
39: #include <sched.h>
40: #include <sync.h>
41: #include <system.h>
42:
/* Statically allocated thread that becomes the idle thread (thread_idle()). */
struct thread idle_thread;
/* The thread currently running on the CPU; boots as the idle thread. */
thread_t cur_thread = &idle_thread;
/*
 * A terminated "current" thread whose resources could not be freed in
 * its own thread_kill() call; it is deallocated by the next caller of
 * thread_kill().
 */
static thread_t zombie;
46:
47: /*
48: * Allocate a new thread and attach kernel stack for it.
49: * Returns thread pointer on success, or NULL on failure.
50: */
51: static thread_t
52: thread_alloc(void)
53: {
54: thread_t th;
55: void *stack;
56:
57: if ((th = kmem_alloc(sizeof(struct thread))) == NULL)
58: return NULL;
59: memset(th, 0, sizeof(struct thread));
60:
61: if ((stack = kmem_alloc(KSTACK_SIZE)) == NULL) {
62: kmem_free(th);
63: return NULL;
64: }
65: th->kstack = stack;
66: th->magic = THREAD_MAGIC;
67: list_init(&th->mutexes);
68: return th;
69: }
70:
71: static void
72: thread_free(thread_t th)
73: {
74:
75: kmem_free(th->kstack);
76: kmem_free(th);
77: }
78:
/*
 * Create a new thread within the specified task.
 *
 * The context of a current thread will be copied to the new thread.
 * The new thread will start from the return address of thread_create()
 * call in user mode code. Since a new thread will share the user
 * mode stack with a current thread, user mode applications are
 * responsible to allocate stack for it. The new thread is initially
 * set to suspend state, and so, thread_resume() must be called to
 * start it.
 *
 * The following scheduling parameters are reset to default values
 * in the created thread.
 *  - Thread State
 *  - Scheduling Policy
 *  - Scheduling Priority
 *
 * @task: task that will own the new thread
 * @thp:  where the new thread id is stored (kernel or user pointer,
 *        depending on the caller)
 *
 * Returns 0 on success, or ESRCH/EPERM/ENOMEM/EFAULT on failure.
 */
int
thread_create(task_t task, thread_t *thp)
{
	thread_t th;
	int err = 0;

	sched_lock();
	if (!task_valid(task)) {
		err = ESRCH;
		goto out;
	}
	if (!task_access(task)) {
		err = EPERM;
		goto out;
	}
	if ((th = thread_alloc()) == NULL) {
		err = ENOMEM;
		goto out;
	}
	/*
	 * At first, we copy a new thread id as return value.
	 * This is done here to simplify all error recoveries
	 * of the subsequent code.
	 */
	if (cur_task() == &kern_task)
		*thp = th;	/* kernel caller: plain store */
	else {
		/* User caller: the id must be copied out safely. */
		if (umem_copyout(&th, thp, sizeof(thread_t))) {
			thread_free(th);
			err = EFAULT;
			goto out;
		}
	}
	/*
	 * Initialize thread state.
	 */
	th->task = task;
	/* Inherit the task's suspend count, plus one for the initial suspend. */
	th->suspend_count = task->suspend_count + 1;
	/*
	 * Clone the creator's kernel stack so the new thread resumes
	 * through the same return path as the caller.
	 */
	memcpy(th->kstack, cur_thread->kstack, KSTACK_SIZE);
	context_init(&th->context, (u_long)th->kstack + KSTACK_SIZE);
	list_insert(&task->threads, &th->task_link);
	sched_start(th);
 out:
	sched_unlock();
	return err;
}
142:
143: /*
144: * Permanently stop execution of the specified thread.
145: * If given thread is a current thread, this routine never returns.
146: */
147: int
148: thread_terminate(thread_t th)
149: {
150: int err;
151:
152: sched_lock();
153: if (!thread_valid(th)) {
154: err = ESRCH;
155: } else if (!task_access(th->task)) {
156: err = EPERM;
157: } else {
158: err = thread_kill(th);
159: }
160: sched_unlock();
161: return err;
162: }
163:
/*
 * Kill a thread regardless of the current task state.
 *
 * This may be used to terminate a kernel thread under the non-context
 * condition. For example, a device driver may terminate its interrupt
 * thread even if a current task does not have the capability to
 * terminate it.
 *
 * Always returns 0. When th is the current thread, its memory is not
 * freed here; it is parked as "zombie" and freed by a later caller.
 */
int
thread_kill(thread_t th)
{
	/*
	 * Clean up thread state.
	 */
	msg_cleanup(th);	/* detach from pending IPC */
	timer_cleanup(th);	/* cancel the thread's timers */
	mutex_cleanup(th);	/* release mutexes the thread holds */
	list_remove(&th->task_link);
	sched_stop(th);
	th->exc_bitmap = 0;
	th->magic = 0;		/* invalidate so thread_valid() now fails */

	/*
	 * We can not release the context of the "current" thread
	 * because our thread switching always requires the current
	 * context. So, the resource deallocation is deferred until
	 * another thread calls thread_kill().
	 */
	if (zombie != NULL) {
		/*
		 * Deallocate a zombie thread which was killed
		 * in a previous request.
		 */
		ASSERT(zombie != cur_thread);
		thread_free(zombie);
		zombie = NULL;
	}
	if (th == cur_thread) {
		/*
		 * If the current thread is being terminated,
		 * enter zombie state and wait for somebody
		 * to kill us.
		 */
		zombie = th;
	} else
		thread_free(th);
	return 0;
}
212:
213: /*
214: * Load entry/stack address of the user mode context.
215: *
216: * The entry and stack address can be set to NULL.
217: * If it is NULL, old state is just kept.
218: */
219: int
220: thread_load(thread_t th, void (*entry)(void), void *stack)
221: {
222: int err = 0;
223:
224: if ((entry != NULL && !user_area(entry)) ||
225: (stack != NULL && !user_area(stack)))
226: return EINVAL;
227:
228: sched_lock();
229: if (!thread_valid(th)) {
230: err = ESRCH;
231: } else if (!task_access(th->task)) {
232: err = EPERM;
233: } else {
234: if (entry != NULL)
235: context_set(&th->context, CTX_UENTRY, (u_long)entry);
236: if (stack != NULL)
237: context_set(&th->context, CTX_USTACK, (u_long)stack);
238: }
239: sched_unlock();
240: return 0;
241: }
242:
243: thread_t
244: thread_self(void)
245: {
246:
247: return cur_thread;
248: }
249:
/*
 * Voluntarily give up the CPU so another runnable thread may run.
 */
void
thread_yield(void)
{

	sched_yield();
}
259:
260: /*
261: * Suspend thread.
262: *
263: * A thread can be suspended any number of times. And, it does
264: * not start to run again unless the thread is resumed by the
265: * same count of suspend request.
266: */
267: int
268: thread_suspend(thread_t th)
269: {
270: int err = 0;
271:
272: sched_lock();
273: if (!thread_valid(th)) {
274: err = ESRCH;
275: } else if (!task_access(th->task)) {
276: err = EPERM;
277: } else {
278: if (++th->suspend_count == 1)
279: sched_suspend(th);
280: }
281: sched_unlock();
282: return 0;
283: }
284:
285: /*
286: * Resume thread.
287: *
288: * A thread does not begin to run, unless both thread
289: * suspend count and task suspend count are set to 0.
290: */
291: int
292: thread_resume(thread_t th)
293: {
294: int err = 0;
295:
296: ASSERT(th != cur_thread);
297:
298: sched_lock();
299: if (!thread_valid(th)) {
300: err = ESRCH;
301: } else if (!task_access(th->task)) {
302: err= EPERM;
303: } else if (th->suspend_count == 0) {
304: err = EINVAL;
305: } else {
306: th->suspend_count--;
307: if (th->suspend_count == 0 && th->task->suspend_count == 0)
308: sched_resume(th);
309: }
310: sched_unlock();
311: return err;
312: }
313:
/*
 * thread_schedparam - get/set scheduling parameter.
 * @th: target thread
 * @op: operation ID (OP_GETPRIO/OP_SETPRIO/OP_GETPOLICY/OP_SETPOLICY)
 * @param: pointer to parameter, copied in/out of user space
 *
 * If the caller has CAP_NICE capability, all operations are allowed.
 * Otherwise, the caller can change the parameter for the threads in
 * the same task, and it can not set the priority to higher value.
 *
 * Returns 0 on success, ESRCH/EPERM/EINVAL, or the error from the
 * user memory copy routines.
 */
int
thread_schedparam(thread_t th, int op, int *param)
{
	int prio, policy, err = 0;
	int capable = 0;

	sched_lock();
	if (!thread_valid(th)) {
		sched_unlock();
		return ESRCH;
	}
	if (task_capable(CAP_NICE))
		capable = 1;

	/* Without CAP_NICE, only threads of the caller's own task. */
	if (th->task != cur_task() && !capable) {
		sched_unlock();
		return EPERM;
	}
	/* Kernel threads' scheduling parameters can not be modified. */
	if ((th->task == &kern_task) &&
	    (op == OP_SETPRIO || op == OP_SETPOLICY)) {
		sched_unlock();
		return EPERM;
	}
	switch (op) {
	case OP_GETPRIO:
		prio = sched_getprio(th);
		/*
		 * NOTE(review): unlike thread_create(), there is no special
		 * case for kernel-task callers here; param always goes
		 * through umem_copyout() — confirm that is intended.
		 */
		err = umem_copyout(&prio, param, sizeof(int));
		break;
	case OP_SETPRIO:
		if ((err = umem_copyin(param, &prio, sizeof(int))))
			break;
		/* Clamp the requested priority into [0, PRIO_IDLE - 1]. */
		if (prio < 0)
			prio = 0;
		else if (prio >= PRIO_IDLE)
			prio = PRIO_IDLE - 1;

		/* A numerically lower value is a higher priority: CAP_NICE only. */
		if (prio < th->prio && !capable) {
			err = EPERM;
			break;
		}
		/*
		 * If a current priority is inherited for mutex,
		 * we can not change the priority to lower value.
		 * In this case, only the base priority is changed,
		 * and a current priority will be adjusted to correct
		 * value, later.
		 */
		if (th->prio != th->base_prio && prio > th->prio)
			prio = th->prio;

		mutex_setprio(th, prio);
		sched_setprio(th, prio, prio);
		break;
	case OP_GETPOLICY:
		policy = sched_getpolicy(th);
		err = umem_copyout(&policy, param, sizeof(int));
		break;
	case OP_SETPOLICY:
		if ((err = umem_copyin(param, &policy, sizeof(int))))
			break;
		if (sched_setpolicy(th, policy))
			err = EINVAL;
		break;
	default:
		err = EINVAL;
		break;
	}
	sched_unlock();
	return err;
}
394:
/*
 * Idle thread.
 *
 * This routine is called only once after kernel initialization
 * is completed. An idle thread has the role of cutting down the power
 * consumption of a system. An idle thread has FIFO scheduling policy
 * because it does not have time quantum.
 */
void
thread_idle(void)
{

	while (1) {
		/* Sleep the CPU until something happens, then yield. */
		machine_idle();
		sched_yield();
	}
	/* NOTREACHED */
}
413:
/*
 * Create a thread running in the kernel address space.
 *
 * A kernel thread does not have user mode context, and its
 * scheduling policy is set to SCHED_FIFO. kernel_thread() returns
 * thread ID on success, or NULL on failure. We assume scheduler
 * is already locked.
 *
 * Important: Since sched_switch() will disable interrupts in CPU,
 * the interrupt is always disabled at the entry point of the kernel
 * thread. So, the kernel thread must enable the interrupt first when
 * it gets control.
 *
 * @prio:  scheduling priority for the new thread
 * @entry: kernel entry function
 * @arg:   single argument passed to entry
 */
thread_t
kernel_thread(int prio, void (*entry)(u_long), u_long arg)
{
	thread_t th;

	if ((th = thread_alloc()) == NULL)
		return NULL;

	th->task = &kern_task;
	/* Fresh stack: no user context is cloned for a kernel thread. */
	memset(th->kstack, 0, KSTACK_SIZE);
	context_init(&th->context, (u_long)th->kstack + KSTACK_SIZE);
	context_set(&th->context, CTX_KENTRY, (u_long)entry);
	context_set(&th->context, CTX_KARG, arg);
	list_insert(&kern_task.threads, &th->task_link);

	/* Start suspended, configure scheduling, then let it run. */
	sched_start(th);
	sched_setpolicy(th, SCHED_FIFO);
	sched_setprio(th, prio, prio);
	sched_resume(th);
	return th;
}
448:
/*
 * Return thread information for ps command.
 *
 * info->cookie is a zero-based index selecting which thread to
 * report, counting over all threads of all tasks starting with the
 * kernel task. Returns 0 with info filled in, or ESRCH when the
 * index is past the last thread.
 */
int
thread_info(struct info_thread *info)
{
	u_long index, target = info->cookie;
	list_t i, j;
	thread_t th;
	task_t task;

	sched_lock();
	index = 0;
	/* Walk the circular task list, then each task's thread list. */
	i = &kern_task.link;
	do {
		task = list_entry(i, struct task, link);
		j = &task->threads;
		j = list_first(j);
		do {
			th = list_entry(j, struct thread, task_link);
			if (index++ == target)
				goto found;
			j = list_next(j);
		} while (j != &task->threads);
		i = list_next(i);
	} while (i != &kern_task.link);

	sched_unlock();
	return ESRCH;
 found:
	info->state = th->state;
	info->policy = th->policy;
	info->prio = th->prio;
	info->base_prio = th->base_prio;
	info->suspend_count = th->suspend_count;
	info->total_ticks = th->total_ticks;
	info->id = th;
	info->task = th->task;
	strlcpy(info->task_name, task->name, MAXTASKNAME);
	/*
	 * NOTE(review): 12 is a magic size — presumably sizeof
	 * info->sleep_event; verify against struct info_thread.
	 */
	strlcpy(info->sleep_event,
		th->sleep_event ? th->sleep_event->name : "-", 12);

	sched_unlock();
	return 0;
}
494:
#if defined(DEBUG) && defined(CONFIG_KDUMP)
/*
 * Dump all threads to the kernel console (debug builds only).
 * Prints one line per thread across every task; the currently
 * running thread is flagged with '*'.
 */
void
thread_dump(void)
{
	/* Lookup tables indexed by th->state and th->policy. */
	static const char state[][4] = \
	    { "RUN", "SLP", "SUS", "S&S", "EXT" };
	static const char pol[][5] = { "FIFO", "RR " };
	list_t i, j;
	thread_t th;
	task_t task;

	printk("Thread dump:\n");
	printk(" mod thread task stat pol prio base ticks "
	    "susp sleep event\n");
	printk(" --- -------- -------- ---- ---- ---- ---- -------- "
	    "---- ------------\n");

	/* Same circular-list walk as thread_info(). */
	i = &kern_task.link;
	do {
		task = list_entry(i, struct task, link);
		j = &task->threads;
		j = list_first(j);
		do {
			th = list_entry(j, struct thread, task_link);
			printk(" %s %08x %8s %s%c %s %3d %3d %8d %4d %s\n",
			    (task == &kern_task) ? "Knl" : "Usr", th,
			    task->name, state[th->state],
			    (th == cur_thread) ? '*' : ' ',
			    pol[th->policy], th->prio, th->base_prio,
			    th->total_ticks, th->suspend_count,
			    th->sleep_event ? th->sleep_event->name : "-");
			j = list_next(j);
		} while (j != &task->threads);
		i = list_next(i);
	} while (i != &kern_task.link);
}
#endif
532:
/*
 * The first thread in system is created here by hand. This thread
 * will become an idle thread when thread_idle() is called later.
 */
void
thread_init(void)
{
	void *stack;

	/* The idle thread itself is static; only its stack is allocated. */
	if ((stack = kmem_alloc(KSTACK_SIZE)) == NULL)
		panic("thread_init");

	memset(stack, 0, KSTACK_SIZE);
	idle_thread.kstack = stack;
	idle_thread.magic = THREAD_MAGIC;
	idle_thread.task = &kern_task;
	idle_thread.state = TH_RUN;
	idle_thread.policy = SCHED_FIFO;
	idle_thread.prio = PRIO_IDLE;		/* lowest priority in the system */
	idle_thread.base_prio = PRIO_IDLE;
	/*
	 * NOTE(review): lock_count starts at 1 — presumably the scheduler
	 * lock is considered held during early boot; confirm against
	 * sched_lock()/sched_unlock().
	 */
	idle_thread.lock_count = 1;

	context_init(&idle_thread.context, (u_long)stack + KSTACK_SIZE);
	list_insert(&kern_task.threads, &idle_thread.task_link);
}
CVSweb