Annotation of prex/sys/kern/device.c, Revision 1.1.1.1
1.1 nbrk 1: /*-
2: * Copyright (c) 2005-2007, Kohsuke Ohtani
3: * All rights reserved.
4: *
5: * Redistribution and use in source and binary forms, with or without
6: * modification, are permitted provided that the following conditions
7: * are met:
8: * 1. Redistributions of source code must retain the above copyright
9: * notice, this list of conditions and the following disclaimer.
10: * 2. Redistributions in binary form must reproduce the above copyright
11: * notice, this list of conditions and the following disclaimer in the
12: * documentation and/or other materials provided with the distribution.
13: * 3. Neither the name of the author nor the names of any co-contributors
14: * may be used to endorse or promote products derived from this software
15: * without specific prior written permission.
16: *
17: * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18: * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19: * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20: * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21: * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22: * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23: * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24: * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25: * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26: * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27: * SUCH DAMAGE.
28: */
29:
30: /*
31: * device.c - device I/O support routines
32: */
33:
34: /*
35: * The device_* system calls are interfaces for user mode
36: * applications to access the specific device object which is
37: * handled by the related device driver. A device driver is an
38: * execution module different from a kernel on Prex. The routines
39: * in this file have the following role to handle the device I/O.
40: *
41: * - Manage the name space for device objects.
42: * - Forward user I/O requests to the drivers after checking
43: * parameters.
44: *
45: * The driver module(s) and kernel are dynamically linked
46: * at system boot.
47: */
48:
49: #include <kernel.h>
50: #include <irq.h>
51: #include <page.h>
52: #include <kmem.h>
53: #include <task.h>
54: #include <timer.h>
55: #include <sched.h>
56: #include <exception.h>
57: #include <vm.h>
58: #include <device.h>
59: #include <system.h>
60:
/*
 * Forward declarations for the static routines exported to the
 * driver module through the DKI table (driver_service[]) below.
 */
static device_t device_create(struct devio *, const char *, int);
static int device_destroy(device_t);
static int device_broadcast(int, int);

static void machine_bootinfo(struct boot_info **);
static void _machine_reset(void);
static void _machine_idle(void);
static int _task_capable(cap_t);
static void *_phys_to_virt(void *);
static void *_virt_to_phys(void *);
72:
#ifdef DEBUG
/* Debug build: bind the debug DKI entries to the real routines. */
#define _debug_attach debug_attach
#define _debug_dump debug_dump
#define _printf printf
#define _panic panic
#else
/*
 * Release build: debug entries become harmless no-ops, and a
 * driver panic simply resets the machine.
 */
#define _debug_attach nosys
#define _debug_dump nosys
#define _printf nosys
#define _panic machine_reset
static void nosys(void);
#endif

/* Generic type of one entry in the driver service table. */
typedef void (*dkifn_t)(void);

/* Cast a kernel routine to the generic DKI entry type. */
#define DKIENT(func) (dkifn_t)(func)
89:
/*
 * Driver-Kernel Interface (DKI)
 *
 * Jump table handed to the driver module by device_init().
 * Drivers invoke kernel services by table index, so the
 * numbering below is part of the kernel/driver ABI: never
 * reorder, insert into the middle of, or remove entries.
 */
static const dkifn_t driver_service[] = {
	/* 0 */ DKIENT(device_create),
	/* 1 */ DKIENT(device_destroy),
	/* 2 */ DKIENT(device_broadcast),
	/* 3 */ DKIENT(umem_copyin),
	/* 4 */ DKIENT(umem_copyout),
	/* 5 */ DKIENT(umem_strnlen),
	/* 6 */ DKIENT(kmem_alloc),
	/* 7 */ DKIENT(kmem_free),
	/* 8 */ DKIENT(kmem_map),
	/* 9 */ DKIENT(page_alloc),
	/* 10 */ DKIENT(page_free),
	/* 11 */ DKIENT(page_reserve),
	/* 12 */ DKIENT(irq_attach),
	/* 13 */ DKIENT(irq_detach),
	/* 14 */ DKIENT(irq_lock),
	/* 15 */ DKIENT(irq_unlock),
	/* 16 */ DKIENT(timer_callout),
	/* 17 */ DKIENT(timer_stop),
	/* 18 */ DKIENT(timer_delay),
	/* 19 */ DKIENT(timer_count),
	/* 20 */ DKIENT(timer_hook),
	/* 21 */ DKIENT(sched_lock),
	/* 22 */ DKIENT(sched_unlock),
	/* 23 */ DKIENT(sched_tsleep),
	/* 24 */ DKIENT(sched_wakeup),
	/* 25 */ DKIENT(sched_dpc),
	/* 26 */ DKIENT(_task_capable),
	/* 27 */ DKIENT(exception_post),
	/* 28 */ DKIENT(machine_bootinfo),
	/* 29 */ DKIENT(_machine_reset),
	/* 30 */ DKIENT(_machine_idle),
	/* 31 */ DKIENT(_phys_to_virt),
	/* 32 */ DKIENT(_virt_to_phys),
	/* 33 */ DKIENT(_debug_attach),
	/* 34 */ DKIENT(_debug_dump),
	/* 35 */ DKIENT(_printf),
	/* 36 */ DKIENT(_panic),
};

static struct list device_list;		/* list of the device objects */
134:
135: /*
136: * Increment reference count on an active device.
137: * It returns 0 on success, or -1 if the device is invalid.
138: */
139: static int
140: device_hold(device_t dev)
141: {
142: int err = -1;
143:
144: sched_lock();
145: if (device_valid(dev)) {
146: dev->refcnt++;
147: err = 0;
148: }
149: sched_unlock();
150: return err;
151: }
152:
153: /*
154: * Decrement the reference count on a device. If the
155: * reference count becomes zero, we can release the
156: * resource for the target device. Assumes the device
157: * is already validated by caller.
158: */
159: static void
160: device_release(device_t dev)
161: {
162:
163: sched_lock();
164: if (--dev->refcnt == 0) {
165: list_remove(&dev->link);
166: kmem_free(dev);
167: }
168: sched_unlock();
169: }
170:
171: /*
172: * Look up a device object by device name.
173: * Return device ID on success, or NULL on failure.
174: * This must be called with scheduler locked.
175: */
176: static device_t
177: device_lookup(const char *name)
178: {
179: list_t head, n;
180: device_t dev;
181:
182: if (name == NULL)
183: return NULL;
184:
185: head = &device_list;
186: for (n = list_first(head); n != head; n = list_next(n)) {
187: dev = list_entry(n, struct device, link);
188: if (!strncmp(dev->name, name, MAXDEVNAME))
189: return dev;
190: }
191: return NULL;
192: }
193:
194: /*
195: * device_create - create new device object.
196: *
197: * A device object is created by the device driver to provide
198: * I/O services to applications.
199: * Returns device ID on success, or 0 on failure.
200: */
201: static device_t
202: device_create(struct devio *io, const char *name, int flags)
203: {
204: device_t dev;
205: size_t len;
206:
207: ASSERT(irq_level == 0);
208:
209: len = strnlen(name, MAXDEVNAME);
210: if (len == 0 || len >= MAXDEVNAME) /* Invalid name? */
211: return 0;
212:
213: sched_lock();
214: if ((dev = device_lookup(name)) != NULL) {
215: /*
216: * Error - the device name is already used.
217: */
218: sched_unlock();
219: return 0;
220: }
221: if ((dev = kmem_alloc(sizeof(struct device))) == NULL) {
222: sched_unlock();
223: return 0;
224: }
225: strlcpy(dev->name, name, len + 1);
226: dev->devio = io;
227: dev->flags = flags;
228: dev->refcnt = 1;
229: dev->magic = DEVICE_MAGIC;
230: list_insert(&device_list, &dev->link);
231: sched_unlock();
232: return dev;
233: }
234:
235: /*
236: * Destroy a device object. If some other threads still
237: * refer the target device, the destroy operating will be
238: * pending until its reference count becomes 0.
239: */
240: static int
241: device_destroy(device_t dev)
242: {
243: int err = 0;
244:
245: ASSERT(irq_level == 0);
246:
247: sched_lock();
248: if (device_valid(dev))
249: device_release(dev);
250: else
251: err = ENODEV;
252: sched_unlock();
253: return err;
254: }
255:
256: /*
257: * device_open - open the specified device.
258: *
259: * Even if the target driver does not have an open
260: * routine, this function does not return an error. By
261: * using this mechanism, an application can check whether
262: * the specific device exists or not. The open mode
263: * should be handled by an each device driver if it is
264: * needed.
265: */
266: int
267: device_open(const char *name, int mode, device_t *devp)
268: {
269: char str[MAXDEVNAME];
270: device_t dev;
271: size_t len;
272: int err = 0;
273:
274: if (!task_capable(CAP_DEVIO))
275: return EPERM;
276:
277: if (umem_strnlen(name, MAXDEVNAME, &len))
278: return EFAULT;
279: if (len == 0)
280: return ENOENT;
281: if (len >= MAXDEVNAME)
282: return ENAMETOOLONG;
283:
284: if (umem_copyin(name, str, len + 1))
285: return EFAULT;
286:
287: sched_lock();
288: if ((dev = device_lookup(str)) == NULL) {
289: sched_unlock();
290: return ENXIO;
291: }
292: device_hold(dev);
293: sched_unlock();
294:
295: if (dev->devio->open != NULL)
296: err = (*dev->devio->open)(dev, mode);
297:
298: if (!err)
299: err = umem_copyout(&dev, devp, sizeof(dev));
300: device_release(dev);
301: return err;
302: }
303:
304: /*
305: * device_close - close a device.
306: *
307: * Even if the target driver does not have close routine,
308: * this function does not return any errors.
309: */
310: int
311: device_close(device_t dev)
312: {
313: int err = 0;
314:
315: if (!task_capable(CAP_DEVIO))
316: return EPERM;
317:
318: if (device_hold(dev))
319: return ENODEV;
320:
321: if (dev->devio->close != NULL)
322: err = (*dev->devio->close)(dev);
323:
324: device_release(dev);
325: return err;
326: }
327:
328: /*
329: * device_read - read from a device.
330: *
331: * Actual read count is set in "nbyte" as return.
332: * Note: The size of one block is device dependent.
333: */
334: int
335: device_read(device_t dev, void *buf, size_t *nbyte, int blkno)
336: {
337: size_t count;
338: int err;
339:
340: if (!task_capable(CAP_DEVIO))
341: return EPERM;
342:
343: if (device_hold(dev))
344: return ENODEV;
345:
346: if (dev->devio->read == NULL) {
347: device_release(dev);
348: return EBADF;
349: }
350: if (umem_copyin(nbyte, &count, sizeof(count))) {
351: device_release(dev);
352: return EFAULT;
353: }
354: err = (*dev->devio->read)(dev, buf, &count, blkno);
355: if (err == 0)
356: err = umem_copyout(&count, nbyte, sizeof(count));
357:
358: device_release(dev);
359: return err;
360: }
361:
362: /*
363: * device_write - write to a device.
364: *
365: * Actual write count is set in "nbyte" as return.
366: */
367: int
368: device_write(device_t dev, void *buf, size_t *nbyte, int blkno)
369: {
370: size_t count;
371: int err;
372:
373: if (!task_capable(CAP_DEVIO))
374: return EPERM;
375:
376: if (device_hold(dev))
377: return ENODEV;
378:
379: if (dev->devio->write == NULL) {
380: device_release(dev);
381: return EBADF;
382: }
383: if (umem_copyin(nbyte, &count, sizeof(count))) {
384: device_release(dev);
385: return EFAULT;
386: }
387: err = (*dev->devio->write)(dev, buf, &count, blkno);
388: if (err == 0)
389: err = umem_copyout(&count, nbyte, sizeof(count));
390:
391: device_release(dev);
392: return err;
393: }
394:
395: /*
396: * device_ioctl - I/O control request.
397: *
398: * A command and an argument are completely device dependent.
399: * The ioctl routine of each driver must validate the user buffer
400: * pointed by the arg value.
401: */
402: int
403: device_ioctl(device_t dev, u_long cmd, void *arg)
404: {
405: int err = EBADF;
406:
407: if (!task_capable(CAP_DEVIO))
408: return EPERM;
409:
410: if (device_hold(dev))
411: return ENODEV;
412:
413: if (dev->devio->ioctl != NULL)
414: err = (*dev->devio->ioctl)(dev, cmd, arg);
415:
416: device_release(dev);
417: return err;
418: }
419:
420: /*
421: * device_broadcast - broadcast an event to all device objects.
422: *
423: * If "force" argument is true, a kernel will continue event
424: * notification even if some driver returns error. In this case,
425: * this routine returns EIO error if at least one driver returns
426: * an error.
427: *
428: * If force argument is false, a kernel stops the event processing
429: * when at least one driver returns an error. In this case,
430: * device_broadcast will return the error code which is returned
431: * by the driver.
432: */
433: static int
434: device_broadcast(int event, int force)
435: {
436: device_t dev;
437: list_t head, n;
438: int err, ret = 0;
439:
440: sched_lock();
441: head = &device_list;
442: for (n = list_first(head); n != head; n = list_next(n)) {
443: dev = list_entry(n, struct device, link);
444: if (dev->devio->event != NULL) {
445: /*
446: * Call driver's event routine.
447: */
448: err = (*dev->devio->event)(event);
449: if (err) {
450: if (force)
451: ret = EIO;
452: else {
453: ret = err;
454: break;
455: }
456: }
457: }
458: }
459: sched_unlock();
460: return ret;
461: }
462:
463: /*
464: * Return device information (for devfs).
465: */
466: int
467: device_info(struct info_device *info)
468: {
469: u_long index, target = info->cookie;
470: device_t dev;
471: struct devio *io;
472: list_t head, n;
473: int err = ESRCH;
474:
475: sched_lock();
476: index = 0;
477: head = &device_list;
478: for (n = list_first(head); n != head; n = list_next(n)) {
479: dev = list_entry(n, struct device, link);
480: io = dev->devio;
481: if (index == target) {
482: info->id = dev;
483: info->flags = dev->flags;
484: strlcpy(info->name, dev->name, MAXDEVNAME);
485: err = 0;
486: break;
487: }
488: index++;
489: }
490: sched_unlock();
491: return err;
492: }
493:
#ifndef DEBUG
/*
 * nonexistent driver service.
 *
 * No-op stub bound to the debug DKI entries in release
 * builds, so a driver calling them is harmless.
 */
static void
nosys(void)
{
}
#endif
503:
/*
 * Check the capability of the current task.
 *
 * DKI wrapper so the driver module calls through the
 * service table rather than referencing task_capable()
 * directly.
 */
static int
_task_capable(cap_t cap)
{

	return task_capable(cap);
}
513:
/*
 * Return boot information.
 *
 * Stores a pointer to the kernel's boot_info structure in
 * *info for use by the driver module.
 */
static void
machine_bootinfo(struct boot_info **info)
{
	ASSERT(info != NULL);

	*info = boot_info;
}
524:
/* DKI wrapper: reset the machine via machine_reset(). */
static void
_machine_reset(void)
{

	machine_reset();
}
531:
/* DKI wrapper: put the machine into its idle state via machine_idle(). */
static void
_machine_idle(void)
{

	machine_idle();
}
538:
/*
 * Address translation (physical -> virtual).
 *
 * DKI wrapper around phys_to_virt() for the driver module.
 */
static void *
_phys_to_virt(void *phys)
{

	return phys_to_virt(phys);
}
548:
/*
 * Address translation (virtual -> physical).
 *
 * DKI wrapper around virt_to_phys() for the driver module.
 */
static void *
_virt_to_phys(void *virt)
{

	return virt_to_phys(virt);
}
558:
559: /*
560: * Initialize device driver module.
561: */
562: void
563: device_init(void)
564: {
565: struct module *mod;
566: void (*drv_entry)(const dkifn_t *);
567:
568: list_init(&device_list);
569:
570: mod = &boot_info->driver;
571: if (mod == NULL)
572: return;
573:
574: drv_entry = (void (*)(const dkifn_t *))mod->entry;
575: if (drv_entry == NULL)
576: return;
577: /*
578: * Call all initialization functions in drivers.
579: */
580: (*drv_entry)(driver_service);
581: }
CVSweb